code (string, 31 to 1.05M chars) | apis (list) | extract_api (string, 97 to 1.91M chars)
---|---|---|
# coding=utf-8
import tensorflow as tf
import numpy as np
import helpers
tf.reset_default_graph()
sess = tf.InteractiveSession()
PAD = 0
EOS = 1
vocab_size = 10
input_embedding_size = 20
encoder_hidden_units = 512
decoder_hidden_units = encoder_hidden_units * 2
# define inputs
encoder_input = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
# encoder_input_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_input_length')
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_target')
decoder_input = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_input')
# define embedding layer
embedding = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
encoder_inputs_embedded = tf.nn.embedding_lookup(embedding, encoder_input, name='encoder_inputs_embedded')
decoder_inputs_embedded = tf.nn.embedding_lookup(embedding, decoder_input, name='decoder_inputs_embedded')
encoder_cell_fw = tf.contrib.rnn.LSTMCell(encoder_hidden_units)
encoder_cell_bw = tf.contrib.rnn.LSTMCell(encoder_hidden_units)
((encoder_fw_outputs,
encoder_bw_outputs),
(encoder_fw_final_state,
encoder_bw_final_state)) = (tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell_fw,
cell_bw=encoder_cell_bw,
inputs=encoder_inputs_embedded,
dtype=tf.float32,
time_major=False))
encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
encoder_final_state_c = tf.concat((encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat((encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
encoder_final_state = tf.contrib.rnn.LSTMStateTuple(c=encoder_final_state_c,
h=encoder_final_state_h)
decoder_cell = tf.contrib.rnn.LSTMCell(decoder_hidden_units)
encoder_max_time, batch_size = tf.unstack(tf.shape(encoder_input))
print(encoder_max_time)
print(batch_size)
# decoder_length = encoder_input_length + 3
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(decoder_cell, encoder_outputs,
initial_state=encoder_final_state, dtype=tf.float32)
decoder_logits = tf.contrib.layers.linear(decoder_outputs, vocab_size)
decoder_prediction = tf.argmax(decoder_logits, 2)
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32),
logits=decoder_logits)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
batch_ = [[6], [3, 4], [9, 8, 7]]
batch_, batch_length_ = helpers.batch(batch_)
print('batch_encoded:\n' + str(batch_))
din_, dlen_ = helpers.batch(np.ones(shape=(3, 1), dtype=np.int32), max_sequence_length=4)
print('decoder inputs:\n' + str(din_))
pred_ = sess.run(decoder_prediction,
feed_dict={
encoder_input: batch_,
decoder_input: din_,})
print('decoder predictions:\n' + str(pred_))
|
[
"tensorflow.reset_default_graph",
"numpy.ones",
"tensorflow.contrib.layers.linear",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.InteractiveSession",
"tensorflow.one_hot",
"tensorflow.contrib.rnn.LSTMStateTuple",
"tensorflow.concat",
"tensorflow.placeholder",
"helpers.batch",
"tensorflow.nn.embedding_lookup",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.random_uniform",
"tensorflow.nn.dynamic_rnn",
"tensorflow.argmax",
"tensorflow.shape",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.train.AdamOptimizer"
] |
[((75, 99), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (97, 99), True, 'import tensorflow as tf\n'), ((107, 130), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (128, 130), True, 'import tensorflow as tf\n'), ((298, 371), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, None)', 'dtype': 'tf.int32', 'name': '"""encoder_inputs"""'}), "(shape=(None, None), dtype=tf.int32, name='encoder_inputs')\n", (312, 371), True, 'import tensorflow as tf\n'), ((490, 563), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, None)', 'dtype': 'tf.int32', 'name': '"""decoder_target"""'}), "(shape=(None, None), dtype=tf.int32, name='decoder_target')\n", (504, 563), True, 'import tensorflow as tf\n'), ((580, 652), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, None)', 'dtype': 'tf.int32', 'name': '"""decoder_input"""'}), "(shape=(None, None), dtype=tf.int32, name='decoder_input')\n", (594, 652), True, 'import tensorflow as tf\n'), ((814, 899), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'encoder_input'], {'name': '"""encoder_inputs_embedded"""'}), "(embedding, encoder_input, name='encoder_inputs_embedded'\n )\n", (836, 899), True, 'import tensorflow as tf\n'), ((921, 1006), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding', 'decoder_input'], {'name': '"""decoder_inputs_embedded"""'}), "(embedding, decoder_input, name='decoder_inputs_embedded'\n )\n", (943, 1006), True, 'import tensorflow as tf\n'), ((1021, 1066), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['encoder_hidden_units'], {}), '(encoder_hidden_units)\n', (1044, 1066), True, 'import tensorflow as tf\n'), ((1085, 1130), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['encoder_hidden_units'], {}), '(encoder_hidden_units)\n', (1108, 1130), True, 'import tensorflow as tf\n'), ((1233, 1391), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'encoder_cell_fw', 'cell_bw': 'encoder_cell_bw', 'inputs': 'encoder_inputs_embedded', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(cell_fw=encoder_cell_fw, cell_bw=\n encoder_cell_bw, inputs=encoder_inputs_embedded, dtype=tf.float32,\n time_major=False)\n', (1264, 1391), True, 'import tensorflow as tf\n'), ((1651, 1705), 'tensorflow.concat', 'tf.concat', (['(encoder_fw_outputs, encoder_bw_outputs)', '(2)'], {}), '((encoder_fw_outputs, encoder_bw_outputs), 2)\n', (1660, 1705), True, 'import tensorflow as tf\n'), ((1731, 1797), 'tensorflow.concat', 'tf.concat', (['(encoder_fw_final_state.c, encoder_bw_final_state.c)', '(1)'], {}), '((encoder_fw_final_state.c, encoder_bw_final_state.c), 1)\n', (1740, 1797), True, 'import tensorflow as tf\n'), ((1822, 1888), 'tensorflow.concat', 'tf.concat', (['(encoder_fw_final_state.h, encoder_bw_final_state.h)', '(1)'], {}), '((encoder_fw_final_state.h, encoder_bw_final_state.h), 1)\n', (1831, 1888), True, 'import tensorflow as tf\n'), ((1911, 1990), 'tensorflow.contrib.rnn.LSTMStateTuple', 'tf.contrib.rnn.LSTMStateTuple', ([], {'c': 'encoder_final_state_c', 'h': 'encoder_final_state_h'}), '(c=encoder_final_state_c, h=encoder_final_state_h)\n', (1940, 1990), True, 'import tensorflow as tf\n'), ((2060, 2105), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['decoder_hidden_units'], {}), '(decoder_hidden_units)\n', (2083, 2105), True, 'import tensorflow as tf\n'), ((2301, 2407), 'tensorflow.nn.dynamic_rnn', 
'tf.nn.dynamic_rnn', (['decoder_cell', 'encoder_outputs'], {'initial_state': 'encoder_final_state', 'dtype': 'tf.float32'}), '(decoder_cell, encoder_outputs, initial_state=\n encoder_final_state, dtype=tf.float32)\n', (2318, 2407), True, 'import tensorflow as tf\n'), ((2478, 2531), 'tensorflow.contrib.layers.linear', 'tf.contrib.layers.linear', (['decoder_outputs', 'vocab_size'], {}), '(decoder_outputs, vocab_size)\n', (2502, 2531), True, 'import tensorflow as tf\n'), ((2553, 2581), 'tensorflow.argmax', 'tf.argmax', (['decoder_logits', '(2)'], {}), '(decoder_logits, 2)\n', (2562, 2581), True, 'import tensorflow as tf\n'), ((2760, 2798), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['stepwise_cross_entropy'], {}), '(stepwise_cross_entropy)\n', (2774, 2798), True, 'import tensorflow as tf\n'), ((2955, 2976), 'helpers.batch', 'helpers.batch', (['batch_'], {}), '(batch_)\n', (2968, 2976), False, 'import helpers\n'), ((703, 767), 'tensorflow.random_uniform', 'tf.random_uniform', (['[vocab_size, input_embedding_size]', '(-1.0)', '(1.0)'], {}), '([vocab_size, input_embedding_size], -1.0, 1.0)\n', (720, 767), True, 'import tensorflow as tf\n'), ((2149, 2172), 'tensorflow.shape', 'tf.shape', (['encoder_input'], {}), '(encoder_input)\n', (2157, 2172), True, 'import tensorflow as tf\n'), ((2860, 2893), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2891, 2893), True, 'import tensorflow as tf\n'), ((3046, 3083), 'numpy.ones', 'np.ones', ([], {'shape': '(3, 1)', 'dtype': 'np.int32'}), '(shape=(3, 1), dtype=np.int32)\n', (3053, 3083), True, 'import numpy as np\n'), ((2660, 2723), 'tensorflow.one_hot', 'tf.one_hot', (['decoder_targets'], {'depth': 'vocab_size', 'dtype': 'tf.float32'}), '(decoder_targets, depth=vocab_size, dtype=tf.float32)\n', (2670, 2723), True, 'import tensorflow as tf\n'), ((2810, 2834), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '()\n', (2832, 2834), True, 'import tensorflow as tf\n')]
|
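The script above relies on a helpers module that is not shown. As a rough, NumPy-only sketch of what a padding utility like helpers.batch is assumed to do (the function name, the PAD value of 0, and the optional time-major layout are assumptions, not the actual module), the following reproduces the kind of dense integer batch the script feeds into encoder_input and decoder_input:

import numpy as np

def pad_batch(sequences, max_sequence_length=None, pad_value=0, time_major=False):
    # Pad variable-length integer sequences into one dense [batch, max_time]
    # array (or [max_time, batch] when time_major=True) and return the lengths.
    lengths = [len(seq) for seq in sequences]
    max_len = max_sequence_length if max_sequence_length is not None else max(lengths)
    batch = np.full((len(sequences), max_len), pad_value, dtype=np.int32)
    for i, seq in enumerate(sequences):
        batch[i, :len(seq)] = seq
    return (batch.T if time_major else batch), lengths

inputs_, lengths_ = pad_batch([[6], [3, 4], [9, 8, 7]])
print(inputs_)    # 3x3 int32 array, short rows padded with PAD (0)
print(lengths_)   # [1, 2, 3]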
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import open3d as o3d
from random_geometry_points.plane import Plane
def mean_map_entropy(pc_map, map_tips=None, KNN_RAD=1):
MIN_KNN = 5
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
metric = []
for i in range(points.shape[0]):
point = points[i]
[k, idx, _] = map_tree.search_radius_vector_3d(point, KNN_RAD)
if len(idx) > MIN_KNN:
cov = np.cov(points[idx].T)
det = np.linalg.det(2 * np.pi * np.e * cov)
if det > 0:
metric.append(0.5 * np.log(det))
return 0 if len(metric) == 0 else np.mean(metric)
def mean_plane_variance(pc_map, map_tips=None, KNN_RAD=1):
MIN_KNN = 5
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
metric = []
for i in range(points.shape[0]):
point = points[i]
[k, idx, _] = map_tree.search_radius_vector_3d(point, KNN_RAD)
if len(idx) > MIN_KNN:
cov = np.cov(points[idx].T)
eigenvalues = np.linalg.eig(cov)[0]
metric.append(min(eigenvalues))
return 0 if len(metric) == 0 else np.mean(metric)
def orth_mme(pc_map, map_tips, knn_rad=0.5):
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
orth_axes_stats = []
orth_list = map_tips['orth_list']
for k, chosen_points in enumerate(orth_list):
metric = []
plane_error = []
for i in range(chosen_points.shape[0]):
point = chosen_points[i]
[_, idx, _] = map_tree.search_radius_vector_3d(point, knn_rad)
if len(idx) > 5:
metric.append(mme(points[idx]))
avg_metric = np.mean(metric)
orth_axes_stats.append(avg_metric)
return np.sum(orth_axes_stats)
def orth_mpv(pc_map, map_tips, knn_rad=1):
map_tree = o3d.geometry.KDTreeFlann(pc_map)
points = np.asarray(pc_map.points)
orth_axes_stats = []
orth_list = map_tips['orth_list']
for k, chosen_points in enumerate(orth_list):
metric = []
plane_error = []
for i in range(chosen_points.shape[0]):
point = chosen_points[i]
[_, idx, _] = map_tree.search_radius_vector_3d(point, knn_rad)
if len(idx) > 5:
metric.append(mpv(points[idx]))
avg_metric = np.median(metric)
orth_axes_stats.append(avg_metric)
return np.sum(orth_axes_stats)
def mme(points):
cov = np.cov(points.T)
det = np.linalg.det(2 * np.pi * np.e * cov)
return 0.5 * np.log(det) if det > 0 else -math.inf
def mpv(points):
cov = np.cov(points.T)
eigenvalues = np.linalg.eig(cov)[0]
return min(eigenvalues)
def rpe(T_gt, T_est):
seq_len = len(T_gt)
err = 0
for i in range(seq_len):
for j in range(seq_len):
d_gt = T_gt[i] @ np.linalg.inv(T_gt[j])
d_est = T_est[i] @ np.linalg.inv(T_est[j])
dt = d_est[:3, 3] - d_gt[:3, 3]
err += np.linalg.norm(dt) ** 2
return err
|
[
"numpy.sum",
"numpy.log",
"numpy.median",
"numpy.asarray",
"open3d.geometry.KDTreeFlann",
"numpy.linalg.eig",
"numpy.mean",
"numpy.linalg.inv",
"numpy.linalg.norm",
"numpy.linalg.det",
"numpy.cov"
] |
[((797, 829), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['pc_map'], {}), '(pc_map)\n', (821, 829), True, 'import open3d as o3d\n'), ((843, 868), 'numpy.asarray', 'np.asarray', (['pc_map.points'], {}), '(pc_map.points)\n', (853, 868), True, 'import numpy as np\n'), ((1367, 1399), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['pc_map'], {}), '(pc_map)\n', (1391, 1399), True, 'import open3d as o3d\n'), ((1413, 1438), 'numpy.asarray', 'np.asarray', (['pc_map.points'], {}), '(pc_map.points)\n', (1423, 1438), True, 'import numpy as np\n'), ((1870, 1902), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['pc_map'], {}), '(pc_map)\n', (1894, 1902), True, 'import open3d as o3d\n'), ((1916, 1941), 'numpy.asarray', 'np.asarray', (['pc_map.points'], {}), '(pc_map.points)\n', (1926, 1941), True, 'import numpy as np\n'), ((2441, 2464), 'numpy.sum', 'np.sum', (['orth_axes_stats'], {}), '(orth_axes_stats)\n', (2447, 2464), True, 'import numpy as np\n'), ((2525, 2557), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['pc_map'], {}), '(pc_map)\n', (2549, 2557), True, 'import open3d as o3d\n'), ((2571, 2596), 'numpy.asarray', 'np.asarray', (['pc_map.points'], {}), '(pc_map.points)\n', (2581, 2596), True, 'import numpy as np\n'), ((3115, 3138), 'numpy.sum', 'np.sum', (['orth_axes_stats'], {}), '(orth_axes_stats)\n', (3121, 3138), True, 'import numpy as np\n'), ((3168, 3184), 'numpy.cov', 'np.cov', (['points.T'], {}), '(points.T)\n', (3174, 3184), True, 'import numpy as np\n'), ((3195, 3232), 'numpy.linalg.det', 'np.linalg.det', (['(2 * np.pi * np.e * cov)'], {}), '(2 * np.pi * np.e * cov)\n', (3208, 3232), True, 'import numpy as np\n'), ((3317, 3333), 'numpy.cov', 'np.cov', (['points.T'], {}), '(points.T)\n', (3323, 3333), True, 'import numpy as np\n'), ((1258, 1273), 'numpy.mean', 'np.mean', (['metric'], {}), '(metric)\n', (1265, 1273), True, 'import numpy as np\n'), ((1792, 1807), 'numpy.mean', 'np.mean', (['metric'], {}), '(metric)\n', (1799, 1807), True, 'import numpy as np\n'), ((2365, 2380), 'numpy.mean', 'np.mean', (['metric'], {}), '(metric)\n', (2372, 2380), True, 'import numpy as np\n'), ((3037, 3054), 'numpy.median', 'np.median', (['metric'], {}), '(metric)\n', (3046, 3054), True, 'import numpy as np\n'), ((3352, 3370), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (3365, 3370), True, 'import numpy as np\n'), ((1068, 1089), 'numpy.cov', 'np.cov', (['points[idx].T'], {}), '(points[idx].T)\n', (1074, 1089), True, 'import numpy as np\n'), ((1108, 1145), 'numpy.linalg.det', 'np.linalg.det', (['(2 * np.pi * np.e * cov)'], {}), '(2 * np.pi * np.e * cov)\n', (1121, 1145), True, 'import numpy as np\n'), ((1639, 1660), 'numpy.cov', 'np.cov', (['points[idx].T'], {}), '(points[idx].T)\n', (1645, 1660), True, 'import numpy as np\n'), ((3250, 3261), 'numpy.log', 'np.log', (['det'], {}), '(det)\n', (3256, 3261), True, 'import numpy as np\n'), ((1687, 1705), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (1700, 1705), True, 'import numpy as np\n'), ((3553, 3575), 'numpy.linalg.inv', 'np.linalg.inv', (['T_gt[j]'], {}), '(T_gt[j])\n', (3566, 3575), True, 'import numpy as np\n'), ((3607, 3630), 'numpy.linalg.inv', 'np.linalg.inv', (['T_est[j]'], {}), '(T_est[j])\n', (3620, 3630), True, 'import numpy as np\n'), ((3694, 3712), 'numpy.linalg.norm', 'np.linalg.norm', (['dt'], {}), '(dt)\n', (3708, 3712), True, 'import numpy as np\n'), ((1206, 1217), 'numpy.log', 'np.log', (['det'], {}), '(det)\n', (1212, 1217), True, 'import numpy 
as np\n')]
|
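A quick way to exercise mean_map_entropy and mean_plane_variance from the listing above is to score a synthetic point cloud. The noisy-plane construction below is purely illustrative (point count, extent, and noise level are arbitrary), and it assumes open3d and the two functions are available in the current session:

import numpy as np
import open3d as o3d

# Build a nearly planar point cloud: with little noise off the z=0 plane, the
# neighbourhood covariances are thin, so both metrics come out small.
rng = np.random.default_rng(0)
xy = rng.uniform(-1.0, 1.0, size=(2000, 2))
z = 0.01 * rng.standard_normal(2000)
pc = o3d.geometry.PointCloud()
pc.points = o3d.utility.Vector3dVector(np.column_stack([xy, z]))

print("mean map entropy:   ", mean_map_entropy(pc))
print("mean plane variance:", mean_plane_variance(pc))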
import torch
from torch.utils.data import TensorDataset
from torchvision import datasets, transforms
from base import BaseDataLoader, BaseDataLoader_2
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from .utils import readmts_uci_har, transform_labels
class MnistDataLoader(BaseDataLoader):
"""
MNIST data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
trsfm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
self.data_dir = data_dir
self.dataset = datasets.MNIST(self.data_dir, train=training, download=True, transform=trsfm)
super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class HumanActivityRecognitionDataLoader2(BaseDataLoader):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
x_train, y_train, x_test, y_test = readmts_uci_har(data_dir)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
y_train, y_test = transform_labels(y_train, y_test)
for i in range(len(x_train)):
for j in range(len(x_test)):
c = (x_train[i] == x_test[j])
d = c.all()
if d:
break
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y)
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, validation_split, test_split, num_workers, normalization=True)
class HumanActivityRecognitionDataLoader1(BaseDataLoader):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = data_dir
TRAIN = "/train/"
TEST = "/test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
"""
Given attribute (train or test) of feature, read all 9 features into an
np ndarray of shape [sample_sequence_idx, time_step, feature_num]
argument: X_signals_paths str attribute of feature: 'train' or 'test'
return: np ndarray, tensor of features
"""
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                        row.replace(b'  ', b' ').strip().split(b' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
"""
Read Y file of values to be predicted
            argument: y_path str attribute of Y: 'train' or 'test'
return: Y ndarray / tensor of the 6 one_hot labels of each sample
"""
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                    row.replace(b'  ', b' ').strip().split(b' ') for row in file
]],
dtype=np.int32
)
file.close()
            # Subtract 1 from each output class for friendly 0-based indexing
# return one_hot(y_ - 1)
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
Y = Y.reshape((len(Y), ))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).long()
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, validation_split, test_split, num_workers, normalization=True)
class HumanActivityRecognitionDataLoader3(BaseDataLoader_2):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = data_dir
TRAIN = "/train/"
TEST = "/test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
"""
Given attribute (train or test) of feature, read all 9 features into an
np ndarray of shape [sample_sequence_idx, time_step, feature_num]
argument: X_signals_paths str attribute of feature: 'train' or 'test'
return: np ndarray, tensor of features
"""
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                        row.replace(b'  ', b' ').strip().split(b' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
"""
Read Y file of values to be predicted
            argument: y_path str attribute of Y: 'train' or 'test'
return: Y ndarray / tensor of the 6 one_hot labels of each sample
"""
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                    row.replace(b'  ', b' ').strip().split(b' ') for row in file
]],
dtype=np.int32
)
file.close()
            # Subtract 1 from each output class for friendly 0-based indexing
# return one_hot(y_ - 1)
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
n_train = len(x_train)
n_test = len(x_test) // 2
n_val = n_test
idx_full = np.arange(len(X))
test_idx = idx_full[-n_test:]
valid_idx = idx_full[-(n_test+n_val):-n_test]
train_idx = idx_full[:n_train]
Y = Y.reshape((len(Y), ))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).long()
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, train_idx, valid_idx, test_idx, num_workers, normalization=True)
class HumanActivityRecognitionDataLoader(BaseDataLoader_2):
"""
HumanActivityRecognition data loading demo using BaseDataLoader
"""
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, test_split=0.0, num_workers=1,
training=True):
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATASET_PATH = data_dir
TRAIN = "/train/"
TEST = "/test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
"""
Given attribute (train or test) of feature, read all 9 features into an
np ndarray of shape [sample_sequence_idx, time_step, feature_num]
argument: X_signals_paths str attribute of feature: 'train' or 'test'
return: np ndarray, tensor of features
"""
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
                        row.replace(b'  ', b' ').strip().split(b' ') for row in file
]]
)
file.close()
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
x_train = load_X(X_train_signals_paths)
x_test = load_X(X_test_signals_paths)
x_train = x_train.swapaxes(1, 2)
x_test = x_test.swapaxes(1, 2)
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
"""
Read Y file of values to be predicted
            argument: y_path str attribute of Y: 'train' or 'test'
return: Y ndarray / tensor of the 6 one_hot labels of each sample
"""
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
                    row.replace(b'  ', b' ').strip().split(b' ') for row in file
]],
dtype=np.int32
)
file.close()
            # Subtract 1 from each output class for friendly 0-based indexing
# return one_hot(y_ - 1)
return y_ - 1
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
y_train = load_y(y_train_path)
y_test = load_y(y_test_path)
X = np.concatenate((x_train, x_test))
Y = np.concatenate((y_train, y_test))
n_train = len(x_train)
n_test = len(x_test)
n_val = n_test
idx_full = np.arange(len(X))
test_idx = idx_full[-n_test:]
train_idx = idx_full[:n_train]
np.random.seed(123)
np.random.shuffle(train_idx)
valid_idx = train_idx[-n_test//2:]
train_idx = train_idx[:-n_test//2]
Y = Y.reshape((len(Y), ))
X = torch.from_numpy(X).float()
Y = torch.from_numpy(Y).long()
dataset = TensorDataset(X, Y)
super().__init__(dataset, batch_size, shuffle, train_idx, valid_idx, test_idx, num_workers, normalization=True)
|
[
"numpy.random.seed",
"numpy.random.shuffle",
"torchvision.transforms.ToTensor",
"numpy.array",
"torch.utils.data.TensorDataset",
"torchvision.transforms.Normalize",
"torchvision.datasets.MNIST",
"numpy.concatenate",
"torch.from_numpy"
] |
[((694, 771), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['self.data_dir'], {'train': 'training', 'download': '(True)', 'transform': 'trsfm'}), '(self.data_dir, train=training, download=True, transform=trsfm)\n', (708, 771), False, 'from torchvision import datasets, transforms\n'), ((1583, 1616), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (1597, 1616), True, 'import numpy as np\n'), ((1629, 1662), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (1643, 1662), True, 'import numpy as np\n'), ((1716, 1735), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (1732, 1735), False, 'import torch\n'), ((1755, 1774), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X', 'Y'], {}), '(X, Y)\n', (1768, 1774), False, 'from torch.utils.data import TensorDataset\n'), ((5289, 5322), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (5303, 5322), True, 'import numpy as np\n'), ((5335, 5368), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (5349, 5368), True, 'import numpy as np\n'), ((5503, 5522), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X', 'Y'], {}), '(X, Y)\n', (5516, 5522), False, 'from torch.utils.data import TensorDataset\n'), ((9038, 9071), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (9052, 9071), True, 'import numpy as np\n'), ((9084, 9117), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (9098, 9117), True, 'import numpy as np\n'), ((9510, 9529), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X', 'Y'], {}), '(X, Y)\n', (9523, 9529), False, 'from torch.utils.data import TensorDataset\n'), ((13047, 13080), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (13061, 13080), True, 'import numpy as np\n'), ((13093, 13126), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (13107, 13126), True, 'import numpy as np\n'), ((13335, 13354), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (13349, 13354), True, 'import numpy as np\n'), ((13363, 13391), 'numpy.random.shuffle', 'np.random.shuffle', (['train_idx'], {}), '(train_idx)\n', (13380, 13391), True, 'import numpy as np\n'), ((13613, 13632), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X', 'Y'], {}), '(X, Y)\n', (13626, 13632), False, 'from torch.utils.data import TensorDataset\n'), ((549, 570), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (568, 570), False, 'from torchvision import datasets, transforms\n'), ((584, 626), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (604, 626), False, 'from torchvision import datasets, transforms\n'), ((1676, 1695), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (1692, 1695), False, 'import torch\n'), ((3747, 3766), 'numpy.array', 'np.array', (['X_signals'], {}), '(X_signals)\n', (3755, 3766), True, 'import numpy as np\n'), ((5417, 5436), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (5433, 5436), False, 'import torch\n'), ((5457, 5476), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (5473, 5476), False, 'import torch\n'), ((7496, 7515), 'numpy.array', 'np.array', (['X_signals'], {}), '(X_signals)\n', (7504, 7515), True, 'import numpy as np\n'), 
((9424, 9443), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (9440, 9443), False, 'import torch\n'), ((9464, 9483), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (9480, 9483), False, 'import torch\n'), ((11505, 11524), 'numpy.array', 'np.array', (['X_signals'], {}), '(X_signals)\n', (11513, 11524), True, 'import numpy as np\n'), ((13527, 13546), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (13543, 13546), False, 'import torch\n'), ((13567, 13586), 'torch.from_numpy', 'torch.from_numpy', (['Y'], {}), '(Y)\n', (13583, 13586), False, 'import torch\n'), ((3510, 3543), 'numpy.array', 'np.array', (['serie'], {'dtype': 'np.float32'}), '(serie, dtype=np.float32)\n', (3518, 3543), True, 'import numpy as np\n'), ((7259, 7292), 'numpy.array', 'np.array', (['serie'], {'dtype': 'np.float32'}), '(serie, dtype=np.float32)\n', (7267, 7292), True, 'import numpy as np\n'), ((11268, 11301), 'numpy.array', 'np.array', (['serie'], {'dtype': 'np.float32'}), '(serie, dtype=np.float32)\n', (11276, 11301), True, 'import numpy as np\n')]
|
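The three HumanActivityRecognition loaders above differ mainly in how they carve train, validation, and test indices out of the concatenated arrays. The seeded split used by the last loader can be reproduced in isolation as below; the helper name is made up for illustration, and the sizes passed in are the standard UCI HAR train and test record counts:

import numpy as np

def har_split_indices(n_train, n_test, seed=123):
    # Mirror the index bookkeeping of HumanActivityRecognitionDataLoader:
    # test samples stay at the tail of the concatenated array, and the
    # validation block is carved out of a seeded shuffle of the train block.
    idx_full = np.arange(n_train + n_test)
    test_idx = idx_full[-n_test:]
    train_idx = idx_full[:n_train]
    np.random.seed(seed)
    np.random.shuffle(train_idx)
    valid_idx = train_idx[-n_test // 2:]
    train_idx = train_idx[:-n_test // 2]
    return train_idx, valid_idx, test_idx

train_idx, valid_idx, test_idx = har_split_indices(n_train=7352, n_test=2947)
print(len(train_idx), len(valid_idx), len(test_idx))  # 5878 1474 2947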
#-------------------------------------------------------------------------------
#
# Aggregated Magnetic Model
#
# Author: <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2018 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from collections import namedtuple
from numpy import inf, zeros, asarray
from .._pymm import (
GEOCENTRIC_SPHERICAL, GEODETIC_ABOVE_WGS84, GEOCENTRIC_CARTESIAN,
convert, vrot_sph2geod, vrot_sph2cart,
)
from .model import GeomagneticModel
Component = namedtuple("_Component", ["model", "scale", "parameters"])
def _validity_overlap(validity1, validity2):
start1, end1 = validity1
start2, end2 = validity2
start = max(start1, start2)
end = min(end1, end2)
if end < start:
return -inf, -inf
return start, end
class ComposedGeomagneticModel(GeomagneticModel):
""" Composed Earth magnetic field model aggregating multiple models
into one.
"""
def __init__(self, *models):
self._parameters = set()
self._components = []
self._validity = (-inf, inf)
for model in models:
self.push(model)
def push(self, model, scale=1.0, **parameters):
""" Add model. """
self._parameters.update(model.parameters)
self._validity = _validity_overlap(self.validity, model.validity)
self._components.append(Component(model, scale, parameters))
@property
def validity(self):
return self._validity
@property
def parameters(self):
""" required parameters. """
return tuple(self._parameters)
def eval(self, time, location,
input_coordinate_system=GEOCENTRIC_SPHERICAL,
output_coordinate_system=GEOCENTRIC_SPHERICAL,
**options):
# convert input coordinates to spherical coordinates
coord_sph = convert(
location, input_coordinate_system, GEOCENTRIC_SPHERICAL
)
# get output dimension
time = asarray(time)
location = asarray(location)
if time.ndim > (location.ndim - 1):
shape = time.shape
else:
shape = location.shape[:-1]
result = zeros(shape + (3,))
final_scale = options.pop("scale", None)
for model, scale, params in self._components:
args = options.copy()
args.update(params)
result += model.eval(time, coord_sph, scale=scale, **args)
# rotate result to the desired coordinate frame
if output_coordinate_system == GEODETIC_ABOVE_WGS84:
if input_coordinate_system == GEODETIC_ABOVE_WGS84:
coord_out = location
else:
coord_out = convert(
coord_sph, GEOCENTRIC_SPHERICAL, GEODETIC_ABOVE_WGS84
)
result = vrot_sph2geod(result, coord_out[..., 0] - coord_sph[..., 0])
elif output_coordinate_system == GEOCENTRIC_CARTESIAN:
result = vrot_sph2cart(result, coord_sph[..., 0], coord_sph[..., 1])
# apply the final scale
if final_scale is not None:
result *= final_scale
return result
|
[
"numpy.asarray",
"numpy.zeros",
"collections.namedtuple"
] |
[((1678, 1736), 'collections.namedtuple', 'namedtuple', (['"""_Component"""', "['model', 'scale', 'parameters']"], {}), "('_Component', ['model', 'scale', 'parameters'])\n", (1688, 1736), False, 'from collections import namedtuple\n'), ((3161, 3174), 'numpy.asarray', 'asarray', (['time'], {}), '(time)\n', (3168, 3174), False, 'from numpy import inf, zeros, asarray\n'), ((3194, 3211), 'numpy.asarray', 'asarray', (['location'], {}), '(location)\n', (3201, 3211), False, 'from numpy import inf, zeros, asarray\n'), ((3359, 3378), 'numpy.zeros', 'zeros', (['(shape + (3,))'], {}), '(shape + (3,))\n', (3364, 3378), False, 'from numpy import inf, zeros, asarray\n')]
|
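The aggregation logic of ComposedGeomagneticModel, intersecting the validity windows of its components and summing their scaled outputs, does not depend on the package's coordinate-conversion machinery, so it can be sketched with stand-in models. Everything below (ToyComposedModel, ConstantField, and the numbers) is illustrative only and not part of the package:

from collections import namedtuple
import numpy as np

Component = namedtuple("Component", ["model", "scale"])

def validity_overlap(validity1, validity2):
    # Same rule as _validity_overlap above: an empty intersection collapses
    # to (-inf, -inf).
    start = max(validity1[0], validity2[0])
    end = min(validity1[1], validity2[1])
    return (start, end) if end >= start else (-np.inf, -np.inf)

class ConstantField:
    # Stand-in component: a constant field vector valid over a fixed window.
    def __init__(self, vector, validity):
        self.vector = np.asarray(vector, dtype=float)
        self.validity = validity
    def eval(self, time, location):
        return self.vector

class ToyComposedModel:
    def __init__(self):
        self.components = []
        self.validity = (-np.inf, np.inf)
    def push(self, model, scale=1.0):
        self.validity = validity_overlap(self.validity, model.validity)
        self.components.append(Component(model, scale))
    def eval(self, time, location):
        result = np.zeros(3)
        for model, scale in self.components:
            result = result + scale * model.eval(time, location)
        return result

composed = ToyComposedModel()
composed.push(ConstantField([1.0, 0.0, 0.0], (2000.0, 2020.0)))
composed.push(ConstantField([0.0, 2.0, 0.0], (2010.0, 2030.0)), scale=0.5)
print(composed.validity)                       # (2010.0, 2020.0)
print(composed.eval(2015.0, [0.0, 0.0, 0.0]))  # [1. 1. 0.]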
import operator
import sqlalchemy
import pandas as pd
import numpy as np
from math import ceil
DEFAULT_VARCHAR_LENGTH=100
def get_detected_column_types(df):
""" Get data type of each columns ('DATETIME', 'NUMERIC' or 'STRING')
Parameters:
df (df): pandas dataframe
Returns
        df (df): dataframe with all datatypes converted
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
for c in df.columns:
# Convert column to string
col_data = df[c].map(str)
col_data = col_data.replace("NaT", None)
col_data = col_data.replace("NaN", None)
# Check NULL column
if(df[c].isnull().values.all()):
continue
# Check DATETIME
try:
# Check if it's able to convert column to datetime
# if column is datetime, then skip to convert
if 'datetime' in str(col_data.dtype):
continue
df[c] = pd.to_datetime(col_data)
continue
except ValueError:
pass
# Check NUMERIC
try:
# Drop NaN rows
series = df[c].dropna()
# if column_name is int or float, then skip to convert
if 'int' in str(col_data.dtype) or 'float' in str(col_data.dtype):
continue
# Check if it can be converted to numeric
df[c] = pd.to_numeric(series)
except ValueError:
pass
return df
def get_max_length_columns(df):
""" find maximum length of value in each column and ceil it
Parameters:
df (df): dataframe
Returns
arr_max_len_columns (array): array of length for each column
arr_max_decimal (array): array of maximum decimal for float, double, and decimal datatype, otherwise its value is zero
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
measurer = np.vectorize(len)
arr_max_len_columns = []
arr_max_decimal = []
for i, x in enumerate(measurer(df.values.astype(str)).max(axis=0)):
if 'float' in str(df.iloc[:, i].dtype):
col_data = df.iloc[:, i].map(str).str.extract('\.(.*)')
max_decimal = measurer(col_data.values.astype(str)).max(axis=0)[0]
arr_max_decimal.append(max_decimal)
else:
arr_max_decimal.append(0)
arr_max_len_columns.append(ceil(x / 10) * 10)
return arr_max_len_columns, arr_max_decimal
def convert_df_datatype_to_sqlalchemy_datatype(df):
""" convert dataframe's data type into SQLAlchemy's data type
Parameters:
df (df): dataframe
Returns:
dtype_dict (dict): dict of data type of each column in SQLAlchemy standard
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
arr_max_len_columns, arr_max_decimal = get_max_length_columns(df)
dtype_dict = {}
for i, col_name in enumerate(df.columns):
if(df[col_name].isnull().values.all()):
dtype_dict[col_name] = sqlalchemy.types.VARCHAR(DEFAULT_VARCHAR_LENGTH)
elif 'bool' in str(df[col_name].dtype):
# Compatible with SQL-Server and MySQL, since MySQL doesn't have BOOLEAN.
dtype_dict[col_name] = sqlalchemy.types.INTEGER()
elif 'int' in str(df[col_name].dtype):
dtype_dict[col_name] = sqlalchemy.types.INTEGER()
elif 'float' in str(df[col_name].dtype):
if df[col_name].dropna().apply(float.is_integer).all():
dtype_dict[col_name] = sqlalchemy.types.INTEGER()
else:
dtype_dict[col_name] = sqlalchemy.types.DECIMAL(precision=arr_max_len_columns[i], scale=arr_max_decimal[i])
elif 'datetime' in str(df[col_name].dtype):
dtype_dict[col_name] = sqlalchemy.types.DateTime()
elif 'object' in str(df[col_name].dtype):
            # check the limit of varchar; if the length exceeds it, then use TEXT
if arr_max_len_columns[i] > 1000:
dtype_dict[col_name] = sqlalchemy.types.Text()
else:
dtype_dict[col_name] = sqlalchemy.types.VARCHAR(length=arr_max_len_columns[i])
else:
dtype_dict[col_name] = sqlalchemy.types.VARCHAR(length=arr_max_len_columns[i])
return dtype_dict
def get_datatype_each_col(df):
""" main function to call sub-function in order to find data type and data length for each column
Parameters:
df (df): dataframe
Returns:
dtype_dict (dict): dict of data type of each column in SQLAlchemy standard (dict)
"""
assert isinstance(df, pd.DataFrame), 'Parameter must be DataFrame'
df = get_detected_column_types(df)
dtype_dict = convert_df_datatype_to_sqlalchemy_datatype(df)
del df
return dtype_dict
|
[
"numpy.vectorize",
"sqlalchemy.types.DateTime",
"math.ceil",
"sqlalchemy.types.INTEGER",
"sqlalchemy.types.VARCHAR",
"pandas.to_datetime",
"sqlalchemy.types.Text",
"sqlalchemy.types.DECIMAL",
"pandas.to_numeric"
] |
[((1950, 1967), 'numpy.vectorize', 'np.vectorize', (['len'], {}), '(len)\n', (1962, 1967), True, 'import numpy as np\n'), ((983, 1007), 'pandas.to_datetime', 'pd.to_datetime', (['col_data'], {}), '(col_data)\n', (997, 1007), True, 'import pandas as pd\n'), ((1422, 1443), 'pandas.to_numeric', 'pd.to_numeric', (['series'], {}), '(series)\n', (1435, 1443), True, 'import pandas as pd\n'), ((3058, 3106), 'sqlalchemy.types.VARCHAR', 'sqlalchemy.types.VARCHAR', (['DEFAULT_VARCHAR_LENGTH'], {}), '(DEFAULT_VARCHAR_LENGTH)\n', (3082, 3106), False, 'import sqlalchemy\n'), ((2427, 2439), 'math.ceil', 'ceil', (['(x / 10)'], {}), '(x / 10)\n', (2431, 2439), False, 'from math import ceil\n'), ((3276, 3302), 'sqlalchemy.types.INTEGER', 'sqlalchemy.types.INTEGER', ([], {}), '()\n', (3300, 3302), False, 'import sqlalchemy\n'), ((3385, 3411), 'sqlalchemy.types.INTEGER', 'sqlalchemy.types.INTEGER', ([], {}), '()\n', (3409, 3411), False, 'import sqlalchemy\n'), ((3568, 3594), 'sqlalchemy.types.INTEGER', 'sqlalchemy.types.INTEGER', ([], {}), '()\n', (3592, 3594), False, 'import sqlalchemy\n'), ((3652, 3741), 'sqlalchemy.types.DECIMAL', 'sqlalchemy.types.DECIMAL', ([], {'precision': 'arr_max_len_columns[i]', 'scale': 'arr_max_decimal[i]'}), '(precision=arr_max_len_columns[i], scale=\n arr_max_decimal[i])\n', (3676, 3741), False, 'import sqlalchemy\n'), ((3824, 3851), 'sqlalchemy.types.DateTime', 'sqlalchemy.types.DateTime', ([], {}), '()\n', (3849, 3851), False, 'import sqlalchemy\n'), ((4251, 4306), 'sqlalchemy.types.VARCHAR', 'sqlalchemy.types.VARCHAR', ([], {'length': 'arr_max_len_columns[i]'}), '(length=arr_max_len_columns[i])\n', (4275, 4306), False, 'import sqlalchemy\n'), ((4065, 4088), 'sqlalchemy.types.Text', 'sqlalchemy.types.Text', ([], {}), '()\n', (4086, 4088), False, 'import sqlalchemy\n'), ((4146, 4201), 'sqlalchemy.types.VARCHAR', 'sqlalchemy.types.VARCHAR', ([], {'length': 'arr_max_len_columns[i]'}), '(length=arr_max_len_columns[i])\n', (4170, 4201), False, 'import sqlalchemy\n')]
|
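A small usage sketch of the helpers above. To keep the outcome deterministic, the frame below already has the pandas dtypes that get_detected_column_types would normally produce, so only the length scan and the SQLAlchemy mapping are exercised; the column names and values are made up, and the two functions are assumed to be importable in the current session:

import pandas as pd

df = pd.DataFrame({
    "name": ["alice", "bob", "carol"],
    "score": [3.14159, 2.5, 10.0],
    "visits": [3, 7, 1],
    "joined": pd.to_datetime(["2021-01-01", "2021-02-15", "2021-03-30"]),
})

# Maximum (rounded-up) value lengths and decimal places per column.
print(get_max_length_columns(df))

# SQLAlchemy types keyed by column name, suitable for DataFrame.to_sql(..., dtype=...):
# VARCHAR for 'name', DECIMAL for 'score', INTEGER for 'visits', DateTime for 'joined'.
print(convert_df_datatype_to_sqlalchemy_datatype(df))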
from typing import Optional, Callable
import torch
import numpy as np
from PIL.Image import Image
from ..transforms import TargetHandler
class NormalizeBothInputAndTarget:
transform: Callable[[Image], Image]
target_handler: TargetHandler
def __init__(
self,
transform: Callable[[Image], Image],
target_handler: Optional[TargetHandler] = None,
):
self.target_handler = target_handler
self.transform = transform
def forward(self, x: Image) -> torch.Tensor:
image_data = np.array(x)
std = image_data.std()
mean = image_data.mean()
erased_img = self.transform(x)
erased_img_data = torch.tensor(np.array(erased_img), dtype=torch.float32)
normed_img_data = (erased_img_data - mean) / std
target = self.target_handler.get()
if target is None:
            raise RuntimeError("target has not been generated.")
if not isinstance(target, Image):
            raise TypeError("the generated target must be a PIL.Image")
target_data = torch.tensor(np.array(target), dtype=torch.float32)
self.target_handler.set((target_data - mean) / std)
return normed_img_data
|
[
"numpy.array"
] |
[((554, 565), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (562, 565), True, 'import numpy as np\n'), ((708, 728), 'numpy.array', 'np.array', (['erased_img'], {}), '(erased_img)\n', (716, 728), True, 'import numpy as np\n'), ((1088, 1104), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (1096, 1104), True, 'import numpy as np\n')]
|
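The TargetHandler interface is not shown above; the stub below fakes only the get/set pair that forward relies on, and the stand-in transform simply registers a copy of the input image as the target. The snippet assumes the NormalizeBothInputAndTarget class above is available in the current session and is meant to illustrate the normalization flow rather than real usage of the package:

import numpy as np
import torch
import PIL.Image

class StubTargetHandler:
    # Minimal stand-in exposing only the methods forward() calls.
    def __init__(self):
        self._target = None
    def set(self, value):
        self._target = value
    def get(self):
        return self._target

handler = StubTargetHandler()

def fake_transform(img):
    # Pretend augmentation: leave the image untouched and register a copy of
    # it as the target to be normalized alongside the input.
    handler.set(img.copy())
    return img

normalizer = NormalizeBothInputAndTarget(transform=fake_transform, target_handler=handler)

img = PIL.Image.fromarray(np.random.randint(0, 256, (8, 8), dtype=np.uint8))
normed = normalizer.forward(img)
print(normed.mean().item(), normed.std().item())   # roughly 0 and 1
print(handler.get().mean().item())                  # target normalized with the same stats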
import io  # used by load_single_img when compress=True
import os
import sys
import lmdb
import json
import torch
import pickle
import random
import msgpack
import numpy as np
from collections import defaultdict  # used by load_img_db when conf_th == -1
# from transformers import AutoTokenizer
from lz4.frame import compress, decompress
from os.path import exists, abspath, dirname
from sklearn.metrics.pairwise import cosine_similarity
from PIL import Image, ImageFont, ImageDraw, ImageEnhance
import pprint
pp = pprint.PrettyPrinter()
msgpack_numpy.patch()
origin_img_dir = '/data/share/UNITER/origin_imgs/flickr30k/flickr30k-images/'
def load_txt_db(db_dir):
# db loading
env_in = lmdb.open(db_dir, readonly=True, create=False)
txn_in = env_in.begin()
db = {}
for key, value in txn_in.cursor():
db[key] = value
print('db length:', len(db)) # db length: 443757
env_in.close()
return db
def load_img_db(img_dir, conf_th=0.2, max_bb=100, min_bb=10, num_bb=36, compress=False):
if conf_th == -1:
db_name = f'feat_numbb{num_bb}'
name2nbb = defaultdict(lambda: num_bb)
else:
db_name = f'feat_th{conf_th}_max{max_bb}_min{min_bb}'
nbb = f'nbb_th{conf_th}_max{max_bb}_min{min_bb}.json'
if not os.path.exists(f'{img_dir}/{nbb}'):
# nbb is not pre-computed
name2nbb = None
else:
name2nbb = json.load(open(f'{img_dir}/{nbb}'))
# => {'coco_test2015_000000043222.npz': 57, ...}
if compress:
db_name += '_compressed'
if name2nbb is None:
if compress:
db_name = 'all_compressed'
else:
db_name = 'all'
# db loading
env = lmdb.open(f'{img_dir}/{db_name}', readonly=True, create=False)
txn = env.begin(buffers=True)
return name2nbb, txn
def load_single_img(txn, file_name, compress=False):
# load single image with its file_name
dump = txn.get(file_name.encode('utf-8'))
if compress:
with io.BytesIO(dump) as reader:
img_dump = np.load(reader, allow_pickle=True)
img_dump = {'features': img_dump['features'],
'norm_bb': img_dump['norm_bb']}
else:
img_dump = msgpack.loads(dump, raw=False)
return img_dump
def get_concat_h(im1, im2):
dst = Image.new('RGB', (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst
def draw_bounding_box(img_name, img_bb, outline=(0, 0, 0, 255)):
source_img = Image.open(origin_img_dir + img_name).convert("RGB")
width, height = source_img.size
draw = ImageDraw.Draw(source_img, 'RGBA')
p1 = (width*img_bb[0], height*img_bb[1])
p2 = (width*img_bb[2], height*img_bb[3])
draw.rectangle((p1, p2), outline=outline, width=2)
# draw.text((img_bb[0], img_bb[1]), "something123", font=ImageFont.truetype("font_path123"))
return source_img
# source_img.save('bb_' + img_name, "JPEG")
def crop_bb(img_name, img_bbs):
source_img = Image.open(origin_img_dir + img_name).convert("RGB")
width, height = source_img.size
for i in range(img_bbs.shape[0]):
p1 = (width*img_bbs[i][0], height*img_bbs[i][1])
p2 = (width*img_bbs[i][2], height*img_bbs[i][3])
crop = source_img.crop((p1[0], p1[1], p2[0], p2[1]))
crop.save('crop_%d.jpg'%(i), 'JPEG')
def main():
NUM_LABELS = 1600
# tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
id2tok = json.load(open('id2tok.json'))
labels_ids = json.load(open('object_labels_ids.json'))
def convert_ids_to_tokens(i):
if isinstance(i, int):
return id2tok[str(i)]
else:
i = list(i)
return [id2tok[str(ii)] for ii in i]
def get_label_str(i):
if isinstance(i, int):
return convert_ids_to_tokens(labels_ids[i])
else:
i = list(i)
return [convert_ids_to_tokens(labels_ids[ii]) if ii > 0 else '[BACKGROUND]' for ii in i]
def get_hard_labels(soft_labels, top_k=3):
if len(soft_labels.shape) < 2:
soft_labels = soft_labels.reshape(1, -1)
sorted_labels = soft_labels.argsort(axis=-1)[:, ::-1][:, :top_k]
sorted_labels = sorted_labels - 1
res = []
for l in sorted_labels:
res.append(get_label_str(l))
return res
checkpoint = torch.load(
"/data/private/cc/experiment/MMP/pretrained_ckpts/pretrained/uniter-base.pt")
emb_weight = checkpoint['uniter.embeddings.word_embeddings.weight']
txt_db_old = load_txt_db('/data/share/UNITER/ve/txt_db/ve_train.db')
txt_db_new = load_txt_db(
'/data/share/UNITER/ve/da/GloVe/seed2/txt_db/ve_train.db')
name2nbb, img_db_txn = load_img_db('/data/share/UNITER/ve/img_db/flickr30k')
def display(k):
d1 = msgpack.loads(decompress(txt_db_old[k.split(b'_')[1]]), raw=False)
d2 = msgpack.loads(decompress(txt_db_new[k]), raw=False)
# input_1 = tokenizer.convet_ids_to_tokens(d1['input_ids'])
# input_2 = tokenizer.convert_ids_to_tokens(d2['input_ids'])
input_1 = convert_ids_to_tokens(d1['input_ids'])
input_2 = convert_ids_to_tokens(d2['input_ids'])
input_3 = convert_ids_to_tokens(d2['mix_input_ids'])
hard_labels = get_hard_labels(d2['mix_soft_labels'])
# img1 = load_single_img(img_db_txn, d1['img_fname'])
img = load_single_img(img_db_txn, d2['img_fname'])
origin_img_name = str(k).split('_')[1].split('#')[0]
im1 = draw_bounding_box(origin_img_name, img['norm_bb'][d2['mix_index']])
im2 = draw_bounding_box(d2['mix_img_flk_id'], d2['mix_bb'], (200, 0, 0, 255))
cat_im = get_concat_h(im1, im2)
cat_im.save('bb_' + origin_img_name + '_' + d2['mix_img_flk_id'], 'JPEG')
# crop_bb(origin_img_name, img['norm_bb'])
return input_1, input_2, input_3, hard_labels
# print(list(txt_db_new.keys())[:10])
pp.pprint(display(list(txt_db_new.keys())[3]))
# pp.pprint(display(list(txt_db_new.keys())[1]))
# pp.pprint(display(list(txt_db_new.keys())[2]))
# import ipdb
# ipdb.set_trace()
if __name__ == '__main__':
main()
|
[
"msgpack_numpy.patch",
"PIL.Image.new",
"msgpack.loads",
"numpy.load",
"torch.load",
"os.path.exists",
"PIL.Image.open",
"pprint.PrettyPrinter",
"lmdb.open",
"PIL.ImageDraw.Draw",
"lz4.frame.decompress"
] |
[((403, 425), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (423, 425), False, 'import pprint\n'), ((427, 448), 'msgpack_numpy.patch', 'msgpack_numpy.patch', ([], {}), '()\n', (446, 448), False, 'import msgpack_numpy\n'), ((583, 629), 'lmdb.open', 'lmdb.open', (['db_dir'], {'readonly': '(True)', 'create': '(False)'}), '(db_dir, readonly=True, create=False)\n', (592, 629), False, 'import lmdb\n'), ((1610, 1672), 'lmdb.open', 'lmdb.open', (['f"""{img_dir}/{db_name}"""'], {'readonly': '(True)', 'create': '(False)'}), "(f'{img_dir}/{db_name}', readonly=True, create=False)\n", (1619, 1672), False, 'import lmdb\n'), ((2226, 2279), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(im1.width + im2.width, im1.height)'], {}), "('RGB', (im1.width + im2.width, im1.height))\n", (2235, 2279), False, 'from PIL import Image, ImageFont, ImageDraw, ImageEnhance\n'), ((2541, 2575), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['source_img', '"""RGBA"""'], {}), "(source_img, 'RGBA')\n", (2555, 2575), False, 'from PIL import Image, ImageFont, ImageDraw, ImageEnhance\n'), ((4305, 4403), 'torch.load', 'torch.load', (['"""/data/private/cc/experiment/MMP/pretrained_ckpts/pretrained/uniter-base.pt"""'], {}), "(\n '/data/private/cc/experiment/MMP/pretrained_ckpts/pretrained/uniter-base.pt'\n )\n", (4315, 4403), False, 'import torch\n'), ((2136, 2166), 'msgpack.loads', 'msgpack.loads', (['dump'], {'raw': '(False)'}), '(dump, raw=False)\n', (2149, 2166), False, 'import msgpack\n'), ((1169, 1203), 'os.path.exists', 'os.path.exists', (['f"""{img_dir}/{nbb}"""'], {}), "(f'{img_dir}/{nbb}')\n", (1183, 1203), False, 'import os\n'), ((1958, 1992), 'numpy.load', 'np.load', (['reader'], {'allow_pickle': '(True)'}), '(reader, allow_pickle=True)\n', (1965, 1992), True, 'import numpy as np\n'), ((2440, 2477), 'PIL.Image.open', 'Image.open', (['(origin_img_dir + img_name)'], {}), '(origin_img_dir + img_name)\n', (2450, 2477), False, 'from PIL import Image, ImageFont, ImageDraw, ImageEnhance\n'), ((2938, 2975), 'PIL.Image.open', 'Image.open', (['(origin_img_dir + img_name)'], {}), '(origin_img_dir + img_name)\n', (2948, 2975), False, 'from PIL import Image, ImageFont, ImageDraw, ImageEnhance\n'), ((4855, 4880), 'lz4.frame.decompress', 'decompress', (['txt_db_new[k]'], {}), '(txt_db_new[k])\n', (4865, 4880), False, 'from lz4.frame import compress, decompress\n')]
|
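Since origin_img_dir and the lmdb databases above point at local /data paths, the box-drawing step is easiest to try on a synthetic image. The sketch below reimplements only that step for a box given in the same normalized (x0, y0, x1, y1) form as norm_bb; the canvas size, box coordinates, and output file name are arbitrary:

import numpy as np
from PIL import Image, ImageDraw

def draw_normalized_box(img, box, outline=(200, 0, 0, 255)):
    # Same drawing logic as draw_bounding_box above, but the image is passed
    # in directly instead of being read from origin_img_dir.
    width, height = img.size
    draw = ImageDraw.Draw(img, 'RGBA')
    p1 = (width * box[0], height * box[1])
    p2 = (width * box[2], height * box[3])
    draw.rectangle((p1, p2), outline=outline, width=2)
    return img

canvas = Image.fromarray(np.full((240, 320, 3), 220, dtype=np.uint8))
boxed = draw_normalized_box(canvas, [0.25, 0.25, 0.75, 0.75])
boxed.save('bb_demo.jpg', 'JPEG')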
# Solving reinforcement learning problems using pgpelib with parallelization
# and with observation normalization
# ==========================================================================
#
# This example demonstrates how to solve locomotion tasks.
# The following techniques are used:
#
# - dynamic population size
# - observation normalization
# - parallelization (using the ray library)
#
# Because we are using both parallelization and observation normalization,
# we will have to synchronize the observation stats between the remote
# workers and the main process.
# We demonstrate how to do this synchronization using ray,
# however the logic is applicable to other parallelization libraries.
from pgpelib import PGPE
from pgpelib.policies import Policy, LinearPolicy, MLPPolicy
from pgpelib.restore import to_torch_module
from pgpelib.runningstat import RunningStat
from typing import Tuple, Iterable
from numbers import Real
import numpy as np
import torch
import gym
import ray
import multiprocessing as mp
from time import sleep
import pickle
# Here is the gym environment to solve.
ENV_NAME = 'Walker2d-v2'
# The environment we are considering to solve is a locomotion problem.
# It defines an "alive bonus" to encourage the agent to stand on its
# feet without falling.
# However, such alive bonuses might drive the evolution process into
# generating agents which focus ONLY on standing on their feet (without
# progressing), just to collect these bonuses.
# We therefore remove this alive bonus by subtracting 1.0 at every
# simulator timestep.
DECREASE_REWARDS_BY = 1.0
# Ray supports stateful parallelization via remote actors.
# An actor is a class instance which lives on different process than
# the main process, and which stores its state.
# Here, we define a remote actor class (which will be instantiated
# multiple times, so that we will be able to use the instances for
# parallelized evaluation of our solutions)
@ray.remote
class Worker:
def __init__(self, policy, decrease_rewards_by):
policy: Policy
self.policy: Policy = policy
self.decrease_rewards_by = decrease_rewards_by
def set_main_obs_stats(self, rs):
# Set the main observation stats of the remote worker.
# The goal of this function is to receive the observation
# stats from the main process.
rs: RunningStat
self.policy.set_main_obs_stats(rs)
def pop_collected_obs_stats(self):
# Pop the observation stats collected by the worker.
# At the time of synchronization, the main process will call
# this method of each remote worker, and update its main
# observation stats with those collected data.
return self.policy.pop_collected_obs_stats()
def run(self, d):
        # Run each solution in the dictionary d.
# The dictionary d is expected in this format:
# { solution_index1: solution1,
# solution_index2: solution2,
# ... }
# and the result will be:
# { solution_index1: (cumulative_reward1, number_of_interactions1)
# solution_index2: (cumulative_reward2, number_of_interactions2)
# ... }
return self.policy.set_params_and_run_all(
d,
decrease_rewards_by=self.decrease_rewards_by
)
# Set the number of workers to be instantiated as the number of CPUs.
NUM_WORKERS = mp.cpu_count()
# List of workers.
# Initialized as a list containing `None`s in the beginning.
WORKERS = [None] * NUM_WORKERS
def prepare_workers(policy, decrease_rewards_by):
# Fill the WORKERS list.
# Initialize the ray library.
ray.init()
# For each index i of WORKERS list, fill the i-th element with a new
# worker instance.
for i in range(len(WORKERS)):
WORKERS[i] = Worker.remote(policy, decrease_rewards_by)
Reals = Iterable[Real]
def evaluate_solutions(solutions: Iterable[np.ndarray]) -> Tuple[Reals, Reals]:
# This function evaluates the given solutions in parallel.
# Get the number of solutions
nslns = len(solutions)
if len(WORKERS) > nslns:
# If the number of workers is greater than the number of solutions
# then the workers that we are going to actually use here
# is the first `nslns` amount of workers, not all of them.
workers = WORKERS[:nslns]
else:
# If the number of solutions is equal to or greater than the
# number of workers, then we will use all of the instantiated
# workers.
workers = WORKERS
# Number of workers that are going to be used now.
nworkers = len(workers)
# To each worker, we aim to send a dictionary, each dictionary being
# in this form:
# { solution_index1: solution1, solution_index2: solution2, ...}
# We keep those dictionaries in the `to_worker` variable.
# to_worker[i] stores the dictionary to be sent to the i-th worker.
to_worker = [dict() for _ in range(nworkers)]
# Iterate over the solutions and assign them one by one to the
# workers.
i_worker = 0
for i, solution in enumerate(solutions):
to_worker[i_worker][i] = solution
i_worker = (i_worker + 1) % nworkers
# Each worker executes the solution dictionary assigned to itself.
# The results are then collected to the list `worker_results`.
# The workers do their tasks in parallel.
worker_results = ray.get(
[
workers[i].run.remote(to_worker[i])
for i in range(nworkers)
]
)
# Allocate a list for storing the fitnesses, and another list for
# storing the number of interactions.
fitnesses = [None] * nslns
num_interactions = [None] * nslns
# For each worker:
for worker_result in worker_results:
# For each solution and its index mentioned in the worker's
# result dictionary:
for i, result in worker_result.items():
fitness, timesteps = result
# Store the i-th solution's fitness in the fitnesses list
fitnesses[i] = fitness
# Store the i-th solution's number of interactions in the
# num_interactions list.
num_interactions[i] = timesteps
# Return the fitnesses and the number of interactions lists.
return fitnesses, num_interactions
def sync_obs_stats(main_policy: Policy):
    # This function synchronizes the observation stats of the
    # main process and of the remote workers.
# Collect observation stats from the remote workers
collected_stats = ray.get(
[
worker.pop_collected_obs_stats.remote()
for worker in WORKERS
]
)
# In the main process, update the main policy's
# observation stats with the stats collected from the remote workers.
for stats in collected_stats:
main_policy.update_main_obs_stats(stats)
# To each worker, send the main policy's up-to-date stats.
ray.get(
[
worker.set_main_obs_stats.remote(
main_policy.get_main_obs_stats()
)
for worker in WORKERS
]
)
def main():
# This is the main function.
# The main evolution procedure will be defined here.
# Make a linear policy.
policy = LinearPolicy(
env_name=ENV_NAME, # Name of the environment
observation_normalization=True
)
# Prepare the workers
prepare_workers(policy, DECREASE_REWARDS_BY)
# Initial solution
x0 = np.zeros(policy.get_parameters_count(), dtype='float32')
# The following are the Walker2d-v2 hyperparameters used in the paper:
# ClipUp: A Simple and Powerful Optimizer for Distribution-based
# Policy Evolution
N = policy.get_parameters_count()
max_speed = 0.015
center_learning_rate = max_speed / 2.0
radius = max_speed * 15
# Compute the stdev_init from the radius:
stdev_init = np.sqrt((radius ** 2) / N)
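    # For instance (illustrative only, assuming a hypothetical N = 1000 parameters):
    # radius = 0.015 * 15 = 0.225 and stdev_init = sqrt(0.225**2 / 1000) ≈ 0.0071.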
popsize = 100
popsize_max = 800
# Below we initialize our PGPE solver.
pgpe = PGPE(
solution_length=N,
popsize=popsize,
popsize_max=popsize_max,
num_interactions=int(popsize * 1000 * (3 / 4)),
center_init=x0,
center_learning_rate=center_learning_rate,
optimizer='clipup',
optimizer_config={'max_speed': max_speed},
stdev_init=stdev_init,
stdev_learning_rate=0.1,
stdev_max_change=0.2,
solution_ranking=True,
dtype='float32'
)
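    # (With the values above, num_interactions = int(100 * 1000 * 0.75) = 75000,
    # i.e. roughly 75000 simulator steps are targeted per generation; the factor
    # 1000 presumably reflects the episode length of the task.)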
num_iterations = 500
# The main loop of the evolutionary computation
for i in range(1, 1 + num_iterations):
total_episodes = 0
while True:
# Get the solutions from the pgpe solver
solutions = pgpe.ask()
# Evaluate the solutions in parallel and get the fitnesses
fitnesses, num_interactions = evaluate_solutions(solutions)
sync_obs_stats(policy)
# Send the pgpe solver the received fitnesses
iteration_finished = pgpe.tell(fitnesses, num_interactions)
total_episodes += len(fitnesses)
if iteration_finished:
break
print(
"Iteration:", i,
" median score:", np.median(fitnesses),
" num.episodes:", total_episodes
)
print("Visualizing the center solution...")
# Get the center solution
center_solution = pgpe.center.copy()
# Make the gym environment for visualizing the center solution
env = gym.make(ENV_NAME)
# Load the center solution into the policy
policy.set_parameters(center_solution)
# Save the policy into a pickle file
with open(__file__ + '.pickle', 'wb') as f:
pickle.dump(policy, f)
# Convert the policy to a PyTorch module
net = to_torch_module(policy)
while True:
print("Please choose: 1> Visualize the agent 2> Quit")
response = input(">>")
if response == '1':
cumulative_reward = 0.0
# Reset the environment, and get the observation of the initial
# state into a variable.
observation = env.reset()
# Visualize the initial state
env.render()
# Main loop of the trajectory
while True:
with torch.no_grad():
action = net(
torch.as_tensor(observation, dtype=torch.float32)
).numpy()
if isinstance(env.action_space, gym.spaces.Box):
interaction = action
elif isinstance(env.action_space, gym.spaces.Discrete):
interaction = int(np.argmax(action))
else:
assert False, "Unknown action space"
observation, reward, done, info = env.step(interaction)
env.render()
cumulative_reward += reward
if done:
break
print("cumulative_reward", cumulative_reward)
elif response == '2':
break
else:
print('Unrecognized response:', repr(response))
if __name__ == "__main__":
main()
|
[
"ray.init",
"pickle.dump",
"gym.make",
"pgpelib.policies.LinearPolicy",
"numpy.argmax",
"numpy.median",
"multiprocessing.cpu_count",
"torch.as_tensor",
"pgpelib.restore.to_torch_module",
"torch.no_grad",
"numpy.sqrt"
] |
[((3508, 3522), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3520, 3522), True, 'import multiprocessing as mp\n'), ((3754, 3764), 'ray.init', 'ray.init', ([], {}), '()\n', (3762, 3764), False, 'import ray\n'), ((7389, 7452), 'pgpelib.policies.LinearPolicy', 'LinearPolicy', ([], {'env_name': 'ENV_NAME', 'observation_normalization': '(True)'}), '(env_name=ENV_NAME, observation_normalization=True)\n', (7401, 7452), False, 'from pgpelib.policies import Policy, LinearPolicy, MLPPolicy\n'), ((8030, 8054), 'numpy.sqrt', 'np.sqrt', (['(radius ** 2 / N)'], {}), '(radius ** 2 / N)\n', (8037, 8054), True, 'import numpy as np\n'), ((9655, 9673), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (9663, 9673), False, 'import gym\n'), ((9942, 9965), 'pgpelib.restore.to_torch_module', 'to_torch_module', (['policy'], {}), '(policy)\n', (9957, 9965), False, 'from pgpelib.restore import to_torch_module\n'), ((9863, 9885), 'pickle.dump', 'pickle.dump', (['policy', 'f'], {}), '(policy, f)\n', (9874, 9885), False, 'import pickle\n'), ((9378, 9398), 'numpy.median', 'np.median', (['fitnesses'], {}), '(fitnesses)\n', (9387, 9398), True, 'import numpy as np\n'), ((10451, 10466), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10464, 10466), False, 'import torch\n'), ((10823, 10840), 'numpy.argmax', 'np.argmax', (['action'], {}), '(action)\n', (10832, 10840), True, 'import numpy as np\n'), ((10526, 10575), 'torch.as_tensor', 'torch.as_tensor', (['observation'], {'dtype': 'torch.float32'}), '(observation, dtype=torch.float32)\n', (10541, 10575), False, 'import torch\n')]
|
import lue.data_model as ldm
import numpy as np
import csv
def export_partition_shape_results(
lue_dataset,
csv_writer):
    # Assert that the number of array shapes for which experiments were
    # performed is 1
lue_array = lue_dataset.array.array
assert lue_array.shape.value.nr_arrays == 1
    # For each array shape for which experiments were performed
lue_measurement = lue_dataset.benchmark.measurement
array_shapes = lue_measurement.array_shape.value[:]
assert np.all(array_shapes == array_shapes[0])
count = lue_measurement.duration.value.array_shape[:][0]
lue_partition = lue_dataset.partition.partition
partition_shape = lue_measurement.partition_shape.value[:]
nr_partitions = lue_measurement.nr_partitions.value[:,-1]
assert len(partition_shape) == len(nr_partitions)
if count == 1:
assert False, "Implement!"
else:
# Write the following columns:
        # - partition_size
# - nr_partitions
# - {mean,std}_duration
csv_writer.writerow([
# "partition_shape",
"partition_size",
"nr_partitions",
"mean_duration",
"std_duration",
])
mean_duration = \
lue_partition.properties["mean_duration_{}".format(0)].value[:]
std_duration = \
lue_partition.properties["std_duration_{}".format(0)].value[:]
for n in range(len(partition_shape)):
csv_writer.writerow([
# "{},{}".format(*partition_shape[n]),
np.prod(partition_shape[n]),
nr_partitions[n],
mean_duration[n],
std_duration[n],
])
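# For a partition-shape experiment, the resulting CSV might look like
# (illustrative values only):
#   partition_size,nr_partitions,mean_duration,std_duration
#   10000,400,1.23,0.05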
def export_strong_scaling_results(
lue_dataset,
csv_writer):
lue_measurement = lue_dataset.benchmark.measurement
count = lue_measurement.duration.value.array_shape[:][0]
nr_workers = lue_measurement.nr_workers.value[:]
sort_idxs = np.argsort(nr_workers)
nr_workers = nr_workers[sort_idxs]
if count == 1:
# Write the following columns:
        # - nr_workers
        # - duration
# - relative_speed_up
# - relative_efficiency
# - lups
csv_writer.writerow([
"nr_workers",
"duration",
"relative_speed_up",
"relative_efficiency",
"lups",
])
lue_scaling = lue_dataset.benchmark.scaling
duration = lue_measurement.duration.value[:][sort_idxs]
relative_speed_up = lue_scaling.relative_speed_up.value[:][sort_idxs]
relative_efficiency = lue_scaling.relative_efficiency.value[:][sort_idxs]
lups = lue_scaling.lups.value[:][sort_idxs]
for n in range(len(nr_workers)):
csv_writer.writerow([
nr_workers[n],
duration[n][0],
relative_speed_up[n][0],
relative_efficiency[n][0],
lups[n][0],
])
else:
# Write the following columns:
# - nr_workers
# - {mean,std}_duration
# - {mean,std}_relative_efficiency
# - {mean,std}_lups
csv_writer.writerow([
"nr_workers",
"mean_duration",
"std_duration",
"mean_relative_efficiency",
"std_relative_efficiency",
"mean_lups",
"std_lups",
])
lue_scaling = lue_dataset.benchmark.scaling
mean_duration = lue_scaling.mean_duration.value[:][sort_idxs]
std_duration = lue_scaling.std_duration.value[:][sort_idxs]
mean_relative_efficiency = lue_scaling.mean_relative_efficiency.value[:][sort_idxs]
std_relative_efficiency = lue_scaling.std_relative_efficiency.value[:][sort_idxs]
mean_lups = lue_scaling.mean_lups.value[:][sort_idxs]
std_lups = lue_scaling.std_lups.value[:][sort_idxs]
for n in range(len(nr_workers)):
csv_writer.writerow([
nr_workers[n],
mean_duration[n],
std_duration[n],
mean_relative_efficiency[n],
std_relative_efficiency[n],
mean_lups[n],
std_lups[n],
])
def export_weak_scaling_results(
lue_dataset,
csv_writer):
lue_measurement = lue_dataset.benchmark.measurement
count = lue_measurement.duration.value.array_shape[:][0]
nr_workers = lue_measurement.nr_workers.value[:]
sort_idxs = np.argsort(nr_workers)
nr_workers = nr_workers[sort_idxs]
if count == 1:
# Write the following columns:
# - nr_workers
# - duration
# - relative_efficiency
# - lups
csv_writer.writerow([
"nr_workers",
"duration",
"relative_efficiency",
"lups",
])
lue_scaling = lue_dataset.benchmark.scaling
        duration = lue_measurement.duration.value[:][sort_idxs]
relative_efficiency = lue_scaling.relative_efficiency.value[:][sort_idxs]
lups = lue_scaling.lups.value[:][sort_idxs]
for n in range(len(nr_workers)):
csv_writer.writerow([
nr_workers[n],
duration[n][0],
relative_efficiency[n][0],
lups[n][0],
])
else:
# Write the following columns:
# - nr_workers
# - {mean,std}_duration
# - {mean,std}_relative_efficiency
# - {mean,std}_lups
csv_writer.writerow([
"nr_workers",
"mean_duration",
"std_duration",
"mean_relative_efficiency",
"std_relative_efficiency",
"mean_lups",
"std_lups",
])
lue_scaling = lue_dataset.benchmark.scaling
mean_duration = lue_scaling.mean_duration.value[:][sort_idxs]
std_duration = lue_scaling.std_duration.value[:][sort_idxs]
mean_relative_efficiency = lue_scaling.mean_relative_efficiency.value[:][sort_idxs]
std_relative_efficiency = lue_scaling.std_relative_efficiency.value[:][sort_idxs]
mean_lups = lue_scaling.mean_lups.value[:][sort_idxs]
std_lups = lue_scaling.std_lups.value[:][sort_idxs]
for n in range(len(nr_workers)):
csv_writer.writerow([
nr_workers[n],
mean_duration[n],
std_duration[n],
mean_relative_efficiency[n],
std_relative_efficiency[n],
mean_lups[n],
std_lups[n],
])
def export_results(
lue_dataset_pathname,
csv_file_pathname):
lue_dataset = ldm.open_dataset(lue_dataset_pathname, "r")
kind = lue_dataset.benchmark.meta_information.kind.value[:][0]
with open(csv_file_pathname, "w") as csv_file:
csv_writer = csv.writer(csv_file)
export_by_kind = {
"partition_shape": export_partition_shape_results,
"strong_scaling": export_strong_scaling_results,
"weak_scaling": export_weak_scaling_results,
}
export_by_kind[kind](lue_dataset, csv_writer)
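# Example call (hypothetical file names):
#   export_results("benchmark.lue", "benchmark.csv")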
|
[
"csv.writer",
"numpy.prod",
"numpy.argsort",
"lue.data_model.open_dataset",
"numpy.all"
] |
[((512, 551), 'numpy.all', 'np.all', (['(array_shapes == array_shapes[0])'], {}), '(array_shapes == array_shapes[0])\n', (518, 551), True, 'import numpy as np\n'), ((2065, 2087), 'numpy.argsort', 'np.argsort', (['nr_workers'], {}), '(nr_workers)\n', (2075, 2087), True, 'import numpy as np\n'), ((4753, 4775), 'numpy.argsort', 'np.argsort', (['nr_workers'], {}), '(nr_workers)\n', (4763, 4775), True, 'import numpy as np\n'), ((7092, 7135), 'lue.data_model.open_dataset', 'ldm.open_dataset', (['lue_dataset_pathname', '"""r"""'], {}), "(lue_dataset_pathname, 'r')\n", (7108, 7135), True, 'import lue.data_model as ldm\n'), ((7276, 7296), 'csv.writer', 'csv.writer', (['csv_file'], {}), '(csv_file)\n', (7286, 7296), False, 'import csv\n'), ((1638, 1665), 'numpy.prod', 'np.prod', (['partition_shape[n]'], {}), '(partition_shape[n])\n', (1645, 1665), True, 'import numpy as np\n')]
|
import os
from statistics import mean
import numpy as np
import matplotlib.pyplot as pyplot
from simtk import unit
from simtk.openmm.app.pdbfile import PDBFile
from foldamers.cg_model.cgmodel import CGModel
from foldamers.parameters.reweight import *
from foldamers.thermo.calc import *
from foldamers.ensembles.ens_build import *
from cg_openmm.simulation.rep_exch import *
from cg_openmm.simulation.tools import *
# Job settings
scan_sc_bb_bb_sc_torsions = True
calculate_dQ = True
calculate_free_energies = True
evaluate_heat_capacity = True
output_directory = "output"
if not os.path.exists(output_directory):
os.mkdir(output_directory)
# Number of grid points to scan (around initial angle definition)
grid_points = 3
# Configure Yank (replica exchange) simulation settings
print_frequency = 5 # Number of steps to skip when printing output
total_simulation_time = 500.0 * unit.picosecond
simulation_time_step = 5.0 * unit.femtosecond
number_replicas = 30
min_temp = 1.0 * unit.kelvin
max_temp = 400.0 * unit.kelvin
temperature_list = get_temperature_list(min_temp, max_temp, number_replicas)
# Model settings
polymer_length = 12
backbone_lengths = [1]
sidechain_lengths = [1]
sidechain_positions = [0]
include_bond_forces = False
include_bond_angle_forces = True
include_nonbonded_forces = True
include_torsion_forces = True
constrain_bonds = True
# Bond definitions
bond_length = 7.5 * unit.angstrom
bond_lengths = {
"bb_bb_bond_length": bond_length,
"bb_sc_bond_length": bond_length,
"sc_sc_bond_length": bond_length,
}
bond_force_constant = 0 * unit.kilocalorie_per_mole / unit.nanometer / unit.nanometer
bond_force_constants = {
"bb_bb_bond_k": bond_force_constant,
"bb_sc_bond_k": bond_force_constant,
"sc_sc_bond_k": bond_force_constant,
}
# Particle definitions
mass = 100.0 * unit.amu
masses = {"backbone_bead_masses": mass, "sidechain_bead_masses": mass}
r_min = 3.0 * bond_length # Lennard-Jones potential r_min
sigma = r_min / (2.0 ** (1 / 6)) # Factor of /(2.0**(1/6)) is applied to convert r_min to sigma
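# With the bond_length above (7.5 angstrom), r_min = 22.5 angstrom and sigma ≈ 20.0 angstrom.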
sigmas = {"bb_sigma": sigma, "sc_sigma": sigma}
epsilon = 0.05 * unit.kilocalorie_per_mole
epsilons = {"bb_eps": epsilon, "sc_eps": epsilon}
# Bond angle definitions
bond_angle_force_constant = 0.0001 * unit.kilocalorie_per_mole / unit.radian / unit.radian
bond_angle_force_constants = {
"bb_bb_bb_angle_k": bond_angle_force_constant,
"bb_bb_sc_angle_k": bond_angle_force_constant,
}
bb_bb_bb_equil_bond_angle = 120.0 * (
3.14 / 180.0
) # OpenMM expects angle definitions in units of radians
bb_bb_sc_equil_bond_angle = 120.0 * (3.14 / 180.0)
equil_bond_angles = {
"bb_bb_bb_angle_0": bb_bb_bb_equil_bond_angle,
"bb_bb_sc_angle_0": bb_bb_sc_equil_bond_angle,
}
# Torsion angle definitions (Used to establish a scanning range below)
torsion_force_constant = 0.01 * unit.kilocalorie_per_mole / unit.radian / unit.radian
if scan_sc_bb_bb_sc_torsions == True:
torsion_force_constants = {
"bb_bb_bb_bb_torsion_k": torsion_force_constant,
"sc_bb_bb_sc_torsion_k": torsion_force_constant,
}
bb_bb_bb_bb_equil_torsion_angle = 78.0 * (
3.14 / 180.0
) # OpenMM defaults to units of radians for angle definitions
sc_bb_bb_sc_equil_torsion_angle = 120.0 * (3.14 / 180.0)
equil_torsion_angles = {
"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle,
"sc_bb_bb_sc_torsion_0": sc_bb_bb_sc_equil_torsion_angle,
}
torsion_periodicities = {"bb_bb_bb_bb_period": 1, "sc_bb_bb_sc_period": 1}
else:
torsion_force_constants = {"bb_bb_bb_bb_torsion_k": torsion_force_constant}
bb_bb_bb_bb_equil_torsion_angle = 78.0 * (
3.14 / 180.0
) # OpenMM defaults to units of radians for angle definitions
equil_torsion_angles = {"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle}
torsion_periodicities = {"bb_bb_bb_bb_period": 1}
# Get initial positions from local file
positions = PDBFile("helix.pdb").getPositions()
# Build a coarse grained model
cgmodel = CGModel(
polymer_length=polymer_length,
backbone_lengths=backbone_lengths,
sidechain_lengths=sidechain_lengths,
sidechain_positions=sidechain_positions,
masses=masses,
sigmas=sigmas,
epsilons=epsilons,
bond_lengths=bond_lengths,
bond_force_constants=bond_force_constants,
bond_angle_force_constants=bond_angle_force_constants,
torsion_force_constants=torsion_force_constants,
equil_bond_angles=equil_bond_angles,
equil_torsion_angles=equil_torsion_angles,
torsion_periodicities=torsion_periodicities,
include_nonbonded_forces=include_nonbonded_forces,
include_bond_forces=include_bond_forces,
include_bond_angle_forces=include_bond_angle_forces,
include_torsion_forces=include_torsion_forces,
constrain_bonds=constrain_bonds,
positions=positions,
)
# Run test simulations (NVT) with this coarse-grained model at the minimum and maximum temperatures
# to make sure the parameters are reasonable before attempting replica exchange simulations
# (If high-T simulations fail then we need to modify the model parameters)
test_simulation_time = 50.0 * unit.picosecond
print_frequency = 5
temperature = temperature_list[0]
output_directory = str("test_" + str(round(temperature._value, 1)))
if not os.path.exists(output_directory):
os.mkdir(output_directory)
run_simulation(
cgmodel,
output_directory,
test_simulation_time,
simulation_time_step,
temperature,
print_frequency,
)
temperature = temperature_list[-1]
output_directory = str("test_" + str(round(temperature._value, 1)))
if not os.path.exists(output_directory):
os.mkdir(output_directory)
run_simulation(
cgmodel,
output_directory,
test_simulation_time,
simulation_time_step,
temperature,
print_frequency,
)
# Reset the output directory
output_directory = "output"
if not os.path.exists(output_directory):
os.mkdir(output_directory)
# Create a list of the torsion angles that we will investigate in our parameter scan
bb_bb_bb_bb_equil_torsion_angles = [
float(bb_bb_bb_bb_equil_torsion_angle + i * 0.05) for i in range(-grid_points, grid_points, 1)
]
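# With grid_points = 3 this yields six angles (in radians), from roughly 1.21 to 1.46
# in steps of 0.05 rad, around the initial 78-degree (~1.36 rad) torsion value.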
if scan_sc_bb_bb_sc_torsions == True:
sc_bb_bb_sc_equil_torsion_angles = [
float(sc_bb_bb_sc_equil_torsion_angle + i * 0.05)
for i in range(-grid_points, grid_points, 1)
]
else:
sc_bb_bb_sc_equil_torsion_angles = [0.0]
if calculate_dQ:
# Set parameters for evaluating native contacts
native_structure_contact_distance_cutoff = 1.00 * cgmodel.get_sigma(
0
) # This distance cutoff determines which nonbonded interactions are considered 'native' contacts
native_fraction_cutoff = (
0.95 # The threshold fraction of native contacts above which a pose is considered 'native'
)
nonnative_fraction_cutoff = 0.95 # The threshold fraction of native contacts below which a pose is considered 'nonnative'
native_ensemble_size = 10
nonnative_ensemble_size = 10
decorrelate = True
# Build arrays to store data for each model parameter scan/grid point
dQ_list = []
df_ij_list = []
ddf_ij_list = []
Delta_u_list = []
dDelta_u_list = []
Delta_s_list = []
dDelta_s_list = []
C_v_list = []
dC_v_list = []
# This is where we start evaluating the properties of models with different equilibrium torsion angles
for sc_bb_bb_sc_equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles:
for bb_bb_bb_bb_equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles:
if scan_sc_bb_bb_sc_torsions == True:
equil_torsion_angles = {
"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle,
"sc_bb_bb_sc_torsion_0": sc_bb_bb_sc_equil_torsion_angle,
}
else:
equil_torsion_angles = {"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle}
# Build a coarse grained model that has the torsion parameters for this grid point.
positions = PDBFile("helix.pdb").getPositions()
cgmodel = CGModel(
polymer_length=polymer_length,
backbone_lengths=backbone_lengths,
sidechain_lengths=sidechain_lengths,
sidechain_positions=sidechain_positions,
masses=masses,
sigmas=sigmas,
epsilons=epsilons,
bond_lengths=bond_lengths,
bond_force_constants=bond_force_constants,
bond_angle_force_constants=bond_angle_force_constants,
torsion_force_constants=torsion_force_constants,
equil_bond_angles=equil_bond_angles,
equil_torsion_angles=equil_torsion_angles,
torsion_periodicities=torsion_periodicities,
include_nonbonded_forces=include_nonbonded_forces,
include_bond_forces=include_bond_forces,
include_bond_angle_forces=include_bond_angle_forces,
include_torsion_forces=include_torsion_forces,
constrain_bonds=constrain_bonds,
positions=positions,
)
if scan_sc_bb_bb_sc_torsions == True:
output_data = str(
str(output_directory)
+ "/torsions_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ ".nc"
)
file_name = str(
str(output_directory)
+ "/re_min_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ ".pdb"
)
else:
output_data = str(
str(output_directory)
+ "/torsions_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ ".nc"
)
file_name = str(
str(output_directory)
+ "/re_min_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ ".pdb"
)
if os.path.exists(file_name):
print("\n")
print("Reading existing simulation data for a coarse grained model")
print(
"with bb_bb_bb_bb torsion angles of "
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ " degrees."
)
if scan_sc_bb_bb_sc_torsions == True:
print(
"and sc_bb_bb_sc torsion angles of "
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ " degrees."
)
print("\n")
            # Search for existing data, and read it if possible
replica_energies, replica_positions, replica_states = read_replica_exchange_data(
system=cgmodel.system,
topology=cgmodel.topology,
temperature_list=temperature_list,
output_data=output_data,
print_frequency=print_frequency,
)
# Find the lowest energy pose for this model
native_structure = PDBFile(file_name).getPositions()
else:
print("\n")
print("Performing simulations for a coarse grained model")
print(
"with bb_bb_bb_bb torsion angles of "
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ " degrees."
)
if scan_sc_bb_bb_sc_torsions == True:
print(
"and sc_bb_bb_sc torsion angles of "
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ " degrees."
)
print("\n")
# Run a replica exchange simulation with this cgmodel
replica_energies, replica_positions, replica_states = run_replica_exchange(
cgmodel.topology,
cgmodel.system,
cgmodel.positions,
temperature_list=temperature_list,
simulation_time_step=simulation_time_step,
total_simulation_time=total_simulation_time,
print_frequency=print_frequency,
output_data=output_data,
)
native_structure = get_native_structure(
replica_positions, replica_energies, temperature_list
)
file = open(file_name, "w")
PDBFile.writeFile(cgmodel.topology, native_structure, file=file)
file.close()
if calculate_dQ:
native_structure_contact_distance_cutoff = 1.15 * cgmodel.get_sigma(
0
) # This distance cutoff determines which nonbonded interactions are considered 'native' contacts
native_fraction_cutoff = 0.95 # The threshold fraction of native contacts above which a pose is considered 'native'
nonnative_fraction_cutoff = 0.95 # The threshold fraction of native contacts below which a pose is considered 'nonnative'
native_ensemble_size = 10
nonnative_ensemble_size = 100
decorrelate = True
(
native_ensemble,
native_ensemble_energies,
nonnative_ensemble,
nonnative_ensemble_energies,
) = get_ensembles_from_replica_positions(
cgmodel,
replica_positions,
replica_energies,
temperature_list,
decorrelate=decorrelate,
native_fraction_cutoff=native_fraction_cutoff,
nonnative_fraction_cutoff=nonnative_fraction_cutoff,
native_structure_contact_distance_cutoff=native_structure_contact_distance_cutoff,
native_ensemble_size=native_ensemble_size,
nonnative_ensemble_size=nonnative_ensemble_size,
)
if (
len(native_ensemble_energies) != native_ensemble_size
or len(nonnative_ensemble_energies) != nonnative_ensemble_size
):
print(
"ERROR: attempt to generate native and nonnative ensembles was unsuccessful."
)
print(
str(len(native_ensemble_energies))
+ " native ensemble members were generated ("
+ str(native_ensemble_size)
+ " were requested),"
)
print(
"and "
+ str(len(nonnative_ensemble_energies))
+ " non-native ensemble members were generated ("
+ str(nonnative_ensemble_size)
+ " were requested)."
)
print(
"Try adjusting the 'native_structure_distance_cutoff' parameter (current value="
+ str(native_structure_contact_distance_cutoff.__div__(cgmodel.get_sigma(0)))
+ "*'bb_sigma'),"
)
print(
"and the 'nonnative_fraction_cutoff' parameter (current value="
+ str(nonnative_fraction_cutoff)
+ ")"
)
print("to see if either of these approaches fixes the problem.")
exit()
if scan_sc_bb_bb_sc_torsions == True:
nonnative_ensemble_directory = str(
str(output_directory)
+ "/ens_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_nonnative"
)
native_ensemble_directory = str(
str(output_directory)
+ "/ens_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_"
+ str(round(sc_bb_bb_sc_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_native"
)
else:
nonnative_ensemble_directory = str(
str(output_directory)
+ "/ens_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_nonnative"
)
native_ensemble_directory = str(
str(output_directory)
+ "/ens_"
+ str(round(bb_bb_bb_bb_equil_torsion_angle * (180.0 / 3.14), 1))
+ "_native"
)
# We build an ensemble of nonnative poses for energetic comparison with the native pose.
if os.path.exists(nonnative_ensemble_directory):
nonnative_ensemble, nonnative_ensemble_energies = get_ensemble_data(
cgmodel, nonnative_ensemble_directory
)
if len(nonnative_ensemble) != nonnative_ensemble_size:
print(
"ERROR: "
+ str(len(nonnative_ensemble_energies))
+ " nonnative poses were found in existing output folders, but "
+ str(nonnative_ensemble_size)
+ " poses were requested."
)
print(
"This probably means that the requested ensemble size changed since the script was last run."
)
exit()
else:
os.mkdir(nonnative_ensemble_directory)
for pose in nonnative_ensemble:
cgmodel.positions = pose
write_ensemble_pdb(cgmodel, ensemble_directory=nonnative_ensemble_directory)
nonnative_ensemble_Q = []
for pose in nonnative_ensemble:
Q = fraction_native_contacts(cgmodel, pose, native_structure)
nonnative_ensemble_Q.append(Q)
nonnative_ensemble_Q = np.array([Q for Q in nonnative_ensemble_Q])
mean_nonnative_contacts = mean(nonnative_ensemble_Q)
print(
"The mean fraction of native contacts for this model is: "
+ str(mean_nonnative_contacts)
)
# We build an ensemble of native poses in order to understand the energy distribution around the folded state.
if os.path.exists(native_ensemble_directory):
native_ensemble, native_ensemble_energies = get_ensemble_data(
cgmodel, native_ensemble_directory
)
if len(native_ensemble_energies) != native_ensemble_size:
print(
"ERROR: "
+ str(len(native_ensemble_energies))
+ " native poses were found in existing output folders, but "
+ str(native_ensemble_size)
+ " poses were requested."
)
print(
"This probably means that the requested ensemble size changed since the script was last run."
)
exit()
else:
os.mkdir(native_ensemble_directory)
for pose in native_ensemble:
cgmodel.positions = pose
write_ensemble_pdb(cgmodel, ensemble_directory=native_ensemble_directory)
            # Get the average change in the fraction of native contacts during folding (dQ),
            # calculated as the difference between the native fraction (1.0) and the average
            # fraction of native contacts in the nonnative ensemble.
# A large dQ means the model/structure has a stable folded state.
# A small dQ means the model/structure does not have a stable folded state.
dQ = 1.0 - mean_nonnative_contacts
dQ_list.append(dQ)
if calculate_free_energies:
num_intermediate_states = 1
mbar, E_kn, E_expect, dE_expect, new_temp_list = get_mbar_expectation(
replica_energies, temperature_list, num_intermediate_states
)
df_ij, ddf_ij = get_free_energy_differences(mbar)
df_ij_list.append(df_ij)
ddf_ij_list.append(ddf_ij)
Delta_s, dDelta_s = get_entropy_differences(mbar)
Delta_s_list.append(Delta_s)
dDelta_s_list.append(dDelta_s)
Delta_u, dDelta_u = get_enthalpy_differences(mbar)
Delta_u_list.append(Delta_u)
dDelta_u_list.append(dDelta_u)
if evaluate_heat_capacity:
C_v, dC_v, new_temperature_list = get_heat_capacity(
replica_energies, temperature_list, num_intermediate_states=1
)
C_v_list.append(C_v)
dC_v_list.append(dC_v)
if scan_sc_bb_bb_sc_torsions == True:
file_name = "dQ_for_variable_equil_torsion_angles.png"
figure = pyplot.figure(1)
bb_bb_bb_bb_equil_torsion_angles = np.array(
[float(equil_torsion_angle) for equil_torsion_angle in bb_bb_bb_bb_equil_torsion_angles]
)
sc_bb_bb_sc_equil_torsion_angles = np.array(
[float(equil_torsion_angle) for equil_torsion_angle in sc_bb_bb_sc_equil_torsion_angles]
)
x = np.unique(bb_bb_bb_bb_equil_torsion_angles * (180.0 / 3.14))
y = np.unique(sc_bb_bb_sc_equil_torsion_angles * (180.0 / 3.14))
X, Y = np.meshgrid(x, y)
    Z = np.array(dQ_list).reshape(len(x), len(y))
pyplot.xlabel(r"$ \alpha_{0}^{BB-BB-BB-BB} $ ( Degrees )")
pyplot.ylabel(r"$ \alpha_{0}^{SC-BB-BB-SC} $ ( Degrees )")
pyplot.title("dQ (Change in native contacts during folding)")
pyplot.pcolormesh(X, Y, Z)
pyplot.colorbar()
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
if calculate_dQ:
file_name = "dQ_for_variable_bb_bb_bb_bb_torsion_angle.png"
figure = pyplot.figure(1)
x = np.array([float(angle * (180.0 / 3.14)) for angle in bb_bb_bb_bb_equil_torsion_angles])
y = np.array([float(dQ) for dQ in dQ_list])
pyplot.xlabel(r"$ \alpha_{0}^{BB-BB-BB-BB} $ ( Degrees )")
pyplot.ylabel(r"$\Delta$Q")
pyplot.title(r"$\Delta$Q (Change in native contacts) during folding")
pyplot.plot(x, y)
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
if calculate_free_energies:
file_name = "free_energies_for_variable_bb_bb_bb_bb_torsion_angle.png"
figure = pyplot.figure(1)
legend_title = r"$ \alpha_{0}^{BB-BB-BB-BB} $ (Degrees)"
legend_labels = np.array(
[float(round(angle * (180.0 / 3.14), 1)) for angle in bb_bb_bb_bb_equil_torsion_angles]
)
temperatures = np.array([temperature for temperature in new_temp_list])
index = 0
for df_ij, ddf_ij in zip(df_ij_list, ddf_ij_list):
df_ij = np.array([df_ij[i][0] for i in range(len(df_ij))])
ddf_ij = np.array([ddf_ij[i][0] for i in range(len(ddf_ij))])
(line,) = pyplot.plot(temperatures, df_ij)
line.set_label(legend_labels[index])
index = index + 1
pyplot.xlabel("Temperature (Kelvin)")
pyplot.ylabel(r"Dimensionless free energy differences $\mathit{F}$")
pyplot.title(r"$\mathit{F}$ for variable $\alpha_{0}^{BB-BB-BB-BB}$")
pyplot.legend(legend_labels)
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
file_name = "entropies_for_variable_bb_bb_bb_bb_torsion_angle.png"
figure = pyplot.figure(1)
legend_title = r"$ \alpha_{0}^{BB-BB-BB-BB} $ (Degrees)"
legend_labels = np.array(
[float(round(angle * (180.0 / 3.14), 1)) for angle in bb_bb_bb_bb_equil_torsion_angles]
)
temperatures = np.array([temperature for temperature in new_temp_list])
index = 0
for Delta_s in Delta_s_list:
delta_s = np.array([Delta_s[i][0] for i in range(len(Delta_s))])
(line,) = pyplot.plot(temperatures, delta_s)
line.set_label(legend_labels[index])
index = index + 1
pyplot.xlabel("Temperature (Kelvin)")
pyplot.ylabel("Entropy differences ($\Delta$S)")
pyplot.title(r"Entropy for variable $\alpha_{0}^{BB-BB-BB-BB}$")
pyplot.legend(legend_labels)
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
if evaluate_heat_capacity:
file_name = "heat_capacity_for_variable_bb_bb_bb_bb_torsion_angle.png"
figure = pyplot.figure(1)
legend_title = r"$ \alpha_{0}^{BB-BB-BB-BB} $ (Degrees)"
legend_labels = np.array(
[float(round(angle * (180.0 / 3.14), 1)) for angle in bb_bb_bb_bb_equil_torsion_angles]
)
temperatures = np.array([temperature for temperature in new_temp_list])
index = 0
for C_v, dC_v in zip(C_v_list, dC_v_list):
C_v = np.array([C_v[i] for i in range(len(C_v))])
dC_v = np.array([dC_v[i] for i in range(len(dC_v))])
pyplot.errorbar(temperatures, C_v, yerr=dC_v, figure=figure, label=legend_labels[index])
index = index + 1
pyplot.xlabel("Temperature ( Kelvin )")
pyplot.ylabel(r"C$_{v}$ ( kcal/mol * Kelvin )")
pyplot.title(r"Heat capacity for variable $\epsilon$")
pyplot.legend(legend_labels)
pyplot.savefig(file_name)
pyplot.show()
pyplot.close()
exit()
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"simtk.openmm.app.pdbfile.PDBFile.writeFile",
"matplotlib.pyplot.figure",
"numpy.unique",
"numpy.meshgrid",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"foldamers.cg_model.cgmodel.CGModel",
"simtk.openmm.app.pdbfile.PDBFile",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"statistics.mean",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((4025, 4818), 'foldamers.cg_model.cgmodel.CGModel', 'CGModel', ([], {'polymer_length': 'polymer_length', 'backbone_lengths': 'backbone_lengths', 'sidechain_lengths': 'sidechain_lengths', 'sidechain_positions': 'sidechain_positions', 'masses': 'masses', 'sigmas': 'sigmas', 'epsilons': 'epsilons', 'bond_lengths': 'bond_lengths', 'bond_force_constants': 'bond_force_constants', 'bond_angle_force_constants': 'bond_angle_force_constants', 'torsion_force_constants': 'torsion_force_constants', 'equil_bond_angles': 'equil_bond_angles', 'equil_torsion_angles': 'equil_torsion_angles', 'torsion_periodicities': 'torsion_periodicities', 'include_nonbonded_forces': 'include_nonbonded_forces', 'include_bond_forces': 'include_bond_forces', 'include_bond_angle_forces': 'include_bond_angle_forces', 'include_torsion_forces': 'include_torsion_forces', 'constrain_bonds': 'constrain_bonds', 'positions': 'positions'}), '(polymer_length=polymer_length, backbone_lengths=backbone_lengths,\n sidechain_lengths=sidechain_lengths, sidechain_positions=\n sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons,\n bond_lengths=bond_lengths, bond_force_constants=bond_force_constants,\n bond_angle_force_constants=bond_angle_force_constants,\n torsion_force_constants=torsion_force_constants, equil_bond_angles=\n equil_bond_angles, equil_torsion_angles=equil_torsion_angles,\n torsion_periodicities=torsion_periodicities, include_nonbonded_forces=\n include_nonbonded_forces, include_bond_forces=include_bond_forces,\n include_bond_angle_forces=include_bond_angle_forces,\n include_torsion_forces=include_torsion_forces, constrain_bonds=\n constrain_bonds, positions=positions)\n', (4032, 4818), False, 'from foldamers.cg_model.cgmodel import CGModel\n'), ((581, 613), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (595, 613), False, 'import os\n'), ((619, 645), 'os.mkdir', 'os.mkdir', (['output_directory'], {}), '(output_directory)\n', (627, 645), False, 'import os\n'), ((5297, 5329), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (5311, 5329), False, 'import os\n'), ((5335, 5361), 'os.mkdir', 'os.mkdir', (['output_directory'], {}), '(output_directory)\n', (5343, 5361), False, 'import os\n'), ((5615, 5647), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (5629, 5647), False, 'import os\n'), ((5653, 5679), 'os.mkdir', 'os.mkdir', (['output_directory'], {}), '(output_directory)\n', (5661, 5679), False, 'import os\n'), ((5888, 5920), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (5902, 5920), False, 'import os\n'), ((5926, 5952), 'os.mkdir', 'os.mkdir', (['output_directory'], {}), '(output_directory)\n', (5934, 5952), False, 'import os\n'), ((21292, 21308), 'matplotlib.pyplot.figure', 'pyplot.figure', (['(1)'], {}), '(1)\n', (21305, 21308), True, 'import matplotlib.pyplot as pyplot\n'), ((21622, 21682), 'numpy.unique', 'np.unique', (['(bb_bb_bb_bb_equil_torsion_angles * (180.0 / 3.14))'], {}), '(bb_bb_bb_bb_equil_torsion_angles * (180.0 / 3.14))\n', (21631, 21682), True, 'import numpy as np\n'), ((21691, 21751), 'numpy.unique', 'np.unique', (['(sc_bb_bb_sc_equil_torsion_angles * (180.0 / 3.14))'], {}), '(sc_bb_bb_sc_equil_torsion_angles * (180.0 / 3.14))\n', (21700, 21751), True, 'import numpy as np\n'), ((21763, 21780), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (21774, 21780), True, 'import numpy as np\n'), ((21826, 21884), 
'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""$ \\\\alpha_{0}^{BB-BB-BB-BB} $ ( Degrees )"""'], {}), "('$ \\\\alpha_{0}^{BB-BB-BB-BB} $ ( Degrees )')\n", (21839, 21884), True, 'import matplotlib.pyplot as pyplot\n'), ((21889, 21947), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""$ \\\\alpha_{0}^{SC-BB-BB-SC} $ ( Degrees )"""'], {}), "('$ \\\\alpha_{0}^{SC-BB-BB-SC} $ ( Degrees )')\n", (21902, 21947), True, 'import matplotlib.pyplot as pyplot\n'), ((21952, 22013), 'matplotlib.pyplot.title', 'pyplot.title', (['"""dQ (Change in native contacts during folding)"""'], {}), "('dQ (Change in native contacts during folding)')\n", (21964, 22013), True, 'import matplotlib.pyplot as pyplot\n'), ((22018, 22044), 'matplotlib.pyplot.pcolormesh', 'pyplot.pcolormesh', (['X', 'Y', 'Z'], {}), '(X, Y, Z)\n', (22035, 22044), True, 'import matplotlib.pyplot as pyplot\n'), ((22049, 22066), 'matplotlib.pyplot.colorbar', 'pyplot.colorbar', ([], {}), '()\n', (22064, 22066), True, 'import matplotlib.pyplot as pyplot\n'), ((22071, 22096), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['file_name'], {}), '(file_name)\n', (22085, 22096), True, 'import matplotlib.pyplot as pyplot\n'), ((22101, 22114), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (22112, 22114), True, 'import matplotlib.pyplot as pyplot\n'), ((22119, 22133), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (22131, 22133), True, 'import matplotlib.pyplot as pyplot\n'), ((22229, 22245), 'matplotlib.pyplot.figure', 'pyplot.figure', (['(1)'], {}), '(1)\n', (22242, 22245), True, 'import matplotlib.pyplot as pyplot\n'), ((22396, 22454), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""$ \\\\alpha_{0}^{BB-BB-BB-BB} $ ( Degrees )"""'], {}), "('$ \\\\alpha_{0}^{BB-BB-BB-BB} $ ( Degrees )')\n", (22409, 22454), True, 'import matplotlib.pyplot as pyplot\n'), ((22459, 22486), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""$\\\\Delta$Q"""'], {}), "('$\\\\Delta$Q')\n", (22472, 22486), True, 'import matplotlib.pyplot as pyplot\n'), ((22491, 22560), 'matplotlib.pyplot.title', 'pyplot.title', (['"""$\\\\Delta$Q (Change in native contacts) during folding"""'], {}), "('$\\\\Delta$Q (Change in native contacts) during folding')\n", (22503, 22560), True, 'import matplotlib.pyplot as pyplot\n'), ((22565, 22582), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {}), '(x, y)\n', (22576, 22582), True, 'import matplotlib.pyplot as pyplot\n'), ((22587, 22612), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['file_name'], {}), '(file_name)\n', (22601, 22612), True, 'import matplotlib.pyplot as pyplot\n'), ((22617, 22630), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (22628, 22630), True, 'import matplotlib.pyplot as pyplot\n'), ((22635, 22649), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (22647, 22649), True, 'import matplotlib.pyplot as pyplot\n'), ((22767, 22783), 'matplotlib.pyplot.figure', 'pyplot.figure', (['(1)'], {}), '(1)\n', (22780, 22783), True, 'import matplotlib.pyplot as pyplot\n'), ((22996, 23052), 'numpy.array', 'np.array', (['[temperature for temperature in new_temp_list]'], {}), '([temperature for temperature in new_temp_list])\n', (23004, 23052), True, 'import numpy as np\n'), ((23386, 23423), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Temperature (Kelvin)"""'], {}), "('Temperature (Kelvin)')\n", (23399, 23423), True, 'import matplotlib.pyplot as pyplot\n'), ((23428, 23496), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Dimensionless free energy differences 
$\\\\mathit{F}$"""'], {}), "('Dimensionless free energy differences $\\\\mathit{F}$')\n", (23441, 23496), True, 'import matplotlib.pyplot as pyplot\n'), ((23501, 23571), 'matplotlib.pyplot.title', 'pyplot.title', (['"""$\\\\mathit{F}$ for variable $\\\\alpha_{0}^{BB-BB-BB-BB}$"""'], {}), "('$\\\\mathit{F}$ for variable $\\\\alpha_{0}^{BB-BB-BB-BB}$')\n", (23513, 23571), True, 'import matplotlib.pyplot as pyplot\n'), ((23575, 23603), 'matplotlib.pyplot.legend', 'pyplot.legend', (['legend_labels'], {}), '(legend_labels)\n', (23588, 23603), True, 'import matplotlib.pyplot as pyplot\n'), ((23608, 23633), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['file_name'], {}), '(file_name)\n', (23622, 23633), True, 'import matplotlib.pyplot as pyplot\n'), ((23638, 23651), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (23649, 23651), True, 'import matplotlib.pyplot as pyplot\n'), ((23656, 23670), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (23668, 23670), True, 'import matplotlib.pyplot as pyplot\n'), ((23756, 23772), 'matplotlib.pyplot.figure', 'pyplot.figure', (['(1)'], {}), '(1)\n', (23769, 23772), True, 'import matplotlib.pyplot as pyplot\n'), ((23985, 24041), 'numpy.array', 'np.array', (['[temperature for temperature in new_temp_list]'], {}), '([temperature for temperature in new_temp_list])\n', (23993, 24041), True, 'import numpy as np\n'), ((24291, 24328), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Temperature (Kelvin)"""'], {}), "('Temperature (Kelvin)')\n", (24304, 24328), True, 'import matplotlib.pyplot as pyplot\n'), ((24333, 24382), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Entropy differences ($\\\\Delta$S)"""'], {}), "('Entropy differences ($\\\\Delta$S)')\n", (24346, 24382), True, 'import matplotlib.pyplot as pyplot\n'), ((24386, 24450), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Entropy for variable $\\\\alpha_{0}^{BB-BB-BB-BB}$"""'], {}), "('Entropy for variable $\\\\alpha_{0}^{BB-BB-BB-BB}$')\n", (24398, 24450), True, 'import matplotlib.pyplot as pyplot\n'), ((24455, 24483), 'matplotlib.pyplot.legend', 'pyplot.legend', (['legend_labels'], {}), '(legend_labels)\n', (24468, 24483), True, 'import matplotlib.pyplot as pyplot\n'), ((24488, 24513), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['file_name'], {}), '(file_name)\n', (24502, 24513), True, 'import matplotlib.pyplot as pyplot\n'), ((24518, 24531), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (24529, 24531), True, 'import matplotlib.pyplot as pyplot\n'), ((24536, 24550), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (24548, 24550), True, 'import matplotlib.pyplot as pyplot\n'), ((24667, 24683), 'matplotlib.pyplot.figure', 'pyplot.figure', (['(1)'], {}), '(1)\n', (24680, 24683), True, 'import matplotlib.pyplot as pyplot\n'), ((24896, 24952), 'numpy.array', 'np.array', (['[temperature for temperature in new_temp_list]'], {}), '([temperature for temperature in new_temp_list])\n', (24904, 24952), True, 'import numpy as np\n'), ((25261, 25300), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Temperature ( Kelvin )"""'], {}), "('Temperature ( Kelvin )')\n", (25274, 25300), True, 'import matplotlib.pyplot as pyplot\n'), ((25305, 25351), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""C$_{v}$ ( kcal/mol * Kelvin )"""'], {}), "('C$_{v}$ ( kcal/mol * Kelvin )')\n", (25318, 25351), True, 'import matplotlib.pyplot as pyplot\n'), ((25357, 25411), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Heat capacity for variable $\\\\epsilon$"""'], {}), "('Heat 
capacity for variable $\\\\epsilon$')\n", (25369, 25411), True, 'import matplotlib.pyplot as pyplot\n'), ((25416, 25444), 'matplotlib.pyplot.legend', 'pyplot.legend', (['legend_labels'], {}), '(legend_labels)\n', (25429, 25444), True, 'import matplotlib.pyplot as pyplot\n'), ((25449, 25474), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['file_name'], {}), '(file_name)\n', (25463, 25474), True, 'import matplotlib.pyplot as pyplot\n'), ((25479, 25492), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (25490, 25492), True, 'import matplotlib.pyplot as pyplot\n'), ((25497, 25511), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (25509, 25511), True, 'import matplotlib.pyplot as pyplot\n'), ((3947, 3967), 'simtk.openmm.app.pdbfile.PDBFile', 'PDBFile', (['"""helix.pdb"""'], {}), "('helix.pdb')\n", (3954, 3967), False, 'from simtk.openmm.app.pdbfile import PDBFile\n'), ((8023, 8816), 'foldamers.cg_model.cgmodel.CGModel', 'CGModel', ([], {'polymer_length': 'polymer_length', 'backbone_lengths': 'backbone_lengths', 'sidechain_lengths': 'sidechain_lengths', 'sidechain_positions': 'sidechain_positions', 'masses': 'masses', 'sigmas': 'sigmas', 'epsilons': 'epsilons', 'bond_lengths': 'bond_lengths', 'bond_force_constants': 'bond_force_constants', 'bond_angle_force_constants': 'bond_angle_force_constants', 'torsion_force_constants': 'torsion_force_constants', 'equil_bond_angles': 'equil_bond_angles', 'equil_torsion_angles': 'equil_torsion_angles', 'torsion_periodicities': 'torsion_periodicities', 'include_nonbonded_forces': 'include_nonbonded_forces', 'include_bond_forces': 'include_bond_forces', 'include_bond_angle_forces': 'include_bond_angle_forces', 'include_torsion_forces': 'include_torsion_forces', 'constrain_bonds': 'constrain_bonds', 'positions': 'positions'}), '(polymer_length=polymer_length, backbone_lengths=backbone_lengths,\n sidechain_lengths=sidechain_lengths, sidechain_positions=\n sidechain_positions, masses=masses, sigmas=sigmas, epsilons=epsilons,\n bond_lengths=bond_lengths, bond_force_constants=bond_force_constants,\n bond_angle_force_constants=bond_angle_force_constants,\n torsion_force_constants=torsion_force_constants, equil_bond_angles=\n equil_bond_angles, equil_torsion_angles=equil_torsion_angles,\n torsion_periodicities=torsion_periodicities, include_nonbonded_forces=\n include_nonbonded_forces, include_bond_forces=include_bond_forces,\n include_bond_angle_forces=include_bond_angle_forces,\n include_torsion_forces=include_torsion_forces, constrain_bonds=\n constrain_bonds, positions=positions)\n', (8030, 8816), False, 'from foldamers.cg_model.cgmodel import CGModel\n'), ((10174, 10199), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (10188, 10199), False, 'import os\n'), ((23277, 23309), 'matplotlib.pyplot.plot', 'pyplot.plot', (['temperatures', 'df_ij'], {}), '(temperatures, df_ij)\n', (23288, 23309), True, 'import matplotlib.pyplot as pyplot\n'), ((24180, 24214), 'matplotlib.pyplot.plot', 'pyplot.plot', (['temperatures', 'delta_s'], {}), '(temperatures, delta_s)\n', (24191, 24214), True, 'import matplotlib.pyplot as pyplot\n'), ((25141, 25234), 'matplotlib.pyplot.errorbar', 'pyplot.errorbar', (['temperatures', 'C_v'], {'yerr': 'dC_v', 'figure': 'figure', 'label': 'legend_labels[index]'}), '(temperatures, C_v, yerr=dC_v, figure=figure, label=\n legend_labels[index])\n', (25156, 25234), True, 'import matplotlib.pyplot as pyplot\n'), ((12636, 12700), 'simtk.openmm.app.pdbfile.PDBFile.writeFile', 'PDBFile.writeFile', 
(['cgmodel.topology', 'native_structure'], {'file': 'file'}), '(cgmodel.topology, native_structure, file=file)\n', (12653, 12700), False, 'from simtk.openmm.app.pdbfile import PDBFile\n'), ((16996, 17040), 'os.path.exists', 'os.path.exists', (['nonnative_ensemble_directory'], {}), '(nonnative_ensemble_directory)\n', (17010, 17040), False, 'import os\n'), ((18316, 18359), 'numpy.array', 'np.array', (['[Q for Q in nonnative_ensemble_Q]'], {}), '([Q for Q in nonnative_ensemble_Q])\n', (18324, 18359), True, 'import numpy as np\n'), ((18398, 18424), 'statistics.mean', 'mean', (['nonnative_ensemble_Q'], {}), '(nonnative_ensemble_Q)\n', (18402, 18424), False, 'from statistics import mean\n'), ((18719, 18760), 'os.path.exists', 'os.path.exists', (['native_ensemble_directory'], {}), '(native_ensemble_directory)\n', (18733, 18760), False, 'import os\n'), ((7969, 7989), 'simtk.openmm.app.pdbfile.PDBFile', 'PDBFile', (['"""helix.pdb"""'], {}), "('helix.pdb')\n", (7976, 7989), False, 'from simtk.openmm.app.pdbfile import PDBFile\n'), ((17844, 17882), 'os.mkdir', 'os.mkdir', (['nonnative_ensemble_directory'], {}), '(nonnative_ensemble_directory)\n', (17852, 17882), False, 'import os\n'), ((19549, 19584), 'os.mkdir', 'os.mkdir', (['native_ensemble_directory'], {}), '(native_ensemble_directory)\n', (19557, 19584), False, 'import os\n'), ((11283, 11301), 'simtk.openmm.app.pdbfile.PDBFile', 'PDBFile', (['file_name'], {}), '(file_name)\n', (11290, 11301), False, 'from simtk.openmm.app.pdbfile import PDBFile\n')]
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tensorflow_graphics.datasets.features.camera_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_graphics.datasets.features import camera_feature
class CameraFeatureTest(tfds.testing.FeatureExpectationsTestCase):
"""Test Cases for Camera FeatureConnector."""
def __get_camera_params(self):
pose = {'R': np.eye(3).astype(np.float32),
't': np.zeros(3).astype(np.float32)}
f = 35.
optical_center = (640 / 2, 480 / 2)
return pose, f, optical_center
def test_simple_camera(self):
"""Tests camera parameters with fixed focal length, no skew and no aspect ratio."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_intrinsics = np.asarray([[expected_f, 0, expected_center[0]],
[0, expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
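    # With the parameters above this evaluates to (illustrative):
    #   [[35.,  0., 320.],
    #    [ 0., 35., 240.],
    #    [ 0.,  0.,   1.]]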
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f, 'optical_center': expected_center,
'pose': expected_pose}
lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'look_at': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'position': np.array([0, 0, 0], dtype=np.float32)
}
}
raising_pose_entry = {
'f': expected_f,
'optical_center': expected_center,
'pose': np.eye(4)
}
raising_pose_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {'rot': np.eye(3), 'trans': np.zeros(3)}
}
raising_lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'l': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'C': np.array([0, 0, 0], dtype=np.float32)
}
}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=lookat_inputs,
expected=expected_camera
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_lookat_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_entry,
raise_cls=ValueError,
raise_msg='Pose needs to be a dictionary'
),
],
)
def test_camera_with_aspect_ratio_and_skew(self):
"""Tests camera parameters with fixed focal length, aspect_ratio and skew."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_aspect_ratio = expected_center[0] / expected_center[1]
expected_skew = 0.6
expected_intrinsics = np.asarray(
[[expected_f, expected_skew, expected_center[0]],
[0, expected_aspect_ratio * expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_center,
'skew': expected_skew,
'aspect_ratio': expected_aspect_ratio,
'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
],
)
def test_full_camera_calibration_matrix(self):
"""Tests camera parameters with different focal length per camera axis and skew."""
expected_pose, _, expected_optical_center = self.__get_camera_params()
expected_skew = 0.6
expected_f = (35., 40.)
expected_intrinsics = np.array(
[[expected_f[0], expected_skew, expected_optical_center[0]],
[0, expected_f[1], expected_optical_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
raising_inputs = {'f': expected_f,
'aspect_ratio': 1.5,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=raising_inputs,
raise_cls=ValueError,
raise_msg='If aspect ratio is provided, f needs to '
'be a single float',
),
],
)
if __name__ == '__main__':
tfds.testing.test_main()
|
[
"numpy.asarray",
"numpy.zeros",
"tensorflow_graphics.datasets.features.camera_feature.Camera",
"tensorflow_datasets.testing.test_main",
"numpy.array",
"tensorflow_datasets.testing.FeatureExpectationItem",
"numpy.eye"
] |
[((7184, 7208), 'tensorflow_datasets.testing.test_main', 'tfds.testing.test_main', ([], {}), '()\n', (7206, 7208), True, 'import tensorflow_datasets as tfds\n'), ((1490, 1609), 'numpy.asarray', 'np.asarray', (['[[expected_f, 0, expected_center[0]], [0, expected_f, expected_center[1]],\n [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[expected_f, 0, expected_center[0]], [0, expected_f,\n expected_center[1]], [0, 0, 1]], dtype=np.float32)\n', (1500, 1609), True, 'import numpy as np\n'), ((4411, 4571), 'numpy.asarray', 'np.asarray', (['[[expected_f, expected_skew, expected_center[0]], [0, expected_aspect_ratio *\n expected_f, expected_center[1]], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[expected_f, expected_skew, expected_center[0]], [0, \n expected_aspect_ratio * expected_f, expected_center[1]], [0, 0, 1]],\n dtype=np.float32)\n', (4421, 4571), True, 'import numpy as np\n'), ((5724, 5875), 'numpy.array', 'np.array', (['[[expected_f[0], expected_skew, expected_optical_center[0]], [0, expected_f\n [1], expected_optical_center[1]], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[expected_f[0], expected_skew, expected_optical_center[0]], [0,\n expected_f[1], expected_optical_center[1]], [0, 0, 1]], dtype=np.float32)\n', (5732, 5875), True, 'import numpy as np\n'), ((2285, 2294), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2291, 2294), True, 'import numpy as np\n'), ((1998, 2036), 'numpy.array', 'np.array', (['[0, 0, -1]'], {'dtype': 'np.float32'}), '([0, 0, -1], dtype=np.float32)\n', (2006, 2036), True, 'import numpy as np\n'), ((2056, 2093), 'numpy.array', 'np.array', (['[0, 1, 0]'], {'dtype': 'np.float32'}), '([0, 1, 0], dtype=np.float32)\n', (2064, 2093), True, 'import numpy as np\n'), ((2119, 2156), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.float32'}), '([0, 0, 0], dtype=np.float32)\n', (2127, 2156), True, 'import numpy as np\n'), ((2422, 2431), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2428, 2431), True, 'import numpy as np\n'), ((2442, 2453), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2450, 2453), True, 'import numpy as np\n'), ((2594, 2632), 'numpy.array', 'np.array', (['[0, 0, -1]'], {'dtype': 'np.float32'}), '([0, 0, -1], dtype=np.float32)\n', (2602, 2632), True, 'import numpy as np\n'), ((2652, 2689), 'numpy.array', 'np.array', (['[0, 1, 0]'], {'dtype': 'np.float32'}), '([0, 1, 0], dtype=np.float32)\n', (2660, 2689), True, 'import numpy as np\n'), ((2708, 2745), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.float32'}), '([0, 0, 0], dtype=np.float32)\n', (2716, 2745), True, 'import numpy as np\n'), ((2803, 2826), 'tensorflow_graphics.datasets.features.camera_feature.Camera', 'camera_feature.Camera', ([], {}), '()\n', (2824, 2826), False, 'from tensorflow_graphics.datasets.features import camera_feature\n'), ((4920, 4943), 'tensorflow_graphics.datasets.features.camera_feature.Camera', 'camera_feature.Camera', ([], {}), '()\n', (4941, 4943), False, 'from tensorflow_graphics.datasets.features import camera_feature\n'), ((6385, 6408), 'tensorflow_graphics.datasets.features.camera_feature.Camera', 'camera_feature.Camera', ([], {}), '()\n', (6406, 6408), False, 'from tensorflow_graphics.datasets.features import camera_feature\n'), ((1098, 1107), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1104, 1107), True, 'import numpy as np\n'), ((1145, 1156), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1153, 1156), True, 'import numpy as np\n'), ((3174, 3249), 'tensorflow_datasets.testing.FeatureExpectationItem', 'tfds.testing.FeatureExpectationItem', 
([], {'value': 'inputs', 'expected': 'expected_camera'}), '(value=inputs, expected=expected_camera)\n', (3209, 3249), True, 'import tensorflow_datasets as tfds\n'), ((3310, 3397), 'tensorflow_datasets.testing.FeatureExpectationItem', 'tfds.testing.FeatureExpectationItem', ([], {'value': 'lookat_inputs', 'expected': 'expected_camera'}), '(value=lookat_inputs, expected=\n expected_camera)\n', (3345, 3397), True, 'import tensorflow_datasets as tfds\n'), ((3452, 3591), 'tensorflow_datasets.testing.FeatureExpectationItem', 'tfds.testing.FeatureExpectationItem', ([], {'value': 'raising_pose_inputs', 'raise_cls': 'ValueError', 'raise_msg': '"""Wrong keys for pose feature provided"""'}), "(value=raising_pose_inputs, raise_cls=\n ValueError, raise_msg='Wrong keys for pose feature provided')\n", (3487, 3591), True, 'import tensorflow_datasets as tfds\n'), ((3662, 3803), 'tensorflow_datasets.testing.FeatureExpectationItem', 'tfds.testing.FeatureExpectationItem', ([], {'value': 'raising_lookat_inputs', 'raise_cls': 'ValueError', 'raise_msg': '"""Wrong keys for pose feature provided"""'}), "(value=raising_lookat_inputs, raise_cls=\n ValueError, raise_msg='Wrong keys for pose feature provided')\n", (3697, 3803), True, 'import tensorflow_datasets as tfds\n'), ((3874, 4005), 'tensorflow_datasets.testing.FeatureExpectationItem', 'tfds.testing.FeatureExpectationItem', ([], {'value': 'raising_pose_entry', 'raise_cls': 'ValueError', 'raise_msg': '"""Pose needs to be a dictionary"""'}), "(value=raising_pose_entry, raise_cls=\n ValueError, raise_msg='Pose needs to be a dictionary')\n", (3909, 4005), True, 'import tensorflow_datasets as tfds\n'), ((5291, 5366), 'tensorflow_datasets.testing.FeatureExpectationItem', 'tfds.testing.FeatureExpectationItem', ([], {'value': 'inputs', 'expected': 'expected_camera'}), '(value=inputs, expected=expected_camera)\n', (5326, 5366), True, 'import tensorflow_datasets as tfds\n'), ((6756, 6831), 'tensorflow_datasets.testing.FeatureExpectationItem', 'tfds.testing.FeatureExpectationItem', ([], {'value': 'inputs', 'expected': 'expected_camera'}), '(value=inputs, expected=expected_camera)\n', (6791, 6831), True, 'import tensorflow_datasets as tfds\n'), ((6892, 7052), 'tensorflow_datasets.testing.FeatureExpectationItem', 'tfds.testing.FeatureExpectationItem', ([], {'value': 'raising_inputs', 'raise_cls': 'ValueError', 'raise_msg': '"""If aspect ratio is provided, f needs to be a single float"""'}), "(value=raising_inputs, raise_cls=\n ValueError, raise_msg=\n 'If aspect ratio is provided, f needs to be a single float')\n", (6927, 7052), True, 'import tensorflow_datasets as tfds\n')]
|
import ray
from ray import serve
import requests
import os
import pickle
import numpy as np
import asyncio
# Models locations
RANDOM_FOREST_MODEL_PATH = os.path.join("wine-quality_random_forest.pkl")
XGBOOST_MODEL_PATH = os.path.join("wine-quality_xgboost.pkl")
GRBOOST_MODEL_PATH = os.path.join("wine-quality_grboost.pkl")
# Start Ray
ray.init()
# Start Serve
serve.start()
# Define deployments
@serve.deployment(route_prefix="/randomforest")
class RandomForestModel:
def __init__(self, path):
with open(path, "rb") as f:
self.model = pickle.load(f)
async def __call__(self, request):
payload = await request.json()
return self.serve(payload)
def serve(self, request):
input_vector = [
request["fixed acidity"],
request["volatile acidity"],
request["citric acid"],
request["residual sugar"],
request["chlorides"],
request["free sulfur dioxide"],
request["total sulfur dioxide"],
request["density"],
request["pH"],
request["sulphates"],
request["alcohol"],
]
prediction = self.model.predict([input_vector])[0]
return {"result": str(prediction)}
@serve.deployment(route_prefix="/grboost")
class GRBoostModel:
def __init__(self, path):
with open(path, "rb") as f:
self.model = pickle.load(f)
async def __call__(self, request):
payload = await request.json()
return self.serve(payload)
def serve(self, request):
input_vector = np.array([
request["fixed acidity"],
request["volatile acidity"],
request["citric acid"],
request["residual sugar"],
request["chlorides"],
request["free sulfur dioxide"],
request["total sulfur dioxide"],
request["density"],
request["pH"],
request["sulphates"],
request["alcohol"],
])
prediction = self.model.predict(input_vector.reshape(1,11))[0]
return {"result": str(prediction)}
@serve.deployment(route_prefix="/xgboost")
class XGBoostModel:
def __init__(self, path):
with open(path, "rb") as f:
self.model = pickle.load(f)
async def __call__(self, request):
payload = await request.json()
return self.serve(payload)
def serve(self, request):
input_vector = np.array([
request["fixed acidity"],
request["volatile acidity"],
request["citric acid"],
request["residual sugar"],
request["chlorides"],
request["free sulfur dioxide"],
request["total sulfur dioxide"],
request["density"],
request["pH"],
request["sulphates"],
request["alcohol"],
])
prediction = self.model.predict(input_vector.reshape(1,11))[0]
return {"result": str(prediction)}
RandomForestModel.deploy(RANDOM_FOREST_MODEL_PATH)
XGBoostModel.deploy(XGBOOST_MODEL_PATH)
GRBoostModel.deploy(GRBOOST_MODEL_PATH)
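# Speculative (ensemble) deployment: queries all three models concurrently
# and returns the majority vote of their binary predictions.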
@serve.deployment(route_prefix="/speculative")
class Speculative:
def __init__(self):
self.rfhandle = RandomForestModel.get_handle(sync=False)
self.xgboosthandle = XGBoostModel.get_handle(sync=False)
self.grboosthandle = GRBoostModel.get_handle(sync=False)
async def __call__(self, request):
payload = await request.json()
f1, f2, f3 = await asyncio.gather(self.rfhandle.serve.remote(payload),
self.xgboosthandle.serve.remote(payload), self.grboosthandle.serve.remote(payload))
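        # Resolve the three futures and read each model's binary prediction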
        rf_result = ray.get(f1)['result']
        xg_result = ray.get(f2)['result']
        gr_result = ray.get(f3)['result']
        ones = []
        zeros = []
        if rf_result == "1":
            ones.append("Random forest")
        else:
            zeros.append("Random forest")
        if xg_result == "1":
            ones.append("XGBoost")
        else:
            zeros.append("XGBoost")
        if gr_result == "1":
ones.append("Gradient boost")
else:
zeros.append("Gradient boost")
if len(ones) >= 2:
return {"result": "1", "methods": ones}
else:
return {"result": "0", "methods": zeros}
Speculative.deploy()
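# Example request payload: a single wine-quality feature vector (values appear standardized)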
sample_request_input = {
"fixed acidity": -0.70071875,
"volatile acidity": 0.34736425,
"citric acid": -1.34012182,
"residual sugar": -0.16942723,
"chlorides": -0.1586918,
"free sulfur dioxide": 1.06389977,
"total sulfur dioxide": -0.10545198,
"density": -0.66075704,
"pH": 0.70550789,
"sulphates": -0.46118037,
"alcohol": 0.26002813,
}
print(requests.get("http://localhost:8000/randomforest", json=sample_request_input).text)
print(requests.get("http://localhost:8000/grboost", json=sample_request_input).text)
print(requests.get("http://localhost:8000/xgboost", json=sample_request_input).text)
print(requests.get("http://localhost:8000/speculative", json=sample_request_input).text)
|
[
"ray.init",
"ray.serve.deployment",
"ray.get",
"pickle.load",
"numpy.array",
"requests.get",
"ray.serve.start",
"os.path.join"
] |
[((155, 201), 'os.path.join', 'os.path.join', (['"""wine-quality_random_forest.pkl"""'], {}), "('wine-quality_random_forest.pkl')\n", (167, 201), False, 'import os\n'), ((223, 263), 'os.path.join', 'os.path.join', (['"""wine-quality_xgboost.pkl"""'], {}), "('wine-quality_xgboost.pkl')\n", (235, 263), False, 'import os\n'), ((285, 325), 'os.path.join', 'os.path.join', (['"""wine-quality_grboost.pkl"""'], {}), "('wine-quality_grboost.pkl')\n", (297, 325), False, 'import os\n'), ((339, 349), 'ray.init', 'ray.init', ([], {}), '()\n', (347, 349), False, 'import ray\n'), ((365, 378), 'ray.serve.start', 'serve.start', ([], {}), '()\n', (376, 378), False, 'from ray import serve\n'), ((400, 446), 'ray.serve.deployment', 'serve.deployment', ([], {'route_prefix': '"""/randomforest"""'}), "(route_prefix='/randomforest')\n", (416, 446), False, 'from ray import serve\n'), ((1264, 1305), 'ray.serve.deployment', 'serve.deployment', ([], {'route_prefix': '"""/grboost"""'}), "(route_prefix='/grboost')\n", (1280, 1305), False, 'from ray import serve\n'), ((2140, 2181), 'ray.serve.deployment', 'serve.deployment', ([], {'route_prefix': '"""/xgboost"""'}), "(route_prefix='/xgboost')\n", (2156, 2181), False, 'from ray import serve\n'), ((3148, 3193), 'ray.serve.deployment', 'serve.deployment', ([], {'route_prefix': '"""/speculative"""'}), "(route_prefix='/speculative')\n", (3164, 3193), False, 'from ray import serve\n'), ((1600, 1898), 'numpy.array', 'np.array', (["[request['fixed acidity'], request['volatile acidity'], request[\n 'citric acid'], request['residual sugar'], request['chlorides'],\n request['free sulfur dioxide'], request['total sulfur dioxide'],\n request['density'], request['pH'], request['sulphates'], request['alcohol']\n ]"], {}), "([request['fixed acidity'], request['volatile acidity'], request[\n 'citric acid'], request['residual sugar'], request['chlorides'],\n request['free sulfur dioxide'], request['total sulfur dioxide'],\n request['density'], request['pH'], request['sulphates'], request[\n 'alcohol']])\n", (1608, 1898), True, 'import numpy as np\n'), ((2476, 2774), 'numpy.array', 'np.array', (["[request['fixed acidity'], request['volatile acidity'], request[\n 'citric acid'], request['residual sugar'], request['chlorides'],\n request['free sulfur dioxide'], request['total sulfur dioxide'],\n request['density'], request['pH'], request['sulphates'], request['alcohol']\n ]"], {}), "([request['fixed acidity'], request['volatile acidity'], request[\n 'citric acid'], request['residual sugar'], request['chlorides'],\n request['free sulfur dioxide'], request['total sulfur dioxide'],\n request['density'], request['pH'], request['sulphates'], request[\n 'alcohol']])\n", (2484, 2774), True, 'import numpy as np\n'), ((4777, 4854), 'requests.get', 'requests.get', (['"""http://localhost:8000/randomforest"""'], {'json': 'sample_request_input'}), "('http://localhost:8000/randomforest', json=sample_request_input)\n", (4789, 4854), False, 'import requests\n'), ((4867, 4939), 'requests.get', 'requests.get', (['"""http://localhost:8000/grboost"""'], {'json': 'sample_request_input'}), "('http://localhost:8000/grboost', json=sample_request_input)\n", (4879, 4939), False, 'import requests\n'), ((4952, 5024), 'requests.get', 'requests.get', (['"""http://localhost:8000/xgboost"""'], {'json': 'sample_request_input'}), "('http://localhost:8000/xgboost', json=sample_request_input)\n", (4964, 5024), False, 'import requests\n'), ((5037, 5113), 'requests.get', 'requests.get', 
(['"""http://localhost:8000/speculative"""'], {'json': 'sample_request_input'}), "('http://localhost:8000/speculative', json=sample_request_input)\n", (5049, 5113), False, 'import requests\n'), ((563, 577), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (574, 577), False, 'import pickle\n'), ((1417, 1431), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1428, 1431), False, 'import pickle\n'), ((2293, 2307), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2304, 2307), False, 'import pickle\n'), ((3710, 3721), 'ray.get', 'ray.get', (['f1'], {}), '(f1)\n', (3717, 3721), False, 'import ray\n'), ((3752, 3763), 'ray.get', 'ray.get', (['f2'], {}), '(f2)\n', (3759, 3763), False, 'import ray\n'), ((3793, 3804), 'ray.get', 'ray.get', (['f3'], {}), '(f3)\n', (3800, 3804), False, 'import ray\n')]
|
import keras
from keras.layers import Activation
from keras.models import load_model
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
import tensorflow as tf
import numpy as np
import pandas as pd
import timeit
import sys
import argparse
# Constants
#window_size = 1024
def windowNoOverlay(data, window_size): # Without overlay
windowed_data = []
i = 0
while(i + window_size-1 < len(data)):
windowed_data.append(data[i:(i+window_size)])
i += window_size
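    # If samples remain, add one last window aligned to the end of the data
    # (this final window may overlap the previous one).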
if (i != len(data)):
i = len(data) - window_size
windowed_data.append(data[i:len(data)]) # add the rest
return windowed_data
def parser_args(cmd_args):
parser = argparse.ArgumentParser(sys.argv[0], description="", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-e", "--exp", type=str, action="store", default="pairwise_distances", help="Experiment")
parser.add_argument("-d", "--dataset", type=str, action="store", default="PigArtPressure", help="Dataset name")
return parser.parse_args(cmd_args)
# obtaining arguments from command line
args = parser_args(sys.argv[1:])
dataset = args.dataset
exp = args.exp
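# Swish activation: f(x) = x * sigmoid(beta * x), registered below as a custom Keras activation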
def swish(x, beta = 1):
return (x * K.sigmoid(beta * x))
get_custom_objects().update({'Swish': Activation(swish)})
# Swish Activation
#class Swish(Activation):
# def __init__(self, activation, **kwargs):
# super(Swish, self).__init__(activation, **kwargs)
# self.__name__ = 'swish'
#def swish(x):
# return (K.sigmoid(x) * x)
#get_custom_objects().update({'swish': Swish(swish)})
encoder = load_model('../models/' + exp + '/new_train/' + 'encoder_' + dataset + ".h5", compile = False)
if (exp == "pairwise_distances"):
data = np.genfromtxt('../data/' + exp + '/' + dataset + '.txt', delimiter=' ',)
print("Data shape:", data.shape)
elif (exp == "similarity_search"):
data = np.genfromtxt('../data/' + exp + '/' + dataset + '/' + 'Data.txt', delimiter=' ',)
print("Data shape:", data.shape)
print("Encoding the queries as well")
for i in range(1, 6):
query = np.genfromtxt('../data/' + exp + '/' + dataset + '/' + 'Query' + str(i) + '.txt', delimiter=' ',)
query.shape = 1, query.shape[0], 1
query = encoder.predict(query)
query.shape = query.shape[1]
np.savetxt('../data/' + exp + '/' + dataset + '/coded_data/Query' + str (i) + '.txt', query)
del query
else:
data = np.genfromtxt('../data/' + exp + '/' + dataset + '/' + dataset + '_test.txt', delimiter=' ',)
print("Data shape:", data.shape)
# Getting rid of the NaNs and infs with interpolation
if (len(data.shape) == 1):
data = np.array(pd.Series(data).interpolate())
serie_length = 1024
# 'Windowing'
data = np.array(windowNoOverlay(data, serie_length))
print("Window Data shape:", data.shape)
else:
serie_length = data.shape[1]
print("Serie length:", serie_length)
data.shape = data.shape[0], serie_length, 1
# Warm-up call so that library loading does not count towards the timed run;
# in production these libraries would already be loaded
coded_data = encoder.predict(data)
start = timeit.default_timer()
coded_data = encoder.predict(data)
print("Coded Data shape:", coded_data.shape)
stop = timeit.default_timer()
print("Time to code the serie:", stop - start)
coded_data.shape = coded_data.shape[0], coded_data.shape[1]
if (exp == "similarity_search"):
np.savetxt('../data/' + exp + '/' + dataset + '/coded_data/' + 'Data.txt', coded_data)
elif(exp == "pairwise_distances"):
np.savetxt('../data/' + exp + '/coded_data/' + dataset + '_coded.txt', coded_data)
else:
np.savetxt('../data/' + exp + '/' + dataset + '/' + dataset + '_coded.txt', coded_data)
|
[
"keras.models.load_model",
"argparse.ArgumentParser",
"keras.layers.Activation",
"timeit.default_timer",
"numpy.savetxt",
"numpy.genfromtxt",
"keras.utils.generic_utils.get_custom_objects",
"pandas.Series",
"keras.backend.sigmoid"
] |
[((1566, 1662), 'keras.models.load_model', 'load_model', (["('../models/' + exp + '/new_train/' + 'encoder_' + dataset + '.h5')"], {'compile': '(False)'}), "('../models/' + exp + '/new_train/' + 'encoder_' + dataset +\n '.h5', compile=False)\n", (1576, 1662), False, 'from keras.models import load_model\n'), ((3046, 3068), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3066, 3068), False, 'import timeit\n'), ((3158, 3180), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3178, 3180), False, 'import timeit\n'), ((681, 794), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['sys.argv[0]'], {'description': '""""""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(sys.argv[0], description='', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (704, 794), False, 'import argparse\n'), ((1704, 1775), 'numpy.genfromtxt', 'np.genfromtxt', (["('../data/' + exp + '/' + dataset + '.txt')"], {'delimiter': '""" """'}), "('../data/' + exp + '/' + dataset + '.txt', delimiter=' ')\n", (1717, 1775), True, 'import numpy as np\n'), ((3324, 3414), 'numpy.savetxt', 'np.savetxt', (["('../data/' + exp + '/' + dataset + '/coded_data/' + 'Data.txt')", 'coded_data'], {}), "('../data/' + exp + '/' + dataset + '/coded_data/' + 'Data.txt',\n coded_data)\n", (3334, 3414), True, 'import numpy as np\n'), ((1206, 1225), 'keras.backend.sigmoid', 'K.sigmoid', (['(beta * x)'], {}), '(beta * x)\n', (1215, 1225), True, 'from keras import backend as K\n'), ((1228, 1248), 'keras.utils.generic_utils.get_custom_objects', 'get_custom_objects', ([], {}), '()\n', (1246, 1248), False, 'from keras.utils.generic_utils import get_custom_objects\n'), ((1266, 1283), 'keras.layers.Activation', 'Activation', (['swish'], {}), '(swish)\n', (1276, 1283), False, 'from keras.layers import Activation\n'), ((1854, 1939), 'numpy.genfromtxt', 'np.genfromtxt', (["('../data/' + exp + '/' + dataset + '/' + 'Data.txt')"], {'delimiter': '""" """'}), "('../data/' + exp + '/' + dataset + '/' + 'Data.txt',\n delimiter=' ')\n", (1867, 1939), True, 'import numpy as np\n'), ((2365, 2461), 'numpy.genfromtxt', 'np.genfromtxt', (["('../data/' + exp + '/' + dataset + '/' + dataset + '_test.txt')"], {'delimiter': '""" """'}), "('../data/' + exp + '/' + dataset + '/' + dataset +\n '_test.txt', delimiter=' ')\n", (2378, 2461), True, 'import numpy as np\n'), ((3447, 3533), 'numpy.savetxt', 'np.savetxt', (["('../data/' + exp + '/coded_data/' + dataset + '_coded.txt')", 'coded_data'], {}), "('../data/' + exp + '/coded_data/' + dataset + '_coded.txt',\n coded_data)\n", (3457, 3533), True, 'import numpy as np\n'), ((3537, 3628), 'numpy.savetxt', 'np.savetxt', (["('../data/' + exp + '/' + dataset + '/' + dataset + '_coded.txt')", 'coded_data'], {}), "('../data/' + exp + '/' + dataset + '/' + dataset + '_coded.txt',\n coded_data)\n", (3547, 3628), True, 'import numpy as np\n'), ((2592, 2607), 'pandas.Series', 'pd.Series', (['data'], {}), '(data)\n', (2601, 2607), True, 'import pandas as pd\n')]
|
import numpy as np
from igp2 import AgentState, plot_map
from igp2.data import ScenarioConfig, InDScenario
from igp2.opendrive.map import Map
import matplotlib.pyplot as plt
from shapely.ops import unary_union
from grit.core.data_processing import get_episode_frames
from grit.core.feature_extraction import FeatureExtractor
from grit.occlusion_detection.occlusion_detection_geometry import OcclusionDetector2D
from grit.core.base import get_base_dir
def get_feature_extractor(episode_idx=1, scenario_name="bendplatz"):
scenario_map = Map.parse_from_opendrive(get_base_dir() + f"/scenarios/maps/{scenario_name}.xodr")
return FeatureExtractor(scenario_map, scenario_name, episode_idx)
def plot_occlusion(frame_id=153, episode_idx=1, *frame, plot_occlusions=True, all_vehicles=False,
scenario_name="bendplatz"):
feature_extractor = get_feature_extractor(episode_idx=episode_idx, scenario_name=scenario_name)
occlusions = feature_extractor.occlusions[frame_id]
scenario_config = ScenarioConfig.load(get_base_dir() + f"/scenarios/configs/{scenario_name}.json")
scenario = InDScenario(scenario_config)
episode = scenario.load_episode(feature_extractor.episode_idx)
    # Take a step every 25 recorded frames (1s).
    # episode_frames contains, for each second, the frame (states of all vehicles alive) at that moment.
episode_frames = get_episode_frames(episode, exclude_parked_cars=False, exclude_bicycles=True, step=25)
ego_id = list(occlusions.keys())[0]
ego_occlusions = occlusions[ego_id]
ego = episode_frames[frame_id][ego_id]
plot_map(feature_extractor.scenario_map, scenario_config=scenario_config, plot_buildings=True)
if plot_occlusions:
lane_occlusions_all = []
for road_occlusions in ego_occlusions:
for lane_occlusions in ego_occlusions[road_occlusions]:
lane_occlusion = ego_occlusions[road_occlusions][lane_occlusions]
if lane_occlusion is not None:
lane_occlusions_all.append(lane_occlusion)
OcclusionDetector2D.plot_area_from_list(lane_occlusions_all, color="r", alpha=0.5)
if all_vehicles:
for aid, state in episode_frames[frame_id].items():
plt.text(*state.position, aid)
plt.plot(*list(zip(*OcclusionDetector2D.get_box(state).boundary)), color="black")
if frame:
for aid, state in frame[0].items():
plt.text(*state.position, aid)
plt.plot(*list(zip(*OcclusionDetector2D.get_box(state).boundary)))
plt.plot(*list(zip(*OcclusionDetector2D.get_box(ego).boundary)))
def find_lane_at(point, scenario_name="bendplatz"):
scenario_map = Map.parse_from_opendrive(get_base_dir() + f"/scenarios/maps/{scenario_name}.xodr")
lanes = scenario_map.lanes_at(point)
for lane in lanes:
plot_map(scenario_map)
lane = scenario_map.get_lane(lane.parent_road.id, lane.id)
plt.plot(*list(zip(*[x for x in lane.midline.coords])))
plt.show()
def get_occlusions_and_ego(frame=153, episode_idx=1):
feature_extractor = get_feature_extractor(episode_idx)
occlusions = feature_extractor.occlusions[frame]
ego_id = list(occlusions.keys())[0]
ego_occlusions = occlusions[ego_id]
occlusions = []
for road_occlusions in ego_occlusions:
for lane_occlusions in ego_occlusions[road_occlusions]:
lane_occlusion = ego_occlusions[road_occlusions][lane_occlusions]
if lane_occlusion is not None:
occlusions.append(lane_occlusion)
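    # Merge the per-lane occlusion polygons into a single occluded-area geometry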
occlusions = unary_union(occlusions)
return ego_id, occlusions
def test_occluded_area_no_vehicle_in_oncoming_lanes():
mfe = get_feature_extractor()
lane_path = [mfe.scenario_map.get_lane(8, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego()
state0 = AgentState(time=0,
position=np.array((45.67, -46.72)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(45.67, -46.72)
)
state1 = AgentState(time=0,
position=np.array((62.88, -20.96)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-120)
)
state_ego = AgentState(time=0,
position=np.array((43.88, -44.25)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
frame = {ego_id: state_ego, 0: state0, 1: state1}
# plot_occlusion(153, 1, frame)
oncoming_vehicle_id, oncoming_vehicle_dist = mfe.oncoming_vehicle(0, lane_path, frame)
missing = mfe.is_oncoming_vehicle_missing(oncoming_vehicle_dist, lane_path, occlusions)
plt.show()
assert missing
def set_up_frame_ep3_frame100(third_agent_position, third_agent_heading):
"""
The third agent is the possible oncoming vehicle.
State 1 is the target vehicle.
"""
episode_idx = 3
frame_id = 100
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(1, 1, 0),
mfe.scenario_map.get_lane(9, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state0 = AgentState(time=0,
position=np.array((45.67, -46.72)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(45.67, -46.72)
)
state1 = AgentState(time=0,
position=np.array(third_agent_position),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(third_agent_heading)
)
state_ego = AgentState(time=0,
position=np.array((43.88, -44.25)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
target_id = 0
frame = {target_id: state0, 1: state1, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
oncoming_vehicle_id, oncoming_vehicle_dist = mfe.oncoming_vehicle(target_id, lane_path, frame)
missing = mfe.is_oncoming_vehicle_missing(oncoming_vehicle_dist, lane_path, occlusions)
plt.show()
return missing
def test_occluded_area_vehicle_in_oncoming_lanes():
missing = set_up_frame_ep3_frame100((62.88, -20.96), -110)
assert missing
def test_occluded_area_vehicle_in_oncoming_lanes_2():
missing = set_up_frame_ep3_frame100((60.12, -33.10), 140)
assert missing
def test_occluded_area_vehicle_in_oncoming_lanes_3():
missing = set_up_frame_ep3_frame100((49.12, -30.13), -45)
assert missing
def test_occluded_area_vehicle_in_oncoming_lanes_4():
missing = set_up_frame_ep3_frame100((53.81, -38.10), 170)
assert not missing
def test_occluded_area_vehicle_in_oncoming_lanes_5():
missing = set_up_frame_ep3_frame100((56.46, -38.11), -45)
assert missing
def test_occluded_area_vehicle_in_oncoming_lanes_6():
missing = set_up_frame_ep3_frame100((55.75, -37.73), 180)
assert not missing
# Tests for missing vehicle ahead.
def test_the_vehicle_in_front_is_hidden():
"""
State1 is the possible vehicle in front.
"""
episode_idx = 6
frame_id = 50
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(1, 1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((34.58, -56.93)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(45.67, -46.72)
)
state1 = AgentState(time=0,
position=np.array((39.90, -52.22)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
state_ego = AgentState(time=0,
position=np.array((34.62, -11.01)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, 1: state1, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert missing
def test_vehicle_is_behind():
"""
State1 is the possible vehicle in front.
"""
episode_idx = 6
frame_id = 50
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(3, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((76.54, -11.56)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(76.54, -11.56)
)
state1 = AgentState(time=0,
position=np.array((68.24, -20.61)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
state_ego = AgentState(time=0,
position=np.array((34.62, -11.01)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, 1: state1, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert missing
def test_no_vehicle_in_front_2():
"""
State1 is the possible vehicle in front.
"""
episode_idx = 6
frame_id = 50
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(3, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((72.77, -9.44)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(72.77, -9.44)
)
state1 = AgentState(time=0,
position=np.array((66.29, -16.77)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
state_ego = AgentState(time=0,
position=np.array((34.62, -11.01)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, 1: state1, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert not missing
def test_occlusion_far_away():
episode_idx = 7
frame_id = 200
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(2, 2, 0),
mfe.scenario_map.get_lane(10, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((84.70, -60.43)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(84.70, -60.43)
)
state_ego = AgentState(time=0,
position=np.array((73.39, -56.32)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert not missing
def test_occlusion_close_enough():
episode_idx = 7
frame_id = 200
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(10, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((61.59, -34.41)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(61.59, -34.41)
)
state_ego = AgentState(time=0,
position=np.array((73.39, -56.32)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert missing
def test_occlusion_between_vehicle_in_front():
"""
State1 is the possible vehicle in front.
"""
episode_idx = 6
frame_id = 42
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(1, 1, 0),
mfe.scenario_map.get_lane(7, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((33.07, -58.33)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(33.07, -58.33)
)
state1 = AgentState(time=0,
position=np.array((43.62, -48.47)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
state_ego = AgentState(time=0,
position=np.array((73.39, -56.32)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, ego_id: state_ego, 1: state1}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert missing
# find_lane_at((32.7, -59.4))
# plot_occlusion(42, 5, scenario_name="bendplatz")
# plt.show()
|
[
"grit.core.feature_extraction.FeatureExtractor",
"grit.core.data_processing.get_episode_frames",
"shapely.ops.unary_union",
"matplotlib.pyplot.show",
"grit.occlusion_detection.occlusion_detection_geometry.OcclusionDetector2D.plot_area_from_list",
"numpy.deg2rad",
"igp2.plot_map",
"grit.core.base.get_base_dir",
"matplotlib.pyplot.text",
"igp2.data.InDScenario",
"numpy.array",
"grit.occlusion_detection.occlusion_detection_geometry.OcclusionDetector2D.get_box"
] |
[((637, 695), 'grit.core.feature_extraction.FeatureExtractor', 'FeatureExtractor', (['scenario_map', 'scenario_name', 'episode_idx'], {}), '(scenario_map, scenario_name, episode_idx)\n', (653, 695), False, 'from grit.core.feature_extraction import FeatureExtractor\n'), ((1118, 1146), 'igp2.data.InDScenario', 'InDScenario', (['scenario_config'], {}), '(scenario_config)\n', (1129, 1146), False, 'from igp2.data import ScenarioConfig, InDScenario\n'), ((1383, 1474), 'grit.core.data_processing.get_episode_frames', 'get_episode_frames', (['episode'], {'exclude_parked_cars': '(False)', 'exclude_bicycles': '(True)', 'step': '(25)'}), '(episode, exclude_parked_cars=False, exclude_bicycles=\n True, step=25)\n', (1401, 1474), False, 'from grit.core.data_processing import get_episode_frames\n'), ((1600, 1698), 'igp2.plot_map', 'plot_map', (['feature_extractor.scenario_map'], {'scenario_config': 'scenario_config', 'plot_buildings': '(True)'}), '(feature_extractor.scenario_map, scenario_config=scenario_config,\n plot_buildings=True)\n', (1608, 1698), False, 'from igp2 import AgentState, plot_map\n'), ((3591, 3614), 'shapely.ops.unary_union', 'unary_union', (['occlusions'], {}), '(occlusions)\n', (3602, 3614), False, 'from shapely.ops import unary_union\n'), ((4984, 4994), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4992, 4994), True, 'import matplotlib.pyplot as plt\n'), ((6701, 6711), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6709, 6711), True, 'import matplotlib.pyplot as plt\n'), ((9196, 9206), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9204, 9206), True, 'import matplotlib.pyplot as plt\n'), ((10809, 10819), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10817, 10819), True, 'import matplotlib.pyplot as plt\n'), ((12424, 12434), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12432, 12434), True, 'import matplotlib.pyplot as plt\n'), ((13752, 13762), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13760, 13762), True, 'import matplotlib.pyplot as plt\n'), ((15033, 15043), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15041, 15043), True, 'import matplotlib.pyplot as plt\n'), ((16715, 16725), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16723, 16725), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2156), 'grit.occlusion_detection.occlusion_detection_geometry.OcclusionDetector2D.plot_area_from_list', 'OcclusionDetector2D.plot_area_from_list', (['lane_occlusions_all'], {'color': '"""r"""', 'alpha': '(0.5)'}), "(lane_occlusions_all, color='r',\n alpha=0.5)\n", (2109, 2156), False, 'from grit.occlusion_detection.occlusion_detection_geometry import OcclusionDetector2D\n'), ((2852, 2874), 'igp2.plot_map', 'plot_map', (['scenario_map'], {}), '(scenario_map)\n', (2860, 2874), False, 'from igp2 import AgentState, plot_map\n'), ((3014, 3024), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3022, 3024), True, 'import matplotlib.pyplot as plt\n'), ((568, 582), 'grit.core.base.get_base_dir', 'get_base_dir', ([], {}), '()\n', (580, 582), False, 'from grit.core.base import get_base_dir\n'), ((1042, 1056), 'grit.core.base.get_base_dir', 'get_base_dir', ([], {}), '()\n', (1054, 1056), False, 'from grit.core.base import get_base_dir\n'), ((2247, 2277), 'matplotlib.pyplot.text', 'plt.text', (['*state.position', 'aid'], {}), '(*state.position, aid)\n', (2255, 2277), True, 'import matplotlib.pyplot as plt\n'), ((2443, 2473), 'matplotlib.pyplot.text', 'plt.text', (['*state.position', 'aid'], {}), '(*state.position, 
aid)\n', (2451, 2473), True, 'import matplotlib.pyplot as plt\n'), ((2721, 2735), 'grit.core.base.get_base_dir', 'get_base_dir', ([], {}), '()\n', (2733, 2735), False, 'from grit.core.base import get_base_dir\n'), ((3908, 3933), 'numpy.array', 'np.array', (['(45.67, -46.72)'], {}), '((45.67, -46.72))\n', (3916, 3933), True, 'import numpy as np\n'), ((3968, 3984), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (3976, 3984), True, 'import numpy as np\n'), ((4023, 4039), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (4031, 4039), True, 'import numpy as np\n'), ((4208, 4233), 'numpy.array', 'np.array', (['(62.88, -20.96)'], {}), '((62.88, -20.96))\n', (4216, 4233), True, 'import numpy as np\n'), ((4268, 4284), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (4276, 4284), True, 'import numpy as np\n'), ((4323, 4339), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (4331, 4339), True, 'import numpy as np\n'), ((4373, 4389), 'numpy.deg2rad', 'np.deg2rad', (['(-120)'], {}), '(-120)\n', (4383, 4389), True, 'import numpy as np\n'), ((4488, 4513), 'numpy.array', 'np.array', (['(43.88, -44.25)'], {}), '((43.88, -44.25))\n', (4496, 4513), True, 'import numpy as np\n'), ((4551, 4567), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (4559, 4567), True, 'import numpy as np\n'), ((4609, 4625), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (4617, 4625), True, 'import numpy as np\n'), ((4662, 4676), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (4672, 4676), True, 'import numpy as np\n'), ((5556, 5581), 'numpy.array', 'np.array', (['(45.67, -46.72)'], {}), '((45.67, -46.72))\n', (5564, 5581), True, 'import numpy as np\n'), ((5616, 5632), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (5624, 5632), True, 'import numpy as np\n'), ((5671, 5687), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (5679, 5687), True, 'import numpy as np\n'), ((5856, 5886), 'numpy.array', 'np.array', (['third_agent_position'], {}), '(third_agent_position)\n', (5864, 5886), True, 'import numpy as np\n'), ((5921, 5937), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (5929, 5937), True, 'import numpy as np\n'), ((5976, 5992), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (5984, 5992), True, 'import numpy as np\n'), ((6026, 6057), 'numpy.deg2rad', 'np.deg2rad', (['third_agent_heading'], {}), '(third_agent_heading)\n', (6036, 6057), True, 'import numpy as np\n'), ((6156, 6181), 'numpy.array', 'np.array', (['(43.88, -44.25)'], {}), '((43.88, -44.25))\n', (6164, 6181), True, 'import numpy as np\n'), ((6219, 6235), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (6227, 6235), True, 'import numpy as np\n'), ((6277, 6293), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (6285, 6293), True, 'import numpy as np\n'), ((6330, 6344), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (6340, 6344), True, 'import numpy as np\n'), ((8025, 8050), 'numpy.array', 'np.array', (['(34.58, -56.93)'], {}), '((34.58, -56.93))\n', (8033, 8050), True, 'import numpy as np\n'), ((8091, 8107), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (8099, 8107), True, 'import numpy as np\n'), ((8152, 8168), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (8160, 8168), True, 'import numpy as np\n'), ((8349, 8373), 'numpy.array', 'np.array', (['(39.9, -52.22)'], {}), '((39.9, -52.22))\n', (8357, 8373), True, 'import numpy as np\n'), ((8409, 8425), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 
0))\n', (8417, 8425), True, 'import numpy as np\n'), ((8464, 8480), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (8472, 8480), True, 'import numpy as np\n'), ((8514, 8528), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (8524, 8528), True, 'import numpy as np\n'), ((8627, 8652), 'numpy.array', 'np.array', (['(34.62, -11.01)'], {}), '((34.62, -11.01))\n', (8635, 8652), True, 'import numpy as np\n'), ((8690, 8706), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (8698, 8706), True, 'import numpy as np\n'), ((8748, 8764), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (8756, 8764), True, 'import numpy as np\n'), ((8801, 8816), 'numpy.deg2rad', 'np.deg2rad', (['(-45)'], {}), '(-45)\n', (8811, 8816), True, 'import numpy as np\n'), ((9638, 9663), 'numpy.array', 'np.array', (['(76.54, -11.56)'], {}), '((76.54, -11.56))\n', (9646, 9663), True, 'import numpy as np\n'), ((9704, 9720), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (9712, 9720), True, 'import numpy as np\n'), ((9765, 9781), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (9773, 9781), True, 'import numpy as np\n'), ((9962, 9987), 'numpy.array', 'np.array', (['(68.24, -20.61)'], {}), '((68.24, -20.61))\n', (9970, 9987), True, 'import numpy as np\n'), ((10022, 10038), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (10030, 10038), True, 'import numpy as np\n'), ((10077, 10093), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (10085, 10093), True, 'import numpy as np\n'), ((10127, 10141), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (10137, 10141), True, 'import numpy as np\n'), ((10240, 10265), 'numpy.array', 'np.array', (['(34.62, -11.01)'], {}), '((34.62, -11.01))\n', (10248, 10265), True, 'import numpy as np\n'), ((10303, 10319), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (10311, 10319), True, 'import numpy as np\n'), ((10361, 10377), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (10369, 10377), True, 'import numpy as np\n'), ((10414, 10429), 'numpy.deg2rad', 'np.deg2rad', (['(-45)'], {}), '(-45)\n', (10424, 10429), True, 'import numpy as np\n'), ((11255, 11279), 'numpy.array', 'np.array', (['(72.77, -9.44)'], {}), '((72.77, -9.44))\n', (11263, 11279), True, 'import numpy as np\n'), ((11320, 11336), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (11328, 11336), True, 'import numpy as np\n'), ((11381, 11397), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (11389, 11397), True, 'import numpy as np\n'), ((11577, 11602), 'numpy.array', 'np.array', (['(66.29, -16.77)'], {}), '((66.29, -16.77))\n', (11585, 11602), True, 'import numpy as np\n'), ((11637, 11653), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (11645, 11653), True, 'import numpy as np\n'), ((11692, 11708), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (11700, 11708), True, 'import numpy as np\n'), ((11742, 11756), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (11752, 11756), True, 'import numpy as np\n'), ((11855, 11880), 'numpy.array', 'np.array', (['(34.62, -11.01)'], {}), '((34.62, -11.01))\n', (11863, 11880), True, 'import numpy as np\n'), ((11918, 11934), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (11926, 11934), True, 'import numpy as np\n'), ((11976, 11992), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (11984, 11992), True, 'import numpy as np\n'), ((12029, 12044), 'numpy.deg2rad', 'np.deg2rad', (['(-45)'], {}), '(-45)\n', (12039, 12044), True, 
'import numpy as np\n'), ((12864, 12888), 'numpy.array', 'np.array', (['(84.7, -60.43)'], {}), '((84.7, -60.43))\n', (12872, 12888), True, 'import numpy as np\n'), ((12930, 12946), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (12938, 12946), True, 'import numpy as np\n'), ((12991, 13007), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (12999, 13007), True, 'import numpy as np\n'), ((13194, 13219), 'numpy.array', 'np.array', (['(73.39, -56.32)'], {}), '((73.39, -56.32))\n', (13202, 13219), True, 'import numpy as np\n'), ((13257, 13273), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (13265, 13273), True, 'import numpy as np\n'), ((13315, 13331), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (13323, 13331), True, 'import numpy as np\n'), ((13368, 13383), 'numpy.deg2rad', 'np.deg2rad', (['(-45)'], {}), '(-45)\n', (13378, 13383), True, 'import numpy as np\n'), ((14145, 14170), 'numpy.array', 'np.array', (['(61.59, -34.41)'], {}), '((61.59, -34.41))\n', (14153, 14170), True, 'import numpy as np\n'), ((14211, 14227), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (14219, 14227), True, 'import numpy as np\n'), ((14272, 14288), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (14280, 14288), True, 'import numpy as np\n'), ((14475, 14500), 'numpy.array', 'np.array', (['(73.39, -56.32)'], {}), '((73.39, -56.32))\n', (14483, 14500), True, 'import numpy as np\n'), ((14538, 14554), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (14546, 14554), True, 'import numpy as np\n'), ((14596, 14612), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (14604, 14612), True, 'import numpy as np\n'), ((14649, 14664), 'numpy.deg2rad', 'np.deg2rad', (['(-45)'], {}), '(-45)\n', (14659, 14664), True, 'import numpy as np\n'), ((15544, 15569), 'numpy.array', 'np.array', (['(33.07, -58.33)'], {}), '((33.07, -58.33))\n', (15552, 15569), True, 'import numpy as np\n'), ((15610, 15626), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (15618, 15626), True, 'import numpy as np\n'), ((15671, 15687), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (15679, 15687), True, 'import numpy as np\n'), ((15868, 15893), 'numpy.array', 'np.array', (['(43.62, -48.47)'], {}), '((43.62, -48.47))\n', (15876, 15893), True, 'import numpy as np\n'), ((15928, 15944), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (15936, 15944), True, 'import numpy as np\n'), ((15983, 15999), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (15991, 15999), True, 'import numpy as np\n'), ((16033, 16047), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (16043, 16047), True, 'import numpy as np\n'), ((16146, 16171), 'numpy.array', 'np.array', (['(73.39, -56.32)'], {}), '((73.39, -56.32))\n', (16154, 16171), True, 'import numpy as np\n'), ((16209, 16225), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (16217, 16225), True, 'import numpy as np\n'), ((16267, 16283), 'numpy.array', 'np.array', (['(0, 0)'], {}), '((0, 0))\n', (16275, 16283), True, 'import numpy as np\n'), ((16320, 16335), 'numpy.deg2rad', 'np.deg2rad', (['(-45)'], {}), '(-45)\n', (16330, 16335), True, 'import numpy as np\n'), ((2578, 2610), 'grit.occlusion_detection.occlusion_detection_geometry.OcclusionDetector2D.get_box', 'OcclusionDetector2D.get_box', (['ego'], {}), '(ego)\n', (2605, 2610), False, 'from grit.occlusion_detection.occlusion_detection_geometry import OcclusionDetector2D\n'), ((2310, 2344), 
'grit.occlusion_detection.occlusion_detection_geometry.OcclusionDetector2D.get_box', 'OcclusionDetector2D.get_box', (['state'], {}), '(state)\n', (2337, 2344), False, 'from grit.occlusion_detection.occlusion_detection_geometry import OcclusionDetector2D\n'), ((2506, 2540), 'grit.occlusion_detection.occlusion_detection_geometry.OcclusionDetector2D.get_box', 'OcclusionDetector2D.get_box', (['state'], {}), '(state)\n', (2533, 2540), False, 'from grit.occlusion_detection.occlusion_detection_geometry import OcclusionDetector2D\n')]
|
import glob
import json
import argparse
import os
import os.path as path
from functools import partial
from tqdm import tqdm
import pandas as pd
import numpy as np
import scipy
import plotnine as p9
from scipy.stats import bootstrap
from nlproar.dataset import SNLIDataset, SSTDataset, IMDBDataset, BabiDataset, MimicDataset
def ratio_confint(partial_df):
"""Implementes a ratio-confidence interval
The idea is to project to logits space, then assume a normal distribution,
and then project back to the inital space.
Method proposed here: https://stats.stackexchange.com/questions/263516
"""
column_name = partial_df.loc[:, 'test_metric'].iat[0]
x = partial_df.loc[:, column_name].to_numpy()
mean = np.mean(x)
if np.all(x[0] == x):
lower = mean
upper = mean
else:
res = bootstrap((x, ), np.mean, confidence_level=0.95, random_state=np.random.default_rng(0))
lower = res.confidence_interval.low
upper = res.confidence_interval.high
return pd.Series({
'lower': lower,
'mean': mean,
'upper': upper,
'format': f'${mean:.0%}^{{+{upper-mean:.1%}}}_{{-{mean-lower:.1%}}}$'.replace('%', '\\%'),
'n': len(x)
})
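# Summarize one dataset: vocabulary size, split sizes and average sequence length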
def dataset_stats(Loader, cachedir):
dataset = Loader(cachedir=cachedir, model_type='rnn', num_workers=0)
dataset.prepare_data()
dataset.setup('fit')
dataset.setup('test')
summaries = {}
dataloaders = [
('train', dataset.train_dataloader()),
('val', dataset.val_dataloader()),
('test', dataset.test_dataloader())
]
for split_name, split_iter in dataloaders:
lengths = []
for batch in tqdm(split_iter, desc=f'Summarizing {split_name} split', leave=False):
lengths += batch.length.tolist()
summaries[split_name] = {
'length': np.mean(lengths),
'count': len(lengths),
}
return pd.Series({
'dataset': dataset.name,
'vocab_size': len(dataset.vocabulary),
'train_size': summaries['train']['count'],
'valid_size': summaries['val']['count'],
'test_size': summaries['test']['count'],
'avg_length': np.average(
[summary['length'] for summary in summaries.values()],
weights=[summary['count'] for summary in summaries.values()]
)
})
thisdir = path.dirname(path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument('--persistent-dir',
action='store',
default=path.realpath(path.join(thisdir, '..')),
type=str,
help='Directory where all persistent data will be stored')
parser.add_argument('--stage',
action='store',
default='both',
type=str,
choices=['preprocess', 'plot', 'both'],
help='Which export stage should be performed. Mostly just useful for debugging.')
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
args, unknown = parser.parse_known_args()
dataset_mapping = pd.DataFrame([
{'dataset': 'sst', 'dataset_pretty': 'SST', 'test_metric': 'f1_test', 'reference': '$81\\%$'},
{'dataset': 'snli', 'dataset_pretty': 'SNLI', 'test_metric': 'f1_test', 'reference': '$88\\%$'},
{'dataset': 'imdb', 'dataset_pretty': 'IMDB', 'test_metric': 'f1_test', 'reference': '$78\\%$'},
{'dataset': 'mimic-a', 'dataset_pretty': 'Anemia', 'test_metric': 'f1_test', 'reference': '$92\\%$'},
{'dataset': 'mimic-d', 'dataset_pretty': 'Diabetes', 'test_metric': 'f1_test', 'reference': '$79\\%$'},
{'dataset': 'babi-1', 'dataset_pretty': 'bAbI-1', 'test_metric': 'acc_test', 'reference': '$100\\%$'},
{'dataset': 'babi-2', 'dataset_pretty': 'bAbI-2', 'test_metric': 'acc_test', 'reference': '$48\\%$'},
{'dataset': 'babi-3', 'dataset_pretty': 'bAbI-3', 'test_metric': 'acc_test', 'reference': '$62\\%$'}
])
model_mapping = pd.DataFrame([
{'model_type': 'rnn', 'model_type_pretty': 'BiLSTM-Attention'},
{'model_type': 'roberta', 'model_type_pretty': 'RoBERTa'}
])
datasets = {
'sst': SSTDataset,
'snli': SNLIDataset,
'imdb': IMDBDataset,
'babi-1': partial(BabiDataset, task=1),
'babi-2': partial(BabiDataset, task=2),
'babi-3': partial(BabiDataset, task=3),
'mimic-d': partial(MimicDataset, subset='diabetes', mimicdir=f'{args.persistent_dir}/mimic'),
'mimic-a': partial(MimicDataset, subset='anemia', mimicdir=f'{args.persistent_dir}/mimic'),
}
if args.stage in ['both', 'preprocess']:
# Read JSON files into dataframe
results = []
for file in tqdm(glob.glob(f'{args.persistent_dir}/results/roar/*_s-[0-9].json'),
desc='Loading .json files'):
with open(file, 'r') as fp:
try:
results.append(json.load(fp))
except json.decoder.JSONDecodeError:
print(f'{file} has a format error')
results_df = pd.DataFrame(results)
# Summarize each dataset
summaries = []
for dataset_loader in tqdm(datasets.values(), desc='Summarizing datasets'):
summaries.append(dataset_stats(dataset_loader, cachedir=args.persistent_dir + '/cache'))
summaries_df = pd.DataFrame(summaries)
df = (results_df
.merge(dataset_mapping, on='dataset')
.groupby(['dataset', 'dataset_pretty', 'reference', 'model_type'])
.apply(ratio_confint)
.reset_index()
.merge(summaries_df, on='dataset')
.merge(model_mapping, on='model_type')
.drop(['lower', 'upper', 'n', 'mean', 'dataset', 'model_type'], axis=1)
)
if args.stage in ['preprocess']:
os.makedirs(f'{args.persistent_dir}/pandas', exist_ok=True)
df.to_pickle(f'{args.persistent_dir}/pandas/dataset.pd.pkl.xz')
if args.stage in ['plot']:
df = pd.read_pickle(f'{args.persistent_dir}/pandas/dataset.pd.pkl.xz')
if args.stage in ['both', 'plot']:
print(df)
print(df
.reset_index()
.rename(columns={
'dataset_pretty': 'Dataset',
'format': 'Faithfulness'
})
.pivot(
index=['Dataset'],
columns='model_type_pretty',
values='Faithfulness'
)
.style.to_latex()
)
print(df
.reset_index()
.rename(columns={
'dataset_pretty': 'Dataset',
'format': 'Faithfulness'
})
.pivot(
index=['Dataset', 'train_size', 'valid_size', 'test_size', 'reference'],
columns='model_type_pretty',
values='Faithfulness'
)
.style.to_latex()
)
|
[
"pandas.DataFrame",
"functools.partial",
"tqdm.tqdm",
"json.load",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"os.path.realpath",
"numpy.random.default_rng",
"numpy.mean",
"glob.glob",
"pandas.read_pickle",
"pandas.set_option",
"numpy.all"
] |
[((2434, 2459), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2457, 2459), False, 'import argparse\n'), ((737, 747), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (744, 747), True, 'import numpy as np\n'), ((756, 773), 'numpy.all', 'np.all', (['(x[0] == x)'], {}), '(x[0] == x)\n', (762, 773), True, 'import numpy as np\n'), ((2400, 2423), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (2413, 2423), True, 'import os.path as path\n'), ((3041, 3080), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (3054, 3080), True, 'import pandas as pd\n'), ((3150, 4014), 'pandas.DataFrame', 'pd.DataFrame', (["[{'dataset': 'sst', 'dataset_pretty': 'SST', 'test_metric': 'f1_test',\n 'reference': '$81\\\\%$'}, {'dataset': 'snli', 'dataset_pretty': 'SNLI',\n 'test_metric': 'f1_test', 'reference': '$88\\\\%$'}, {'dataset': 'imdb',\n 'dataset_pretty': 'IMDB', 'test_metric': 'f1_test', 'reference':\n '$78\\\\%$'}, {'dataset': 'mimic-a', 'dataset_pretty': 'Anemia',\n 'test_metric': 'f1_test', 'reference': '$92\\\\%$'}, {'dataset':\n 'mimic-d', 'dataset_pretty': 'Diabetes', 'test_metric': 'f1_test',\n 'reference': '$79\\\\%$'}, {'dataset': 'babi-1', 'dataset_pretty':\n 'bAbI-1', 'test_metric': 'acc_test', 'reference': '$100\\\\%$'}, {\n 'dataset': 'babi-2', 'dataset_pretty': 'bAbI-2', 'test_metric':\n 'acc_test', 'reference': '$48\\\\%$'}, {'dataset': 'babi-3',\n 'dataset_pretty': 'bAbI-3', 'test_metric': 'acc_test', 'reference':\n '$62\\\\%$'}]"], {}), "([{'dataset': 'sst', 'dataset_pretty': 'SST', 'test_metric':\n 'f1_test', 'reference': '$81\\\\%$'}, {'dataset': 'snli',\n 'dataset_pretty': 'SNLI', 'test_metric': 'f1_test', 'reference':\n '$88\\\\%$'}, {'dataset': 'imdb', 'dataset_pretty': 'IMDB', 'test_metric':\n 'f1_test', 'reference': '$78\\\\%$'}, {'dataset': 'mimic-a',\n 'dataset_pretty': 'Anemia', 'test_metric': 'f1_test', 'reference':\n '$92\\\\%$'}, {'dataset': 'mimic-d', 'dataset_pretty': 'Diabetes',\n 'test_metric': 'f1_test', 'reference': '$79\\\\%$'}, {'dataset': 'babi-1',\n 'dataset_pretty': 'bAbI-1', 'test_metric': 'acc_test', 'reference':\n '$100\\\\%$'}, {'dataset': 'babi-2', 'dataset_pretty': 'bAbI-2',\n 'test_metric': 'acc_test', 'reference': '$48\\\\%$'}, {'dataset':\n 'babi-3', 'dataset_pretty': 'bAbI-3', 'test_metric': 'acc_test',\n 'reference': '$62\\\\%$'}])\n", (3162, 4014), True, 'import pandas as pd\n'), ((4058, 4200), 'pandas.DataFrame', 'pd.DataFrame', (["[{'model_type': 'rnn', 'model_type_pretty': 'BiLSTM-Attention'}, {\n 'model_type': 'roberta', 'model_type_pretty': 'RoBERTa'}]"], {}), "([{'model_type': 'rnn', 'model_type_pretty': 'BiLSTM-Attention'\n }, {'model_type': 'roberta', 'model_type_pretty': 'RoBERTa'}])\n", (4070, 4200), True, 'import pandas as pd\n'), ((1696, 1765), 'tqdm.tqdm', 'tqdm', (['split_iter'], {'desc': 'f"""Summarizing {split_name} split"""', 'leave': '(False)'}), "(split_iter, desc=f'Summarizing {split_name} split', leave=False)\n", (1700, 1765), False, 'from tqdm import tqdm\n'), ((4339, 4367), 'functools.partial', 'partial', (['BabiDataset'], {'task': '(1)'}), '(BabiDataset, task=1)\n', (4346, 4367), False, 'from functools import partial\n'), ((4387, 4415), 'functools.partial', 'partial', (['BabiDataset'], {'task': '(2)'}), '(BabiDataset, task=2)\n', (4394, 4415), False, 'from functools import partial\n'), ((4435, 4463), 'functools.partial', 'partial', (['BabiDataset'], {'task': '(3)'}), '(BabiDataset, task=3)\n', (4442, 4463), False, 'from 
functools import partial\n'), ((4484, 4570), 'functools.partial', 'partial', (['MimicDataset'], {'subset': '"""diabetes"""', 'mimicdir': 'f"""{args.persistent_dir}/mimic"""'}), "(MimicDataset, subset='diabetes', mimicdir=\n f'{args.persistent_dir}/mimic')\n", (4491, 4570), False, 'from functools import partial\n'), ((4586, 4665), 'functools.partial', 'partial', (['MimicDataset'], {'subset': '"""anemia"""', 'mimicdir': 'f"""{args.persistent_dir}/mimic"""'}), "(MimicDataset, subset='anemia', mimicdir=f'{args.persistent_dir}/mimic')\n", (4593, 4665), False, 'from functools import partial\n'), ((5165, 5186), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (5177, 5186), True, 'import pandas as pd\n'), ((5452, 5475), 'pandas.DataFrame', 'pd.DataFrame', (['summaries'], {}), '(summaries)\n', (5464, 5475), True, 'import pandas as pd\n'), ((5930, 5989), 'os.makedirs', 'os.makedirs', (['f"""{args.persistent_dir}/pandas"""'], {'exist_ok': '(True)'}), "(f'{args.persistent_dir}/pandas', exist_ok=True)\n", (5941, 5989), False, 'import os\n'), ((6106, 6171), 'pandas.read_pickle', 'pd.read_pickle', (['f"""{args.persistent_dir}/pandas/dataset.pd.pkl.xz"""'], {}), "(f'{args.persistent_dir}/pandas/dataset.pd.pkl.xz')\n", (6120, 6171), True, 'import pandas as pd\n'), ((1869, 1885), 'numpy.mean', 'np.mean', (['lengths'], {}), '(lengths)\n', (1876, 1885), True, 'import numpy as np\n'), ((2578, 2602), 'os.path.join', 'path.join', (['thisdir', '""".."""'], {}), "(thisdir, '..')\n", (2587, 2602), True, 'import os.path as path\n'), ((4806, 4869), 'glob.glob', 'glob.glob', (['f"""{args.persistent_dir}/results/roar/*_s-[0-9].json"""'], {}), "(f'{args.persistent_dir}/results/roar/*_s-[0-9].json')\n", (4815, 4869), False, 'import glob\n'), ((903, 927), 'numpy.random.default_rng', 'np.random.default_rng', (['(0)'], {}), '(0)\n', (924, 927), True, 'import numpy as np\n'), ((5020, 5033), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (5029, 5033), False, 'import json\n')]
|
import numpy as np
import matplotlib.pyplot as plt
# close all figures
plt.close('all')
years = np.array([1960,1961,1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014])
usaGDP = np.array([543300000000.,563300000000.,605100000000.,638600000000.,685800000000.,743700000000.,815000000000.,861700000000.,942500000000.,1019900000000.,1075884000000.,1167770000000.,1282449000000.,1428549000000.,1548825000000.,1688923000000.,1877587000000.,2085951000000.,2356571000000.,2632143000000.,2862505000000.,3210956000000.,3344991000000.,3638137000000.,4040693000000.,4346734000000.,4590155000000.,4870217000000.,5252629000000.,5657693000000.,5979589000000.,6174043000000.,6539299000000.,6878718000000.,7308755000000.,7664060000000.,8100201000000.,8608515000000.,9089168000000.,9660624000000.,10284779000000.,10621824000000.,10977514000000.,11510670000000.,12274928000000.,13093726000000.,13855888000000.,14477635000000.,14718582000000.,14418739000000.,14964372000000.,15517926000000.,16163158000000.,16768053000000.,17419000000000.])
# GDP data from the worldbank http://data.worldbank.org/indicator/NY.GDP.MKTP.CD/countries/US?display=graph
# CPI data from bureau of labor statistics http://data.bls.gov/pdq/SurveyOutputServlet
usaCPI = np.array([29.6, 29.9, 30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, 232.957, 236.736])
plt.figure()
plt.plot(years, usaGDP)
plt.xlabel('Year')
plt.ylabel('GDP in Current USD')
plt.grid(True)
plt.show()
# Adjust GDP for 1960 USD
usaGDP1960 = usaGDP / (usaCPI / usaCPI[0])
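# Illustrative check (added, not in the original script): deflating nominal GDP to
# base-year dollars is real = nominal * (CPI_base / CPI_t). For 1970 (index 10),
# 1.076e12 * 29.6 / 38.8 is roughly 0.82e12 in 1960 dollars.
assert abs(usaGDP1960[10] - usaGDP[10] * usaCPI[0] / usaCPI[10]) < 1.0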
plt.figure()
plt.plot(years, usaGDP1960)
plt.xlabel('Year')
plt.ylabel('GDP adjusted for inflation in 1960 USD')
plt.grid(True)
plt.show()
# Adjust GDP for 2014 USD
usaGDP2014 = usaGDP / (usaCPI / usaCPI[-1])
plt.figure()
plt.plot(years, usaGDP2014)
plt.xlabel('Year')
plt.ylabel('GDP adjusted for inflation in 2014 USD')
plt.grid(True)
plt.show()
# population from world bank
usaPop = np.array([180671000,183691000,186538000,189242000,191889000,194303000,196560000,198712000,200706000,202677000,205052000,207661000,209896000,211909000,213854000,215973000,218035000,220239000,222585000,225055000,227225000,229466000,231664000,233792000,235825000,237924000,240133000,242289000,244499000,246819000,249623000,252981000,256514000,259919000,263126000,266278000,269394000,272657000,275854000,279040000,282162411,284968955,287625193,290107933,292805298,295516599,298379912,301231207,304093966,306771529,309347057,311721632,314112078,316497531,318857056])
usaGDPpercapita = usaGDP / usaPop
plt.figure()
plt.plot(years, usaGDPpercapita)
plt.xlabel('Year')
plt.ylabel('GDP per capita in Current USD')
plt.grid(True)
plt.show()
# adjust GDP per Capita to 1960s numbers
usaGDPpercapita1960 = usaGDPpercapita / (usaCPI / usaCPI[0])
plt.figure()
plt.plot(years, usaGDPpercapita1960)
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation in 1960 USD')
plt.grid(True)
plt.show()
# adjust GDP per Capita to 2014s numbers
usaGDPpercapita2014 = usaGDPpercapita / (usaCPI / usaCPI[-1])
plt.figure()
plt.plot(years, usaGDPpercapita2014)
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# define a function to adjust the CPI based on an over- or under-estimation of
# the inflation rate, where rate is the annual multiplicative adjustment;
# a percentage overestimate of 5% per year would be input as 1.05
def adjustCPI(cpi, rate):
demo = []
for i, j in enumerate(cpi):
demo.append(j * (rate**i))
    # return an ndarray so the element-wise divisions below work
    return np.array(demo)
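# Quick illustration (added, not part of the original analysis): element i is
# scaled by rate**i, so adjustCPI(np.array([100.0, 100.0, 100.0]), 1.005)
# gives approximately [100.0, 100.5, 101.0025].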
# what if we underestimated inflation?
cpiOverFive = adjustCPI(usaCPI, 1.005)
# what if we overestimated inflation?
cpiUnderFive = adjustCPI(usaCPI, 0.995)
# adjust GDP per Capita to 2014s numbers
usaGDPpercapita2014OverFive = usaGDPpercapita / (cpiOverFive / cpiOverFive[-1])
usaGDPpercapita2014UnderFive = usaGDPpercapita / (cpiUnderFive / cpiUnderFive[-1])
plt.figure()
plt.plot(years, usaGDPpercapita2014, label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014OverFive, label='CPI each year adjusted +0.5%')
plt.plot(years, usaGDPpercapita2014UnderFive, label='CPI each year adjusted -0.5%')
plt.legend()
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
years2 = np.array([1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014])
usaGNI = np.array([612178550047.646,646233886826.65,692328219512.945,753294530375.941,824183577234.192,868295290971.962,952033980993.251,1027990251284.03,1098553055567.61,1183038457083.86,1320921418184.74,1548458249174.67,1711839855738.22,1842214711486.27,1958767403397.59,2117456144199.84,2401109359261.26,2751769589536.9,3048093901726.34,3303883972259.98,3297652203866.24,3411202239818.87,3828479505092.12,4164905103485.73,4601500378186.56,5200354088055.45,5765196251790.1,5888830786924.1,6029529322891.06,6164277951121.71,6612706041742.15,6883086506452.91,7302781827892.38,7760854970064.45,8184808773787.28,8558708987900.82,8869581532268.98,9425292191447.05,10178500697503.7,10498594829042.2,10776200783181,11589035965657.3,12790914724399.8,13693955258225.3,14345564947204.5,14651211130474,15002428215985,14740580035992.9,15143137264678.1,15727290871234.6,16501015978642.4,17001290051112.6,17611490812741.3])
# GNI data atlas method from the worldbank http://databank.worldbank.org/data/reports.aspx?source=2&country=USA&series=&period=#
# CPI data from bureau of labor statistics http://data.bls.gov/pdq/SurveyOutputServlet
usaCPI2 = np.array([30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, 232.957, 236.736])
plt.figure()
plt.plot(years2, usaGNI)
plt.xlabel('Year')
plt.ylabel('GNI in Current USD')
plt.grid(True)
plt.show()
# Adjust GNI for 1962 USD
usaGNI1962 = usaGNI / (usaCPI2 / usaCPI2[0])
plt.figure()
plt.plot(years2, usaGNI1962)
plt.xlabel('Year')
plt.ylabel('GNI adjusted for inflation to 1962 USD')
plt.grid(True)
plt.show()
# Adjust GNI for 2014 USD
usaGNI2014 = usaGNI / (usaCPI2 / usaCPI2[-1])
plt.figure()
plt.plot(years2, usaGNI2014)
plt.xlabel('Year')
plt.ylabel('GNI adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# population from world bank
usaPop = np.array([186538000,189242000,191889000,194303000,196560000,198712000,200706000,202677000,205052000,207661000,209896000,211909000,213854000,215973000,218035000,220239000,222585000,225055000,227225000,229466000,231664000,233792000,235825000,237924000,240133000,242289000,244499000,246819000,249623000,252981000,256514000,259919000,263126000,266278000,269394000,272657000,275854000,279040000,282162411,284968955,287625193,290107933,292805298,295516599,298379912,301231207,304093966,306771529,309347057,311721632,314112078,316497531,318857056])
usaGNIpercapita = usaGNI / usaPop
plt.figure()
plt.plot(years2, usaGNIpercapita)
plt.xlabel('Year')
plt.ylabel('GNI per capita in Current USD')
plt.grid(True)
plt.show()
# adjust GNI per Capita to 1962s numbers
usaGNIpercapita1962 = usaGNIpercapita / (usaCPI2 / usaCPI2[0])
plt.figure()
plt.plot(years2, usaGNIpercapita1962)
plt.xlabel('Year')
plt.ylabel('GNI per capita adjusted for inflation to 1962 USD')
plt.grid(True)
plt.show()
# adjust GNI per Capita to 2014s numbers
usaGNIpercapita2014 = usaGNIpercapita / (usaCPI2 / usaCPI2[-1])
plt.figure()
plt.plot(years2, usaGNIpercapita2014)
plt.xlabel('Year')
plt.ylabel('GNI per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# close all figs
plt.close('all')
# save the final plots
# plot of the GDP and GNI in current USD
plt.figure()
plt.plot(years, usaGDP / 1.e12, '-k', label='GDP')
plt.plot(years2, usaGNI / 1.e12, '--b', label='GNI')
plt.xlabel('Year')
plt.ylabel('Trillion USD')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI.png')
# plot of GDP and GNI per capita in current USD
plt.figure()
plt.plot(years, usaGDPpercapita, '-k', label='GDP')
plt.plot(years2, usaGNIpercapita, '--b', label='GNI')
plt.xlabel('Year')
plt.ylabel('USD per capita')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita.png')
# plot of GDP and GNI per capita in 2014 USD
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='GDP')
plt.plot(years2, usaGNIpercapita2014, '--b', label='GNI')
plt.xlabel('Year')
plt.ylabel('USD per capita adjusted for inflation to 2014 levels')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita_2014.png')
# plot of GDP at 0.5, 1, and 2 percent estimations
# what if CPI has underestimated inflation?
cpiUnderHalf = adjustCPI(usaCPI, 1.005)
cpiUnderOne = adjustCPI(usaCPI, 1.01)
cpiUnderTwo = adjustCPI(usaCPI, 1.02)
# what if CPI has overestimated inflation?
cpiOverHalf = adjustCPI(usaCPI, 0.995)
cpiOverOne = adjustCPI(usaCPI, 0.99)
cpiOverTwo = adjustCPI(usaCPI, 0.98)
# recalculate GDP based on the CPI values
usaGDPpercapita2014UnderHalf = usaGDPpercapita / (cpiUnderHalf / cpiUnderHalf[-1])
usaGDPpercapita2014UnderOne = usaGDPpercapita / (cpiUnderOne / cpiUnderOne[-1])
usaGDPpercapita2014UnderTwo = usaGDPpercapita / (cpiUnderTwo / cpiUnderTwo[-1])
usaGDPpercapita2014OverHalf = usaGDPpercapita / (cpiOverHalf / cpiOverHalf[-1])
usaGDPpercapita2014OverOne = usaGDPpercapita / (cpiOverOne / cpiOverOne[-1])
usaGDPpercapita2014OverTwo = usaGDPpercapita / (cpiOverTwo / cpiOverTwo[-1])
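# Note (added for scale): these adjustments compound, so over the 54 years between
# 1960 and 2014 a 0.5% annual tweak shifts the deflator by roughly 1.005**54 - 1,
# about 31%, while a 2% tweak shifts it by roughly 1.02**54 - 1, about 191%.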
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014UnderHalf, '--k', label='CPI each year adjusted +0.5%')
plt.plot(years, usaGDPpercapita2014OverHalf, '-.k', label='CPI each year adjusted -0.5%')
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation (USD)')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita_2014_half.png')
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014UnderOne, '--k', label='CPI each year adjusted +1.0%')
plt.plot(years, usaGDPpercapita2014OverOne, '-.k', label='CPI each year adjusted -1.0%')
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation (USD)')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita_2014_one.png')
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014UnderTwo, '--k', label='CPI each year adjusted +2.0%')
plt.plot(years, usaGDPpercapita2014OverTwo, '-.k', label='CPI each year adjusted -2.0%')
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation (USD)')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita_2014_two.png')
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] |
[((74, 90), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (83, 90), True, 'import matplotlib.pyplot as plt\n'), ((100, 456), 'numpy.array', 'np.array', (['[1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, \n 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983,\n 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,\n 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,\n 2008, 2009, 2010, 2011, 2012, 2013, 2014]'], {}), '([1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970,\n 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982,\n 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994,\n 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,\n 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014])\n', (108, 456), True, 'import numpy as np\n'), ((396, 1409), 'numpy.array', 'np.array', (['[543300000000.0, 563300000000.0, 605100000000.0, 638600000000.0, \n 685800000000.0, 743700000000.0, 815000000000.0, 861700000000.0, \n 942500000000.0, 1019900000000.0, 1075884000000.0, 1167770000000.0, \n 1282449000000.0, 1428549000000.0, 1548825000000.0, 1688923000000.0, \n 1877587000000.0, 2085951000000.0, 2356571000000.0, 2632143000000.0, \n 2862505000000.0, 3210956000000.0, 3344991000000.0, 3638137000000.0, \n 4040693000000.0, 4346734000000.0, 4590155000000.0, 4870217000000.0, \n 5252629000000.0, 5657693000000.0, 5979589000000.0, 6174043000000.0, \n 6539299000000.0, 6878718000000.0, 7308755000000.0, 7664060000000.0, \n 8100201000000.0, 8608515000000.0, 9089168000000.0, 9660624000000.0, \n 10284779000000.0, 10621824000000.0, 10977514000000.0, 11510670000000.0,\n 12274928000000.0, 13093726000000.0, 13855888000000.0, 14477635000000.0,\n 14718582000000.0, 14418739000000.0, 14964372000000.0, 15517926000000.0,\n 16163158000000.0, 16768053000000.0, 17419000000000.0]'], {}), '([543300000000.0, 563300000000.0, 605100000000.0, 638600000000.0, \n 685800000000.0, 743700000000.0, 815000000000.0, 861700000000.0, \n 942500000000.0, 1019900000000.0, 1075884000000.0, 1167770000000.0, \n 1282449000000.0, 1428549000000.0, 1548825000000.0, 1688923000000.0, \n 1877587000000.0, 2085951000000.0, 2356571000000.0, 2632143000000.0, \n 2862505000000.0, 3210956000000.0, 3344991000000.0, 3638137000000.0, \n 4040693000000.0, 4346734000000.0, 4590155000000.0, 4870217000000.0, \n 5252629000000.0, 5657693000000.0, 5979589000000.0, 6174043000000.0, \n 6539299000000.0, 6878718000000.0, 7308755000000.0, 7664060000000.0, \n 8100201000000.0, 8608515000000.0, 9089168000000.0, 9660624000000.0, \n 10284779000000.0, 10621824000000.0, 10977514000000.0, 11510670000000.0,\n 12274928000000.0, 13093726000000.0, 13855888000000.0, 14477635000000.0,\n 14718582000000.0, 14418739000000.0, 14964372000000.0, 15517926000000.0,\n 16163158000000.0, 16768053000000.0, 17419000000000.0])\n', (404, 1409), True, 'import numpy as np\n'), ((1448, 1857), 'numpy.array', 'np.array', (['[29.6, 29.9, 30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, \n 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6,\n 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, \n 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, \n 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, \n 229.594, 232.957, 236.736]'], {}), '([29.6, 29.9, 30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8,\n 40.5, 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 
90.9, 96.5,\n 99.6, 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, \n 144.5, 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, \n 184.0, 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939,\n 229.594, 232.957, 236.736])\n', (1456, 1857), True, 'import numpy as np\n'), ((1837, 1849), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1847, 1849), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1873), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDP'], {}), '(years, usaGDP)\n', (1858, 1873), True, 'import matplotlib.pyplot as plt\n'), ((1874, 1892), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (1884, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1893, 1925), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP in Current USD"""'], {}), "('GDP in Current USD')\n", (1903, 1925), True, 'import matplotlib.pyplot as plt\n'), ((1926, 1940), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1934, 1940), True, 'import matplotlib.pyplot as plt\n'), ((1941, 1951), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1949, 1951), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2037), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2035, 2037), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2065), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDP1960'], {}), '(years, usaGDP1960)\n', (2046, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2066, 2084), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (2076, 2084), True, 'import matplotlib.pyplot as plt\n'), ((2085, 2137), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP adjusted for inflation in 1960 USD"""'], {}), "('GDP adjusted for inflation in 1960 USD')\n", (2095, 2137), True, 'import matplotlib.pyplot as plt\n'), ((2138, 2152), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2146, 2152), True, 'import matplotlib.pyplot as plt\n'), ((2153, 2163), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2161, 2163), True, 'import matplotlib.pyplot as plt\n'), ((2238, 2250), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2248, 2250), True, 'import matplotlib.pyplot as plt\n'), ((2251, 2278), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDP2014'], {}), '(years, usaGDP2014)\n', (2259, 2278), True, 'import matplotlib.pyplot as plt\n'), ((2279, 2297), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (2289, 2297), True, 'import matplotlib.pyplot as plt\n'), ((2298, 2350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP adjusted for inflation in 2014 USD"""'], {}), "('GDP adjusted for inflation in 2014 USD')\n", (2308, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2365), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2359, 2365), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2376), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2374, 2376), True, 'import matplotlib.pyplot as plt\n'), ((2418, 3077), 'numpy.array', 'np.array', (['[180671000, 183691000, 186538000, 189242000, 191889000, 194303000, \n 196560000, 198712000, 200706000, 202677000, 205052000, 207661000, \n 209896000, 211909000, 213854000, 215973000, 218035000, 220239000, \n 222585000, 225055000, 227225000, 229466000, 231664000, 233792000, \n 235825000, 237924000, 240133000, 242289000, 244499000, 246819000, \n 249623000, 252981000, 256514000, 259919000, 263126000, 266278000, \n 269394000, 
272657000, 275854000, 279040000, 282162411, 284968955, \n 287625193, 290107933, 292805298, 295516599, 298379912, 301231207, \n 304093966, 306771529, 309347057, 311721632, 314112078, 316497531, 318857056\n ]'], {}), '([180671000, 183691000, 186538000, 189242000, 191889000, 194303000,\n 196560000, 198712000, 200706000, 202677000, 205052000, 207661000, \n 209896000, 211909000, 213854000, 215973000, 218035000, 220239000, \n 222585000, 225055000, 227225000, 229466000, 231664000, 233792000, \n 235825000, 237924000, 240133000, 242289000, 244499000, 246819000, \n 249623000, 252981000, 256514000, 259919000, 263126000, 266278000, \n 269394000, 272657000, 275854000, 279040000, 282162411, 284968955, \n 287625193, 290107933, 292805298, 295516599, 298379912, 301231207, \n 304093966, 306771529, 309347057, 311721632, 314112078, 316497531, \n 318857056])\n', (2426, 3077), True, 'import numpy as np\n'), ((3015, 3027), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3025, 3027), True, 'import matplotlib.pyplot as plt\n'), ((3028, 3060), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita'], {}), '(years, usaGDPpercapita)\n', (3036, 3060), True, 'import matplotlib.pyplot as plt\n'), ((3061, 3079), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (3071, 3079), True, 'import matplotlib.pyplot as plt\n'), ((3080, 3123), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP per capita in Current USD"""'], {}), "('GDP per capita in Current USD')\n", (3090, 3123), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3138), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3132, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3139, 3149), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3147, 3149), True, 'import matplotlib.pyplot as plt\n'), ((3255, 3267), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3265, 3267), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3304), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita1960'], {}), '(years, usaGDPpercapita1960)\n', (3276, 3304), True, 'import matplotlib.pyplot as plt\n'), ((3305, 3323), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (3315, 3323), True, 'import matplotlib.pyplot as plt\n'), ((3324, 3387), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP per capita adjusted for inflation in 1960 USD"""'], {}), "('GDP per capita adjusted for inflation in 1960 USD')\n", (3334, 3387), True, 'import matplotlib.pyplot as plt\n'), ((3388, 3402), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3396, 3402), True, 'import matplotlib.pyplot as plt\n'), ((3403, 3413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3411, 3413), True, 'import matplotlib.pyplot as plt\n'), ((3520, 3532), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3530, 3532), True, 'import matplotlib.pyplot as plt\n'), ((3533, 3569), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014'], {}), '(years, usaGDPpercapita2014)\n', (3541, 3569), True, 'import matplotlib.pyplot as plt\n'), ((3570, 3588), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (3580, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3589, 3652), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP per capita adjusted for inflation to 2014 USD"""'], {}), "('GDP per capita adjusted for inflation to 2014 USD')\n", (3599, 3652), True, 'import matplotlib.pyplot as plt\n'), ((3653, 3667), 
'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3661, 3667), True, 'import matplotlib.pyplot as plt\n'), ((3668, 3678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3676, 3678), True, 'import matplotlib.pyplot as plt\n'), ((4399, 4411), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4409, 4411), True, 'import matplotlib.pyplot as plt\n'), ((4412, 4464), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014'], {'label': '"""normal"""'}), "(years, usaGDPpercapita2014, label='normal')\n", (4420, 4464), True, 'import matplotlib.pyplot as plt\n'), ((4465, 4524), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014OverFive'], {'label': '"""under"""'}), "(years, usaGDPpercapita2014OverFive, label='under')\n", (4473, 4524), True, 'import matplotlib.pyplot as plt\n'), ((4525, 4584), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014UnderFive'], {'label': '"""over"""'}), "(years, usaGDPpercapita2014UnderFive, label='over')\n", (4533, 4584), True, 'import matplotlib.pyplot as plt\n'), ((4585, 4597), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4595, 4597), True, 'import matplotlib.pyplot as plt\n'), ((4598, 4616), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (4608, 4616), True, 'import matplotlib.pyplot as plt\n'), ((4617, 4680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP per capita adjusted for inflation to 2014 USD"""'], {}), "('GDP per capita adjusted for inflation to 2014 USD')\n", (4627, 4680), True, 'import matplotlib.pyplot as plt\n'), ((4681, 4695), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4689, 4695), True, 'import matplotlib.pyplot as plt\n'), ((4696, 4706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4704, 4706), True, 'import matplotlib.pyplot as plt\n'), ((4718, 5062), 'numpy.array', 'np.array', (['[1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, \n 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985,\n 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997,\n 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,\n 2010, 2011, 2012, 2013, 2014]'], {}), '([1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972,\n 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984,\n 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996,\n 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,\n 2009, 2010, 2011, 2012, 2013, 2014])\n', (4726, 5062), True, 'import numpy as np\n'), ((5004, 6016), 'numpy.array', 'np.array', (['[612178550047.646, 646233886826.65, 692328219512.945, 753294530375.941, \n 824183577234.192, 868295290971.962, 952033980993.251, 1027990251284.03,\n 1098553055567.61, 1183038457083.86, 1320921418184.74, 1548458249174.67,\n 1711839855738.22, 1842214711486.27, 1958767403397.59, 2117456144199.84,\n 2401109359261.26, 2751769589536.9, 3048093901726.34, 3303883972259.98, \n 3297652203866.24, 3411202239818.87, 3828479505092.12, 4164905103485.73,\n 4601500378186.56, 5200354088055.45, 5765196251790.1, 5888830786924.1, \n 6029529322891.06, 6164277951121.71, 6612706041742.15, 6883086506452.91,\n 7302781827892.38, 7760854970064.45, 8184808773787.28, 8558708987900.82,\n 8869581532268.98, 9425292191447.05, 10178500697503.7, 10498594829042.2,\n 10776200783181, 11589035965657.3, 12790914724399.8, 13693955258225.3, \n 14345564947204.5, 14651211130474, 15002428215985, 
14740580035992.9, \n 15143137264678.1, 15727290871234.6, 16501015978642.4, 17001290051112.6,\n 17611490812741.3]'], {}), '([612178550047.646, 646233886826.65, 692328219512.945, \n 753294530375.941, 824183577234.192, 868295290971.962, 952033980993.251,\n 1027990251284.03, 1098553055567.61, 1183038457083.86, 1320921418184.74,\n 1548458249174.67, 1711839855738.22, 1842214711486.27, 1958767403397.59,\n 2117456144199.84, 2401109359261.26, 2751769589536.9, 3048093901726.34, \n 3303883972259.98, 3297652203866.24, 3411202239818.87, 3828479505092.12,\n 4164905103485.73, 4601500378186.56, 5200354088055.45, 5765196251790.1, \n 5888830786924.1, 6029529322891.06, 6164277951121.71, 6612706041742.15, \n 6883086506452.91, 7302781827892.38, 7760854970064.45, 8184808773787.28,\n 8558708987900.82, 8869581532268.98, 9425292191447.05, 10178500697503.7,\n 10498594829042.2, 10776200783181, 11589035965657.3, 12790914724399.8, \n 13693955258225.3, 14345564947204.5, 14651211130474, 15002428215985, \n 14740580035992.9, 15143137264678.1, 15727290871234.6, 16501015978642.4,\n 17001290051112.6, 17611490812741.3])\n', (5012, 6016), True, 'import numpy as np\n'), ((6137, 6535), 'numpy.array', 'np.array', (['[30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8, 44.4, \n 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9, \n 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, \n 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, \n 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, \n 232.957, 236.736]'], {}), '([30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8,\n 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9,\n 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, \n 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, \n 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, \n 232.957, 236.736])\n', (6145, 6535), True, 'import numpy as np\n'), ((6514, 6526), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6524, 6526), True, 'import matplotlib.pyplot as plt\n'), ((6527, 6551), 'matplotlib.pyplot.plot', 'plt.plot', (['years2', 'usaGNI'], {}), '(years2, usaGNI)\n', (6535, 6551), True, 'import matplotlib.pyplot as plt\n'), ((6552, 6570), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (6562, 6570), True, 'import matplotlib.pyplot as plt\n'), ((6571, 6603), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GNI in Current USD"""'], {}), "('GNI in Current USD')\n", (6581, 6603), True, 'import matplotlib.pyplot as plt\n'), ((6604, 6618), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6612, 6618), True, 'import matplotlib.pyplot as plt\n'), ((6619, 6629), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6627, 6629), True, 'import matplotlib.pyplot as plt\n'), ((6705, 6717), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6715, 6717), True, 'import matplotlib.pyplot as plt\n'), ((6718, 6746), 'matplotlib.pyplot.plot', 'plt.plot', (['years2', 'usaGNI1962'], {}), '(years2, usaGNI1962)\n', (6726, 6746), True, 'import matplotlib.pyplot as plt\n'), ((6747, 6765), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (6757, 6765), True, 'import matplotlib.pyplot as plt\n'), ((6766, 6818), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GNI adjusted for inflation to 1962 USD"""'], {}), "('GNI adjusted for inflation to 1962 USD')\n", (6776, 6818), True, 'import 
matplotlib.pyplot as plt\n'), ((6819, 6833), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6827, 6833), True, 'import matplotlib.pyplot as plt\n'), ((6834, 6844), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6842, 6844), True, 'import matplotlib.pyplot as plt\n'), ((6921, 6933), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6931, 6933), True, 'import matplotlib.pyplot as plt\n'), ((6934, 6962), 'matplotlib.pyplot.plot', 'plt.plot', (['years2', 'usaGNI2014'], {}), '(years2, usaGNI2014)\n', (6942, 6962), True, 'import matplotlib.pyplot as plt\n'), ((6963, 6981), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (6973, 6981), True, 'import matplotlib.pyplot as plt\n'), ((6982, 7034), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GNI adjusted for inflation to 2014 USD"""'], {}), "('GNI adjusted for inflation to 2014 USD')\n", (6992, 7034), True, 'import matplotlib.pyplot as plt\n'), ((7035, 7049), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7043, 7049), True, 'import matplotlib.pyplot as plt\n'), ((7050, 7060), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7058, 7060), True, 'import matplotlib.pyplot as plt\n'), ((7102, 7734), 'numpy.array', 'np.array', (['[186538000, 189242000, 191889000, 194303000, 196560000, 198712000, \n 200706000, 202677000, 205052000, 207661000, 209896000, 211909000, \n 213854000, 215973000, 218035000, 220239000, 222585000, 225055000, \n 227225000, 229466000, 231664000, 233792000, 235825000, 237924000, \n 240133000, 242289000, 244499000, 246819000, 249623000, 252981000, \n 256514000, 259919000, 263126000, 266278000, 269394000, 272657000, \n 275854000, 279040000, 282162411, 284968955, 287625193, 290107933, \n 292805298, 295516599, 298379912, 301231207, 304093966, 306771529, \n 309347057, 311721632, 314112078, 316497531, 318857056]'], {}), '([186538000, 189242000, 191889000, 194303000, 196560000, 198712000,\n 200706000, 202677000, 205052000, 207661000, 209896000, 211909000, \n 213854000, 215973000, 218035000, 220239000, 222585000, 225055000, \n 227225000, 229466000, 231664000, 233792000, 235825000, 237924000, \n 240133000, 242289000, 244499000, 246819000, 249623000, 252981000, \n 256514000, 259919000, 263126000, 266278000, 269394000, 272657000, \n 275854000, 279040000, 282162411, 284968955, 287625193, 290107933, \n 292805298, 295516599, 298379912, 301231207, 304093966, 306771529, \n 309347057, 311721632, 314112078, 316497531, 318857056])\n', (7110, 7734), True, 'import numpy as np\n'), ((7679, 7691), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7689, 7691), True, 'import matplotlib.pyplot as plt\n'), ((7692, 7725), 'matplotlib.pyplot.plot', 'plt.plot', (['years2', 'usaGNIpercapita'], {}), '(years2, usaGNIpercapita)\n', (7700, 7725), True, 'import matplotlib.pyplot as plt\n'), ((7726, 7744), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (7736, 7744), True, 'import matplotlib.pyplot as plt\n'), ((7745, 7788), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GNI per capita in Current USD"""'], {}), "('GNI per capita in Current USD')\n", (7755, 7788), True, 'import matplotlib.pyplot as plt\n'), ((7789, 7803), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (7797, 7803), True, 'import matplotlib.pyplot as plt\n'), ((7804, 7814), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7812, 7814), True, 'import matplotlib.pyplot as plt\n'), ((7922, 7934), 'matplotlib.pyplot.figure', 'plt.figure', 
([], {}), '()\n', (7932, 7934), True, 'import matplotlib.pyplot as plt\n'), ((7935, 7972), 'matplotlib.pyplot.plot', 'plt.plot', (['years2', 'usaGNIpercapita1962'], {}), '(years2, usaGNIpercapita1962)\n', (7943, 7972), True, 'import matplotlib.pyplot as plt\n'), ((7973, 7991), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (7983, 7991), True, 'import matplotlib.pyplot as plt\n'), ((7992, 8055), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GNI per capita adjusted for inflation to 1962 USD"""'], {}), "('GNI per capita adjusted for inflation to 1962 USD')\n", (8002, 8055), True, 'import matplotlib.pyplot as plt\n'), ((8056, 8070), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8064, 8070), True, 'import matplotlib.pyplot as plt\n'), ((8071, 8081), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8079, 8081), True, 'import matplotlib.pyplot as plt\n'), ((8190, 8202), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8200, 8202), True, 'import matplotlib.pyplot as plt\n'), ((8203, 8240), 'matplotlib.pyplot.plot', 'plt.plot', (['years2', 'usaGNIpercapita2014'], {}), '(years2, usaGNIpercapita2014)\n', (8211, 8240), True, 'import matplotlib.pyplot as plt\n'), ((8241, 8259), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (8251, 8259), True, 'import matplotlib.pyplot as plt\n'), ((8260, 8323), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GNI per capita adjusted for inflation to 2014 USD"""'], {}), "('GNI per capita adjusted for inflation to 2014 USD')\n", (8270, 8323), True, 'import matplotlib.pyplot as plt\n'), ((8324, 8338), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8332, 8338), True, 'import matplotlib.pyplot as plt\n'), ((8339, 8349), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8347, 8349), True, 'import matplotlib.pyplot as plt\n'), ((8370, 8386), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (8379, 8386), True, 'import matplotlib.pyplot as plt\n'), ((8456, 8468), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8466, 8468), True, 'import matplotlib.pyplot as plt\n'), ((8469, 8529), 'matplotlib.pyplot.plot', 'plt.plot', (['years', '(usaGDP / 1000000000000.0)', '"""-k"""'], {'label': '"""GDP"""'}), "(years, usaGDP / 1000000000000.0, '-k', label='GDP')\n", (8477, 8529), True, 'import matplotlib.pyplot as plt\n'), ((8520, 8582), 'matplotlib.pyplot.plot', 'plt.plot', (['years2', '(usaGNI / 1000000000000.0)', '"""--b"""'], {'label': '"""GNI"""'}), "(years2, usaGNI / 1000000000000.0, '--b', label='GNI')\n", (8528, 8582), True, 'import matplotlib.pyplot as plt\n'), ((8573, 8591), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (8583, 8591), True, 'import matplotlib.pyplot as plt\n'), ((8592, 8618), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Trillion USD"""'], {}), "('Trillion USD')\n", (8602, 8618), True, 'import matplotlib.pyplot as plt\n'), ((8619, 8636), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (8629, 8636), True, 'import matplotlib.pyplot as plt\n'), ((8637, 8651), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8645, 8651), True, 'import matplotlib.pyplot as plt\n'), ((8652, 8662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8660, 8662), True, 'import matplotlib.pyplot as plt\n'), ((8663, 8701), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/usaGDPandGNI.png"""'], {}), 
"('images/usaGDPandGNI.png')\n", (8674, 8701), True, 'import matplotlib.pyplot as plt\n'), ((8752, 8764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8762, 8764), True, 'import matplotlib.pyplot as plt\n'), ((8765, 8816), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita', '"""-k"""'], {'label': '"""GDP"""'}), "(years, usaGDPpercapita, '-k', label='GDP')\n", (8773, 8816), True, 'import matplotlib.pyplot as plt\n'), ((8817, 8870), 'matplotlib.pyplot.plot', 'plt.plot', (['years2', 'usaGNIpercapita', '"""--b"""'], {'label': '"""GNI"""'}), "(years2, usaGNIpercapita, '--b', label='GNI')\n", (8825, 8870), True, 'import matplotlib.pyplot as plt\n'), ((8871, 8889), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (8881, 8889), True, 'import matplotlib.pyplot as plt\n'), ((8890, 8918), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""USD per capita"""'], {}), "('USD per capita')\n", (8900, 8918), True, 'import matplotlib.pyplot as plt\n'), ((8919, 8936), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (8929, 8936), True, 'import matplotlib.pyplot as plt\n'), ((8937, 8951), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8945, 8951), True, 'import matplotlib.pyplot as plt\n'), ((8952, 8962), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8960, 8962), True, 'import matplotlib.pyplot as plt\n'), ((8963, 9011), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/usaGDPandGNI_perCapita.png"""'], {}), "('images/usaGDPandGNI_perCapita.png')\n", (8974, 9011), True, 'import matplotlib.pyplot as plt\n'), ((9059, 9071), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9069, 9071), True, 'import matplotlib.pyplot as plt\n'), ((9072, 9127), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014', '"""-k"""'], {'label': '"""GDP"""'}), "(years, usaGDPpercapita2014, '-k', label='GDP')\n", (9080, 9127), True, 'import matplotlib.pyplot as plt\n'), ((9128, 9185), 'matplotlib.pyplot.plot', 'plt.plot', (['years2', 'usaGNIpercapita2014', '"""--b"""'], {'label': '"""GNI"""'}), "(years2, usaGNIpercapita2014, '--b', label='GNI')\n", (9136, 9185), True, 'import matplotlib.pyplot as plt\n'), ((9186, 9204), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (9196, 9204), True, 'import matplotlib.pyplot as plt\n'), ((9205, 9271), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""USD per capita adjusted for inflation to 2014 levels"""'], {}), "('USD per capita adjusted for inflation to 2014 levels')\n", (9215, 9271), True, 'import matplotlib.pyplot as plt\n'), ((9272, 9289), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (9282, 9289), True, 'import matplotlib.pyplot as plt\n'), ((9290, 9304), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (9298, 9304), True, 'import matplotlib.pyplot as plt\n'), ((9305, 9315), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9313, 9315), True, 'import matplotlib.pyplot as plt\n'), ((9316, 9369), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/usaGDPandGNI_perCapita_2014.png"""'], {}), "('images/usaGDPandGNI_perCapita_2014.png')\n", (9327, 9369), True, 'import matplotlib.pyplot as plt\n'), ((10269, 10281), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10279, 10281), True, 'import matplotlib.pyplot as plt\n'), ((10282, 10354), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014', '"""-k"""'], {'label': '"""Adjusted to 2014 
CPI"""'}), "(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')\n", (10290, 10354), True, 'import matplotlib.pyplot as plt\n'), ((10355, 10450), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014UnderHalf', '"""--k"""'], {'label': '"""CPI each year adjusted +0.5%"""'}), "(years, usaGDPpercapita2014UnderHalf, '--k', label=\n 'CPI each year adjusted +0.5%')\n", (10363, 10450), True, 'import matplotlib.pyplot as plt\n'), ((10446, 10540), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014OverHalf', '"""-.k"""'], {'label': '"""CPI each year adjusted -0.5%"""'}), "(years, usaGDPpercapita2014OverHalf, '-.k', label=\n 'CPI each year adjusted -0.5%')\n", (10454, 10540), True, 'import matplotlib.pyplot as plt\n'), ((10536, 10554), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (10546, 10554), True, 'import matplotlib.pyplot as plt\n'), ((10555, 10612), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP per capita adjusted for inflation (USD)"""'], {}), "('GDP per capita adjusted for inflation (USD)')\n", (10565, 10612), True, 'import matplotlib.pyplot as plt\n'), ((10613, 10630), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (10623, 10630), True, 'import matplotlib.pyplot as plt\n'), ((10631, 10645), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (10639, 10645), True, 'import matplotlib.pyplot as plt\n'), ((10646, 10656), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10654, 10656), True, 'import matplotlib.pyplot as plt\n'), ((10657, 10715), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/usaGDPandGNI_perCapita_2014_half.png"""'], {}), "('images/usaGDPandGNI_perCapita_2014_half.png')\n", (10668, 10715), True, 'import matplotlib.pyplot as plt\n'), ((10716, 10728), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10726, 10728), True, 'import matplotlib.pyplot as plt\n'), ((10729, 10801), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014', '"""-k"""'], {'label': '"""Adjusted to 2014 CPI"""'}), "(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')\n", (10737, 10801), True, 'import matplotlib.pyplot as plt\n'), ((10802, 10896), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014UnderOne', '"""--k"""'], {'label': '"""CPI each year adjusted +1.0%"""'}), "(years, usaGDPpercapita2014UnderOne, '--k', label=\n 'CPI each year adjusted +1.0%')\n", (10810, 10896), True, 'import matplotlib.pyplot as plt\n'), ((10892, 10985), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014OverOne', '"""-.k"""'], {'label': '"""CPI each year adjusted -1.0%"""'}), "(years, usaGDPpercapita2014OverOne, '-.k', label=\n 'CPI each year adjusted -1.0%')\n", (10900, 10985), True, 'import matplotlib.pyplot as plt\n'), ((10981, 10999), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (10991, 10999), True, 'import matplotlib.pyplot as plt\n'), ((11000, 11057), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP per capita adjusted for inflation (USD)"""'], {}), "('GDP per capita adjusted for inflation (USD)')\n", (11010, 11057), True, 'import matplotlib.pyplot as plt\n'), ((11058, 11075), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (11068, 11075), True, 'import matplotlib.pyplot as plt\n'), ((11076, 11090), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (11084, 11090), True, 'import matplotlib.pyplot as plt\n'), ((11091, 11101), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11099, 11101), True, 'import matplotlib.pyplot as plt\n'), ((11102, 11159), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/usaGDPandGNI_perCapita_2014_one.png"""'], {}), "('images/usaGDPandGNI_perCapita_2014_one.png')\n", (11113, 11159), True, 'import matplotlib.pyplot as plt\n'), ((11160, 11172), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11170, 11172), True, 'import matplotlib.pyplot as plt\n'), ((11173, 11245), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014', '"""-k"""'], {'label': '"""Adjusted to 2014 CPI"""'}), "(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')\n", (11181, 11245), True, 'import matplotlib.pyplot as plt\n'), ((11246, 11340), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014UnderTwo', '"""--k"""'], {'label': '"""CPI each year adjusted +2.0%"""'}), "(years, usaGDPpercapita2014UnderTwo, '--k', label=\n 'CPI each year adjusted +2.0%')\n", (11254, 11340), True, 'import matplotlib.pyplot as plt\n'), ((11336, 11429), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'usaGDPpercapita2014OverTwo', '"""-.k"""'], {'label': '"""CPI each year adjusted -2.0%"""'}), "(years, usaGDPpercapita2014OverTwo, '-.k', label=\n 'CPI each year adjusted -2.0%')\n", (11344, 11429), True, 'import matplotlib.pyplot as plt\n'), ((11425, 11443), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (11435, 11443), True, 'import matplotlib.pyplot as plt\n'), ((11444, 11501), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GDP per capita adjusted for inflation (USD)"""'], {}), "('GDP per capita adjusted for inflation (USD)')\n", (11454, 11501), True, 'import matplotlib.pyplot as plt\n'), ((11502, 11519), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (11512, 11519), True, 'import matplotlib.pyplot as plt\n'), ((11520, 11534), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (11528, 11534), True, 'import matplotlib.pyplot as plt\n'), ((11535, 11545), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11543, 11545), True, 'import matplotlib.pyplot as plt\n'), ((11546, 11603), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/usaGDPandGNI_perCapita_2014_two.png"""'], {}), "('images/usaGDPandGNI_perCapita_2014_two.png')\n", (11557, 11603), True, 'import matplotlib.pyplot as plt\n')]
|
from typing import List, Union
import numpy as np
def slice_to_list(
start: Union[int, None],
stop: Union[int, None],
step: Union[int, None],
size: int = None,
) -> List[int]:
    """Expand the slice components (start, stop, step) into an explicit list of
    non-negative indices; size is needed to resolve a None or negative stop."""
    if start is None and stop is None:
if size is None:
raise ValueError("size required when start and stop are None")
else:
stop = size
elif stop is not None:
if stop < 0:
if size is None:
raise ValueError(
"size required when using negative stop index")
stop = size + stop
if stop < 0:
raise ValueError("negative stop index out of range")
    indices = list(
        range(
            start if start is not None else 0,
            stop if stop is not None else size,
            step if step is not None else 1,
        ))
    if np.min(indices) < 0:
        raise ValueError("negative start index not allowed")
    return indices
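# Minimal usage sketch (added):
#   slice_to_list(None, None, None, size=5) -> [0, 1, 2, 3, 4]
#   slice_to_list(1, -1, 2, size=6) -> [1, 3]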
|
[
"numpy.min"
] |
[((862, 871), 'numpy.min', 'np.min', (['l'], {}), '(l)\n', (868, 871), True, 'import numpy as np\n')]
|
# Load pickled data
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import tensorflow as tf
import MyAlexNet
import DataAugmentation as func
import glob
import csv
# TODO: Fill this in based on where you saved the training and testing data
training_file = "train.p"
validation_file = "valid.p"
testing_file = "test.p"
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train, X_train_size, X_train_bbox = train['features'], train['labels'], train['sizes'], train['coords']
X_valid, y_valid, X_valid_size, X_valid_bbox = valid['features'], valid['labels'], valid['sizes'], valid['coords']
X_test, y_test, X_test_size, X_test_bbox = test['features'], test['labels'], test['sizes'], test['coords']
# TODO: Number of training examples
n_train = len(X_train_size)
# TODO: Number of validation examples
print(len(X_valid_size))
n_validation = len(X_valid_size)
# TODO: Number of testing examples.
n_test = len(X_test_size)
# TODO: What's the shape of a traffic sign image?
print(X_train.shape)
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
# TODO: Number of training examples
n_train = len(X_train_size)
# TODO: Number of testing examples.
n_test = len(X_test_size)
# TODO: What's the shape of a traffic sign image?
image_shape = X_train.shape
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
img_size = X_train.shape[1] # Size of input images
print(img_size)
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
### Data exploration visualization goes here.
# Visualizations will be shown in the notebook.
num_of_samples = []
plt.figure(figsize=(12, 16.5))
for i in range(0, n_classes):
plt.subplot(11, 4, i + 1)
x_selected = X_train[y_train == i]
plt.imshow(x_selected[0, :, :, :]) # draw the first image of each class
plt.title(i)
plt.axis('off')
num_of_samples.append(len(x_selected))
plt.show()
# Plot number of images per class
plt.figure(figsize=(12, 4))
plt.bar(range(0, n_classes), num_of_samples)
plt.title("Distribution of the training dataset")
plt.xlabel("Class number")
plt.ylabel("Number of images")
plt.show()
print("Min number of images in training data per class =", min(num_of_samples))
print("Max number of images in training data per class =", max(num_of_samples))
### Data exploration visualization goes here.
# Visualizations will be shown in the notebook.
num_of_samples = []
plt.figure(figsize=(12, 16.5))
for i in range(0, n_classes):
plt.subplot(11, 4, i + 1)
x_selected = X_valid[y_valid == i]
plt.imshow(x_selected[0, :, :, :]) # draw the first image of each class
plt.title(i)
plt.axis('off')
num_of_samples.append(len(x_selected))
plt.show()
# Plot number of images per class
plt.figure(figsize=(12, 4))
plt.bar(range(0, n_classes), num_of_samples)
plt.title("Distribution of the validation dataset")
plt.xlabel("Class number")
plt.ylabel("Number of images")
plt.show()
print("Min number of images in vlidation data per class =", min(num_of_samples))
print("Max number of images in validation data per class =", max(num_of_samples))
### Data exploration visualization goes here.
# Visualizations will be shown in the notebook.
num_of_samples = []
plt.figure(figsize=(12, 16.5))
for i in range(0, n_classes):
plt.subplot(11, 4, i + 1)
x_selected = X_test[y_test == i]
plt.imshow(x_selected[0, :, :, :]) # draw the first image of each class
plt.title(i)
plt.axis('off')
num_of_samples.append(len(x_selected))
plt.show()
# Plot number of images per class
plt.figure(figsize=(12, 4))
plt.bar(range(0, n_classes), num_of_samples)
plt.title("Distribution of the test dataset")
plt.xlabel("Class number")
plt.ylabel("Number of images")
plt.show()
print("Min number of images in test data per class =", min(num_of_samples))
print("Max number of images in test data per class =", max(num_of_samples))
### For Data Augmentation
# X_train_aug = []
# y_train_aug = []
# def create_data(n):
# for i in range(100):
# img=X_train[i]
# X_train_aug.append(img)
# y_train_aug.append(y_train[i])
# #Generate n new images out of each input image
# for j in range(n):
# X_train_aug.append(augment_img(img))
# y_train_aug.append(y_train[i])
# X_train_crop = np.ndarray(shape=[X_train.shape[0],IMAGE_SIZE,IMAGE_SIZE,
# 3],dtype = np.uint8)
# for i in range(n_train):
# X_train_crop[i] = crop_img(X_train[i])
# print(i)
print(X_train.shape)
print(X_train.dtype)
print(y_train.shape)
print(y_train.dtype)
print(X_valid.shape)
print(X_valid.dtype)
print(y_valid.shape)
print(y_train.dtype)
print(X_test.shape)
print(X_test.dtype)
print(y_test.shape)
print(y_test.dtype)
filename = "updated_test.p"
file = open(filename, 'rb')
X_test = pickle.load(file)
filename = "updated_train.p"
file = open(filename, 'rb')
X_train = pickle.load(file)
filename = "updated_valid.p"
file = open(filename, 'rb')
X_valid = pickle.load(file)
test = X_train[10000]
transformation = func.transform_img(test)
augmentation = func.augment_img(test)
func.show_imgs(test, transformation, augmentation)
print(X_train.shape)
print(X_train.dtype)
print(y_train.shape)
print(y_train.dtype)
print(X_valid.shape)
print(X_valid.dtype)
print(y_valid.shape)
print(y_train.dtype)
print(X_test.shape)
print(X_test.dtype)
print(y_test.shape)
print(y_test.dtype)
# Data Normalization
print(np.mean(X_train))
X_train = (X_train - np.mean(X_train)) / 255.0
print(np.mean(X_train))
print(np.mean(X_valid))
X_valid = (X_valid - np.mean(X_valid)) / 255.0
print(np.mean(X_valid))
print(np.mean(X_test))
X_test = (X_test - np.mean(X_test)) / 255.0
print(np.mean(X_test))
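# Note (added): subtracting the mean and dividing by 255 roughly zero-centers the
# pixel values and keeps them in a small range, which generally helps
# gradient-based training converge.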
## Shuffle the training dataset
print(X_train.shape)
print(y_train.shape)
X_train, y_train = shuffle(X_train, y_train)
print(X_train.shape)
print(y_train.shape)
print('done')
EPOCHS = 90
BATCH_SIZE = 128
print('done')
tf.reset_default_graph()
x = tf.placeholder(tf.float32, (None, 51, 51, 3))
y = tf.placeholder(tf.int32, (None,))
keep_prob = tf.placeholder(tf.float32) # probability to keep units
one_hot_y = tf.one_hot(y, 43)
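# tf.one_hot turns each integer label into a length-43 indicator vector,
# e.g. label 2 -> [0, 0, 1, 0, ..., 0]; 43 is the number of sign classes here.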
print('done')
rate = 0.0005
save_file = './new_model.ckpt'
logits = MyAlexNet.AlexNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
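# correct_prediction compares the arg-max class of the logits with the arg-max of
# the one-hot label; casting the booleans to float32 and averaging yields accuracy.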
Saver = tf.train.Saver()
def evaluate(X_data, y_data):
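    # Accuracy is computed per mini-batch and weighted by batch size, so a
    # smaller final batch does not skew the overall average.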
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
print('done')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
print("Epoch: ", i)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.75})
validation_accuracy = evaluate(X_valid, y_valid)
print("EPOCH {} ...".format(i + 1))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
Saver.save(sess,save_file)
print("Model saved")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
test_accuracy = evaluate(X_test, y_test)
print("Test Set Accuracy = {:.3f}".format(test_accuracy))
graph = tf.get_default_graph()
signs_class=[]
with open('signnames.csv', 'rt') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for row in reader:
signs_class.append((row['SignName']))
my_labels = [37,38,17,15,12,13,1,0,35,20,3,5]
test = func.load_images("./new_images1/")
test_images = np.uint8(np.zeros((len(test), 51, 51, 3)))
test_images_labels=np.ndarray(shape=[len(test)],dtype=np.uint8)
test_images[0:12]=test[0:12]
test_images_labels[0:12]=my_labels[0:12]
plt.figure(figsize=(12, 8))
for i in range(len(test)):
plt.subplot(3, 4, i+1)
plt.imshow(test[i])
plt.title(signs_class[my_labels[i]])
plt.axis('off')
plt.show()
test_images=(test_images-np.mean(test_images))/255.0
### Visualize the softmax probabilities here.
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
new_test_accuracy = evaluate(test_images, test_images_labels)
print("New Test Set Accuracy = {:.3f}".format(new_test_accuracy))
softmax_logits = tf.nn.softmax(logits)
top_k = tf.nn.top_k(softmax_logits, k=5)
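# tf.nn.top_k returns a (values, indices) pair; with k=5 each row carries the five
# largest softmax probabilities and the matching class ids, used for the bar plots below.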
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
my_softmax_logits = sess.run(softmax_logits, feed_dict={x: test_images, keep_prob: 1.0})
my_top_k = sess.run(top_k, feed_dict={x: test_images, keep_prob: 1.0})
print(len(test))
plt.figure(figsize=(16, 21))
for i in range(12):
plt.subplot(12, 2, 2*i+1)
plt.imshow(test[i])
plt.title(i)
plt.axis('off')
plt.subplot(12, 2, 2*i+2)
plt.barh(np.arange(1, 6, 1), my_top_k.values[i, :])
labs=[signs_class[j] for j in my_top_k.indices[i]]
plt.yticks(np.arange(1, 6, 1), labs)
plt.show()
my_labels = [3, 11, 1, 12, 38, 34, 18, 25]
test = []
for i, img in enumerate(glob.glob('./new_images2/*x.png')):
image = func.crop_img(cv2.imread(img))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
test.append(image)
test_images=X_test_data=np.uint8(np.zeros((len(test),51,51,3)))
test_images_labels=np.ndarray(shape=[len(test)],dtype=np.uint8)
test_images[0:len(test)]=test[0:len(test)]
test_images_labels[0:len(test)]=my_labels[0:len(test)]
plt.figure(figsize=(12, 8))
for i in range(len(test)):
plt.subplot(3, 4, i+1)
plt.imshow(test[i])
plt.title(signs_class[my_labels[i]])
plt.axis('off')
plt.show()
test_images=(test_images-np.mean(test_images))/255.0
### Visualize the softmax probabilities here.
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
new_test_accuracy = evaluate(test_images, test_images_labels)
print("New Test Set Accuracy = {:.3f}".format(new_test_accuracy))
softmax_logits = tf.nn.softmax(logits)
top_k = tf.nn.top_k(softmax_logits, k=5)
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
my_softmax_logits = sess.run(softmax_logits, feed_dict={x: test_images, keep_prob: 1.0})
my_top_k = sess.run(top_k, feed_dict={x: test_images, keep_prob: 1.0})
print(len(test))
plt.figure(figsize=(16, 21))
for i in range(len(test)):
plt.subplot(12, 2, 2*i+1)
plt.imshow(test[i])
plt.title(i)
plt.axis('off')
plt.subplot(12, 2, 2*i+2)
plt.barh(np.arange(1, 6, 1), my_top_k.values[i, :])
labs=[signs_class[j] for j in my_top_k.indices[i]]
plt.yticks(np.arange(1, 6, 1), labs)
plt.show()
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplotlib sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
#
#def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# # Here make sure to preprocess your image_input in a way your network expects
# # with size, normalization, etc. if needed
# # image_input =
# # Note: x should be the same name as your network's tensorflow data placeholder variable
# # If you get an error that tf_activation is not defined, it may be having trouble
# # accessing the variable from inside a function
# activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
# featuremaps = activation.shape[3]
# plt.figure(plt_num, figsize=(15,15))
# for featuremap in range(featuremaps):
# plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
# plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
# if activation_min != -1 & activation_max != -1:
# plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
# elif activation_max != -1:
# plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
# elif activation_min !=-1:
# plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
# else:
# plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
#
#
#
#
#test1=X_train[6500]
#plt.imshow(test1)
#test1= (test1- np.mean(test1)) / 255.0
#outputFeatureMap(test1)
|
[
"matplotlib.pyplot.title",
"DataAugmentation.transform_img",
"tensorflow.reset_default_graph",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.mean",
"numpy.arange",
"glob.glob",
"tensorflow.get_default_graph",
"numpy.unique",
"matplotlib.pyplot.xlabel",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"cv2.cvtColor",
"tensorflow.nn.top_k",
"matplotlib.pyplot.imshow",
"tensorflow.placeholder",
"tensorflow.cast",
"matplotlib.pyplot.show",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"csv.DictReader",
"tensorflow.reduce_mean",
"tensorflow.Session",
"DataAugmentation.load_images",
"DataAugmentation.augment_img",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"tensorflow.train.import_meta_graph",
"tensorflow.argmax",
"matplotlib.pyplot.axis",
"cv2.imread",
"DataAugmentation.show_imgs",
"MyAlexNet.AlexNet",
"sklearn.utils.shuffle",
"tensorflow.train.AdamOptimizer",
"tensorflow.get_default_session"
] |
[((2052, 2082), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 16.5)'}), '(figsize=(12, 16.5))\n', (2062, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2347, 2357), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2355, 2357), True, 'import matplotlib.pyplot as plt\n'), ((2396, 2423), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (2406, 2423), True, 'import matplotlib.pyplot as plt\n'), ((2471, 2520), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of the training dataset"""'], {}), "('Distribution of the training dataset')\n", (2480, 2520), True, 'import matplotlib.pyplot as plt\n'), ((2522, 2548), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Class number"""'], {}), "('Class number')\n", (2532, 2548), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2580), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of images"""'], {}), "('Number of images')\n", (2560, 2580), True, 'import matplotlib.pyplot as plt\n'), ((2582, 2592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2590, 2592), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2909), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 16.5)'}), '(figsize=(12, 16.5))\n', (2889, 2909), True, 'import matplotlib.pyplot as plt\n'), ((3174, 3184), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3182, 3184), True, 'import matplotlib.pyplot as plt\n'), ((3223, 3250), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (3233, 3250), True, 'import matplotlib.pyplot as plt\n'), ((3298, 3349), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of the validation dataset"""'], {}), "('Distribution of the validation dataset')\n", (3307, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3351, 3377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Class number"""'], {}), "('Class number')\n", (3361, 3377), True, 'import matplotlib.pyplot as plt\n'), ((3379, 3409), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of images"""'], {}), "('Number of images')\n", (3389, 3409), True, 'import matplotlib.pyplot as plt\n'), ((3411, 3421), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3419, 3421), True, 'import matplotlib.pyplot as plt\n'), ((3711, 3741), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 16.5)'}), '(figsize=(12, 16.5))\n', (3721, 3741), True, 'import matplotlib.pyplot as plt\n'), ((4004, 4014), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4012, 4014), True, 'import matplotlib.pyplot as plt\n'), ((4053, 4080), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (4063, 4080), True, 'import matplotlib.pyplot as plt\n'), ((4128, 4173), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of the test dataset"""'], {}), "('Distribution of the test dataset')\n", (4137, 4173), True, 'import matplotlib.pyplot as plt\n'), ((4175, 4201), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Class number"""'], {}), "('Class number')\n", (4185, 4201), True, 'import matplotlib.pyplot as plt\n'), ((4203, 4233), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of images"""'], {}), "('Number of images')\n", (4213, 4233), True, 'import matplotlib.pyplot as plt\n'), ((4235, 4245), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4243, 4245), True, 'import matplotlib.pyplot as plt\n'), ((5367, 5384), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5378, 5384), 
False, 'import pickle\n'), ((5457, 5474), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5468, 5474), False, 'import pickle\n'), ((5547, 5564), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5558, 5564), False, 'import pickle\n'), ((5608, 5632), 'DataAugmentation.transform_img', 'func.transform_img', (['test'], {}), '(test)\n', (5626, 5632), True, 'import DataAugmentation as func\n'), ((5649, 5671), 'DataAugmentation.augment_img', 'func.augment_img', (['test'], {}), '(test)\n', (5665, 5671), True, 'import DataAugmentation as func\n'), ((5673, 5723), 'DataAugmentation.show_imgs', 'func.show_imgs', (['test', 'transformation', 'augmentation'], {}), '(test, transformation, augmentation)\n', (5687, 5723), True, 'import DataAugmentation as func\n'), ((6412, 6437), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (6419, 6437), False, 'from sklearn.utils import shuffle\n'), ((6550, 6574), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (6572, 6574), True, 'import tensorflow as tf\n'), ((6582, 6627), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 51, 51, 3)'], {}), '(tf.float32, (None, 51, 51, 3))\n', (6596, 6627), True, 'import tensorflow as tf\n'), ((6633, 6663), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'None'], {}), '(tf.int32, None)\n', (6647, 6663), True, 'import tensorflow as tf\n'), ((6679, 6705), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (6693, 6705), True, 'import tensorflow as tf\n'), ((6748, 6765), 'tensorflow.one_hot', 'tf.one_hot', (['y', '(43)'], {}), '(y, 43)\n', (6758, 6765), True, 'import tensorflow as tf\n'), ((6844, 6864), 'MyAlexNet.AlexNet', 'MyAlexNet.AlexNet', (['x'], {}), '(x)\n', (6861, 6864), False, 'import MyAlexNet\n'), ((6882, 6954), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'one_hot_y'}), '(logits=logits, labels=one_hot_y)\n', (6921, 6954), True, 'import tensorflow as tf\n'), ((6973, 7002), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (6987, 7002), True, 'import tensorflow as tf\n'), ((7016, 7058), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'rate'}), '(learning_rate=rate)\n', (7038, 7058), True, 'import tensorflow as tf\n'), ((7289, 7305), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7303, 7305), True, 'import tensorflow as tf\n'), ((9190, 9224), 'DataAugmentation.load_images', 'func.load_images', (['"""./new_images1/"""'], {}), "('./new_images1/')\n", (9206, 9224), True, 'import DataAugmentation as func\n'), ((9428, 9455), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (9438, 9455), True, 'import matplotlib.pyplot as plt\n'), ((9602, 9612), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9610, 9612), True, 'import matplotlib.pyplot as plt\n'), ((10104, 10125), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (10117, 10125), True, 'import tensorflow as tf\n'), ((10135, 10167), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['softmax_logits'], {'k': '(5)'}), '(softmax_logits, k=5)\n', (10146, 10167), True, 'import tensorflow as tf\n'), ((10589, 10617), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 21)'}), '(figsize=(16, 21))\n', (10599, 10617), True, 'import matplotlib.pyplot as plt\n'), ((10922, 10932), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10930, 10932), True, 'import matplotlib.pyplot as plt\n'), ((11408, 11435), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (11418, 11435), True, 'import matplotlib.pyplot as plt\n'), ((11582, 11592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11590, 11592), True, 'import matplotlib.pyplot as plt\n'), ((12084, 12105), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (12097, 12105), True, 'import tensorflow as tf\n'), ((12115, 12147), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['softmax_logits'], {'k': '(5)'}), '(softmax_logits, k=5)\n', (12126, 12147), True, 'import tensorflow as tf\n'), ((12569, 12597), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 21)'}), '(figsize=(16, 21))\n', (12579, 12597), True, 'import matplotlib.pyplot as plt\n'), ((12909, 12919), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12917, 12919), True, 'import matplotlib.pyplot as plt\n'), ((458, 472), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (469, 472), False, 'import pickle\n'), ((531, 545), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (542, 545), False, 'import pickle\n'), ((600, 614), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (611, 614), False, 'import pickle\n'), ((1345, 1363), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (1354, 1363), True, 'import numpy as np\n'), ((1661, 1679), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (1670, 1679), True, 'import numpy as np\n'), ((2119, 2144), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(11)', '(4)', '(i + 1)'], {}), '(11, 4, i + 1)\n', (2130, 2144), True, 'import matplotlib.pyplot as plt\n'), ((2190, 2224), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x_selected[0, :, :, :]'], {}), '(x_selected[0, :, :, :])\n', (2200, 2224), True, 'import matplotlib.pyplot as plt\n'), ((2268, 2280), 'matplotlib.pyplot.title', 'plt.title', (['i'], {}), '(i)\n', (2277, 2280), True, 'import matplotlib.pyplot as plt\n'), ((2286, 2301), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2294, 2301), True, 'import matplotlib.pyplot as plt\n'), ((2946, 2971), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(11)', '(4)', '(i + 1)'], {}), '(11, 4, i + 1)\n', (2957, 2971), True, 'import matplotlib.pyplot as plt\n'), ((3017, 3051), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x_selected[0, :, :, :]'], {}), '(x_selected[0, :, :, :])\n', (3027, 3051), True, 'import matplotlib.pyplot as plt\n'), ((3095, 3107), 'matplotlib.pyplot.title', 'plt.title', (['i'], {}), '(i)\n', (3104, 3107), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3128), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3121, 3128), True, 'import matplotlib.pyplot as plt\n'), ((3778, 3803), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(11)', '(4)', '(i + 1)'], {}), '(11, 4, i + 1)\n', (3789, 3803), True, 'import matplotlib.pyplot as plt\n'), ((3847, 3881), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x_selected[0, :, :, :]'], {}), '(x_selected[0, :, :, :])\n', (3857, 3881), True, 'import matplotlib.pyplot as plt\n'), ((3925, 3937), 'matplotlib.pyplot.title', 'plt.title', (['i'], {}), '(i)\n', (3934, 3937), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3958), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3951, 3958), True, 'import matplotlib.pyplot as plt\n'), ((6025, 6041), 'numpy.mean', 'np.mean', (['X_train'], {}), 
'(X_train)\n', (6032, 6041), True, 'import numpy as np\n'), ((6098, 6114), 'numpy.mean', 'np.mean', (['X_train'], {}), '(X_train)\n', (6105, 6114), True, 'import numpy as np\n'), ((6125, 6141), 'numpy.mean', 'np.mean', (['X_valid'], {}), '(X_valid)\n', (6132, 6141), True, 'import numpy as np\n'), ((6198, 6214), 'numpy.mean', 'np.mean', (['X_valid'], {}), '(X_valid)\n', (6205, 6214), True, 'import numpy as np\n'), ((6225, 6240), 'numpy.mean', 'np.mean', (['X_test'], {}), '(X_test)\n', (6232, 6240), True, 'import numpy as np\n'), ((6294, 6309), 'numpy.mean', 'np.mean', (['X_test'], {}), '(X_test)\n', (6301, 6309), True, 'import numpy as np\n'), ((7153, 7173), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (7162, 7173), True, 'import tensorflow as tf\n'), ((7175, 7198), 'tensorflow.argmax', 'tf.argmax', (['one_hot_y', '(1)'], {}), '(one_hot_y, 1)\n', (7184, 7198), True, 'import tensorflow as tf\n'), ((7237, 7276), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (7244, 7276), True, 'import tensorflow as tf\n'), ((7407, 7431), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (7429, 7431), True, 'import tensorflow as tf\n'), ((7805, 7817), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7815, 7817), True, 'import tensorflow as tf\n'), ((8587, 8599), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8597, 8599), True, 'import tensorflow as tf\n'), ((8672, 8739), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./Trained Model/final_model.ckpt.meta"""'], {}), "('./Trained Model/final_model.ckpt.meta')\n", (8698, 8739), True, 'import tensorflow as tf\n'), ((8924, 8946), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (8944, 8946), True, 'import tensorflow as tf\n'), ((9025, 9063), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (9039, 9063), False, 'import csv\n'), ((9489, 9513), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {}), '(3, 4, i + 1)\n', (9500, 9513), True, 'import matplotlib.pyplot as plt\n'), ((9517, 9536), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test[i]'], {}), '(test[i])\n', (9527, 9536), True, 'import matplotlib.pyplot as plt\n'), ((9543, 9579), 'matplotlib.pyplot.title', 'plt.title', (['signs_class[my_labels[i]]'], {}), '(signs_class[my_labels[i]])\n', (9552, 9579), True, 'import matplotlib.pyplot as plt\n'), ((9585, 9600), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9593, 9600), True, 'import matplotlib.pyplot as plt\n'), ((9720, 9743), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (9730, 9743), True, 'import tensorflow as tf\n'), ((9816, 9883), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./Trained Model/final_model.ckpt.meta"""'], {}), "('./Trained Model/final_model.ckpt.meta')\n", (9842, 9883), True, 'import tensorflow as tf\n'), ((10174, 10197), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (10184, 10197), True, 'import tensorflow as tf\n'), ((10270, 10337), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./Trained Model/final_model.ckpt.meta"""'], {}), "('./Trained Model/final_model.ckpt.meta')\n", (10296, 10337), True, 'import tensorflow as tf\n'), ((10644, 10673), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(12)', '(2)', '(2 * i + 1)'], {}), '(12, 2, 
2 * i + 1)\n', (10655, 10673), True, 'import matplotlib.pyplot as plt\n'), ((10675, 10694), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test[i]'], {}), '(test[i])\n', (10685, 10694), True, 'import matplotlib.pyplot as plt\n'), ((10701, 10713), 'matplotlib.pyplot.title', 'plt.title', (['i'], {}), '(i)\n', (10710, 10713), True, 'import matplotlib.pyplot as plt\n'), ((10719, 10734), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10727, 10734), True, 'import matplotlib.pyplot as plt\n'), ((10740, 10769), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(12)', '(2)', '(2 * i + 2)'], {}), '(12, 2, 2 * i + 2)\n', (10751, 10769), True, 'import matplotlib.pyplot as plt\n'), ((11015, 11048), 'glob.glob', 'glob.glob', (['"""./new_images2/*x.png"""'], {}), "('./new_images2/*x.png')\n", (11024, 11048), False, 'import glob\n'), ((11108, 11146), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (11120, 11146), False, 'import cv2\n'), ((11469, 11493), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {}), '(3, 4, i + 1)\n', (11480, 11493), True, 'import matplotlib.pyplot as plt\n'), ((11497, 11516), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test[i]'], {}), '(test[i])\n', (11507, 11516), True, 'import matplotlib.pyplot as plt\n'), ((11523, 11559), 'matplotlib.pyplot.title', 'plt.title', (['signs_class[my_labels[i]]'], {}), '(signs_class[my_labels[i]])\n', (11532, 11559), True, 'import matplotlib.pyplot as plt\n'), ((11565, 11580), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (11573, 11580), True, 'import matplotlib.pyplot as plt\n'), ((11700, 11723), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (11710, 11723), True, 'import tensorflow as tf\n'), ((11796, 11863), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./Trained Model/final_model.ckpt.meta"""'], {}), "('./Trained Model/final_model.ckpt.meta')\n", (11822, 11863), True, 'import tensorflow as tf\n'), ((12154, 12177), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (12164, 12177), True, 'import tensorflow as tf\n'), ((12250, 12317), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./Trained Model/final_model.ckpt.meta"""'], {}), "('./Trained Model/final_model.ckpt.meta')\n", (12276, 12317), True, 'import tensorflow as tf\n'), ((12631, 12660), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(12)', '(2)', '(2 * i + 1)'], {}), '(12, 2, 2 * i + 1)\n', (12642, 12660), True, 'import matplotlib.pyplot as plt\n'), ((12662, 12681), 'matplotlib.pyplot.imshow', 'plt.imshow', (['test[i]'], {}), '(test[i])\n', (12672, 12681), True, 'import matplotlib.pyplot as plt\n'), ((12688, 12700), 'matplotlib.pyplot.title', 'plt.title', (['i'], {}), '(i)\n', (12697, 12700), True, 'import matplotlib.pyplot as plt\n'), ((12706, 12721), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12714, 12721), True, 'import matplotlib.pyplot as plt\n'), ((12727, 12756), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(12)', '(2)', '(2 * i + 2)'], {}), '(12, 2, 2 * i + 2)\n', (12738, 12756), True, 'import matplotlib.pyplot as plt\n'), ((6065, 6081), 'numpy.mean', 'np.mean', (['X_train'], {}), '(X_train)\n', (6072, 6081), True, 'import numpy as np\n'), ((6165, 6181), 'numpy.mean', 'np.mean', (['X_valid'], {}), '(X_valid)\n', (6172, 6181), True, 'import numpy as np\n'), ((6262, 6277), 'numpy.mean', 'np.mean', (['X_test'], {}), 
'(X_test)\n', (6269, 6277), True, 'import numpy as np\n'), ((7841, 7874), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7872, 7874), True, 'import tensorflow as tf\n'), ((8005, 8030), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (8012, 8030), False, 'from sklearn.utils import shuffle\n'), ((8623, 8656), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8654, 8656), True, 'import tensorflow as tf\n'), ((9639, 9659), 'numpy.mean', 'np.mean', (['test_images'], {}), '(test_images)\n', (9646, 9659), True, 'import numpy as np\n'), ((9767, 9800), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9798, 9800), True, 'import tensorflow as tf\n'), ((10221, 10254), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10252, 10254), True, 'import tensorflow as tf\n'), ((10780, 10798), 'numpy.arange', 'np.arange', (['(1)', '(6)', '(1)'], {}), '(1, 6, 1)\n', (10789, 10798), True, 'import numpy as np\n'), ((10895, 10913), 'numpy.arange', 'np.arange', (['(1)', '(6)', '(1)'], {}), '(1, 6, 1)\n', (10904, 10913), True, 'import numpy as np\n'), ((11078, 11093), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (11088, 11093), False, 'import cv2\n'), ((11619, 11639), 'numpy.mean', 'np.mean', (['test_images'], {}), '(test_images)\n', (11626, 11639), True, 'import numpy as np\n'), ((11747, 11780), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11778, 11780), True, 'import tensorflow as tf\n'), ((12201, 12234), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (12232, 12234), True, 'import tensorflow as tf\n'), ((12767, 12785), 'numpy.arange', 'np.arange', (['(1)', '(6)', '(1)'], {}), '(1, 6, 1)\n', (12776, 12785), True, 'import numpy as np\n'), ((12882, 12900), 'numpy.arange', 'np.arange', (['(1)', '(6)', '(1)'], {}), '(1, 6, 1)\n', (12891, 12900), True, 'import numpy as np\n')]
|
# Third party imports
import tensorflow as tf
import numpy as np
# batch_sizexheightxwidthxdepthxchan
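# Soft Dice score per sample (overlap summed over the spatial dims), averaged and
# negated so that minimizing the loss maximizes overlap.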
def diceLoss(y_true, y_pred):
top = 2*tf.reduce_sum(y_true * y_pred, [1, 2, 3])
bottom = tf.maximum(tf.reduce_sum(y_true+y_pred, [1, 2, 3]), 1e-5)
dice = tf.reduce_mean(top/bottom)
return -dice
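# Smoothness penalty on a dense 3D displacement field: mean absolute (l1) or
# squared (l2) finite differences along each spatial axis.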
def gradientLoss(penalty='l1'):
def loss(y_true, y_pred):
dy = tf.abs(y_pred[:, 1:, :, :, :] - y_pred[:, :-1, :, :, :])
dx = tf.abs(y_pred[:, :, 1:, :, :] - y_pred[:, :, :-1, :, :])
dz = tf.abs(y_pred[:, :, :, 1:, :] - y_pred[:, :, :, :-1, :])
if (penalty == 'l2'):
dy = dy * dy
dx = dx * dx
dz = dz * dz
d = tf.reduce_mean(dx)+tf.reduce_mean(dy)+tf.reduce_mean(dz)
return d/3.0
return loss
def gradientLoss2D():
def loss(y_true, y_pred):
dy = tf.abs(y_pred[:, 1:, :, :] - y_pred[:, :-1, :, :])
dx = tf.abs(y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :])
dy = dy * dy
dx = dx * dx
d = tf.reduce_mean(dx)+tf.reduce_mean(dy)
return d/2.0
return loss
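# Local (windowed) normalized cross-correlation, computed with box filters via conv3d.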
def cc3D(win=[9, 9, 9], voxel_weights=None):
def loss(I, J):
I2 = I*I
J2 = J*J
IJ = I*J
filt = tf.ones([win[0], win[1], win[2], 1, 1])
I_sum = tf.nn.conv3d(I, filt, [1, 1, 1, 1, 1], "SAME")
J_sum = tf.nn.conv3d(J, filt, [1, 1, 1, 1, 1], "SAME")
I2_sum = tf.nn.conv3d(I2, filt, [1, 1, 1, 1, 1], "SAME")
J2_sum = tf.nn.conv3d(J2, filt, [1, 1, 1, 1, 1], "SAME")
IJ_sum = tf.nn.conv3d(IJ, filt, [1, 1, 1, 1, 1], "SAME")
win_size = win[0]*win[1]*win[2]
u_I = I_sum/win_size
u_J = J_sum/win_size
cross = IJ_sum - u_J*I_sum - u_I*J_sum + u_I*u_J*win_size
I_var = I2_sum - 2 * u_I * I_sum + u_I*u_I*win_size
J_var = J2_sum - 2 * u_J * J_sum + u_J*u_J*win_size
cc = cross*cross / (I_var*J_var+1e-5)
# if(voxel_weights is not None):
# cc = cc * voxel_weights
return -1.0*tf.reduce_mean(cc)
return loss
def cc2D(win=[9, 9]):
def loss(I, J):
I2 = tf.multiply(I, I)
J2 = tf.multiply(J, J)
IJ = tf.multiply(I, J)
sum_filter = tf.ones([win[0], win[1], 1, 1])
I_sum = tf.nn.conv2d(I, sum_filter, [1, 1, 1, 1], "SAME")
J_sum = tf.nn.conv2d(J, sum_filter, [1, 1, 1, 1], "SAME")
I2_sum = tf.nn.conv2d(I2, sum_filter, [1, 1, 1, 1], "SAME")
J2_sum = tf.nn.conv2d(J2, sum_filter, [1, 1, 1, 1], "SAME")
IJ_sum = tf.nn.conv2d(IJ, sum_filter, [1, 1, 1, 1], "SAME")
win_size = win[0]*win[1]
u_I = I_sum/win_size
u_J = J_sum/win_size
cross = IJ_sum - u_J*I_sum - u_I*J_sum + u_I*u_J*win_size
I_var = I2_sum - 2 * u_I * I_sum + u_I*u_I*win_size
J_var = J2_sum - 2 * u_J * J_sum + u_J*u_J*win_size
cc = cross*cross / (I_var*J_var + np.finfo(float).eps)
return -1.0*tf.reduce_mean(cc)
return loss
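# Example (sketch, not part of the original module): each factory above returns a
# closure with the (y_true, y_pred) signature Keras expects, so it can be passed
# straight to compile(). The model name `vxm_net` and the loss weights below are
# hypothetical.
#
# vxm_net.compile(optimizer='adam',
#                 loss=[cc3D(win=[9, 9, 9]), gradientLoss('l2')],
#                 loss_weights=[1.0, 0.01])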
|
[
"tensorflow.ones",
"tensorflow.abs",
"tensorflow.reduce_sum",
"tensorflow.reduce_mean",
"tensorflow.nn.conv3d",
"tensorflow.multiply",
"numpy.finfo",
"tensorflow.nn.conv2d"
] |
[((272, 300), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(top / bottom)'], {}), '(top / bottom)\n', (286, 300), True, 'import tensorflow as tf\n'), ((148, 189), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(y_true * y_pred)', '[1, 2, 3]'], {}), '(y_true * y_pred, [1, 2, 3])\n', (161, 189), True, 'import tensorflow as tf\n'), ((214, 255), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(y_true + y_pred)', '[1, 2, 3]'], {}), '(y_true + y_pred, [1, 2, 3])\n', (227, 255), True, 'import tensorflow as tf\n'), ((393, 449), 'tensorflow.abs', 'tf.abs', (['(y_pred[:, 1:, :, :, :] - y_pred[:, :-1, :, :, :])'], {}), '(y_pred[:, 1:, :, :, :] - y_pred[:, :-1, :, :, :])\n', (399, 449), True, 'import tensorflow as tf\n'), ((463, 519), 'tensorflow.abs', 'tf.abs', (['(y_pred[:, :, 1:, :, :] - y_pred[:, :, :-1, :, :])'], {}), '(y_pred[:, :, 1:, :, :] - y_pred[:, :, :-1, :, :])\n', (469, 519), True, 'import tensorflow as tf\n'), ((533, 589), 'tensorflow.abs', 'tf.abs', (['(y_pred[:, :, :, 1:, :] - y_pred[:, :, :, :-1, :])'], {}), '(y_pred[:, :, :, 1:, :] - y_pred[:, :, :, :-1, :])\n', (539, 589), True, 'import tensorflow as tf\n'), ((870, 920), 'tensorflow.abs', 'tf.abs', (['(y_pred[:, 1:, :, :] - y_pred[:, :-1, :, :])'], {}), '(y_pred[:, 1:, :, :] - y_pred[:, :-1, :, :])\n', (876, 920), True, 'import tensorflow as tf\n'), ((934, 984), 'tensorflow.abs', 'tf.abs', (['(y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :])'], {}), '(y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :])\n', (940, 984), True, 'import tensorflow as tf\n'), ((1251, 1290), 'tensorflow.ones', 'tf.ones', (['[win[0], win[1], win[2], 1, 1]'], {}), '([win[0], win[1], win[2], 1, 1])\n', (1258, 1290), True, 'import tensorflow as tf\n'), ((1308, 1354), 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['I', 'filt', '[1, 1, 1, 1, 1]', '"""SAME"""'], {}), "(I, filt, [1, 1, 1, 1, 1], 'SAME')\n", (1320, 1354), True, 'import tensorflow as tf\n'), ((1371, 1417), 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['J', 'filt', '[1, 1, 1, 1, 1]', '"""SAME"""'], {}), "(J, filt, [1, 1, 1, 1, 1], 'SAME')\n", (1383, 1417), True, 'import tensorflow as tf\n'), ((1435, 1482), 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['I2', 'filt', '[1, 1, 1, 1, 1]', '"""SAME"""'], {}), "(I2, filt, [1, 1, 1, 1, 1], 'SAME')\n", (1447, 1482), True, 'import tensorflow as tf\n'), ((1500, 1547), 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['J2', 'filt', '[1, 1, 1, 1, 1]', '"""SAME"""'], {}), "(J2, filt, [1, 1, 1, 1, 1], 'SAME')\n", (1512, 1547), True, 'import tensorflow as tf\n'), ((1565, 1612), 'tensorflow.nn.conv3d', 'tf.nn.conv3d', (['IJ', 'filt', '[1, 1, 1, 1, 1]', '"""SAME"""'], {}), "(IJ, filt, [1, 1, 1, 1, 1], 'SAME')\n", (1577, 1612), True, 'import tensorflow as tf\n'), ((2136, 2153), 'tensorflow.multiply', 'tf.multiply', (['I', 'I'], {}), '(I, I)\n', (2147, 2153), True, 'import tensorflow as tf\n'), ((2167, 2184), 'tensorflow.multiply', 'tf.multiply', (['J', 'J'], {}), '(J, J)\n', (2178, 2184), True, 'import tensorflow as tf\n'), ((2198, 2215), 'tensorflow.multiply', 'tf.multiply', (['I', 'J'], {}), '(I, J)\n', (2209, 2215), True, 'import tensorflow as tf\n'), ((2238, 2269), 'tensorflow.ones', 'tf.ones', (['[win[0], win[1], 1, 1]'], {}), '([win[0], win[1], 1, 1])\n', (2245, 2269), True, 'import tensorflow as tf\n'), ((2287, 2336), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['I', 'sum_filter', '[1, 1, 1, 1]', '"""SAME"""'], {}), "(I, sum_filter, [1, 1, 1, 1], 'SAME')\n", (2299, 2336), True, 'import tensorflow as tf\n'), ((2353, 2402), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['J', 'sum_filter', '[1, 1, 1, 1]', 
'"""SAME"""'], {}), "(J, sum_filter, [1, 1, 1, 1], 'SAME')\n", (2365, 2402), True, 'import tensorflow as tf\n'), ((2420, 2470), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['I2', 'sum_filter', '[1, 1, 1, 1]', '"""SAME"""'], {}), "(I2, sum_filter, [1, 1, 1, 1], 'SAME')\n", (2432, 2470), True, 'import tensorflow as tf\n'), ((2488, 2538), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['J2', 'sum_filter', '[1, 1, 1, 1]', '"""SAME"""'], {}), "(J2, sum_filter, [1, 1, 1, 1], 'SAME')\n", (2500, 2538), True, 'import tensorflow as tf\n'), ((2556, 2606), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['IJ', 'sum_filter', '[1, 1, 1, 1]', '"""SAME"""'], {}), "(IJ, sum_filter, [1, 1, 1, 1], 'SAME')\n", (2568, 2606), True, 'import tensorflow as tf\n'), ((746, 764), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dz'], {}), '(dz)\n', (760, 764), True, 'import tensorflow as tf\n'), ((1041, 1059), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dx'], {}), '(dx)\n', (1055, 1059), True, 'import tensorflow as tf\n'), ((1060, 1078), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dy'], {}), '(dy)\n', (1074, 1078), True, 'import tensorflow as tf\n'), ((2043, 2061), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cc'], {}), '(cc)\n', (2057, 2061), True, 'import tensorflow as tf\n'), ((2971, 2989), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cc'], {}), '(cc)\n', (2985, 2989), True, 'import tensorflow as tf\n'), ((708, 726), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dx'], {}), '(dx)\n', (722, 726), True, 'import tensorflow as tf\n'), ((727, 745), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['dy'], {}), '(dy)\n', (741, 745), True, 'import tensorflow as tf\n'), ((2930, 2945), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2938, 2945), True, 'import numpy as np\n')]
|
from __future__ import annotations
import re
from pathlib import Path
from logging import getLogger, Logger
from fileinput import hook_compressed
from dataclasses import dataclass, field, fields
from typing import Iterator, get_type_hints, Generator
import numpy as np
import numpy.typing as npt
from pysam import TabixFile
from .data import Data
from .genotypes import GenotypesRefAlt
@dataclass
class Extra:
"""
An extra field on a line in the .hap file
Attributes
----------
name: str
The name of the extra field
fmt: str = "s"
The python fmt string of the field value; indicates how to format the value
description: str = ""
A description of the extra field
"""
name: str
fmt: str = "s"
description: str = ""
_type: type = field(init=False, repr=False)
def __post_init__(self):
if self.fmt.endswith("s"):
self._type = str
elif self.fmt.endswith("d"):
self._type = int
elif self.fmt.endswith("f"):
self._type = float
else:
raise ValueError("Unsupported extra type '{}'!".format(self.fmt[-1]))
@classmethod
def from_hap_spec(cls, line: str) -> Extra:
"""
Convert an "extra" line in the header of a .hap file into an Extra object
Parameters
----------
line: str
An "extra" field, as it appears declared in the header
Returns
-------
Extra
An Extra object
"""
line = line[3:].split("\t")
return cls(name=line[0], fmt=line[1], description=line[2])
def to_hap_spec(self, line_type_symbol: str) -> str:
"""
Convert an Extra object into a header line in the .hap format spec
Parameters
----------
line_type_symbol: str
The symbol ("H" or "V") of the line type with which this extra field is associated
Returns
-------
str
A valid declaration of this extra field for the header of the .hap format spec
"""
return (
"#"
+ line_type_symbol
+ "\t"
+ "\t".join((self.name, self.fmt, self.description))
)
@property
def fmt_str(self) -> str:
"""
Convert an Extra into a fmt string
Returns
-------
str
A python format string (ex: "{beta:.3f}")
"""
return "{" + self.name + ":" + self.fmt + "}"
# We declare this class to be a dataclass to automatically define __init__ and a few
# other methods.
@dataclass
class Variant:
"""
A variant within the .hap format spec
In order to use this class with the Haplotypes class, you should
1) add properties to the class for each of extra fields
2) override the _extras property to describe the header declaration
Attributes
----------
start: int
The chromosomal start position of the variant
end: int
The chromosomal end position of the variant
In most cases this will be the same as the start position
id: str
The variant's unique ID
allele: str
The allele of this variant within the Haplotype
_extras: tuple[Extra]
Extra fields for the haplotype
Examples
--------
Let's extend this class and add an extra field called "score"
>>> from dataclasses import dataclass, field
>>> @dataclass
>>> class CustomVariant(Variant):
... score: float
... _extras: tuple = (
... Extra("score", ".3f", "Importance of inclusion"),
... )
"""
start: int
end: int
id: str
allele: str
_extras: tuple = field(default=tuple(), init=False, repr=False)
@property
def ID(self):
"""
Create an alias for the id property
"""
return self.id
@property
# TODO: use @cached_property in py3.8
def _fmt(self):
extras = ""
if len(self._extras):
extras = "\t" + "\t".join(extra.fmt_str for extra in self._extras)
return "V\t{hap:s}\t{start:d}\t{end:d}\t{id:s}\t{allele:s}" + extras
@classmethod
def from_hap_spec(cls: Variant, line: str) -> tuple[str, Variant]:
"""
Convert a variant line into a Variant object in the .hap format spec
Note that this implementation does NOT support having more extra fields than
appear in the header
Parameters
----------
line: str
A variant (V) line from the .hap file
Returns
-------
tuple[str, Variant]
The haplotype ID and Variant object for the variant
"""
assert line[0] == "V", "Attempting to init a Variant with a non-V line"
line = line[2:].split("\t")
hap_id = line[0]
var_fields = {}
idx = 1
for name, val in get_type_hints(cls).items():
if not name.startswith("_"):
var_fields[name] = val(line[idx])
idx += 1
return hap_id, cls(**var_fields)
def to_hap_spec(self, hap_id: str) -> str:
"""
Convert a Variant object into a variant line in the .hap format spec
Parameters
----------
hap_id: str
The ID of the haplotype associated with this variant
Returns
-------
str
A valid variant line (V) in the .hap format spec
"""
return self._fmt.format(**self.__dict__, hap=hap_id)
@classmethod
def extras_head(cls) -> tuple:
"""
Return the header lines of the extra fields that are supported
Returns
-------
tuple
The header lines of the extra fields
"""
return tuple(extra.to_hap_spec("V") for extra in cls._extras)
# We declare this class to be a dataclass to automatically define __init__ and a few
# other methods.
@dataclass
class Haplotype:
"""
A haplotype within the .hap format spec
In order to use this class with the Haplotypes class, you should
1) add properties to the class for each of extra fields
2) override the _extras property to describe the header declaration
Attributes
----------
chrom: str
The contig to which this haplotype belongs
start: int
The chromosomal start position of the haplotype
end: int
The chromosomal end position of the haplotype
id: str
The haplotype's unique ID
variants: list[Variant]
A list of the variants in this haplotype
_extras: tuple[Extra]
Extra fields for the haplotype
Examples
--------
Let's extend this class and add an extra field called "ancestry"
>>> from dataclasses import dataclass, field
>>> @dataclass
>>> class CustomHaplotype(Haplotype):
... ancestry: str
... _extras: tuple = (
... Extra("ancestry", "s", "Local ancestry"),
... )
"""
chrom: str
start: int
end: int
id: str
variants: tuple = field(default_factory=tuple, init=False)
_extras: tuple = field(default=tuple(), init=False, repr=False)
@property
def ID(self):
"""
Create an alias for the id property
"""
return self.id
@property
# TODO: use @cached_property in py3.8
def _fmt(self):
extras = ""
if len(self._extras):
extras = "\t" + "\t".join(extra.fmt_str for extra in self._extras)
return "H\t{chrom:s}\t{start:d}\t{end:d}\t{id:s}" + extras
@property
# TODO: use @cached_property in py3.8
def varIDs(self):
return {var.id for var in self.variants}
@classmethod
def from_hap_spec(
cls: Haplotype, line: str, variants: tuple = tuple()
) -> Haplotype:
"""
Convert a variant line into a Haplotype object in the .hap format spec
Note that this implementation does NOT support having more extra fields than
appear in the header
Parameters
----------
line: str
A variant (H) line from the .hap file
Returns
-------
Haplotype
The Haplotype object for the variant
"""
assert line[0] == "H", "Attempting to init a Haplotype with a non-H line"
line = line[2:].split("\t")
hap_fields = {}
idx = 0
for name, val in get_type_hints(cls).items():
if name != "variants" and not name.startswith("_"):
hap_fields[name] = val(line[idx])
idx += 1
hap = cls(**hap_fields)
hap.variants = variants
return hap
def to_hap_spec(self) -> str:
"""
Convert a Haplotype object into a haplotype line in the .hap format spec
Returns
-------
str
A valid haplotype line (H) in the .hap format spec
"""
return self._fmt.format(**self.__dict__)
@classmethod
def extras_head(cls) -> tuple:
"""
Return the header lines of the extra fields that are supported
Returns
-------
tuple
The header lines of the extra fields
"""
return tuple(extra.to_hap_spec("H") for extra in cls._extras)
def transform(
self, genotypes: GenotypesRefAlt, samples: list[str] = None
) -> npt.NDArray[bool]:
"""
Transform a genotypes matrix via the current haplotype
Each entry in the returned matrix denotes the presence of the current haplotype
in each chromosome of each sample in the Genotypes object
Parameters
----------
genotypes : GenotypesRefAlt
The genotypes which to transform using the current haplotype
If the genotypes have not been loaded into the Genotypes object yet, this
method will call Genotypes.read(), while loading only the needed variants
samples : list[str], optional
See documentation for :py:attr:`~.Genotypes.read`
Returns
-------
npt.NDArray[bool]
A 2D matrix of shape (num_samples, 2) where each entry in the matrix
denotes the presence of the haplotype in one chromosome of a sample
"""
var_IDs = self.varIDs
# check: have the genotypes been loaded yet?
# if not, we can load just the variants we need
if genotypes.unset():
start = min(var.start for var in self.variants)
end = max(var.end for var in self.variants)
region = f"{self.chrom}:{start}-{end}"
genotypes.read(region=region, samples=samples, variants=var_IDs)
genotypes.check_biallelic(discard_also=True)
genotypes.check_phase()
# create a dict where the variants are keyed by ID
var_dict = {
var["id"]: var["ref"] for var in genotypes.variants if var["id"] in var_IDs
}
var_idxs = [
idx for idx, var in enumerate(genotypes.variants) if var["id"] in var_IDs
]
missing_IDs = var_IDs - var_dict.keys()
if len(missing_IDs):
raise ValueError(
f"Variants {missing_IDs} are present in haplotype '{self.id}' but "
"absent in the provided genotypes"
)
# create a np array denoting the alleles that we want
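# (1 = the haplotype carries the ALT allele of that variant, 0 = it carries REF)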
alleles = [int(var.allele != var_dict[var.id]) for var in self.variants]
allele_arr = np.array([[[al] for al in alleles]]) # shape: (1, n, 1)
# look for the presence of each allele in each chromosomal strand
# and then just AND them together
return np.all(allele_arr == genotypes.data[:, var_idxs], axis=1)
class Haplotypes(Data):
"""
A class for processing haplotypes from a file
Attributes
----------
fname: Path
The path to the file containing the data
data: dict[str, Haplotype]
A dict of Haplotype objects keyed by their IDs
types: dict
A dict of class names keyed by the symbol denoting their line type
Ex: {'H': Haplotype, 'V': Variant}
version: str
A string denoting the current file format version
log: Logger
A logging instance for recording debug statements.
Examples
--------
Parsing a basic .hap file without any extra fields is simple:
>>> haplotypes = Haplotypes.load('tests/data/basic.hap')
>>> haps = haplotypes.data # a dictionary of Haplotype objects
If the .hap file contains extra fields, you'll need to call the read() method
manually. You'll also need to create Haplotype and Variant subclasses that support
the extra fields and then specify the names of the classes when you initialize the
Haplotypes object:
>>> haplotypes = Haplotypes('tests/data/simphenotype.hap', HaptoolsHaplotype)
>>> haplotypes.read()
>>> haps = haplotypes.data # a dictionary of Haplotype objects
"""
def __init__(
self,
fname: Path,
haplotype: type[Haplotype] = Haplotype,
variant: type[Variant] = Variant,
log: Logger = None,
):
super().__init__(fname, log)
self.data = None
self.types = {"H": haplotype, "V": variant}
self.version = "0.0.1"
@classmethod
def load(
cls: Haplotypes, fname: Path, region: str = None, haplotypes: set[str] = None
) -> Haplotypes:
"""
Load haplotypes from a .hap file
Read the file contents
Parameters
----------
fname: Path
See documentation for :py:attr:`~.Data.fname`
region: str, optional
See documentation for :py:meth:`~.Haplotypes.read`
haplotypes: list[str], optional
See documentation for :py:meth:`~.Haplotypes.read`
Returns
-------
Haplotypes
A Haplotypes object with the data loaded into its properties
"""
haps = cls(fname)
haps.read(region, haplotypes)
return haps
def check_header(self, lines: list[str], check_version=False):
"""
Check 1) that the version number matches and 2) that extra fields declared in
the .hap file can be handled by the Variant and Haplotype classes
provided in __init__()
Parameters
----------
lines: list[str]
Header lines from the .hap file
check_version: bool = False
Whether to also check the version of the file
Raises
------
ValueError
If any of the header lines are not supported
"""
self.log.info("Checking header")
if check_version:
version_line = lines[0].split("\t")
assert version_line[1] == "version", (
"The version of the format spec must be declared as the first line of"
" the header."
)
if version_line[2] != self.version:
self.log.warning(
f"The version of the provided .hap file is {version_line} but this"
f" tool expected {self.version}"
)
expected_lines = [
line
for line_type in self.types.values()
for line in line_type.extras_head()
]
for line in lines:
if line[1] in self.types.keys():
try:
expected_lines.remove(line)
except ValueError:
# extract the name of the extra field
name = line.split("\t", maxsplit=1)[1]
raise ValueError(
f"The extra field '{name}' is declared in the header of the"
" .hap file but is not accepted by this tool."
)
# if there are any fields left...
if expected_lines:
names = [line.split("\t", maxsplit=2)[1] for line in expected_lines]
raise ValueError(
"Expected the input .hap file to have these extra fields, but they "
f"don't seem to be declared in the header: {*names,}"
)
def _line_type(self, line: str) -> type:
"""
Return the type of line that this line matches
Parameters
----------
line: str
A line of the .hap file
Returns
-------
type
The name of the class corresponding with the type of this line
"""
line_types = self.types.keys()
if line[0] in line_types:
return line[0]
else:
# if none of the lines matched, return None
return None
def read(self, region: str = None, haplotypes: set[str] = None):
"""
Read haplotypes from a .hap file into a dictionary stored in :py:attr:`~.Haplotypes.data`
Parameters
----------
region: str, optional
The region from which to extract haplotypes; ex: 'chr1:1234-34566' or 'chr7'
For this to work, the .hap file must be indexed and the seqname must match!
Defaults to loading all haplotypes
haplotypes: list[str], optional
A list of haplotype IDs corresponding to a subset of the haplotypes to
extract
Defaults to loading haplotypes from all samples
For this to work, the .hap file must be indexed
"""
super().read()
self.data = {}
var_haps = {}
for line in self.__iter__(region, haplotypes):
if isinstance(line, Haplotype):
self.data[line.id] = line
elif isinstance(line, Variant):
hap_id = line.hap
del line.hap
# store the variant for later
var_haps.setdefault(hap_id, []).append(line)
for hap in var_haps:
self.data[hap].variants = tuple(var_haps[hap])
def __iter__(
self, region: str = None, haplotypes: set[str] = None
) -> Iterator[Variant | Haplotype]:
"""
Read haplotypes from a .hap file line by line without storing anything
Parameters
----------
region: str, optional
The region from which to extract haplotypes; ex: 'chr1:1234-34566' or 'chr7'
For this to work, the .hap file must be indexed and the seqname must match!
Defaults to loading all haplotypes
haplotypes: list[str], optional
A list of haplotype IDs corresponding to a subset of the haplotypes to
extract
Defaults to loading haplotypes from all samples
For this to work, the .hap file must be indexed
Yields
------
Iterator[Variant|Haplotype]
An iterator over each line in the file, where each line is encoded as a
Variant or Haplotype containing each of the class properties
Examples
--------
If you're worried that the contents of the .hap file will be large, you may
opt to parse the file line-by-line instead of loading it all into memory at
once. In cases like these, you can use the __iter__() method in a for-loop:
>>> haplotypes = Haplotypes('tests/data/basic.hap')
>>> for line in haplotypes:
... print(line)
Call the function manually to pass it the region or haplotypes params:
>>> haplotypes = Haplotypes('tests/data/basic.hap.gz')
>>> for line in haplotypes.__iter__(
... region='21:26928472-26941960', haplotypes={"chr21.q.3365*1"}
... ):
... print(line)
"""
# if the user requested a specific region or set of haplotypes, then we should
# handle it using tabix
# else, we use a regular text opener
if region or haplotypes:
haps_file = TabixFile(str(self.fname))
self.check_header(list(haps_file.header))
if region:
region_positions = region.split(":", maxsplit=1)[1]
# fetch region
# we already know that each line will start with an H, so we don't
# need to check that
for line in haps_file.fetch(region):
hap = self.types["H"].from_hap_spec(line)
if haplotypes is not None:
if hap.id not in haplotypes:
continue
haplotypes.remove(hap.id)
yield hap
else:
for line in haps_file.fetch():
# we only want lines that start with an H
line_type = self._line_type(line)
if line_type == "H":
hap = self.types["H"].from_hap_spec(line)
if hap.id in haplotypes:
yield hap
haplotypes.remove(hap.id)
elif line_type > "H":
# if we've already passed all of the H's, we can just exit
# We assume the file has been sorted so that all of the H lines
# come before the V lines
break
# query for the variants of each haplotype
for hap_id in self.data:
# exclude variants outside the desired region
hap_region = hap_id
if region:
hap_region = hap_id + ":" + region_positions
# fetch region
# we already know that each line will start with a V, so we don't
# need to check that
for line in haps_file.fetch(hap_region):
line_type = self._line_type(line)
if line_type == "V":
var = self.types["V"].from_hap_spec(line)[1]
# add the haplotype, since otherwise, the user won't know
# which haplotype this variant belongs to
var.hap = hap_id
yield var
else:
self.log.warning(
"Check that chromosomes are distinct from your hap IDs!"
)
haps_file.close()
else:
# the file is not indexed, so we can't assume it's sorted, either
# use hook_compressed to automatically handle gz files
with hook_compressed(self.fname, mode="rt") as haps:
self.log.info("Not taking advantage of indexing.")
header_lines = []
for line in haps:
line = line.rstrip("\n")
line_type = self._line_type(line)
if line[0] == "#":
# store header for later
try:
header_lines.append(line)
except AttributeError:
# this happens when we encounter a line beginning with a #
# after already having seen an H or V line
# in this case, it's usually just a comment, so we can ignore
pass
else:
if header_lines:
self.check_header(header_lines)
header_lines = None
self.log.info("Finished reading header.")
if line_type == "H":
yield self.types["H"].from_hap_spec(line)
elif line_type == "V":
hap_id, var = self.types["V"].from_hap_spec(line)
# add the haplotype, since otherwise, the user won't know
# which haplotype this variant belongs to
var.hap = hap_id
yield var
else:
self.log.warning(
f"Ignoring unsupported line type '{line[0]}'"
)
def to_str(self) -> Generator[str, None, None]:
"""
Create a string representation of this Haplotype
Yields
------
Generator[str, None, None]
A list of lines (strings) to include in the output
"""
yield "#\tversion\t" + self.version
for line_type in self.types:
yield from self.types[line_type].extras_head()
for hap in self.data.values():
yield self.types["H"].to_hap_spec(hap)
for hap in self.data.values():
for var in hap.variants:
yield self.types["V"].to_hap_spec(var, hap.id)
def __repr__(self):
return "\n".join(self.to_str())
def write(self):
"""
Write the contents of this Haplotypes object to the file given by fname
Examples
--------
To write to a .hap file, you must first initialize a Haplotypes object and then
fill out the data property:
>>> haplotypes = Haplotypes('tests/data/basic.hap')
>>> haplotypes.data = {'H1': Haplotype('chr1', 0, 10, 'H1')}
>>> haplotypes.write()
"""
with hook_compressed(self.fname, mode="wt") as haps:
for line in self.to_str():
haps.write(line + "\n")
def transform(
self,
genotypes: GenotypesRefAlt,
hap_gts: GenotypesRefAlt,
samples: list[str] = None,
low_memory: bool = False,
) -> GenotypesRefAlt:
"""
Transform a genotypes matrix via the current haplotype
Each entry in the returned matrix denotes the presence of each haplotype
in each chromosome of each sample in the Genotypes object
Parameters
----------
genotypes : GenotypesRefAlt
The genotypes which to transform using the current haplotype
If the genotypes have not been loaded into the Genotypes object yet, this
method will call Genotypes.read(), while loading only the needed variants
hap_gts: GenotypesRefAlt
An empty GenotypesRefAlt object into which the haplotype genotypes should
be stored
samples : list[str], optional
See documentation for :py:attr:`~.Genotypes.read`
low_memory : bool, optional
If True, each haplotype's genotypes will be loaded one at a time.
Returns
-------
GenotypesRefAlt
A Genotypes object composed of haplotypes instead of regular variants.
"""
hap_gts.samples = genotypes.samples
hap_gts.variants = np.array(
[(hap.id, hap.chrom, hap.start, 0, "A", "T") for hap in self.data.values()],
dtype=[
("id", "U50"),
("chrom", "U10"),
("pos", np.uint32),
("aaf", np.float64),
("ref", "U100"),
("alt", "U100"),
],
)
self.log.info(
f"Transforming a set of genotypes from {len(genotypes.variants)} total "
f"variants with a list of {len(self.data)} haplotypes"
)
hap_gts.data = np.concatenate(
tuple(
hap.transform(genotypes, samples)[:, np.newaxis]
for hap in self.data.values()
),
axis=1,
).astype(np.uint8)
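# Example (sketch, not part of the original module; the file names below are
# hypothetical):
#
# haps = Haplotypes.load("tests/data/basic.hap")
# gts = GenotypesRefAlt("tests/data/example.vcf.gz")
# hap_gts = GenotypesRefAlt("haplotypes.vcf.gz")  # empty container to fill
# haps.transform(gts, hap_gts)  # hap_gts.data now holds per-haplotype genotypes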
|
[
"typing.get_type_hints",
"fileinput.hook_compressed",
"dataclasses.field",
"numpy.array",
"numpy.all"
] |
[((805, 834), 'dataclasses.field', 'field', ([], {'init': '(False)', 'repr': '(False)'}), '(init=False, repr=False)\n', (810, 834), False, 'from dataclasses import dataclass, field, fields\n'), ((7026, 7066), 'dataclasses.field', 'field', ([], {'default_factory': 'tuple', 'init': '(False)'}), '(default_factory=tuple, init=False)\n', (7031, 7066), False, 'from dataclasses import dataclass, field, fields\n'), ((11472, 11508), 'numpy.array', 'np.array', (['[[[al] for al in alleles]]'], {}), '([[[al] for al in alleles]])\n', (11480, 11508), True, 'import numpy as np\n'), ((11660, 11717), 'numpy.all', 'np.all', (['(allele_arr == genotypes.data[:, var_idxs])'], {'axis': '(1)'}), '(allele_arr == genotypes.data[:, var_idxs], axis=1)\n', (11666, 11717), True, 'import numpy as np\n'), ((25545, 25583), 'fileinput.hook_compressed', 'hook_compressed', (['self.fname'], {'mode': '"""wt"""'}), "(self.fname, mode='wt')\n", (25560, 25583), False, 'from fileinput import hook_compressed\n'), ((4855, 4874), 'typing.get_type_hints', 'get_type_hints', (['cls'], {}), '(cls)\n', (4869, 4874), False, 'from typing import Iterator, get_type_hints, Generator\n'), ((8390, 8409), 'typing.get_type_hints', 'get_type_hints', (['cls'], {}), '(cls)\n', (8404, 8409), False, 'from typing import Iterator, get_type_hints, Generator\n'), ((22559, 22597), 'fileinput.hook_compressed', 'hook_compressed', (['self.fname'], {'mode': '"""rt"""'}), "(self.fname, mode='rt')\n", (22574, 22597), False, 'from fileinput import hook_compressed\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# TODO: pass ft_cse to use fine-tuned feature
# TODO: pass fine_steps -1 to use fine samples
from absl import flags, app
import sys
sys.path.insert(0,'')
sys.path.insert(0,'third_party')
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from utils.io import save_vid, str_to_frame, save_bones, draw_lines, vis_match
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3,\
Kmatinv, K2mat, K2inv, sample_xy, resample_dp,\
raycast
from nnutils.loss_utils import kp_reproj, feat_match, kp_reproj_loss
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
opts = flags.FLAGS
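# Build per-pixel ray parameters (rtk_vec, per-frame bone transforms) for the
# sampled pixels and gather CSE features at those pixels. With flip=True the rays
# of each consecutive image pair are swapped, so features from one frame are
# matched against its partner frame's view.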
def construct_rays(dp_feats_rsmp, model, xys, rand_inds,
Rmat, Tmat, Kinv, near_far, flip=True):
device = dp_feats_rsmp.device
bs,nsample,_ =xys.shape
opts = model.opts
embedid=model.embedid
embedid = embedid.long().to(device)[:,None]
rays = raycast(xys, Rmat, Tmat, Kinv, near_far)
rtk_vec = rays['rtk_vec']
del rays
feats_at_samp = [dp_feats_rsmp[i].view(model.num_feat,-1).T\
[rand_inds[i].long()] for i in range(bs)]
feats_at_samp = torch.stack(feats_at_samp,0) # bs,ns,num_feat
# TODO implement for se3
if opts.lbs and model.num_bone_used>0:
bone_rts = model.nerf_body_rts(embedid)
bone_rts = bone_rts.repeat(1,nsample,1)
# TODO rearrange inputs
feats_at_samp = feats_at_samp.view(-1, model.num_feat)
xys = xys.view(-1,1,2)
if flip:
rtk_vec = rtk_vec.view(bs//2,2,-1).flip(1).view(rtk_vec.shape)
bone_rts = bone_rts.view(bs//2,2,-1).flip(1).view(bone_rts.shape)
rays = {'rtk_vec': rtk_vec,
'bone_rts': bone_rts}
return rays, feats_at_samp, xys
def match_frames(trainer, idxs, nsample=200):
idxs = [int(i) for i in idxs.split(' ')]
bs = len(idxs)
opts = trainer.opts
device = trainer.device
model = trainer.model
model.eval()
# load frames and aux data
for dataset in trainer.evalloader.dataset.datasets:
dataset.load_pair = False
batch = []
for i in idxs:
batch.append( trainer.evalloader.dataset[i] )
batch = trainer.evalloader.collate_fn(batch)
model.set_input(batch)
rtk = model.rtk
Rmat = rtk[:,:3,:3]
Tmat = rtk[:,:3,3]
Kmat = K2mat(rtk[:,3,:])
kaug = model.kaug # according to cropping, p = Kaug Kmat P
Kaug = K2inv(kaug)
Kinv = Kmatinv(Kaug.matmul(Kmat))
near_far = model.near_far[model.frameid.long()]
dp_feats_rsmp = model.dp_feats
# construct rays for sampled pixels
rand_inds, xys = sample_xy(opts.img_size, bs, nsample, device,return_all=False)
rays, feats_at_samp, xys = construct_rays(dp_feats_rsmp, model, xys, rand_inds,
Rmat, Tmat, Kinv, near_far)
model.update_delta_rts(rays)
# re-project
with torch.no_grad():
pts_pred = feat_match(model.nerf_feat, model.embedding_xyz, feats_at_samp,
model.latest_vars['obj_bound'],grid_size=20,is_training=False)
pts_pred = pts_pred.view(bs,nsample,3)
xy_reproj = kp_reproj(pts_pred, model.nerf_models, model.embedding_xyz, rays)
# draw
imgs_trg = model.imgs.view(bs//2,2,-1).flip(1).view(model.imgs.shape)
xy_reproj = xy_reproj.view(bs,nsample,2)
xys = xys.view(bs,nsample, 2)
sil_at_samp = torch.stack([model.masks[i].view(-1,1)[rand_inds[i]] \
for i in range(bs)],0) # bs,ns,1
for i in range(bs):
img1 = model.imgs[i]
img2 = imgs_trg[i]
img = torch.cat([img1, img2],2)
valid_idx = sil_at_samp[i].bool()[...,0]
p1s = xys[i][valid_idx]
p2s = xy_reproj[i][valid_idx]
p2s[...,0] = p2s[...,0] + img1.shape[2]
img = draw_lines(img, p1s,p2s)
cv2.imwrite('tmp/match_%04d.png'%i, img)
# visualize matching error
if opts.render_size<=128:
with torch.no_grad():
rendered, rand_inds = model.nerf_render(rtk, kaug, model.embedid,
nsample=opts.nsample, ndepth=opts.ndepth)
xyz_camera = rendered['xyz_camera_vis'][0].reshape(opts.render_size**2,-1)
xyz_canonical = rendered['xyz_canonical_vis'][0].reshape(opts.render_size**2,-1)
skip_idx = len(xyz_camera)//50 # vis 50 rays
trimesh.Trimesh(xyz_camera[0::skip_idx].reshape(-1,3).cpu()).\
export('tmp/match_camera_pts.obj')
trimesh.Trimesh(xyz_canonical[0::skip_idx].reshape(-1,3).cpu()).\
export('tmp/match_canonical_pts.obj')
vis_match(rendered, model.masks, model.imgs,
bs,opts.img_size, opts.ndepth)
## construct rays for all pixels
#rand_inds, xys = sample_xy(opts.img_size, bs, nsample, device,return_all=True)
#rays, feats_at_samp, xys = construct_rays(dp_feats_rsmp, model, xys, rand_inds,
# Rmat, Tmat, Kinv, near_far, flip=False)
#with torch.no_grad():
# pts_pred = feat_match(model.nerf_feat, model.embedding_xyz, feats_at_samp,
# model.latest_vars['obj_bound'],grid_size=20,is_training=False)
# pts_pred = pts_pred.view(bs,opts.render_size**2,3)
# proj_err = kp_reproj_loss(pts_pred, xys, model.nerf_models,
# model.embedding_xyz, rays)
# proj_err = proj_err.view(pts_pred.shape[:-1]+(1,))
# proj_err = proj_err/opts.img_size * 2
# results = {}
# results['proj_err'] = proj_err
## visualize current error stats
#feat_err=model.latest_vars['fp_err'][:,0]
#proj_err=model.latest_vars['fp_err'][:,1]
#feat_err = feat_err[feat_err>0]
#proj_err = proj_err[proj_err>0]
#print('feat-med: %f'%(np.median(feat_err)))
#print('proj-med: %f'%(np.median(proj_err)))
#plt.hist(feat_err,bins=100)
#plt.savefig('tmp/viser_feat_err.jpg')
#plt.clf()
#plt.hist(proj_err,bins=100)
#plt.savefig('tmp/viser_proj_err.jpg')
# visualize codes
with torch.no_grad():
fid = torch.Tensor(range(0,len(model.impath))).cuda().long()
D=model.pose_code(fid)
D = D.view(len(fid),-1)
##TODO
#px = torch.Tensor(range(len(D))).cuda()
#py = px*2
#pz = px*5+1
#D = torch.stack([px,py,pz],-1)
D = D-D.mean(0)[None]
A = D.T.matmul(D)/D.shape[0] # fxf
U,S,V=torch.svd(A) #
code_proj_3d=D.matmul(V[:,:3])
cmap = matplotlib.cm.get_cmap('cool')
time = np.asarray(range(len(model.impath)))
time = time/time.max()
code_proj_3d=code_proj_3d.detach().cpu().numpy()
trimesh.Trimesh(code_proj_3d, vertex_colors=cmap(time)).export('tmp/0.obj')
#plt.figure(figsize=(16,16))
plot_stack = []
weight_dir = opts.model_path.rsplit('/',1)[0]
bne_path = sorted(glob.glob('%s/%s-*bne-mrender*.jpg'%\
(weight_dir, opts.seqname)))
img_path = model.impath.copy()
## remove the last img for each video to make shape consistent with bone renders
#for i in model.data_offset[1:][::-1]:
# img_path.remove(img_path[i-1])
# code_proj_3d = np.delete(code_proj_3d, i-1,0)
# plot the first video
img_path = img_path [:model.data_offset[1]-2]
code_proj_3d = code_proj_3d[:model.data_offset[1]-2]
try:
bne_path = bne_path [:model.data_offset[1]-2]
except:
pass
for i in range(len(code_proj_3d)):
plt.plot(code_proj_3d[i,0], code_proj_3d[i,1], color=cmap(time[i]), marker='o')
plt.annotate(str(i), (code_proj_3d[i,0], code_proj_3d[i,1]))
plt.xlim(code_proj_3d[:,0].min(), code_proj_3d[:,0].max())
plt.ylim(code_proj_3d[:,1].min(), code_proj_3d[:,1].max())
fig = plt.gcf()
fig.canvas.draw()
plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
plot = plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
print('plot pose code of frame id:%03d'%i)
if len(bne_path) == len(code_proj_3d):
bneimg = cv2.imread(bne_path[i])
bneimg = cv2.resize(bneimg,\
(bneimg.shape[1]*plot.shape[0]//bneimg.shape[0], plot.shape[0]))
img=cv2.imread(img_path[i])[:,:,::-1]
img = cv2.resize(img,\
(img.shape[1]*plot.shape[0]//img.shape[0], plot.shape[0]))
plot = np.hstack([img, bneimg, plot])
plot_stack.append(plot)
save_vid('tmp/code', plot_stack, suffix='.mp4',
upsample_frame=150.,fps=30)
save_vid('tmp/code', plot_stack, suffix='.gif',
upsample_frame=150.,fps=30)
# vis dps
cv2.imwrite('tmp/match_dpc.png', model.dp_vis[model.dps[0].long()].cpu().numpy()*255)
def main(_):
opts.img_size=opts.render_size
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
#write matching function
img_match = match_frames(trainer, opts.match_frames)
if __name__ == '__main__':
app.run(main)
|
[
"matplotlib.cm.get_cmap",
"torch.cat",
"nnutils.loss_utils.feat_match",
"glob.glob",
"torch.no_grad",
"nnutils.geom_utils.sample_xy",
"nnutils.train_utils.v2s_trainer",
"cv2.imwrite",
"nnutils.loss_utils.kp_reproj",
"utils.io.vis_match",
"cv2.resize",
"torch.svd",
"numpy.hstack",
"utils.io.draw_lines",
"matplotlib.pyplot.gcf",
"torch.stack",
"nnutils.geom_utils.raycast",
"sys.path.insert",
"utils.io.save_vid",
"cv2.imread",
"absl.app.run",
"nnutils.geom_utils.K2inv",
"nnutils.geom_utils.K2mat"
] |
[((204, 226), 'sys.path.insert', 'sys.path.insert', (['(0)', '""""""'], {}), "(0, '')\n", (219, 226), False, 'import sys\n'), ((226, 259), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""third_party"""'], {}), "(0, 'third_party')\n", (241, 259), False, 'import sys\n'), ((1284, 1324), 'nnutils.geom_utils.raycast', 'raycast', (['xys', 'Rmat', 'Tmat', 'Kinv', 'near_far'], {}), '(xys, Rmat, Tmat, Kinv, near_far)\n', (1291, 1324), False, 'from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, obj_to_cam, Kmatinv, K2mat, K2inv, sample_xy, resample_dp, raycast\n'), ((1516, 1545), 'torch.stack', 'torch.stack', (['feats_at_samp', '(0)'], {}), '(feats_at_samp, 0)\n', (1527, 1545), False, 'import torch\n'), ((2683, 2702), 'nnutils.geom_utils.K2mat', 'K2mat', (['rtk[:, 3, :]'], {}), '(rtk[:, 3, :])\n', (2688, 2702), False, 'from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, obj_to_cam, Kmatinv, K2mat, K2inv, sample_xy, resample_dp, raycast\n'), ((2777, 2788), 'nnutils.geom_utils.K2inv', 'K2inv', (['kaug'], {}), '(kaug)\n', (2782, 2788), False, 'from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, obj_to_cam, Kmatinv, K2mat, K2inv, sample_xy, resample_dp, raycast\n'), ((2977, 3040), 'nnutils.geom_utils.sample_xy', 'sample_xy', (['opts.img_size', 'bs', 'nsample', 'device'], {'return_all': '(False)'}), '(opts.img_size, bs, nsample, device, return_all=False)\n', (2986, 3040), False, 'from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, obj_to_cam, Kmatinv, K2mat, K2inv, sample_xy, resample_dp, raycast\n'), ((9389, 9420), 'nnutils.train_utils.v2s_trainer', 'v2s_trainer', (['opts'], {'is_eval': '(True)'}), '(opts, is_eval=True)\n', (9400, 9420), False, 'from nnutils.train_utils import v2s_trainer\n'), ((9615, 9628), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (9622, 9628), False, 'from absl import flags, app\n'), ((3236, 3251), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3249, 3251), False, 'import torch\n'), ((3272, 3405), 'nnutils.loss_utils.feat_match', 'feat_match', (['model.nerf_feat', 'model.embedding_xyz', 'feats_at_samp', "model.latest_vars['obj_bound']"], {'grid_size': '(20)', 'is_training': '(False)'}), "(model.nerf_feat, model.embedding_xyz, feats_at_samp, model.\n latest_vars['obj_bound'], grid_size=20, is_training=False)\n", (3282, 3405), False, 'from nnutils.loss_utils import kp_reproj, feat_match, kp_reproj_loss\n'), ((3478, 3543), 'nnutils.loss_utils.kp_reproj', 'kp_reproj', (['pts_pred', 'model.nerf_models', 'model.embedding_xyz', 'rays'], {}), '(pts_pred, model.nerf_models, model.embedding_xyz, rays)\n', (3487, 3543), False, 'from nnutils.loss_utils import kp_reproj, feat_match, kp_reproj_loss\n'), ((3957, 3983), 'torch.cat', 'torch.cat', (['[img1, img2]', '(2)'], {}), '([img1, img2], 2)\n', (3966, 3983), False, 'import torch\n'), ((4164, 4189), 'utils.io.draw_lines', 'draw_lines', (['img', 'p1s', 'p2s'], {}), '(img, p1s, p2s)\n', (4174, 4189), False, 'from utils.io import save_vid, str_to_frame, save_bones, draw_lines, vis_match\n'), ((4197, 4239), 'cv2.imwrite', 'cv2.imwrite', (["('tmp/match_%04d.png' % i)", 'img'], {}), "('tmp/match_%04d.png' % i, img)\n", (4208, 4239), False, 'import cv2\n'), ((6438, 6453), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6451, 6453), False, 'import torch\n'), ((6819, 6831), 'torch.svd', 'torch.svd', (['A'], {}), '(A)\n', (6828, 6831), False, 'import torch\n'), ((6888, 6918), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""cool"""'], {}), 
"('cool')\n", (6910, 6918), False, 'import matplotlib\n'), ((9028, 9105), 'utils.io.save_vid', 'save_vid', (['"""tmp/code"""', 'plot_stack'], {'suffix': '""".mp4"""', 'upsample_frame': '(150.0)', 'fps': '(30)'}), "('tmp/code', plot_stack, suffix='.mp4', upsample_frame=150.0, fps=30)\n", (9036, 9105), False, 'from utils.io import save_vid, str_to_frame, save_bones, draw_lines, vis_match\n'), ((9128, 9205), 'utils.io.save_vid', 'save_vid', (['"""tmp/code"""', 'plot_stack'], {'suffix': '""".gif"""', 'upsample_frame': '(150.0)', 'fps': '(30)'}), "('tmp/code', plot_stack, suffix='.gif', upsample_frame=150.0, fps=30)\n", (9136, 9205), False, 'from utils.io import save_vid, str_to_frame, save_bones, draw_lines, vis_match\n'), ((4313, 4328), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4326, 4328), False, 'import torch\n'), ((4983, 5059), 'utils.io.vis_match', 'vis_match', (['rendered', 'model.masks', 'model.imgs', 'bs', 'opts.img_size', 'opts.ndepth'], {}), '(rendered, model.masks, model.imgs, bs, opts.img_size, opts.ndepth)\n', (4992, 5059), False, 'from utils.io import save_vid, str_to_frame, save_bones, draw_lines, vis_match\n'), ((7285, 7350), 'glob.glob', 'glob.glob', (["('%s/%s-*bne-mrender*.jpg' % (weight_dir, opts.seqname))"], {}), "('%s/%s-*bne-mrender*.jpg' % (weight_dir, opts.seqname))\n", (7294, 7350), False, 'import glob\n'), ((8288, 8297), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8295, 8297), True, 'from matplotlib import pyplot as plt\n'), ((8611, 8634), 'cv2.imread', 'cv2.imread', (['bne_path[i]'], {}), '(bne_path[i])\n', (8621, 8634), False, 'import cv2\n'), ((8660, 8751), 'cv2.resize', 'cv2.resize', (['bneimg', '(bneimg.shape[1] * plot.shape[0] // bneimg.shape[0], plot.shape[0])'], {}), '(bneimg, (bneimg.shape[1] * plot.shape[0] // bneimg.shape[0],\n plot.shape[0]))\n', (8670, 8751), False, 'import cv2\n'), ((8837, 8915), 'cv2.resize', 'cv2.resize', (['img', '(img.shape[1] * plot.shape[0] // img.shape[0], plot.shape[0])'], {}), '(img, (img.shape[1] * plot.shape[0] // img.shape[0], plot.shape[0]))\n', (8847, 8915), False, 'import cv2\n'), ((8952, 8982), 'numpy.hstack', 'np.hstack', (['[img, bneimg, plot]'], {}), '([img, bneimg, plot])\n', (8961, 8982), True, 'import numpy as np\n'), ((8781, 8804), 'cv2.imread', 'cv2.imread', (['img_path[i]'], {}), '(img_path[i])\n', (8791, 8804), False, 'import cv2\n')]
|
# This script is for the rotate function
import numpy as np
import matplotlib.pyplot as plt
import cv2
def rotate(image, degree, output_path):
"""
    Rotates an OpenCV 2 / NumPy image about its centre by the given angle
(in degrees). The returned image will be large enough to hold the entire
new image, with a black background
Arguments:
-----------------------------
    image: path of the input image file
    degree: rotation angle in degrees
    output_path: path where the output image is saved
Output:
-----------------------------
an image file in .png format
"""
# exception handling
try:
image = plt.imread(image)
except AttributeError:
print("Please type in a string as the path for the input image file.")
raise
except TypeError:
print("Please provide a string as the path for the input image file.")
raise
except FileNotFoundError:
print("The input file/path does not exist, please double check it. ")
raise
except OSError:
print("The input file is not an image.")
raise
except Exception as e:
print("General Error:")
print(e)
raise
# Get the image size
image_size = (image.shape[1], image.shape[0])
image_center = tuple(np.array(image_size) / 2)
    # Convert the OpenCV 2x3 rotation matrix to 3x3
rot_mat = np.vstack([cv2.getRotationMatrix2D(image_center, degree, 1.0), [0, 0, 1]])
rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
# Shorthand for below calcs
image_w2 = image_size[0] * 0.5
image_h2 = image_size[1] * 0.5
# Obtain the rotated coordinates of the image corners
rotated_coords = [
(np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
(np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0],
]
# Find the size of the new image
x_coords = [pt[0] for pt in rotated_coords]
x_pos = [x for x in x_coords if x > 0]
x_neg = [x for x in x_coords if x < 0]
y_coords = [pt[1] for pt in rotated_coords]
y_pos = [y for y in y_coords if y > 0]
y_neg = [y for y in y_coords if y < 0]
right_bound = max(x_pos)
left_bound = min(x_neg)
top_bound = max(y_pos)
bot_bound = min(y_neg)
new_w = int(abs(right_bound - left_bound))
new_h = int(abs(top_bound - bot_bound))
# We require a translation matrix to keep the image centred
trans_mat = np.matrix(
[
[1, 0, int(new_w * 0.5 - image_w2)],
[0, 1, int(new_h * 0.5 - image_h2)],
[0, 0, 1],
]
)
    # Compute the transform for the combined rotation and translation
affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]
# Apply the transform
result = cv2.warpAffine(image, affine_mat, (new_w, new_h), flags=cv2.INTER_LINEAR)
# exception handling
try:
plt.imshow(result)
plt.savefig(output_path)
except FileNotFoundError:
print("The output path does not exist.")
raise
except AttributeError:
print("Please provide a string as the path for the output image file.")
raise
except TypeError:
print("Please provide a string as the path for the output image file.")
raise
except Exception as e:
print("Other exceptions, please check your input and output. ")
print(e)
raise
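# Example usage (sketch): the file names below are placeholders; any readable
# input image and writable output path would work.
if __name__ == "__main__":
    # Rotate the input image by 45 degrees and save the enlarged result.
    rotate("input.png", 45, "rotated.png")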
|
[
"numpy.matrix",
"matplotlib.pyplot.imshow",
"cv2.warpAffine",
"numpy.array",
"matplotlib.pyplot.imread",
"cv2.getRotationMatrix2D",
"matplotlib.pyplot.savefig"
] |
[((1446, 1474), 'numpy.matrix', 'np.matrix', (['rot_mat[0:2, 0:2]'], {}), '(rot_mat[0:2, 0:2])\n', (1455, 1474), True, 'import numpy as np\n'), ((2875, 2948), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'affine_mat', '(new_w, new_h)'], {'flags': 'cv2.INTER_LINEAR'}), '(image, affine_mat, (new_w, new_h), flags=cv2.INTER_LINEAR)\n', (2889, 2948), False, 'import cv2\n'), ((601, 618), 'matplotlib.pyplot.imread', 'plt.imread', (['image'], {}), '(image)\n', (611, 618), True, 'import matplotlib.pyplot as plt\n'), ((2992, 3010), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (3002, 3010), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3043), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {}), '(output_path)\n', (3030, 3043), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1271), 'numpy.array', 'np.array', (['image_size'], {}), '(image_size)\n', (1259, 1271), True, 'import numpy as np\n'), ((1355, 1405), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'degree', '(1.0)'], {}), '(image_center, degree, 1.0)\n', (1378, 1405), False, 'import cv2\n'), ((2784, 2804), 'numpy.matrix', 'np.matrix', (['trans_mat'], {}), '(trans_mat)\n', (2793, 2804), True, 'import numpy as np\n'), ((2807, 2825), 'numpy.matrix', 'np.matrix', (['rot_mat'], {}), '(rot_mat)\n', (2816, 2825), True, 'import numpy as np\n'), ((1669, 1700), 'numpy.array', 'np.array', (['[-image_w2, image_h2]'], {}), '([-image_w2, image_h2])\n', (1677, 1700), True, 'import numpy as np\n'), ((1739, 1769), 'numpy.array', 'np.array', (['[image_w2, image_h2]'], {}), '([image_w2, image_h2])\n', (1747, 1769), True, 'import numpy as np\n'), ((1808, 1840), 'numpy.array', 'np.array', (['[-image_w2, -image_h2]'], {}), '([-image_w2, -image_h2])\n', (1816, 1840), True, 'import numpy as np\n'), ((1879, 1910), 'numpy.array', 'np.array', (['[image_w2, -image_h2]'], {}), '([image_w2, -image_h2])\n', (1887, 1910), True, 'import numpy as np\n')]
|
#******************************************************************************
#
# MantaGen
# Copyright 2018 <NAME>, <NAME>, <NAME>
#
# This program is free software, distributed under the terms of the
# Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
#******************************************************************************
from manta import *
import numpy
from random import randint
from scenes.scene import Scene
from scenes.volumes import *
from scenes.functions import *
from util.logger import *
def instantiate_scene(**kwargs):  # instantiate independent of name; TODO: replace?
info(kwargs)
return SmokeBuoyantScene(**kwargs)
class SmokeBuoyantScene(Scene):
#----------------------------------------------------------------------------------
def __init__(self, **kwargs):
super(SmokeBuoyantScene,self).__init__(**kwargs)
# optionally, init more grids etc.
self.max_iter_fac = 2
self.accuracy = 5e-4
self.max_source_count = int(kwargs.get("max_source_count", 5))
self.velocity_scale = float(kwargs.get("velocity_scale", self.resolution.y * 0.05))
self.use_inflow_sources = kwargs.get("use_inflow_sources", "True") == "True"
self.open_bound = kwargs.get("use_open_bound", "True") == "True"
self.sources = []
self.source_strengths = []
# smoke sims need to track the density
self.density = self.solver.create(RealGrid, name="Density")
noise = self.solver.create(NoiseField, loadFromFile=True)
noise.posScale = vec3(40) * numpy.random.uniform(low=0.25, high=1.)
noise.posOffset = random_vec3s(vmin=0.0) * 100.
noise.clamp = True
noise.clampNeg = 0
noise.clampPos = 1.
noise.valOffset = 0.15
noise.timeAnim = 0.4 * numpy.random.uniform(low=0.2, high=1.)
self.noise = noise
info("SmokeBuoyantScene initialized")
#----------------------------------------------------------------------------------
def set_velocity(self, volume, velocity):
if self.dimension == 2:
velocity.z = 0.0
volume.applyToGrid(solver=self.solver, grid=self.vel, value=velocity)
#----------------------------------------------------------------------------------
# sources used as smoke inflow in the following
def add_source(self, volume):
shape = volume.shape(self.solver)
self.sources.append(shape)
self.source_strengths.append(numpy.random.uniform(low=0.5, high=1.))
#----------------------------------------------------------------------------------
def _create_scene(self):
super(SmokeBuoyantScene, self)._create_scene()
self.sources = []
self.source_strengths = []
self.density.setConst(0)
self.vel.setConst(vec3(0))
is3d = (self.dimension > 2)
self.flags.initDomain(boundaryWidth=self.boundary)
self.flags.fillGrid()
if self.open_bound:
setOpenBound(self.flags, self.boundary, 'yY', CellType_TypeOutflow|CellType_TypeEmpty)
# formerly initialize_smoke_scene(scene):
source_count = randint(1, self.max_source_count)
for i in range(source_count):
volume = random_box(center_min=[0.2, 0.1, 0.2], center_max=[0.8, 0.6, 0.8], size_min=[0.005, 0.005, 0.005], size_max=[0.2, 0.2, 0.2], is3d=is3d)
self.add_source(volume)
src, sstr = self.sources[-1], self.source_strengths[-1]
densityInflow(flags=self.flags, density=self.density, noise=self.noise, shape=src, scale=2.0*sstr, sigma=0.5)
if self.show_gui:
# central view is more interesting for smoke
self._gui.setPlane(self.resolution.z // 2)
info("SmokeBuoyantScene created with {} sources".format(len(self.sources)))
#==================================================================================
# SIMULATION
#----------------------------------------------------------------------------------
def _compute_simulation_step(self):
        # Note: the more sources there are in the scene, the earlier they are turned off
for i in range(len(self.sources)):
if self.use_inflow_sources:
src, sstr = self.sources[i], self.source_strengths[i]
densityInflow(flags=self.flags, density=self.density, noise=self.noise, shape=src, scale=2.0*sstr, sigma=0.5)
advectSemiLagrange(flags=self.flags, vel=self.vel, grid=self.density, order=2, clampMode=2)
advectSemiLagrange(flags=self.flags, vel=self.vel, grid=self.vel , order=2, clampMode=2)
vorticityConfinement(vel=self.vel, flags=self.flags, strength=0.1)
addBuoyancy(density=self.density, vel=self.vel, gravity=0.2*self.gravity, flags=self.flags)
setWallBcs(flags=self.flags, vel=self.vel)
solvePressure(flags=self.flags, vel=self.vel, pressure=self.pressure, cgMaxIterFac=self.max_iter_fac, cgAccuracy=self.accuracy)
|
[
"numpy.random.uniform",
"random.randint"
] |
[((3221, 3254), 'random.randint', 'randint', (['(1)', 'self.max_source_count'], {}), '(1, self.max_source_count)\n', (3228, 3254), False, 'from random import randint\n'), ((1630, 1670), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(0.25)', 'high': '(1.0)'}), '(low=0.25, high=1.0)\n', (1650, 1670), False, 'import numpy\n'), ((1871, 1910), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(0.2)', 'high': '(1.0)'}), '(low=0.2, high=1.0)\n', (1891, 1910), False, 'import numpy\n'), ((2547, 2586), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(0.5)', 'high': '(1.0)'}), '(low=0.5, high=1.0)\n', (2567, 2586), False, 'import numpy\n')]
|
"""
Generates a path on the given occupancy grid (map of
the environment)
"""
import networkx as nx
from grid_loader import Grid
import numpy as np
def euclidean(node1, node2):
x1, y1 = node1
x2, y2 = node2
return ((x1-x2)**2+(y1-y2)**2)**0.5
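# Worked example: for grid cells (0, 0) and (3, 4) the heuristic returns
# ((0 - 3)**2 + (0 - 4)**2)**0.5 == 5.0, i.e. the straight-line distance in
# cell units.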
class AStar:
# Constructor
def __init__(self):
self.graph = None
self.grid_res = None # m / pixel
def load_grid(self, grid_obj: Grid, occ_thresh = 0.5):
"""
Load a given Grid object into a networkx graph
The edges are given a weight 1 and the occupied
cells are removed
Parameters:
- grid_obj: Grid
A Grid object that is to be loaded for path
finding
- occ_thresh: float (default: 0.5)
            A threshold value for deciding whether a cell is occupied.
If cell value >= occ_thresh, it is considered
occupied and removed
Returns:
- removed_nodes: int
The number of nodes that were removed from
            the grid (number of occupied cells)
"""
self.grid_res = grid_obj.grid_res # Useful for translation from px to m and back
self.graph = nx.grid_2d_graph(grid_obj.w, grid_obj.h)
removed_nodes = 0
for i in range(grid_obj.w):
for j in range(grid_obj.h):
if grid_obj.grid_data[i, j] >= occ_thresh: # Occupied
self.graph.remove_node((i, j))
removed_nodes += 1
# Set edge properties of the graph
nx.set_edge_attributes(self.graph, {e: 1 for e in self.graph.edges()}, "cost")
return removed_nodes
# Return a route of [x, y] points
def get_route(self, start, end, heuristic = euclidean, weight = 0.5):
start_px = tuple((np.array(start) / self.grid_res).astype(int))
end_px = tuple((np.array(end) / self.grid_res).astype(int))
astar_path = nx.astar_path(self.graph, start_px, end_px,
heuristic=lambda n1, n2: weight*heuristic(n1, n2), weight="cost")
astar_path = np.array(astar_path)
astar_path_m = astar_path * self.grid_res
return astar_path_m
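# Example usage (sketch): `grid` stands for a grid_loader.Grid loaded
# elsewhere (its constructor is not shown in this file); start and end
# coordinates are given in metres and converted to pixels via grid_res.
#
#   planner = AStar()
#   removed = planner.load_grid(grid, occ_thresh=0.5)   # drop occupied cells
#   route_m = planner.get_route((0.5, 0.5), (4.0, 3.0))
#   # route_m is an array of [x, y] waypoints in metres along the A* path.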
|
[
"numpy.array",
"networkx.grid_2d_graph"
] |
[((1256, 1296), 'networkx.grid_2d_graph', 'nx.grid_2d_graph', (['grid_obj.w', 'grid_obj.h'], {}), '(grid_obj.w, grid_obj.h)\n', (1272, 1296), True, 'import networkx as nx\n'), ((2139, 2159), 'numpy.array', 'np.array', (['astar_path'], {}), '(astar_path)\n', (2147, 2159), True, 'import numpy as np\n'), ((1861, 1876), 'numpy.array', 'np.array', (['start'], {}), '(start)\n', (1869, 1876), True, 'import numpy as np\n'), ((1931, 1944), 'numpy.array', 'np.array', (['end'], {}), '(end)\n', (1939, 1944), True, 'import numpy as np\n')]
|
import numpy as np
import collections.abc
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.manifold import MDS
from time import time
from warnings import warn
class ForestSim():
def __init__(self, forest):
# TODO : adapt if non sklearn forest used
self.forest = forest
def fit(self, X, y = 2, randomize = False, nb_repet = 1, keep_all_mat = False):
self.X = np.float32(X) #used in tree.apply function
self.y = y
self.n = self.X.shape[0]
self.similarity_matrix = np.zeros((self.n,self.n))
# True to keep all sim matrices
if keep_all_mat:
self.co_ocs = []
# create the target vector if needed
        if not isinstance(self.y, collections.abc.Sequence):
self.y_ = np.random.choice(self.y, size = (self.n,))
else:
self.y_ = self.y
t0 = time()
for repet_id in range(nb_repet):
t = time()
print("Fitting - {}/{} iteration".format(repet_id,nb_repet))
# random seed to have changing bootstrapping in forest.fit
np.random.seed(repet_id)
if randomize:
np.random.shuffle(self.y_)
self.forest.fit(self.X,self.y_) # check inplace op
sim = self.calculate_a_sim_mat()
self.similarity_matrix += sim
if keep_all_mat:
self.co_ocs.append(sim)
print("Took {} seconds".format(np.round(time()-t, decimals=2)))
print("Total time : {} seconds".format(np.round(time()-t0, decimals=2)))
self.similarity_matrix /= nb_repet
return (self)
def calculate_a_sim_mat(self):
co_oc = np.zeros((self.n,self.n))
for iter_, dt in enumerate(self.forest.estimators_):
leafs_id = dt.tree_.apply(self.X)
ser = pd.DataFrame(data={"ser":leafs_id, "ones":1})
ser = ser.pivot(columns="ser").fillna(0)
ser = ser.dot(ser.T)
co_oc+= ser.values
        # TODO: consider weighting by the number of unique leaves
co_oc = co_oc/len(self.forest.estimators_)
return (co_oc)
# should we return a copy ?
def get_similarity_matrix(self):
return (self.similarity_matrix)
def get_distance_matrix(self):
return (np.sqrt(1-self.similarity_matrix))
    # use sklearn.manifold.MDS kwargs
def apply_MDS(self,n_instance=100, dissimilarity = "precomputed",**kwargs):
np.random.seed(0)
if isinstance(n_instance,int) and 0<n_instance and n_instance<=self.n:
idx = np.random.choice(self.n,n_instance,replace=False)
elif isinstance(n_instance,float) and 0<n_instance and n_instance<=1:
idx = np.random.choice(self.n,int(self.n*n_instance),replace=False)
else:
warn("invalid n_instance argument - should be in [0.0;1.0] or [0,self.n]")
idx = np.arange(self.n)
if len(idx) == self.n:
print("Computing MDS on all {} instances.".format(self.n))
else:
print("Computing MDS on {} / {} instances.".format(len(idx),self.n))
kwargs.update({"dissimilarity":dissimilarity})
if "dissimilarity" not in kwargs.keys():
print("Computing non precomputed MDS - set dissimilarity to precomputed to use the distance matrix")
mds = MDS(**kwargs)
self.X_mds = mds.fit_transform(self.X[idx,:])
else:
print("Computing MDS on precomputed dissimilarities.")
mds = MDS(**kwargs)
dist_mat_ = self.get_distance_matrix()[idx][:,idx]
self.X_mds = mds.fit_transform(dist_mat_)
return (self.X_mds)
def project_MDS_2D(self, **kwargs):
# TODO : add saving options
# TODO : add the necessary sampling, then stratified sampling...
plt.figure(figsize=(8,8))
sns.scatterplot(x = self.X_mds[:,0],
y=self.X_mds[:,1]
)
plt.show()
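# Example usage (sketch): the values below are illustrative; any forest that
# exposes sklearn's `fit` and `estimators_` should work.
#
#   from sklearn.ensemble import RandomForestClassifier
#   X = np.random.rand(200, 5)
#   fs = ForestSim(RandomForestClassifier(n_estimators=50))
#   fs.fit(X, y=2, randomize=True, nb_repet=3)   # random binary targets
#   sim = fs.get_similarity_matrix()              # (n, n) leaf co-occurrence
#   dist = fs.get_distance_matrix()               # sqrt(1 - similarity)
#   fs.apply_MDS(n_instance=100, n_components=2)
#   fs.project_MDS_2D()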
def main():
# should be able to take a standard csv file somewhere, apply one of the two methods, and output the sim mat in a csv file
print("work in progress")
if __name__ == "__main__":
main()
|
[
"pandas.DataFrame",
"numpy.random.seed",
"matplotlib.pyplot.show",
"seaborn.scatterplot",
"numpy.float32",
"numpy.zeros",
"time.time",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.random.choice",
"warnings.warn",
"sklearn.manifold.MDS",
"numpy.random.shuffle",
"numpy.sqrt"
] |
[((405, 418), 'numpy.float32', 'np.float32', (['X'], {}), '(X)\n', (415, 418), True, 'import numpy as np\n'), ((515, 541), 'numpy.zeros', 'np.zeros', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (523, 541), True, 'import numpy as np\n'), ((796, 802), 'time.time', 'time', ([], {}), '()\n', (800, 802), False, 'from time import time\n'), ((1470, 1496), 'numpy.zeros', 'np.zeros', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (1478, 1496), True, 'import numpy as np\n'), ((2006, 2041), 'numpy.sqrt', 'np.sqrt', (['(1 - self.similarity_matrix)'], {}), '(1 - self.similarity_matrix)\n', (2013, 2041), True, 'import numpy as np\n'), ((2155, 2172), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2169, 2172), True, 'import numpy as np\n'), ((3349, 3375), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3359, 3375), True, 'import matplotlib.pyplot as plt\n'), ((3377, 3432), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'self.X_mds[:, 0]', 'y': 'self.X_mds[:, 1]'}), '(x=self.X_mds[:, 0], y=self.X_mds[:, 1])\n', (3392, 3432), True, 'import seaborn as sns\n'), ((3442, 3452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3450, 3452), True, 'import matplotlib.pyplot as plt\n'), ((718, 758), 'numpy.random.choice', 'np.random.choice', (['self.y'], {'size': '(self.n,)'}), '(self.y, size=(self.n,))\n', (734, 758), True, 'import numpy as np\n'), ((845, 851), 'time.time', 'time', ([], {}), '()\n', (849, 851), False, 'from time import time\n'), ((982, 1006), 'numpy.random.seed', 'np.random.seed', (['repet_id'], {}), '(repet_id)\n', (996, 1006), True, 'import numpy as np\n'), ((1604, 1651), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'ser': leafs_id, 'ones': 1}"}), "(data={'ser': leafs_id, 'ones': 1})\n", (1616, 1651), True, 'import pandas as pd\n'), ((2255, 2306), 'numpy.random.choice', 'np.random.choice', (['self.n', 'n_instance'], {'replace': '(False)'}), '(self.n, n_instance, replace=False)\n', (2271, 2306), True, 'import numpy as np\n'), ((2935, 2948), 'sklearn.manifold.MDS', 'MDS', ([], {}), '(**kwargs)\n', (2938, 2948), False, 'from sklearn.manifold import MDS\n'), ((3073, 3086), 'sklearn.manifold.MDS', 'MDS', ([], {}), '(**kwargs)\n', (3076, 3086), False, 'from sklearn.manifold import MDS\n'), ((1028, 1054), 'numpy.random.shuffle', 'np.random.shuffle', (['self.y_'], {}), '(self.y_)\n', (1045, 1054), True, 'import numpy as np\n'), ((2459, 2533), 'warnings.warn', 'warn', (['"""invalid n_instance argument - should be in [0.0;1.0] or [0,self.n]"""'], {}), "('invalid n_instance argument - should be in [0.0;1.0] or [0,self.n]')\n", (2463, 2533), False, 'from warnings import warn\n'), ((2543, 2560), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (2552, 2560), True, 'import numpy as np\n'), ((1345, 1351), 'time.time', 'time', ([], {}), '()\n', (1349, 1351), False, 'from time import time\n'), ((1271, 1277), 'time.time', 'time', ([], {}), '()\n', (1275, 1277), False, 'from time import time\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""CNN_using_persistence_images_on_patch.py
The aim of this script is to perform the training of a CNN using persistence
images as input. This script is inspired by this script:
BorgwardtLab/ADNI_MRI_Analysis/blob/mixed_CNN/mixed_CNN/run_Sarah.py
To get real-time information about the model training and structure, run
$ tensorboard --logdir logs/fit
once this script has been started.
NOTES:
- Once loaded, the "big" 100x100x3 images aren't that big (>400MB in RAM) so
NO GENERATOR NEEDED
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import dotenv
import datetime
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import pandas as pd
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from itertools import islice
import shutil
print(tf.test.gpu_device_name())
DOTENV_KEY2VAL = dotenv.dotenv_values()
tf.random.set_seed(42)
N_BINS = 1000
N_FILTERS = 4
KERNEL_SIZE = 4
DROPOUT_RATE = 0.3
################################################################################
# Functions
################################################################################
persistence_image_location = (
DOTENV_KEY2VAL["DATA_DIR"] + "/global_persistence_images/"
)
partitions_location = DOTENV_KEY2VAL["DATA_DIR"] + "/partitions/"
diagnosis_json = (
DOTENV_KEY2VAL["DATA_DIR"] + "/collected_diagnoses_complete.json"
)
def make_model(input_shape):
"""Makes a keras model.
Args:
input_shape (tuple): input shape of the neural network
    Returns:
        keras.Model: model ready to be trained
"""
inputs = keras.Input(shape=input_shape)
tower_1 = layers.Conv2D(
N_FILTERS, KERNEL_SIZE, padding="same", activation="relu"
)(inputs[:, :, :, 0:1])
tower_1 = layers.BatchNormalization()(tower_1)
tower_1 = layers.MaxPooling2D()(tower_1)
tower_2 = layers.Conv2D(
N_FILTERS, KERNEL_SIZE, padding="same", activation="relu"
)(inputs[:, :, :, 1:2])
tower_2 = layers.BatchNormalization()(tower_2)
tower_2 = layers.MaxPooling2D()(tower_2)
tower_3 = layers.Conv2D(
N_FILTERS, KERNEL_SIZE, padding="same", activation="relu"
)(inputs[:, :, :, 2:])
tower_3 = layers.BatchNormalization()(tower_3)
tower_3 = layers.MaxPooling2D()(tower_3)
merged = layers.concatenate([tower_1, tower_2, tower_3], axis=1)
merged = layers.Flatten()(merged)
x = layers.Dense(500, activation="relu")(merged)
x = layers.Dropout(DROPOUT_RATE)(x)
    x = layers.Dense(500, activation="relu")(x)
x = layers.Dropout(DROPOUT_RATE)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
return keras.Model(inputs, outputs)
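# Sketch of how the model is instantiated (main() below does the same); the
# three image channels feed three separate convolutional towers.
#   model = make_model(input_shape=(N_BINS, N_BINS, 3))
#   model.summary()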
def get_partitions(partitions_location):
partition = []
labels = []
for root, dirs, files in os.walk(partitions_location):
for file in files:
if file.split("_")[0] == "partition":
partition.append(
np.load(
partitions_location + file, allow_pickle=True
).item()
)
elif file.split("_")[0] == "labels":
labels.append(
np.load(
partitions_location + file, allow_pickle=True
).item()
)
else:
print(f"File {file} is neither partition nor labels file")
return partition, labels
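# Structure of the returned objects, as used in main() below: each element of
# `partition` is a dict with "train" and "validation" lists of image ids, and
# each element of `labels` maps an image id to its binary label.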
################################################################################
# Main
################################################################################
def main():
############################################################################
# Data loading and processing
############################################################################
inits = 3
partitions, labels = get_partitions(partitions_location)
histories = []
for partition, label in zip(partitions, labels):
for i in range(inits):
# Make sure there aren't the same patients in train and test
X_train_lst = []
y_train_lst = []
for image in tqdm(partition["train"]):
X_train_lst.append(
np.load(persistence_image_location + image + ".npy")
)
y_train_lst.append(label[image])
X_train, y_train = (
np.stack(X_train_lst, axis=0).reshape(
len(X_train_lst), N_BINS, N_BINS, 3
),
np.vstack(y_train_lst),
)
print("Training data loadede")
X_val_lst = []
y_val_lst = []
for image in tqdm(partition["validation"]):
X_val_lst.append(
np.load(persistence_image_location + image + ".npy")
)
y_val_lst.append(label[image])
X_val, y_val = (
np.stack(X_val_lst, axis=0).reshape(
len(X_val_lst), N_BINS, N_BINS, 3
),
np.vstack(y_val_lst),
)
print("Validation data loadede")
####################################################################
# Model definition
####################################################################
model = make_model(input_shape=(N_BINS, N_BINS, 3))
tf.keras.utils.plot_model(
model,
to_file="model.png",
show_shapes=True,
show_layer_names=True,
rankdir="TB",
expand_nested=True,
dpi=96,
)
####################################################################
# Model training
####################################################################
epochs = 100
tensorboard_logs = "logs/fit"
if os.path.exists(tensorboard_logs):
shutil.rmtree(tensorboard_logs)
log_dir = "logs/fit/" + datetime.datetime.now().strftime(
"%Y%m%d-%H%M%S"
)
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=log_dir, histogram_freq=1
),
tf.keras.callbacks.EarlyStopping(
monitor="val_accuracy",
min_delta=0.001,
patience=10,
verbose=0,
mode="auto",
baseline=None,
restore_best_weights=True,
),
tf.keras.callbacks.ModelCheckpoint(
filepath="model_weights",
save_weights_only=True,
monitor="val_accuracy",
mode="max",
save_best_only=True,
),
]
lr = keras.optimizers.schedules.ExponentialDecay(
0.01, decay_steps=30, decay_rate=0.6, staircase=True
)
model.compile(
optimizer=keras.optimizers.Adam(
learning_rate=lr,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07,
amsgrad=False,
),
loss="binary_crossentropy",
metrics=[
keras.metrics.BinaryAccuracy(name="accuracy"),
keras.metrics.Precision(name="precision"),
keras.metrics.Recall(name="recall"),
keras.metrics.AUC(name="auc"),
],
# run_eagerly=True,
)
history = model.fit(
X_train,
y_train,
epochs=epochs,
callbacks=callbacks,
batch_size=16,
validation_data=(X_val, y_val),
)
histories.append(history)
############################################################################
# Model evaluation
############################################################################
    # Mostly already included in the training procedure.
last_acc = []
last_val_acc = []
last_val_prec = []
last_val_rec = []
last_val_auc = []
for hist in histories:
last_acc.append(max(hist.history["accuracy"]))
last_val_acc.append(max(hist.history["val_accuracy"]))
last_val_prec.append(max(hist.history["val_precision"]))
last_val_rec.append(max(hist.history["val_recall"]))
last_val_auc.append(max(hist.history["val_auc"]))
print(
f"The mean training accuracy over the folds is {np.mean(last_acc)}, pm {np.std(last_acc)}"
)
print(
f"The mean validation accuracy over the folds is {np.mean(last_val_acc)}, pm {np.std(last_val_acc)}"
)
print(
f"The mean validation precision over the folds is {np.mean(last_val_prec)}, pm {np.std(last_val_prec)}"
)
print(
f"The mean validation recall over the folds is {np.mean(last_val_rec)}, pm {np.std(last_val_rec)}"
)
print(
f"The mean validation auc over the folds is {np.mean(last_val_auc)}, pm {np.std(last_val_auc)}"
)
############################################################################
# Model evaluation
############################################################################
# Here we actually extract the id of the samples that are misclassified
# y_pred = model.predict(X_train)
# difference = np.round(y_train - y_pred)
# index = np.nonzero(difference)
# y_pred = model.predict(X_val)
# difference = np.round(y_val - y_pred)
# index_2 = np.nonzero(difference)
# df_misclassified_train = pd.DataFrame(
# np.array(partitions[0]["train"])[index[0]]
# )
# df_misclassified_val = pd.DataFrame(
# np.array(partitions[0]["validation"])[index_2[0]]
# )
# df_misclassified = pd.concat(
# [df_misclassified_train, df_misclassified_val]
# )
# df_misclassified.to_csv(
# DOTENV_KEY2VAL["GEN_DATA_DIR"] + "misclassification.csv"
# )
if __name__ == "__main__":
main()
|
[
"tensorflow.random.set_seed",
"numpy.load",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"os.walk",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.concatenate",
"numpy.mean",
"tensorflow.keras.metrics.BinaryAccuracy",
"shutil.rmtree",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.BatchNormalization",
"numpy.std",
"tensorflow.keras.Input",
"os.path.exists",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.Precision",
"dotenv.dotenv_values",
"datetime.datetime.now",
"numpy.stack",
"tqdm.tqdm",
"tensorflow.keras.metrics.AUC",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.Model",
"tensorflow.test.gpu_device_name",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"numpy.vstack",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.metrics.Recall",
"tensorflow.keras.callbacks.TensorBoard"
] |
[((934, 956), 'dotenv.dotenv_values', 'dotenv.dotenv_values', ([], {}), '()\n', (954, 956), False, 'import dotenv\n'), ((957, 979), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(42)'], {}), '(42)\n', (975, 979), True, 'import tensorflow as tf\n'), ((889, 914), 'tensorflow.test.gpu_device_name', 'tf.test.gpu_device_name', ([], {}), '()\n', (912, 914), True, 'import tensorflow as tf\n'), ((1742, 1772), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1753, 1772), False, 'from tensorflow import keras\n'), ((2446, 2501), 'tensorflow.keras.layers.concatenate', 'layers.concatenate', (['[tower_1, tower_2, tower_3]'], {'axis': '(1)'}), '([tower_1, tower_2, tower_3], axis=1)\n', (2464, 2501), False, 'from tensorflow.keras import layers\n'), ((2792, 2820), 'tensorflow.keras.Model', 'keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (2803, 2820), False, 'from tensorflow import keras\n'), ((2928, 2956), 'os.walk', 'os.walk', (['partitions_location'], {}), '(partitions_location)\n', (2935, 2956), False, 'import os\n'), ((1788, 1860), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['N_FILTERS', 'KERNEL_SIZE'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(N_FILTERS, KERNEL_SIZE, padding='same', activation='relu')\n", (1801, 1860), False, 'from tensorflow.keras import layers\n'), ((1911, 1938), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1936, 1938), False, 'from tensorflow.keras import layers\n'), ((1962, 1983), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (1981, 1983), False, 'from tensorflow.keras import layers\n'), ((2008, 2080), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['N_FILTERS', 'KERNEL_SIZE'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(N_FILTERS, KERNEL_SIZE, padding='same', activation='relu')\n", (2021, 2080), False, 'from tensorflow.keras import layers\n'), ((2131, 2158), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2156, 2158), False, 'from tensorflow.keras import layers\n'), ((2182, 2203), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (2201, 2203), False, 'from tensorflow.keras import layers\n'), ((2228, 2300), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['N_FILTERS', 'KERNEL_SIZE'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(N_FILTERS, KERNEL_SIZE, padding='same', activation='relu')\n", (2241, 2300), False, 'from tensorflow.keras import layers\n'), ((2350, 2377), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (2375, 2377), False, 'from tensorflow.keras import layers\n'), ((2401, 2422), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (2420, 2422), False, 'from tensorflow.keras import layers\n'), ((2515, 2531), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2529, 2531), False, 'from tensorflow.keras import layers\n'), ((2548, 2584), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(500)'], {'activation': '"""relu"""'}), "(500, activation='relu')\n", (2560, 2584), False, 'from tensorflow.keras import layers\n'), ((2601, 2629), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['DROPOUT_RATE'], {}), '(DROPOUT_RATE)\n', (2615, 2629), False, 'from tensorflow.keras import layers\n'), ((2641, 2677), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(500)'], {'activation': 
'"""relu"""'}), "(500, activation='relu')\n", (2653, 2677), False, 'from tensorflow.keras import layers\n'), ((2694, 2722), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['DROPOUT_RATE'], {}), '(DROPOUT_RATE)\n', (2708, 2722), False, 'from tensorflow.keras import layers\n'), ((2740, 2777), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2752, 2777), False, 'from tensorflow.keras import layers\n'), ((4278, 4302), 'tqdm.tqdm', 'tqdm', (["partition['train']"], {}), "(partition['train'])\n", (4282, 4302), False, 'from tqdm import tqdm\n'), ((4844, 4873), 'tqdm.tqdm', 'tqdm', (["partition['validation']"], {}), "(partition['validation'])\n", (4848, 4873), False, 'from tqdm import tqdm\n'), ((5595, 5735), 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['model'], {'to_file': '"""model.png"""', 'show_shapes': '(True)', 'show_layer_names': '(True)', 'rankdir': '"""TB"""', 'expand_nested': '(True)', 'dpi': '(96)'}), "(model, to_file='model.png', show_shapes=True,\n show_layer_names=True, rankdir='TB', expand_nested=True, dpi=96)\n", (5620, 5735), True, 'import tensorflow as tf\n'), ((6136, 6168), 'os.path.exists', 'os.path.exists', (['tensorboard_logs'], {}), '(tensorboard_logs)\n', (6150, 6168), False, 'import os\n'), ((7120, 7221), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'keras.optimizers.schedules.ExponentialDecay', (['(0.01)'], {'decay_steps': '(30)', 'decay_rate': '(0.6)', 'staircase': '(True)'}), '(0.01, decay_steps=30,\n decay_rate=0.6, staircase=True)\n', (7163, 7221), False, 'from tensorflow import keras\n'), ((6186, 6217), 'shutil.rmtree', 'shutil.rmtree', (['tensorboard_logs'], {}), '(tensorboard_logs)\n', (6199, 6217), False, 'import shutil\n'), ((6377, 6442), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(1)'}), '(log_dir=log_dir, histogram_freq=1)\n', (6407, 6442), True, 'import tensorflow as tf\n'), ((6498, 6658), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'min_delta': '(0.001)', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""auto"""', 'baseline': 'None', 'restore_best_weights': '(True)'}), "(monitor='val_accuracy', min_delta=0.001,\n patience=10, verbose=0, mode='auto', baseline=None,\n restore_best_weights=True)\n", (6530, 6658), True, 'import tensorflow as tf\n'), ((6827, 6976), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': '"""model_weights"""', 'save_weights_only': '(True)', 'monitor': '"""val_accuracy"""', 'mode': '"""max"""', 'save_best_only': '(True)'}), "(filepath='model_weights',\n save_weights_only=True, monitor='val_accuracy', mode='max',\n save_best_only=True)\n", (6861, 6976), True, 'import tensorflow as tf\n'), ((8923, 8940), 'numpy.mean', 'np.mean', (['last_acc'], {}), '(last_acc)\n', (8930, 8940), True, 'import numpy as np\n'), ((8947, 8963), 'numpy.std', 'np.std', (['last_acc'], {}), '(last_acc)\n', (8953, 8963), True, 'import numpy as np\n'), ((9041, 9062), 'numpy.mean', 'np.mean', (['last_val_acc'], {}), '(last_val_acc)\n', (9048, 9062), True, 'import numpy as np\n'), ((9069, 9089), 'numpy.std', 'np.std', (['last_val_acc'], {}), '(last_val_acc)\n', (9075, 9089), True, 'import numpy as np\n'), ((9168, 9190), 'numpy.mean', 'np.mean', (['last_val_prec'], {}), '(last_val_prec)\n', (9175, 9190), True, 'import numpy as np\n'), ((9197, 9218), 
'numpy.std', 'np.std', (['last_val_prec'], {}), '(last_val_prec)\n', (9203, 9218), True, 'import numpy as np\n'), ((9294, 9315), 'numpy.mean', 'np.mean', (['last_val_rec'], {}), '(last_val_rec)\n', (9301, 9315), True, 'import numpy as np\n'), ((9322, 9342), 'numpy.std', 'np.std', (['last_val_rec'], {}), '(last_val_rec)\n', (9328, 9342), True, 'import numpy as np\n'), ((9415, 9436), 'numpy.mean', 'np.mean', (['last_val_auc'], {}), '(last_val_auc)\n', (9422, 9436), True, 'import numpy as np\n'), ((9443, 9463), 'numpy.std', 'np.std', (['last_val_auc'], {}), '(last_val_auc)\n', (9449, 9463), True, 'import numpy as np\n'), ((4360, 4412), 'numpy.load', 'np.load', (["(persistence_image_location + image + '.npy')"], {}), "(persistence_image_location + image + '.npy')\n", (4367, 4412), True, 'import numpy as np\n'), ((4680, 4702), 'numpy.vstack', 'np.vstack', (['y_train_lst'], {}), '(y_train_lst)\n', (4689, 4702), True, 'import numpy as np\n'), ((4929, 4981), 'numpy.load', 'np.load', (["(persistence_image_location + image + '.npy')"], {}), "(persistence_image_location + image + '.npy')\n", (4936, 4981), True, 'import numpy as np\n'), ((5239, 5259), 'numpy.vstack', 'np.vstack', (['y_val_lst'], {}), '(y_val_lst)\n', (5248, 5259), True, 'import numpy as np\n'), ((7301, 7401), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-07)', 'amsgrad': '(False)'}), '(learning_rate=lr, beta_1=0.9, beta_2=0.999, epsilon=\n 1e-07, amsgrad=False)\n', (7322, 7401), False, 'from tensorflow import keras\n'), ((6255, 6278), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6276, 6278), False, 'import datetime\n'), ((7607, 7652), 'tensorflow.keras.metrics.BinaryAccuracy', 'keras.metrics.BinaryAccuracy', ([], {'name': '"""accuracy"""'}), "(name='accuracy')\n", (7635, 7652), False, 'from tensorflow import keras\n'), ((7674, 7715), 'tensorflow.keras.metrics.Precision', 'keras.metrics.Precision', ([], {'name': '"""precision"""'}), "(name='precision')\n", (7697, 7715), False, 'from tensorflow import keras\n'), ((7737, 7772), 'tensorflow.keras.metrics.Recall', 'keras.metrics.Recall', ([], {'name': '"""recall"""'}), "(name='recall')\n", (7757, 7772), False, 'from tensorflow import keras\n'), ((7794, 7823), 'tensorflow.keras.metrics.AUC', 'keras.metrics.AUC', ([], {'name': '"""auc"""'}), "(name='auc')\n", (7811, 7823), False, 'from tensorflow import keras\n'), ((3089, 3143), 'numpy.load', 'np.load', (['(partitions_location + file)'], {'allow_pickle': '(True)'}), '(partitions_location + file, allow_pickle=True)\n', (3096, 3143), True, 'import numpy as np\n'), ((4538, 4567), 'numpy.stack', 'np.stack', (['X_train_lst'], {'axis': '(0)'}), '(X_train_lst, axis=0)\n', (4546, 4567), True, 'import numpy as np\n'), ((5101, 5128), 'numpy.stack', 'np.stack', (['X_val_lst'], {'axis': '(0)'}), '(X_val_lst, axis=0)\n', (5109, 5128), True, 'import numpy as np\n'), ((3315, 3369), 'numpy.load', 'np.load', (['(partitions_location + file)'], {'allow_pickle': '(True)'}), '(partitions_location + file, allow_pickle=True)\n', (3322, 3369), True, 'import numpy as np\n')]
|
"""A simple, 2D peridynamics simulation example."""
import argparse
import cProfile
from io import StringIO
import numpy as np
import pathlib
from peridynamics import Model
from peridynamics.model import initial_crack_helper
from peridynamics.integrators import Euler
from pstats import SortKey, Stats
mesh_file = pathlib.Path(__file__).parent.absolute() / "test.msh"
@initial_crack_helper
def is_crack(x, y):
"""Determine whether a pair of particles define the crack."""
output = 0
crack_length = 0.3
p1 = x
p2 = y
if x[0] > y[0]:
p2 = x
p1 = y
    # 1e-6 makes it fall on one side of the central line of particles
if p1[0] < 0.5 + 1e-6 and p2[0] > 0.5 + 1e-6:
# draw a straight line between them
m = (p2[1] - p1[1]) / (p2[0] - p1[0])
c = p1[1] - m * p1[0]
        # height at x = 0.5
height = m * 0.5 + c
if (height > 0.5 * (1 - crack_length)
and height < 0.5 * (1 + crack_length)):
output = 1
return output
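# Worked example: particles at (0.4, 0.5) and (0.6, 0.5) straddle the central
# line x = 0.5, and the line joining them crosses it at height 0.5, inside
# 0.5 * (1 +/- crack_length) = (0.35, 0.65), so the pair belongs to the
# initial crack (output 1); a pair at (0.4, 0.9) and (0.6, 0.9) crosses at
# height 0.9 and does not (output 0).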
def boundary_function(model, u, step):
"""
Apply a load to the system.
Particles on each of the sides of the system are pulled apart with
increasing time step.
"""
load_rate = 0.00001
u[model.lhs, 1:3] = np.zeros((len(model.lhs), 2))
u[model.rhs, 1:3] = np.zeros((len(model.rhs), 2))
u[model.lhs, 0] = (
        -0.5 * step * load_rate * np.ones(len(model.lhs))
)
u[model.rhs, 0] = (
0.5 * step * load_rate * np.ones(len(model.rhs))
)
return u
def main():
"""Conduct a peridynamics simulation."""
parser = argparse.ArgumentParser()
parser.add_argument('--profile', action='store_const', const=True)
args = parser.parse_args()
if args.profile:
profile = cProfile.Profile()
profile.enable()
model = Model(mesh_file, horizon=0.1, critical_strain=0.005,
elastic_modulus=0.05, initial_crack=is_crack)
# Set left-hand side and right-hand side of boundary
indices = np.arange(model.nnodes)
model.lhs = indices[model.coords[:, 0] < 1.5*model.horizon]
model.rhs = indices[model.coords[:, 0] > 1.0 - 1.5*model.horizon]
integrator = Euler(dt=1e-3)
u, damage, *_ = model.simulate(
steps=100,
integrator=integrator,
boundary_function=boundary_function,
write=1000
)
if args.profile:
profile.disable()
s = StringIO()
stats = Stats(profile, stream=s).sort_stats(SortKey.CUMULATIVE)
stats.print_stats()
print(s.getvalue())
if __name__ == "__main__":
main()
|
[
"io.StringIO",
"argparse.ArgumentParser",
"peridynamics.Model",
"pstats.Stats",
"cProfile.Profile",
"pathlib.Path",
"numpy.arange",
"peridynamics.integrators.Euler"
] |
[((1611, 1636), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1634, 1636), False, 'import argparse\n'), ((1836, 1938), 'peridynamics.Model', 'Model', (['mesh_file'], {'horizon': '(0.1)', 'critical_strain': '(0.005)', 'elastic_modulus': '(0.05)', 'initial_crack': 'is_crack'}), '(mesh_file, horizon=0.1, critical_strain=0.005, elastic_modulus=0.05,\n initial_crack=is_crack)\n', (1841, 1938), False, 'from peridynamics import Model\n'), ((2025, 2048), 'numpy.arange', 'np.arange', (['model.nnodes'], {}), '(model.nnodes)\n', (2034, 2048), True, 'import numpy as np\n'), ((2201, 2216), 'peridynamics.integrators.Euler', 'Euler', ([], {'dt': '(0.001)'}), '(dt=0.001)\n', (2206, 2216), False, 'from peridynamics.integrators import Euler\n'), ((1779, 1797), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (1795, 1797), False, 'import cProfile\n'), ((2437, 2447), 'io.StringIO', 'StringIO', ([], {}), '()\n', (2445, 2447), False, 'from io import StringIO\n'), ((315, 337), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (327, 337), False, 'import pathlib\n'), ((2464, 2488), 'pstats.Stats', 'Stats', (['profile'], {'stream': 's'}), '(profile, stream=s)\n', (2469, 2488), False, 'from pstats import SortKey, Stats\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/tslasso.main.ipynb (unless otherwise specified).
__all__ = ['run_exp']
# Cell
from ..atomgeom.features import get_features,get_D_feats_feats
from ..atomgeom.utils import get_atoms_4
from ..simulations.rigidethanol import get_rigid_ethanol_data
from ..utils.utils import get_234_indices, get_atoms3_full, get_atoms4_full, data_stream_custom_range, get_cosines
from ..geometry.geometry import get_geom, get_wlpca_tangent_sel, get_rm_tangent_sel
from ..statistics.normalization import normalize_L212
from ..optimization.gradientgrouplasso import get_sr_lambda_parallel
from ..optimization.utils import get_selected_function_ids,get_selected_functions_lm2
from ..utils.replicates import Replicate, get_supports_brute_tslasso,get_supports_lasso
from megaman.embedding import SpectralEmbedding
import dill as pickle
import os
import sys
import numpy as np
import itertools
from itertools import permutations,combinations
from sklearn.decomposition import TruncatedSVD
import pathos
from pathos.multiprocessing import ProcessingPool as Pool
# Cell
def run_exp(positions, hparams):
d = hparams.d
n_components = hparams.n_components
atoms2_feat = hparams.atoms2_feat
atoms3_feat = hparams.atoms3_feat
atoms4_feat = hparams.atoms4_feat
atoms2_dict = hparams.atoms2_dict
atoms3_dict = hparams.atoms3_dict
atoms4_dict = hparams.atoms4_dict
diagram = hparams.diagram
ii = np.asarray(hparams.ii)
jj = np.asarray(hparams.jj)
outfile = hparams.outdir + '/' + hparams.name + 'results_tslasso'
#load geometric features
natoms = positions.shape[1]
n = positions.shape[0]
atoms2 = np.asarray(list(itertools.combinations(range(natoms), 2)))
atoms2full = atoms2
atoms3 = np.asarray(list(itertools.combinations(range(natoms), 3)))
atoms4 = np.asarray(list(itertools.combinations(range(natoms), 4)))
atoms3full = get_atoms3_full(atoms3)
atoms4full = get_atoms4_full(atoms4)
if atoms2_feat:
atoms2_feats = atoms2full
else:
atoms2_feats = np.asarray([])
if atoms3_feat:
atoms3_feats = atoms3full
else:
atoms3_feats = np.asarray([])
if atoms4_feat:
atoms4_feats = atoms4full
else:
atoms4_feats = np.asarray([])
#compute rotation/translation invariant featureization
cores = pathos.multiprocessing.cpu_count() - 1
pool = Pool(cores)
print('feature dimensions',atoms2_feats.shape, atoms3_feats.shape,atoms4_feats.shape)
#import pdb;pdb.set_trace
results = pool.map(lambda i: get_features(positions[i],
atoms2 = atoms2_feats,
atoms3 = atoms3_feats,
atoms4 = atoms4_feats),
data_stream_custom_range(list(range(n))))
data = np.vstack([np.hstack(results[i]) for i in range(n)])
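    # center the feature matrix before applying the truncated SVD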
data = data - np.mean(data, axis = 0)
#apply SVD
svd = TruncatedSVD(n_components=50)
data_svd = svd.fit_transform(data)
#compute geometry
radius = hparams.radius
n_neighbors = hparams.n_neighbors
geom = get_geom(data_svd, radius, n_neighbors)
print('computing embedding (for comparison)')
spectral_embedding = SpectralEmbedding(n_components=n_components,eigen_solver='arpack',geom=geom)
embed_spectral = spectral_embedding.fit_transform(data_svd)
embedding = embed_spectral
#obtain gradients
if atoms2_dict:
atoms2_dicts = atoms2full
else:
atoms2_dicts = np.asarray([])
if atoms3_dict:
atoms3_dicts = atoms3full
else:
atoms3_dicts = np.asarray([])
if atoms4_dict and not diagram:
atoms4_dicts = atoms4full
elif atoms4_dict:
atoms4_dicts= get_atoms_4(natoms, ii, jj)[0]
else:
atoms4_dicts = np.asarray([])
p = len(atoms2_dicts) + len(atoms3_dicts) + len(atoms4_dicts)
#get gradients
replicates = {}
nreps = hparams.nreps
nsel = hparams.nsel
for r in range(nreps):
#print(i)
replicates[r] = Replicate(nsel = nsel, n = 10000)
replicates[r].tangent_bases_M = get_wlpca_tangent_sel(data_svd, geom, replicates[r].selected_points, d)
D_feats_feats = np.asarray([get_D_feats_feats(positions[replicates[r].selected_points[i]],
atoms2in = atoms2_feats,
atoms3in = atoms3_feats,
atoms4in = atoms4_feats,
atoms2out = atoms2_dicts,
atoms3out = atoms3_dicts,
atoms4out = atoms4_dicts) for i in range(nsel)])
replicates[r].dg_x = np.asarray([svd.transform(D_feats_feats[i].transpose()).transpose() for i in range(nsel)])
replicates[r].dg_x_normalized = normalize_L212(replicates[r].dg_x)
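        # project the normalized ambient-space gradients onto the estimated d-dimensional tangent basis at each selected point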
replicates[r].dg_M = np.einsum('i b p, i b d -> i d p', replicates[r].dg_x_normalized, replicates[r].tangent_bases_M)
#run ts lasso
gl_itermax= hparams.gl_itermax
reg_l2 = hparams.reg_l2
max_search = hparams.max_search
d = hparams.d
tol = hparams.tol
learning_rate = hparams.learning_rate
for r in range(nreps):
replicates[r].results = get_sr_lambda_parallel(np.asarray([np.identity(d) for i in range(nsel)]), replicates[r].dg_M, gl_itermax,reg_l2, max_search, d, tol,learning_rate)
replicates[r].get_ordered_axes()
replicates[r].sel_l = replicates[r].get_selection_lambda()
#get manifold lasso support
selected_functions_unique = np.asarray(np.unique(get_selected_function_ids(replicates,d)), dtype = int)
support_tensor_lasso, supports_lasso = get_supports_lasso(replicates,p,d)
#get two stage support
selected_functions_lm2 = get_selected_functions_lm2(replicates)
support_tensor_ts, supports_ts = get_supports_brute_tslasso(replicates,nreps,p,d,selected_functions_lm2)
selected_functions_unique_twostage = np.asarray(np.unique(supports_ts), dtype = int)#np.unique(np.asarray(np.where(support_tensor_ts > 0.)[0], dtype = int))
pool.close()
pool.restart()
#compute function values for plotting... needs 'order234' for full computation
print('computing selected function values lasso, ' + str(selected_functions_unique))
selected_function_values = pool.map(
lambda i: get_features(positions[i],
atoms2 = np.asarray([]),
atoms3 = np.asarray([]),
atoms4 = atoms4_dicts[selected_functions_unique]),
data_stream_custom_range(list(range(n))))
selected_function_values_array = np.vstack([np.hstack(selected_function_values[i]) for i in range(n)])
print('computing selected function values two stage, ' + str(selected_functions_unique_twostage))
selected_function_values_brute = pool.map(
lambda i: get_features(positions[i],
atoms2 = np.asarray([]),
atoms3 = np.asarray([]),
atoms4 = atoms4_dicts[selected_functions_unique_twostage]),
data_stream_custom_range(list(range(n))))
selected_function_values_array_brute = np.vstack([np.hstack(selected_function_values_brute[i]) for i in range(n)])
#remove large gradient arrays
print('prep save')
replicates_small = {}
for r in range(nreps):
replicates_small[r] = Replicate(nsel=nsel, n=n,
selected_points=replicates[r].selected_points)
replicates_small[r].dg_M = replicates[r].dg_M
replicates_small[r].cs_reorder = replicates[r].cs_reorder
replicates_small[r].xaxis_reorder = replicates[r].xaxis_reorder
print('getting cosines')
cosine = get_cosines(replicates[0].dg_M)
replicates_small[0].cosine_abs = np.mean(np.abs(cosine), axis = 0)
#prepare to save
results = {}
results['replicates_small'] = replicates_small
results['data'] = data_svd
results['embed'] = embedding
results['supports_ts'] = support_tensor_ts, supports_ts
results['supports_lasso'] = support_tensor_lasso, supports_lasso
results['supports_lasso_values'] = selected_function_values
results['supports_ts_values'] = selected_function_values_brute
results['selected_lasso'] = selected_functions_unique
results['selected_ts'] = selected_functions_unique_twostage
results['geom'] = geom
results['dictionary'] = {}
results['dictionary']['atoms2'] = atoms2_dicts
results['dictionary']['atoms3'] = atoms3_dicts
results['dictionary']['atoms4'] = atoms4_dicts
#save
with open(outfile,'wb') as output:
pickle.dump(results, output, pickle.HIGHEST_PROTOCOL)
|
[
"numpy.abs",
"pathos.multiprocessing.cpu_count",
"sklearn.decomposition.TruncatedSVD",
"numpy.asarray",
"numpy.einsum",
"numpy.identity",
"numpy.hstack",
"megaman.embedding.SpectralEmbedding",
"numpy.mean",
"dill.dump",
"pathos.multiprocessing.ProcessingPool",
"numpy.unique"
] |
[((1456, 1478), 'numpy.asarray', 'np.asarray', (['hparams.ii'], {}), '(hparams.ii)\n', (1466, 1478), True, 'import numpy as np\n'), ((1488, 1510), 'numpy.asarray', 'np.asarray', (['hparams.jj'], {}), '(hparams.jj)\n', (1498, 1510), True, 'import numpy as np\n'), ((2422, 2433), 'pathos.multiprocessing.ProcessingPool', 'Pool', (['cores'], {}), '(cores)\n', (2426, 2433), True, 'from pathos.multiprocessing import ProcessingPool as Pool\n'), ((2959, 2988), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': '(50)'}), '(n_components=50)\n', (2971, 2988), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((3245, 3323), 'megaman.embedding.SpectralEmbedding', 'SpectralEmbedding', ([], {'n_components': 'n_components', 'eigen_solver': '"""arpack"""', 'geom': 'geom'}), "(n_components=n_components, eigen_solver='arpack', geom=geom)\n", (3262, 3323), False, 'from megaman.embedding import SpectralEmbedding\n'), ((2079, 2093), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (2089, 2093), True, 'import numpy as np\n'), ((2182, 2196), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (2192, 2196), True, 'import numpy as np\n'), ((2285, 2299), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (2295, 2299), True, 'import numpy as np\n'), ((2372, 2406), 'pathos.multiprocessing.cpu_count', 'pathos.multiprocessing.cpu_count', ([], {}), '()\n', (2404, 2406), False, 'import pathos\n'), ((2909, 2930), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2916, 2930), True, 'import numpy as np\n'), ((3527, 3541), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (3537, 3541), True, 'import numpy as np\n'), ((3629, 3643), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (3639, 3643), True, 'import numpy as np\n'), ((4821, 4921), 'numpy.einsum', 'np.einsum', (['"""i b p, i b d -> i d p"""', 'replicates[r].dg_x_normalized', 'replicates[r].tangent_bases_M'], {}), "('i b p, i b d -> i d p', replicates[r].dg_x_normalized,\n replicates[r].tangent_bases_M)\n", (4830, 4921), True, 'import numpy as np\n'), ((5911, 5933), 'numpy.unique', 'np.unique', (['supports_ts'], {}), '(supports_ts)\n', (5920, 5933), True, 'import numpy as np\n'), ((7922, 7936), 'numpy.abs', 'np.abs', (['cosine'], {}), '(cosine)\n', (7928, 7936), True, 'import numpy as np\n'), ((8753, 8806), 'dill.dump', 'pickle.dump', (['results', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(results, output, pickle.HIGHEST_PROTOCOL)\n', (8764, 8806), True, 'import dill as pickle\n'), ((2849, 2870), 'numpy.hstack', 'np.hstack', (['results[i]'], {}), '(results[i])\n', (2858, 2870), True, 'import numpy as np\n'), ((3822, 3836), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (3832, 3836), True, 'import numpy as np\n'), ((6669, 6707), 'numpy.hstack', 'np.hstack', (['selected_function_values[i]'], {}), '(selected_function_values[i])\n', (6678, 6707), True, 'import numpy as np\n'), ((7291, 7335), 'numpy.hstack', 'np.hstack', (['selected_function_values_brute[i]'], {}), '(selected_function_values_brute[i])\n', (7300, 7335), True, 'import numpy as np\n'), ((5213, 5227), 'numpy.identity', 'np.identity', (['d'], {}), '(d)\n', (5224, 5227), True, 'import numpy as np\n'), ((6380, 6394), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (6390, 6394), True, 'import numpy as np\n'), ((6448, 6462), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (6458, 6462), True, 'import numpy as np\n'), ((6987, 7001), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (6997, 7001), True, 'import 
numpy as np\n'), ((7055, 7069), 'numpy.asarray', 'np.asarray', (['[]'], {}), '([])\n', (7065, 7069), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pyGDM2 import (structures, materials, core,
linear, fields, propagators,
tools)
def get_spectrum(geometry, step, wavelengths):
    '''Obtains a UV-vis spectrum for the specified geometry'''
material = materials.gold()
struct = structures.struct(step, geometry, material, verbose=False)
struct = structures.center_struct(struct)
field_generator = fields.plane_wave
kwargs = dict(theta=0, inc_angle=180)
efield = fields.efield(field_generator,
wavelengths=wavelengths, kwargs=kwargs)
dyads = propagators.DyadsQuasistatic123(n1 = 1.33, n2 = 1.33, n3 = 1.33)
sim = core.simulation(struct, efield, dyads)
sim.scatter(verbose=False)
field_kwargs = tools.get_possible_field_params_spectra(sim)
config_idx = 0
wl, spectrum = tools.calculate_spectrum(sim,
field_kwargs[config_idx], linear.extinct)
abs_ = spectrum.T[2]/np.max(spectrum.T[2])
return abs_, geometry
def obtain_spectra(step, radius_mean, radius_std, wavelength):
'''Calculates the absorption spectra of polydisperse gold spheres that have a normally distributed
radius.
Inputs:
- step: The step size used for the calculation.
- radius_mean: The mean of the normal distribution used to calculate the radius of the sphere
- radius_std: The std of the normal distribution used to calculate the radius of the sphere
- wavelength: A 1-d array of the wavelength values to calculate the absorption spectra
Outputs:
- array: A 2d array of the wavelengths and Intensity values.
'''
n_spheres = 7
radius_list = []
for i in range(n_spheres):
# Normal distribution parameters for Sphere Radius
radius_mean = 6
radius_std = 3
r = (np.random.randn(1)[0]*radius_std + radius_mean)/step
radius_list.append(r)
geometry = structures.sphere(step, R=r, mesh='cube')
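        # unit offsets that place the seven spheres at the origin and along the +/-x, +/-y and +/-z axes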
loc_array = np.array([[0,0,0],[0,0,1],[0,0,-1],[1,0,0],[-1,0,0],[0,1,0],[0,-1,0]])
sphere = np.hstack((geometry[:,0].reshape(-1,1) + 30*loc_array[i,0]*radius_mean, geometry[:,1].reshape(-1,1) + 30*loc_array[i,1]*radius_mean, geometry[:,2].reshape(-1,1)+ 30*loc_array[i,2]*radius_mean))
if i == 0:
sample = sphere
else:
sample = np.vstack((sample, sphere))
I, g = get_spectrum(geometry, step, wavelength)
array = np.hstack((wavelength.reshape(-1,1), I.reshape(-1,1)))
return array
|
[
"numpy.random.randn",
"pyGDM2.materials.gold",
"pyGDM2.tools.calculate_spectrum",
"pyGDM2.structures.center_struct",
"pyGDM2.structures.struct",
"pyGDM2.core.simulation",
"numpy.max",
"pyGDM2.structures.sphere",
"numpy.array",
"pyGDM2.propagators.DyadsQuasistatic123",
"pyGDM2.fields.efield",
"pyGDM2.tools.get_possible_field_params_spectra",
"numpy.vstack"
] |
[((322, 338), 'pyGDM2.materials.gold', 'materials.gold', ([], {}), '()\n', (336, 338), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((352, 410), 'pyGDM2.structures.struct', 'structures.struct', (['step', 'geometry', 'material'], {'verbose': '(False)'}), '(step, geometry, material, verbose=False)\n', (369, 410), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((424, 456), 'pyGDM2.structures.center_struct', 'structures.center_struct', (['struct'], {}), '(struct)\n', (448, 456), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((553, 623), 'pyGDM2.fields.efield', 'fields.efield', (['field_generator'], {'wavelengths': 'wavelengths', 'kwargs': 'kwargs'}), '(field_generator, wavelengths=wavelengths, kwargs=kwargs)\n', (566, 623), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((660, 718), 'pyGDM2.propagators.DyadsQuasistatic123', 'propagators.DyadsQuasistatic123', ([], {'n1': '(1.33)', 'n2': '(1.33)', 'n3': '(1.33)'}), '(n1=1.33, n2=1.33, n3=1.33)\n', (691, 718), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((736, 774), 'pyGDM2.core.simulation', 'core.simulation', (['struct', 'efield', 'dyads'], {}), '(struct, efield, dyads)\n', (751, 774), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((825, 869), 'pyGDM2.tools.get_possible_field_params_spectra', 'tools.get_possible_field_params_spectra', (['sim'], {}), '(sim)\n', (864, 869), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((909, 980), 'pyGDM2.tools.calculate_spectrum', 'tools.calculate_spectrum', (['sim', 'field_kwargs[config_idx]', 'linear.extinct'], {}), '(sim, field_kwargs[config_idx], linear.extinct)\n', (933, 980), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((1035, 1056), 'numpy.max', 'np.max', (['spectrum.T[2]'], {}), '(spectrum.T[2])\n', (1041, 1056), True, 'import numpy as np\n'), ((2015, 2056), 'pyGDM2.structures.sphere', 'structures.sphere', (['step'], {'R': 'r', 'mesh': '"""cube"""'}), "(step, R=r, mesh='cube')\n", (2032, 2056), False, 'from pyGDM2 import structures, materials, core, linear, fields, propagators, tools\n'), ((2077, 2172), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 1], [0, 0, -1], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0]\n ]'], {}), '([[0, 0, 0], [0, 0, 1], [0, 0, -1], [1, 0, 0], [-1, 0, 0], [0, 1, 0\n ], [0, -1, 0]])\n', (2085, 2172), True, 'import numpy as np\n'), ((2441, 2468), 'numpy.vstack', 'np.vstack', (['(sample, sphere)'], {}), '((sample, sphere))\n', (2450, 2468), True, 'import numpy as np\n'), ((1913, 1931), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (1928, 1931), True, 'import numpy as np\n')]
|
import numpy as np
with open("data.txt") as f:
draws = np.array([int(d) for d in f.readline().split(",")])
boards = np.array([[[int(n) for n in r.split()] for r in b.split("\n")] for b in f.read()[1:].split("\n\n")])
def bingo(data: np.ndarray, fill: int):
"""
Returns horizontal (rows) and vertical (columns) bingo. TRUE if bingo. FALSE if not.
"""
transposed_data = np.transpose(data)
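    # a row (or, via the transpose, a column) wins once all five entries equal the fill marker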
return any(np.equal(data, [fill for _ in range(5)]).all(1)) or \
any(np.equal(transposed_data, [fill for _ in range(5)]).all(1))
def one(d_data: np.ndarray, b_data: np.ndarray) -> int:
"""
To guarantee victory against the giant squid, figure out which board will win first.
What will your final score be if you choose that board?
"""
# If number is drawn, replace with {fill}
fill = -1
for draw in d_data:
# Replace drawn number by -1
b_data = np.where(b_data == draw, fill, b_data)
for board in b_data:
if bingo(board, fill):
return np.sum(np.where(board == fill, 0, board)) * draw
return -1
def two(d_data: np.ndarray, b_data: np.ndarray) -> int:
"""
Figure out which board will win last. Once it wins, what would its final score be?
"""
# If number is drawn, replace with {fill}
fill = -1
# List of completed bingo boards
completed_idx = []
for draw in d_data:
# Replace drawn number by -1
b_data = np.where(b_data == draw, fill, b_data)
for board, i in zip(b_data, range(len(b_data))):
if bingo(board, fill) and i not in completed_idx:
completed_idx.append(i)
if len(completed_idx) == len(b_data):
return np.sum(np.where(board == fill, 0, board)) * draw
return -1
print(f"1. {one(draws, boards)}")
print(f"2. {two(draws, boards)}")
|
[
"numpy.where",
"numpy.transpose"
] |
[((396, 414), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (408, 414), True, 'import numpy as np\n'), ((924, 962), 'numpy.where', 'np.where', (['(b_data == draw)', 'fill', 'b_data'], {}), '(b_data == draw, fill, b_data)\n', (932, 962), True, 'import numpy as np\n'), ((1478, 1516), 'numpy.where', 'np.where', (['(b_data == draw)', 'fill', 'b_data'], {}), '(b_data == draw, fill, b_data)\n', (1486, 1516), True, 'import numpy as np\n'), ((1058, 1091), 'numpy.where', 'np.where', (['(board == fill)', '(0)', 'board'], {}), '(board == fill, 0, board)\n', (1066, 1091), True, 'import numpy as np\n'), ((1766, 1799), 'numpy.where', 'np.where', (['(board == fill)', '(0)', 'board'], {}), '(board == fill, 0, board)\n', (1774, 1799), True, 'import numpy as np\n')]
|
import igraph as ig
import numpy as np
from scipy.special import betaln
g = ig.Graph.Read_GML('karate.txt')
X = np.array(g.get_adjacency().data)
def irm(X, T, a, b, A, random_seed = 42):
N = len(X)
z = np.ones([N,1])
Z = []
np.random.seed(random_seed)
for t in range(T): # for T iterations
for n in range(N): # for each node n
#nn = index mask without currently sampled node n
nn = [_ for _ in range(N)]
nn.remove(n)
X_ = X[np.ix_(nn,nn)] #adjacency matrix without currently sampled node
# K = n. of components
K = len(z[0])
# Delete empty component if present
if K > 1:
idx = np.argwhere(np.sum(z[nn], 0) == 0)
z = np.delete(z, idx, axis=1)
K -= len(idx)
# m = n. of nodes in each component
m = np.sum(z[nn], 0)[np.newaxis]
M = np.tile(m, (K, 1))
# M1 = n. of links between components without current node
M1 = z[nn].T @ X_ @ z[nn] - np.diag(np.sum(X_@z[nn]*z[nn], 0) / 2)
# M0 = n. of non-links between components without current node
M0 = m.T@m - np.diag((m*(m+1) / 2).flatten()) - M1
# r = n. of links from current node to components
r = z[nn].T @ X[nn, n]
R = np.tile(r, (K, 1))
# lik matrix of current node sampled to each component
likelihood = betaln(M1+R+a, M0+M-R+b) - betaln(M1+a, M0+b)
# lik of current node to new component
likelihood_n = betaln(r+a, m-r+b) - betaln(a,b)
logLik = np.sum(np.concatenate([likelihood, likelihood_n]), 1)
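            # CRP-style prior: proportional to occupancy m for existing components and to A for a new component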
logPrior = np.log(np.append(m, A))
logPost = logPrior + logLik
# Convert from log probabilities, normalized to max
P = np.exp(logPost-max(logPost))
# Assignment through random draw fron unif(0,1), taking first value from prob. vector
draw = np.random.rand()
i = np.argwhere(draw<np.cumsum(P)/sum(P))[0]
# Assignment of current node to component i
z[n,:] = 0
if i == K: # If new component: add new column to partition matrix
z = np.hstack((z, np.zeros((N,1))))
z[n,i] = 1
# Delete empty component if present
idx = np.argwhere(np.all(z[..., :] == 0, axis=0))
z = np.delete(z, idx, axis=1)
Z.append(z)
print(z)
print(m)
return Z
T = 500
a = 1
b = 1
A = 10
Z = irm(X, T, a, b, A)
for i in range(1, 11):
print(np.sum(Z[-i], 0))
|
[
"numpy.sum",
"numpy.random.seed",
"numpy.concatenate",
"igraph.Graph.Read_GML",
"numpy.ix_",
"numpy.zeros",
"numpy.ones",
"scipy.special.betaln",
"numpy.append",
"numpy.cumsum",
"numpy.tile",
"numpy.random.rand",
"numpy.delete",
"numpy.all"
] |
[((77, 108), 'igraph.Graph.Read_GML', 'ig.Graph.Read_GML', (['"""karate.txt"""'], {}), "('karate.txt')\n", (94, 108), True, 'import igraph as ig\n'), ((212, 227), 'numpy.ones', 'np.ones', (['[N, 1]'], {}), '([N, 1])\n', (219, 227), True, 'import numpy as np\n'), ((243, 270), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (257, 270), True, 'import numpy as np\n'), ((2486, 2511), 'numpy.delete', 'np.delete', (['z', 'idx'], {'axis': '(1)'}), '(z, idx, axis=1)\n', (2495, 2511), True, 'import numpy as np\n'), ((2663, 2679), 'numpy.sum', 'np.sum', (['Z[-i]', '(0)'], {}), '(Z[-i], 0)\n', (2669, 2679), True, 'import numpy as np\n'), ((951, 969), 'numpy.tile', 'np.tile', (['m', '(K, 1)'], {}), '(m, (K, 1))\n', (958, 969), True, 'import numpy as np\n'), ((1401, 1419), 'numpy.tile', 'np.tile', (['r', '(K, 1)'], {}), '(r, (K, 1))\n', (1408, 1419), True, 'import numpy as np\n'), ((2063, 2079), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2077, 2079), True, 'import numpy as np\n'), ((2442, 2472), 'numpy.all', 'np.all', (['(z[..., :] == 0)'], {'axis': '(0)'}), '(z[..., :] == 0, axis=0)\n', (2448, 2472), True, 'import numpy as np\n'), ((509, 523), 'numpy.ix_', 'np.ix_', (['nn', 'nn'], {}), '(nn, nn)\n', (515, 523), True, 'import numpy as np\n'), ((784, 809), 'numpy.delete', 'np.delete', (['z', 'idx'], {'axis': '(1)'}), '(z, idx, axis=1)\n', (793, 809), True, 'import numpy as np\n'), ((906, 922), 'numpy.sum', 'np.sum', (['z[nn]', '(0)'], {}), '(z[nn], 0)\n', (912, 922), True, 'import numpy as np\n'), ((1513, 1547), 'scipy.special.betaln', 'betaln', (['(M1 + R + a)', '(M0 + M - R + b)'], {}), '(M1 + R + a, M0 + M - R + b)\n', (1519, 1547), False, 'from scipy.special import betaln\n'), ((1540, 1562), 'scipy.special.betaln', 'betaln', (['(M1 + a)', '(M0 + b)'], {}), '(M1 + a, M0 + b)\n', (1546, 1562), False, 'from scipy.special import betaln\n'), ((1637, 1661), 'scipy.special.betaln', 'betaln', (['(r + a)', '(m - r + b)'], {}), '(r + a, m - r + b)\n', (1643, 1661), False, 'from scipy.special import betaln\n'), ((1658, 1670), 'scipy.special.betaln', 'betaln', (['a', 'b'], {}), '(a, b)\n', (1664, 1670), False, 'from scipy.special import betaln\n'), ((1699, 1741), 'numpy.concatenate', 'np.concatenate', (['[likelihood, likelihood_n]'], {}), '([likelihood, likelihood_n])\n', (1713, 1741), True, 'import numpy as np\n'), ((1776, 1791), 'numpy.append', 'np.append', (['m', 'A'], {}), '(m, A)\n', (1785, 1791), True, 'import numpy as np\n'), ((741, 757), 'numpy.sum', 'np.sum', (['z[nn]', '(0)'], {}), '(z[nn], 0)\n', (747, 757), True, 'import numpy as np\n'), ((1103, 1132), 'numpy.sum', 'np.sum', (['(X_ @ z[nn] * z[nn])', '(0)'], {}), '(X_ @ z[nn] * z[nn], 0)\n', (1109, 1132), True, 'import numpy as np\n'), ((2329, 2345), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (2337, 2345), True, 'import numpy as np\n'), ((2113, 2125), 'numpy.cumsum', 'np.cumsum', (['P'], {}), '(P)\n', (2122, 2125), True, 'import numpy as np\n')]
|
import sys
sys.path.append('./model')
import argparse
import torch
import numpy as np
from model.model import NCNet
import torchvision.transforms as transforms
from dataloader import TrainLoader, ValLoader
from loss import WeakLoss
import torch.optim as optim
import json
import os
## Parameters
parser = argparse.ArgumentParser(description='Nc-Net Training')
## Input / Output
parser.add_argument('--outDir', type=str, help='output model directory')
parser.add_argument('--resumePth', type=str, help='resume model path')
parser.add_argument('--featExtractorPth', type=str, default = 'model/FeatureExtractor/resnet18.pth', help='feature extractor path')
parser.add_argument('--imgDir', type=str, default = 'data/pf-pascal/JPEGImages/', help='image Directory')
parser.add_argument('--trainCSV', type=str, default = 'data/pf-pascal/train.csv', help='train csv')
parser.add_argument('--valCSV', type=str, default = 'data/pf-pascal/val.csv', help='val csv')
parser.add_argument('--imgSize', type=int, default = 400, help='train image size')
## learning parameter
parser.add_argument('--lr', type=float, default=5e-4, help='learning rate')
parser.add_argument('--batchSize', type=int, default=16, help='batch size')
parser.add_argument('--nbEpoch', type=int, default=5, help='number of training epochs')
parser.add_argument('--neighConsKernel', nargs='+', type=int, default=[5,5,5], help='kernels sizes in neigh. cons.')
parser.add_argument('--neighConsChannel', nargs='+', type=int, default=[16,16,1], help='channels in neigh. cons')
parser.add_argument('--finetuneFeatExtractor', action='store_true', help='whether fine-tuning feature extractor')
parser.add_argument('--featExtractor', type=str, default='ResNet18Conv4', choices=['ResNet18Conv4', 'ResNet18Conv5'], help='feature extractor')
parser.add_argument('--cuda', action='store_true', help='GPU setting')
parser.add_argument('--softmaxMM', action='store_true', help='whether use softmax Mutual Matching')
args = parser.parse_args()
print(args)
## Set seed
torch.manual_seed(1)
if args.cuda:
torch.cuda.manual_seed(1)
else :
raise RuntimeError('CPU Version is not supported yet.')
np.random.seed(1)
## Initial Model
model = NCNet(kernel_sizes=args.neighConsKernel,
channels=args.neighConsChannel,
featExtractor = args.featExtractor,
featExtractorPth = args.featExtractorPth,
finetuneFeatExtractor = args.finetuneFeatExtractor,
softmaxMutualMatching = args.softmaxMM)
if not args.finetuneFeatExtractor:
msg = '\nIgnore the gradient for the parameters in the feature extractor'
print (msg)
for p in model.featExtractor.parameters():
p.requires_grad=False
if args.resumePth :
msg = '\nResume from {}'.format(args.resumePth)
model.load_state_dict(torch.load(args.resumePth))
if args.cuda :
model.cuda()
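# optimize only the parameters that still require gradients (the feature extractor may have been frozen above)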
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr)
## Train Val DataLoader
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # ImageNet normalization
trainTransform = transforms.Compose([transforms.RandomResizedCrop(args.imgSize),
transforms.ToTensor(),
normalize,])
valTransform = transforms.Compose([transforms.Resize(args.imgSize),
transforms.CenterCrop(args.imgSize),
transforms.ToTensor(),
normalize,])
trainLoader = TrainLoader(batchSize=args.batchSize,
pairCSV=args.trainCSV,
imgDir = args.imgDir,
trainTransform = trainTransform)
valLoader = ValLoader(batchSize=args.batchSize,
pairCSV=args.valCSV,
imgDir = args.imgDir,
valTransform = valTransform)
if not os.path.exists(args.outDir) :
os.mkdir(args.outDir)
# Train
bestValLoss = np.inf
history = {'TrainLoss' : [], 'ValLoss' : []}
outHistory = os.path.join(args.outDir, 'history.json')
outModel = os.path.join(args.outDir, 'netBest.pth')
for epoch in range(1, args.nbEpoch + 1) :
trainLoss = 0.
valLoss = 0.
for i, batch in enumerate(trainLoader) :
optimizer.zero_grad()
if args.cuda :
batch['source_image'] = batch['source_image'].cuda()
batch['target_image'] = batch['target_image'].cuda()
loss = WeakLoss(model, batch, args.softmaxMM)
loss.backward()
optimizer.step()
trainLoss += loss.item()
if i % 30 == 29 :
msg = '\nEpoch {:d}, Batch {:d}, Train Loss : {:.4f}'.format(epoch, i + 1, trainLoss / (i + 1))
print (msg)
## Validation
trainLoss = trainLoss / len(trainLoader)
with torch.no_grad() :
for i, batch in enumerate(valLoader) :
if args.cuda :
batch['source_image'] = batch['source_image'].cuda()
batch['target_image'] = batch['target_image'].cuda()
loss = WeakLoss(model, batch, args.softmaxMM)
valLoss += loss.item()
valLoss = valLoss / len(valLoader)
msg = 'Epoch {:d}, Train Loss : {:.4f}, Val Loss : {:.4f}'.format(epoch, trainLoss , valLoss)
with open(outHistory, 'w') as f :
json.dump(history, f)
print (msg)
if valLoss < bestValLoss :
msg = 'Validation Loss Improved from {:.4f} to {:.4f}'.format(bestValLoss, valLoss)
print (msg)
bestValLoss = valLoss
torch.save(model.state_dict(), outModel)
finalOut = os.path.join(args.outDir, 'netBest{:.3f}.pth'.format(bestValLoss))
cmd = 'mv {} {}'.format(outModel, finalOut)
os.system(cmd)
|
[
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"model.model.NCNet",
"loss.WeakLoss",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"sys.path.append",
"torch.load",
"os.path.exists",
"dataloader.ValLoader",
"torchvision.transforms.CenterCrop",
"json.dump",
"torch.manual_seed",
"dataloader.TrainLoader",
"torch.cuda.manual_seed",
"os.system",
"torchvision.transforms.Resize",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.ToTensor"
] |
[((12, 38), 'sys.path.append', 'sys.path.append', (['"""./model"""'], {}), "('./model')\n", (27, 38), False, 'import sys\n'), ((311, 365), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Nc-Net Training"""'}), "(description='Nc-Net Training')\n", (334, 365), False, 'import argparse\n'), ((2024, 2044), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (2041, 2044), False, 'import torch\n'), ((2157, 2174), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2171, 2174), True, 'import numpy as np\n'), ((2201, 2448), 'model.model.NCNet', 'NCNet', ([], {'kernel_sizes': 'args.neighConsKernel', 'channels': 'args.neighConsChannel', 'featExtractor': 'args.featExtractor', 'featExtractorPth': 'args.featExtractorPth', 'finetuneFeatExtractor': 'args.finetuneFeatExtractor', 'softmaxMutualMatching': 'args.softmaxMM'}), '(kernel_sizes=args.neighConsKernel, channels=args.neighConsChannel,\n featExtractor=args.featExtractor, featExtractorPth=args.\n featExtractorPth, finetuneFeatExtractor=args.finetuneFeatExtractor,\n softmaxMutualMatching=args.softmaxMM)\n', (2206, 2448), False, 'from model.model import NCNet\n'), ((3034, 3100), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (3054, 3100), True, 'import torchvision.transforms as transforms\n'), ((3589, 3705), 'dataloader.TrainLoader', 'TrainLoader', ([], {'batchSize': 'args.batchSize', 'pairCSV': 'args.trainCSV', 'imgDir': 'args.imgDir', 'trainTransform': 'trainTransform'}), '(batchSize=args.batchSize, pairCSV=args.trainCSV, imgDir=args.\n imgDir, trainTransform=trainTransform)\n', (3600, 3705), False, 'from dataloader import TrainLoader, ValLoader\n'), ((3825, 3932), 'dataloader.ValLoader', 'ValLoader', ([], {'batchSize': 'args.batchSize', 'pairCSV': 'args.valCSV', 'imgDir': 'args.imgDir', 'valTransform': 'valTransform'}), '(batchSize=args.batchSize, pairCSV=args.valCSV, imgDir=args.imgDir,\n valTransform=valTransform)\n', (3834, 3932), False, 'from dataloader import TrainLoader, ValLoader\n'), ((4177, 4218), 'os.path.join', 'os.path.join', (['args.outDir', '"""history.json"""'], {}), "(args.outDir, 'history.json')\n", (4189, 4218), False, 'import os\n'), ((4230, 4270), 'os.path.join', 'os.path.join', (['args.outDir', '"""netBest.pth"""'], {}), "(args.outDir, 'netBest.pth')\n", (4242, 4270), False, 'import os\n'), ((5914, 5928), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (5923, 5928), False, 'import os\n'), ((2063, 2088), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1)'], {}), '(1)\n', (2085, 2088), False, 'import torch\n'), ((4023, 4050), 'os.path.exists', 'os.path.exists', (['args.outDir'], {}), '(args.outDir)\n', (4037, 4050), False, 'import os\n'), ((4058, 4079), 'os.mkdir', 'os.mkdir', (['args.outDir'], {}), '(args.outDir)\n', (4066, 4079), False, 'import os\n'), ((2836, 2862), 'torch.load', 'torch.load', (['args.resumePth'], {}), '(args.resumePth)\n', (2846, 2862), False, 'import torch\n'), ((3163, 3205), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['args.imgSize'], {}), '(args.imgSize)\n', (3191, 3205), True, 'import torchvision.transforms as transforms\n'), ((3244, 3265), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3263, 3265), True, 'import torchvision.transforms as transforms\n'), ((3354, 3385), 'torchvision.transforms.Resize', 'transforms.Resize', (['args.imgSize'], {}), 
'(args.imgSize)\n', (3371, 3385), True, 'import torchvision.transforms as transforms\n'), ((3425, 3460), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['args.imgSize'], {}), '(args.imgSize)\n', (3446, 3460), True, 'import torchvision.transforms as transforms\n'), ((3500, 3521), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3519, 3521), True, 'import torchvision.transforms as transforms\n'), ((4618, 4656), 'loss.WeakLoss', 'WeakLoss', (['model', 'batch', 'args.softmaxMM'], {}), '(model, batch, args.softmaxMM)\n', (4626, 4656), False, 'from loss import WeakLoss\n'), ((5007, 5022), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5020, 5022), False, 'import torch\n'), ((5530, 5551), 'json.dump', 'json.dump', (['history', 'f'], {}), '(history, f)\n', (5539, 5551), False, 'import json\n'), ((5264, 5302), 'loss.WeakLoss', 'WeakLoss', (['model', 'batch', 'args.softmaxMM'], {}), '(model, batch, args.softmaxMM)\n', (5272, 5302), False, 'from loss import WeakLoss\n')]
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
import torchvision.transforms as T
import random
import numpy as np
import PIL
from .transforms import RandomErasing
class AddGaussianNoise(object):
def __call__(self, img):
std = random.uniform(0, 1.0)
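        # leave roughly half of the images unchanged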
if std > 0.5:
return img
# Convert to ndarray
img = np.asarray(img).copy()
noise = np.random.normal(size=img.shape, scale=std).astype(np.uint8)
img += noise
img = np.clip(img, 0, 255)
# Convert back to PIL image
img = PIL.Image.fromarray(img)
return img
def build_transforms(cfg, is_train=True):
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
print('++++ hard train')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_DOWN),
T.Resize(cfg.INPUT.SIZE_UP),
T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
T.Pad(padding=cfg.INPUT.PADDING),
T.RandomRotation(cfg.INPUT.DEGREE),
T.ColorJitter(0.6,0.9,0.7),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
#AddGaussianNoise(),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
print('++++ init test')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
def build_transforms2(cfg, is_train=True):
#print('++++ easy')
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
print('++++ easy train')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TRAIN),
T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
#T.Pad(cfg.INPUT.PADDING),
T.ColorJitter(0.4,0.6,0.7),
T.RandomRotation(cfg.INPUT.DEGREE),
#T.ColorJitter(0.4,0.6,0.7),
T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
print('++++ easy test')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
def build_transforms3(cfg, is_train=True):
normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
if is_train:
print('++++ init train')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TRAIN),
T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
T.Pad(cfg.INPUT.PADDING),
T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN)
])
else:
print('++++ init test')
transform = T.Compose([
T.Resize(cfg.INPUT.SIZE_TEST),
T.ToTensor(),
normalize_transform
])
return transform
|
[
"torchvision.transforms.ColorJitter",
"torchvision.transforms.RandomHorizontalFlip",
"random.uniform",
"torchvision.transforms.RandomRotation",
"numpy.asarray",
"numpy.clip",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Pad",
"numpy.random.normal",
"PIL.Image.fromarray",
"torchvision.transforms.Normalize",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Resize"
] |
[((691, 754), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': 'cfg.INPUT.PIXEL_MEAN', 'std': 'cfg.INPUT.PIXEL_STD'}), '(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)\n', (702, 754), True, 'import torchvision.transforms as T\n'), ((1655, 1718), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': 'cfg.INPUT.PIXEL_MEAN', 'std': 'cfg.INPUT.PIXEL_STD'}), '(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)\n', (1666, 1718), True, 'import torchvision.transforms as T\n'), ((2593, 2656), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': 'cfg.INPUT.PIXEL_MEAN', 'std': 'cfg.INPUT.PIXEL_STD'}), '(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)\n', (2604, 2656), True, 'import torchvision.transforms as T\n'), ((259, 281), 'random.uniform', 'random.uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (273, 281), False, 'import random\n'), ((506, 526), 'numpy.clip', 'np.clip', (['img', '(0)', '(255)'], {}), '(img, 0, 255)\n', (513, 526), True, 'import numpy as np\n'), ((578, 602), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['img'], {}), '(img)\n', (597, 602), False, 'import PIL\n'), ((371, 386), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (381, 386), True, 'import numpy as np\n'), ((410, 453), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'img.shape', 'scale': 'std'}), '(size=img.shape, scale=std)\n', (426, 453), True, 'import numpy as np\n'), ((853, 882), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_DOWN'], {}), '(cfg.INPUT.SIZE_DOWN)\n', (861, 882), True, 'import torchvision.transforms as T\n'), ((896, 923), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_UP'], {}), '(cfg.INPUT.SIZE_UP)\n', (904, 923), True, 'import torchvision.transforms as T\n'), ((937, 977), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': 'cfg.INPUT.PROB'}), '(p=cfg.INPUT.PROB)\n', (959, 977), True, 'import torchvision.transforms as T\n'), ((991, 1023), 'torchvision.transforms.Pad', 'T.Pad', ([], {'padding': 'cfg.INPUT.PADDING'}), '(padding=cfg.INPUT.PADDING)\n', (996, 1023), True, 'import torchvision.transforms as T\n'), ((1037, 1071), 'torchvision.transforms.RandomRotation', 'T.RandomRotation', (['cfg.INPUT.DEGREE'], {}), '(cfg.INPUT.DEGREE)\n', (1053, 1071), True, 'import torchvision.transforms as T\n'), ((1085, 1113), 'torchvision.transforms.ColorJitter', 'T.ColorJitter', (['(0.6)', '(0.9)', '(0.7)'], {}), '(0.6, 0.9, 0.7)\n', (1098, 1113), True, 'import torchvision.transforms as T\n'), ((1125, 1159), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (1137, 1159), True, 'import torchvision.transforms as T\n'), ((1206, 1218), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1216, 1218), True, 'import torchvision.transforms as T\n'), ((1438, 1467), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TEST'], {}), '(cfg.INPUT.SIZE_TEST)\n', (1446, 1467), True, 'import torchvision.transforms as T\n'), ((1481, 1493), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1491, 1493), True, 'import torchvision.transforms as T\n'), ((1817, 1847), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (1825, 1847), True, 'import torchvision.transforms as T\n'), ((1861, 1901), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': 'cfg.INPUT.PROB'}), '(p=cfg.INPUT.PROB)\n', (1883, 1901), True, 'import 
torchvision.transforms as T\n'), ((1954, 1982), 'torchvision.transforms.ColorJitter', 'T.ColorJitter', (['(0.4)', '(0.6)', '(0.7)'], {}), '(0.4, 0.6, 0.7)\n', (1967, 1982), True, 'import torchvision.transforms as T\n'), ((1994, 2028), 'torchvision.transforms.RandomRotation', 'T.RandomRotation', (['cfg.INPUT.DEGREE'], {}), '(cfg.INPUT.DEGREE)\n', (2010, 2028), True, 'import torchvision.transforms as T\n'), ((2083, 2107), 'torchvision.transforms.Pad', 'T.Pad', (['cfg.INPUT.PADDING'], {}), '(cfg.INPUT.PADDING)\n', (2088, 2107), True, 'import torchvision.transforms as T\n'), ((2121, 2155), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (2133, 2155), True, 'import torchvision.transforms as T\n'), ((2169, 2181), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2179, 2181), True, 'import torchvision.transforms as T\n'), ((2401, 2430), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TEST'], {}), '(cfg.INPUT.SIZE_TEST)\n', (2409, 2430), True, 'import torchvision.transforms as T\n'), ((2444, 2456), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2454, 2456), True, 'import torchvision.transforms as T\n'), ((2755, 2785), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (2763, 2785), True, 'import torchvision.transforms as T\n'), ((2799, 2839), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': 'cfg.INPUT.PROB'}), '(p=cfg.INPUT.PROB)\n', (2821, 2839), True, 'import torchvision.transforms as T\n'), ((2853, 2877), 'torchvision.transforms.Pad', 'T.Pad', (['cfg.INPUT.PADDING'], {}), '(cfg.INPUT.PADDING)\n', (2858, 2877), True, 'import torchvision.transforms as T\n'), ((2891, 2925), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (2903, 2925), True, 'import torchvision.transforms as T\n'), ((2939, 2951), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2949, 2951), True, 'import torchvision.transforms as T\n'), ((3171, 3200), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TEST'], {}), '(cfg.INPUT.SIZE_TEST)\n', (3179, 3200), True, 'import torchvision.transforms as T\n'), ((3214, 3226), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (3224, 3226), True, 'import torchvision.transforms as T\n')]
|
import h5py
from signal_filter.fft import LowPassFilter
from signal_filter.mpi_signal_filter import SignalFilter
h5_path = 'giv_raw.h5'
h5_f = h5py.File(h5_path, mode='r+')
h5_grp = h5_f['Measurement_000/Channel_000']
h5_main = h5_grp['Raw_Data']
samp_rate = h5_grp.attrs['IO_samp_rate_[Hz]']
num_spectral_pts = h5_main.shape[1]
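# a single 10 kHz low-pass filter applied along the spectral dimension of the raw data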
frequency_filters = [LowPassFilter(num_spectral_pts, samp_rate, 10E+3)]
noise_tol = 1E-6
sig_filt = SignalFilter(h5_main, frequency_filters=frequency_filters,
noise_threshold=noise_tol, write_filtered=True,
write_condensed=False, num_pix=1, verbose=True)
h5_filt_grp = sig_filt.compute()
# VERIFICATION here:
row_ind = 20
actual_line = h5_filt_grp['Filtered_Data'][row_ind]
h5_ref_path = '/home/syz/giv/pzt_nanocap_6_just_translation_filt_resh_copy.h5'
h5_ref_file = h5py.File(h5_ref_path, mode='r')
h5_ref_grp = h5_ref_file[h5_filt_grp.name]
ref_line = h5_ref_grp['Filtered_Data'][row_ind]
import numpy as np
print('Actual line close to reference:')
print(np.max(np.abs(actual_line - ref_line)))
print(np.allclose(actual_line, ref_line))
"""
single_AO = h5_grp['Spectroscopic_Values'][0, :500]
import numpy as np
row_ind = 20
# read data for a specific scan line
raw_line_resp = h5_main[row_ind]
# break this up into pixels:
raw_line_mat = np.reshape(raw_line_resp, (-1, single_AO.size))
filt_line_resp = h5_filt_grp['Filtered_Data'][row_ind]
filt_line_mat = np.reshape(filt_line_resp, (-1, single_AO.size))
import pyUSID as usid
fig, axes = usid.plot_utils.plot_curves(single_AO, [raw_line_mat, filt_line_mat], use_rainbow_plots=False, x_label='Bias (V)',
y_label='Current (nA)', subtitle_prefix='Pixel', title=None, num_plots=9)
fig.savefig('result.png', format='png', )
savefig(os.path.join(other_figures_folder, file_name + '.png'), format='png', dpi=300)
"""
h5_f.close()
|
[
"h5py.File",
"numpy.abs",
"numpy.allclose",
"signal_filter.fft.LowPassFilter",
"signal_filter.mpi_signal_filter.SignalFilter"
] |
[((145, 174), 'h5py.File', 'h5py.File', (['h5_path'], {'mode': '"""r+"""'}), "(h5_path, mode='r+')\n", (154, 174), False, 'import h5py\n'), ((434, 597), 'signal_filter.mpi_signal_filter.SignalFilter', 'SignalFilter', (['h5_main'], {'frequency_filters': 'frequency_filters', 'noise_threshold': 'noise_tol', 'write_filtered': '(True)', 'write_condensed': '(False)', 'num_pix': '(1)', 'verbose': '(True)'}), '(h5_main, frequency_filters=frequency_filters, noise_threshold=\n noise_tol, write_filtered=True, write_condensed=False, num_pix=1,\n verbose=True)\n', (446, 597), False, 'from signal_filter.mpi_signal_filter import SignalFilter\n'), ((857, 889), 'h5py.File', 'h5py.File', (['h5_ref_path'], {'mode': '"""r"""'}), "(h5_ref_path, mode='r')\n", (866, 889), False, 'import h5py\n'), ((354, 405), 'signal_filter.fft.LowPassFilter', 'LowPassFilter', (['num_spectral_pts', 'samp_rate', '(10000.0)'], {}), '(num_spectral_pts, samp_rate, 10000.0)\n', (367, 405), False, 'from signal_filter.fft import LowPassFilter\n'), ((1094, 1128), 'numpy.allclose', 'np.allclose', (['actual_line', 'ref_line'], {}), '(actual_line, ref_line)\n', (1105, 1128), True, 'import numpy as np\n'), ((1055, 1085), 'numpy.abs', 'np.abs', (['(actual_line - ref_line)'], {}), '(actual_line - ref_line)\n', (1061, 1085), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import os, sys, subprocess
from os.path import basename,dirname
import h5py
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import gdal
from gdalconst import *
from osgeo import ogr, osr
from datetime import datetime, date
def createImgSCISAT(fileAbsPath):
# read info from netcdf
ncfile = Dataset(fileAbsPath, 'r')
latitude = ncfile.groups['ACE-FTS-v2.2'].latitude
longitude = ncfile.groups['ACE-FTS-v2.2'].longitude
datestart = datetime.strptime(ncfile.groups['ACE-FTS-v2.2'].start_time,'%Y-%m-%d %H:%M:%S+00')
dateend = datetime.strptime(ncfile.groups['ACE-FTS-v2.2'].end_time,'%Y-%m-%d %H:%M:%S+00')
ozone = ncfile.groups['ACE-FTS-v2.2'].groups['Data-L2_1km_grid'].variables['O3'][:]
heightLevels = ncfile.groups['ACE-FTS-v2.2'].groups['Data-L2_1km_grid'].variables['z'][:]
numBand = len(ozone)
ncfile.close()
#common vars
no_value = -9999
minValue = ma.min(ozone)
maxValue = ma.max(ozone)
ma.set_fill_value(ozone, no_value)
ozone = ozone.filled()
#ma.set_fill_value(heightLevels, no_value)
#heightLevels = heightLevels.filled()
sizeX = 1
sizeY = 1
dataType = gdal.GDT_Float32
resolution = 1.0 # in degree
driver = gdal.GetDriverByName('GTiff' )
outFile = 'ACE-FTS_L2_ozone_'+datestart.strftime('%Y%m%d.%H%M%S')+'.tif'
#create tiff
dst_ds = driver.Create(outFile, sizeX, sizeY, numBand, dataType)
for i in range(numBand):
dst_ds.GetRasterBand(i+1).WriteArray(np.expand_dims(np.expand_dims(ozone[i],axis=0),axis=0))
# The computed stat produces this warning
# Warning 1: Lost metadata writing to GeoTIFF ... too large to fit in tag.
# An additional *.aux.xml is added
#if ozone[i] != no_value:
# dst_ds.GetRasterBand(i+1).ComputeStatistics(False)
dst_ds.GetRasterBand(i+1).SetNoDataValue(no_value)
    #set geotransform matrix
top_left_x = longitude - (resolution / 2)
w_e_pixel_resolution = resolution
top_left_y = latitude - (resolution / 2)
n_s_pixel_resolution = - resolution
coord = [top_left_x, w_e_pixel_resolution, 0, top_left_y,0, n_s_pixel_resolution]
dst_ds.SetGeoTransform(coord)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
dst_ds.SetProjection(srs.ExportToWkt())
#set metadata
dst_ds.SetMetadataItem('GLOBAL_MAX',str(maxValue))
dst_ds.SetMetadataItem('GLOBAL_MIN',str(minValue))
dst_ds.SetMetadataItem('TIME_END', dateend.strftime('%Y-%m-%dT%H:%M:%SZ'))
dst_ds.SetMetadataItem('TIME_START', datestart.strftime('%Y-%m-%dT%H:%M:%SZ'))
dst_ds.SetMetadataItem('VERTICAL_LEVELS_NUMBER', str(len(heightLevels)))
dst_ds.SetMetadataItem('VERTICAL_LEVELS', ','.join(str(x) for x in heightLevels))
dst_ds =None
return [outFile]
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('\nUsage: %s L2_SCISAT_file \n' % sys.argv[0] )
else:
if not os.path.exists(sys.argv[1]):
sys.exit('\nERROR: File %s was not found!\n' % sys.argv[1])
fileAbsPath = sys.argv[1]
outFileName = createImgSCISAT(fileAbsPath)
exit(0)
# else:
# Module is imported from another module
|
[
"netCDF4.Dataset",
"numpy.ma.min",
"gdal.GetDriverByName",
"numpy.ma.set_fill_value",
"os.path.exists",
"numpy.expand_dims",
"datetime.datetime.strptime",
"sys.exit",
"osgeo.osr.SpatialReference",
"numpy.ma.max"
] |
[((343, 368), 'netCDF4.Dataset', 'Dataset', (['fileAbsPath', '"""r"""'], {}), "(fileAbsPath, 'r')\n", (350, 368), False, 'from netCDF4 import Dataset\n'), ((495, 582), 'datetime.datetime.strptime', 'datetime.strptime', (["ncfile.groups['ACE-FTS-v2.2'].start_time", '"""%Y-%m-%d %H:%M:%S+00"""'], {}), "(ncfile.groups['ACE-FTS-v2.2'].start_time,\n '%Y-%m-%d %H:%M:%S+00')\n", (512, 582), False, 'from datetime import datetime, date\n'), ((592, 677), 'datetime.datetime.strptime', 'datetime.strptime', (["ncfile.groups['ACE-FTS-v2.2'].end_time", '"""%Y-%m-%d %H:%M:%S+00"""'], {}), "(ncfile.groups['ACE-FTS-v2.2'].end_time,\n '%Y-%m-%d %H:%M:%S+00')\n", (609, 677), False, 'from datetime import datetime, date\n'), ((957, 970), 'numpy.ma.min', 'ma.min', (['ozone'], {}), '(ozone)\n', (963, 970), True, 'import numpy.ma as ma\n'), ((986, 999), 'numpy.ma.max', 'ma.max', (['ozone'], {}), '(ozone)\n', (992, 999), True, 'import numpy.ma as ma\n'), ((1004, 1038), 'numpy.ma.set_fill_value', 'ma.set_fill_value', (['ozone', 'no_value'], {}), '(ozone, no_value)\n', (1021, 1038), True, 'import numpy.ma as ma\n'), ((1265, 1294), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (1285, 1294), False, 'import gdal\n'), ((2258, 2280), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (2278, 2280), False, 'from osgeo import ogr, osr\n'), ((2959, 3016), 'sys.exit', 'sys.exit', (['("""\nUsage: %s L2_SCISAT_file \n""" % sys.argv[0])'], {}), '("""\nUsage: %s L2_SCISAT_file \n""" % sys.argv[0])\n', (2967, 3016), False, 'import os, sys, subprocess\n'), ((3041, 3068), 'os.path.exists', 'os.path.exists', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (3055, 3068), False, 'import os, sys, subprocess\n'), ((3082, 3143), 'sys.exit', 'sys.exit', (['("""\nERROR: File %s was not found!\n""" % sys.argv[1])'], {}), '("""\nERROR: File %s was not found!\n""" % sys.argv[1])\n', (3090, 3143), False, 'import os, sys, subprocess\n'), ((1554, 1586), 'numpy.expand_dims', 'np.expand_dims', (['ozone[i]'], {'axis': '(0)'}), '(ozone[i], axis=0)\n', (1568, 1586), True, 'import numpy as np\n')]
|
"""Class for converter."""
import numpy as np
import math
import cmath
import scipy
import logging
from scipy import signal
from scipy.integrate import odeint,ode
#from converter_utilities import plot_signal, plot_FFT
import converter_utilities
import config
from models import InverterModels
class PowerElectronicConverter:
"""
Converter base class.
Attributes:
count (int): Number of converter objects.
"""
count = 0 #Object count
def __init__(self,model_type):
"""Creates an instance of `Converter`.
Args:
fsw (float): Switching frequency in Hz.
Raises:
ValueError: If parameters corresponding to `Sinverter_rated` are not available.
"""
PowerElectronicConverter.count = PowerElectronicConverter.count+1 #Increment count to keep track of number of converter model instances
self.name = 'converter_'+str(PowerElectronicConverter.count) #Generate a name for the instance
self.model_type = model_type
"""
if self.model_type is 'switching':
assert self.signal_type is 'square_wave' or self.signal_type is 'sinePWM', 'Switching model needs square or sine PWM as switching signal!'
if self.model_type is 'average':
assert self.signal_type is 'duty_cycle', 'Average model needs duty_cycle as switching signal!'
"""
def check_model_type(self,model_type):
"""Check if model type is valid."""
assert model_type in self.model_types, f'{model_type} is not a valid model type!'
def show_spec(self):
"""Print the specs."""
print('Model type:{}'.format(self.model_type))
print('Switching signal type:{}'.format(self.signal_type))
def calc_primary(self,signal):
"""Calculate the primary switch."""
assert isinstance(signal,bool), 'Switching signal must be boolean.'
Sprimary = int(signal)
return Sprimary
def calc_complimentary(self,signal):
"""Calculate the complimentary."""
assert isinstance(signal,bool), 'Switching signal must be boolean.'
Scomplimentary = int(not signal)
return Scomplimentary
def calc_average(self,m):
"""Calculate average voltage."""
return Vdc
#Current controller dynamics
class PowerElectronicInverter(PowerElectronicConverter,InverterModels):
"""
Inverter class.
Attributes:
():
"""
Rf = 0.01
Lf = 1.0e-3
Rload = 1.0
inverter_types = ['single_phase_half_bridge','single_phase_full_bridge',
'three_phase_full_bridge']
model_types = ['EMT_switching','EMT_average','dynamic_phasor']
def __init__(self,Vdc,model_type = 'EMT_average',inverter_type='single_phase_half_bridge'):
"""Creates an instance of `Converter`.
Args:
Vdc (float): DC link voltage.
Raises:
ValueError: To be added.
"""
self.check_model_type(model_type)
super().__init__(model_type) #Initialize converter class (base class)
self.update_Vdc(Vdc)
self.inverter_type =inverter_type
@property #Decorator used for auto updating
def y(self):
"""List of initial states"""
return [self.ia, 0.0]
def update_Vdc(self,Vdc):
"""Update DC link voltage."""
self.Vdc = Vdc
"""
def control_signal_calc(self,signals,t):
Calculate control signal.
if self.model_type is 'EMT_switching':
signals = self.switching_signal_calc(signals,t)
control_signal = signals['switching']
elif self.model_type is 'EMT_average':
signals = self.average_signal_calc(signals,t)
control_signal = signals['modulating']
elif self.model_type is 'dynamicphasor':
pass
return control_signal
"""
def setup_model(self):
"""Initialize mode."""
self.initialize_model()
self.vt_calc = self.select_vt_model()
self.vpcc_calc = self.select_vpcc_model()
self.ODE_model = self.select_ODE_model()
#self.control_signal_calc = self.select_control_signal()
def select_control_signal(self):
"""Select the control signal suitable for the problem."""
if self.model_type is 'EMT_switching':
if self.inverter_type == 'single_phase_half_bridge':
control_signal = self.switching_signal_single_phase
elif self.inverter_type == 'single_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
elif self.model_type is 'EMT_average':
if self.inverter_type == 'single_phase_half_bridge':
control_signal = self.modulating_signal_single_phase
elif self.inverter_type == 'single_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
elif self.model_type is 'dynamic_phasor':
if self.inverter_type == 'single_phase_half_bridge':
control_signal = self.phasor_signal_single_phase
elif self.inverter_type == 'single_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
return control_signal
def select_vt_model(self):
"""Get the terminal voltage model."""
if self.model_type == 'EMT_switching':
if self.inverter_type == 'single_phase_half_bridge':
vt_model = self.single_phase_half_bridge_switching
elif self.inverter_type == 'single_phase_full_bridge':
vt_model = self.single_phase_full_bridge_switching
elif self.inverter_type == 'three_phase_full_bridge':
vt_model = self.three_phase_full_bridge_switching
else:
print(f'{self.inverter_type} not found for model type {self.model_type}!')
elif self.model_type == 'EMT_average':
if self.inverter_type == 'single_phase_half_bridge':
vt_model = self.single_phase_half_bridge_average
elif self.inverter_type == 'single_phase_full_bridge':
vt_model = self.single_phase_full_bridge_average
elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
else:
print(f'{self.inverter_type} not found for model type {self.model_type}!')
elif self.model_type == 'dynamicphasor':
if self.inverter_type == 'single_phase_half_bridge':
vt_model = self.single_phase_half_bridge_phasor
elif self.inverter_type == 'single_phase_full_bridge':
vt_model = self.single_phase_full_bridge_phasor
elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
else:
print(f'{self.inverter_type} not found for model type {self.model_type}!')
print(type(vt_model))
return vt_model
def select_vpcc_model(self,grid=None):
"""Get the PCC voltage model."""
if not grid:
vpcc_model = self.v_load_model()
return vpcc_model
def select_ODE_model(self):
"""Select ODE model."""
if self.model_type is 'EMT_switching' or self.model_type is 'EMT_average':
if self.inverter_type is 'single_phase_half_bridge' or self.inverter_type is 'single_phase_full_bridge':
ODE_model = self.ODE_model_single_phase_EMT
elif self.inverter_type is 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
        elif self.model_type == 'dynamic_phasor':
            if self.inverter_type == 'single_phase_half_bridge' or self.inverter_type == 'single_phase_full_bridge':
ODE_model = self.ODE_model_single_phase_dynamicphasor
            elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
return ODE_model
def initialize_model(self):
"""Initialize mode."""
if self.model_type is 'EMT_switching' or self.model_type is 'EMT_average':
if self.inverter_type is 'single_phase_half_bridge' or self.inverter_type is 'single_phase_full_bridge':
self.ia = 0.0
            elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError
        elif self.model_type == 'dynamic_phasor':
            if self.inverter_type == 'single_phase_half_bridge' or self.inverter_type == 'single_phase_full_bridge':
self.iaR = 0.0
self.iaI = 0.0
            if self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError
"""
def vta_calc(self,Vdc,control_signal):
Calculate inverter terminal voltage.
if self.model_type is 'switching':
vta = self.half_bridge_switching(Vdc,control_signal)
elif self.model_type is 'average':
vta = self.half_bridge_average(Vdc,control_signal)
return vta
"""
def v_load_model(self):
"""Calculate voltage across load at PCC."""
return self.Rload*self.ia
def ODE_model_switching(self,y,t):
"""ODE model of inverter branch."""
self.ia,dummy = y # unpack current values of y
Vdc = 100.0 #Get DC link voltage
switching_signal = self.control_signal_calc(t)
self.vta = self.half_bridge_switching(Vdc,switching_signal)
self.va = self.PCC_voltage_calc(self.ia,t)
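        # Branch dynamics below are the KVL of the output filter (an assumption
        # based on the symbol names): Lf*dia/dt = vta - Rf*ia - va, i.e. the
        # terminal voltage drives the filter inductor current against the
        # resistive drop and the PCC voltage.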
dia = (1/self.Lf)*(-self.Rf*self.ia -self.va + self.vta)
result = [dia,dummy]
return np.array(result)
def ODE_model_average(self,y,t):
"""ODE model of inverter branch."""
self.ia,dummy = y # unpack current values of y
Vdc = 100.0 #Get DC link voltage
modulating_signal = self.control_signal_calc(t)
self.vta = self.half_bridge_average(Vdc,modulating_signal)
self.va = self.PCC_voltage_calc(self.ia,t)
dia = (1/self.Lf)*(-self.Rf*self.ia -self.va + self.vta)
result = [dia,dummy]
return np.array(result)
def power_calc(self,v,i):
"""Calcuate instantaneous power."""
return v*i
def show_states(self):
"""Show states."""
print('Inverter states:{}'.format(self.y))
|
[
"numpy.array"
] |
[((10519, 10535), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (10527, 10535), True, 'import numpy as np\n'), ((11081, 11097), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (11089, 11097), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 12:48:05 2020
@author: sven
"""
import numpy as np
def nearfield(f,c,theta):
"""
Compute the nearfield
Parameters
----------
f : numeric
Transducer Frequency in kHz [kHz].
c : numeric
Ambient sound speed [m/s].
theta : numeric
3dB angle or beam width in degrees [degrees].
Returns
-------
Rnf : numeric
Range of the nearfield for the given conditions in meters [m].
"""
lmbd = c/ ( f * 1000)
k = 2* np.pi / lmbd
a = 1.6 / (k * np.sin((theta * np.pi/180) / 2))
Rnf = (2*a)**2 / lmbd
return Rnf
def eba(f,c,theta):
"""
Compute the equivalent beam angle for a circular transducer.
Parameters
----------
f : numeric
Transducer Frequency in kHz [kHz].
c : numeric
Ambient sound speed [m/s].
theta : numeric
3dB angle or beam width in degrees [degrees].
Returns
-------
EBA : numeric
equivalent beam angle in dB [dB].
"""
lmbd = c/ ( f * 1000)
k = 2* np.pi / lmbd
a = 1.6 / (k * np.sin((theta * np.pi/180) / 2))
    EBA = 10 * np.log10( 5.78 / ( ( k * a ) ** 2))  # 10*log10 of the solid angle in steradians, i.e. the equivalent beam angle in dB
return EBA
def vol_samp(f,c,theta,tau,R,start=0):
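    """
    Rough sampled-volume estimate (description inferred from the code, not from
    an original docstring): the equivalent-beam-angle solid angle times range
    squared times the pulse extent c*tau/2, evaluated between ranges ``start``
    and ``start + R``.
    """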
f = f*1000
Rtot = R+start
Vtot = 10**(eba(f,c,theta)/10) * Rtot**2 * c * tau / 2
V0 = 10**(eba(f,c,theta)/10) * start**2 * c * tau / 2
V = Vtot - V0
return V
def footprint_radius(theta,R):
return R * np.tan(theta * np.pi / 180 / 2)
def footprint_area(theta, R):
return np.pi * footprint_radius(theta,R)**2
'''
vol_samp(f=200,c=1450,theta=9.8,tau=6/1000,R=10)
vol_samp(f=1000,c=1450,theta=4,tau=6/1000,R=10)
#Zonar
nearfield(200,1480,9.8)
nearfield(1000,1480,4)
c=1450;f=200000
0.045**2/(c/f)
c=1450;f=1000000
0.022**2/(c/f)
'''
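# Rough worked example (illustrative numbers only): nearfield(200, 1480, 9.8)
# gives lambda ~ 7.4 mm, a ~ 2.2 cm and hence Rnf = (2a)**2 / lambda ~ 0.26 m.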
|
[
"numpy.sin",
"numpy.log10",
"numpy.tan"
] |
[((1024, 1053), 'numpy.log10', 'np.log10', (['(5.78 / (k * a) ** 2)'], {}), '(5.78 / (k * a) ** 2)\n', (1032, 1053), True, 'import numpy as np\n'), ((1356, 1387), 'numpy.tan', 'np.tan', (['(theta * np.pi / 180 / 2)'], {}), '(theta * np.pi / 180 / 2)\n', (1362, 1387), True, 'import numpy as np\n'), ((504, 535), 'numpy.sin', 'np.sin', (['(theta * np.pi / 180 / 2)'], {}), '(theta * np.pi / 180 / 2)\n', (510, 535), True, 'import numpy as np\n'), ((979, 1010), 'numpy.sin', 'np.sin', (['(theta * np.pi / 180 / 2)'], {}), '(theta * np.pi / 180 / 2)\n', (985, 1010), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
class regout(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
stat_names=['coeff', 'se', 't', 'p>t', 'CI_low', 'CI_high']
var_names=['mpg', 'length', '_cons']
tsls_std = regout(
summary=pd.DataFrame(np.array([
[-1319.865169393102,
1906.786380881755,
-.6921935160784805,
.4910734473693195,
-5121.889227450638,
2482.158888664433,
],
[-217.1947537663291,
420.1260089670161,
-.5169752624941175,
.6067801835089433,
-1054.902223005562,
620.5127154729038,
],
[75092.75604853875,
119511.8053379244,
.6283291917163411,
.5318043826192644,
-163207.0155842729,
313392.5276813505,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[3635834.30231614,
799471.1768877679,
-227680006.992276,
],
[799471.1768877679,
176505.8634105533,
-50197751.5841309,
],
[-227680006.992276,
-50197751.5841309,
14283071615.12995,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-1031817172.794085,
tss=np.nan,
rss=1666882568.915706,
kappa=np.nan,
F=3.97987798611259,
pF=.0230019984382644,
)
tsls_robust = regout(
summary=pd.DataFrame(np.array([
[-1319.865169393102,
2357.647789772478,
-.5598228773265894,
.5773622437125422,
-6020.881343525829,
3381.151004739624,
],
[-217.1947537663291,
503.6720846601052,
-.4312225362120266,
.6676130605679584,
-1221.488366543325,
787.0988590106673,
],
[75092.75604853875,
144765.6412502902,
.5187194654752942,
.6055693972498957,
-213561.7342143963,
363747.2463114738,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[5558503.100619048,
1185986.375722446,
-341107563.0831394,
],
[1185986.375722446,
253685.5688658562,
-72904288.91181517,
],
[-341107563.0831394,
-72904288.91181517,
20957090886.60773,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-1031817172.794085,
tss=np.nan,
rss=1666882568.915706,
kappa=np.nan,
F=3.406896316082843,
pF=.0386511725211229,
)
tsls_cluster = regout(
summary=pd.DataFrame(np.array([
[-1319.865169393102,
2257.567862016117,
-.5846403076514384,
.5625396644960171,
-5902.971584635199,
3263.241245848994,
],
[-217.1947537663291,
486.3497477085017,
-.4465814052329,
.6579283787885248,
-1204.537232491913,
770.1477249592547,
],
[75092.75604853875,
139493.4175166438,
.5383247280437371,
.5937601902027558,
-208093.9367907353,
358279.4488878128,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[5096612.65160802,
1096219.32167181,
-314686204.683651,
],
[1096219.32167181,
236536.0770961233,
-67830404.58467865,
],
[-314686204.683651,
-67830404.58467865,
19458413530.47272,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-1031817172.794085,
tss=np.nan,
rss=1666882568.915706,
kappa=np.nan,
F=3.125695274137819,
pF=.0563657644983311,
)
|
[
"numpy.array"
] |
[((278, 673), 'numpy.array', 'np.array', (['[[-1319.865169393102, 1906.786380881755, -0.6921935160784805, \n 0.4910734473693195, -5121.889227450638, 2482.158888664433], [-\n 217.1947537663291, 420.1260089670161, -0.5169752624941175, \n 0.6067801835089432, -1054.902223005562, 620.5127154729038], [\n 75092.75604853875, 119511.8053379244, 0.628329191716341, \n 0.5318043826192644, -163207.0155842729, 313392.5276813505]]'], {}), '([[-1319.865169393102, 1906.786380881755, -0.6921935160784805, \n 0.4910734473693195, -5121.889227450638, 2482.158888664433], [-\n 217.1947537663291, 420.1260089670161, -0.5169752624941175, \n 0.6067801835089432, -1054.902223005562, 620.5127154729038], [\n 75092.75604853875, 119511.8053379244, 0.628329191716341, \n 0.5318043826192644, -163207.0155842729, 313392.5276813505]])\n', (286, 673), True, 'import numpy as np\n'), ((734, 930), 'numpy.array', 'np.array', (['[[3635834.30231614, 799471.1768877679, -227680006.992276], [\n 799471.1768877679, 176505.8634105533, -50197751.5841309], [-\n 227680006.992276, -50197751.5841309, 14283071615.12995]]'], {}), '([[3635834.30231614, 799471.1768877679, -227680006.992276], [\n 799471.1768877679, 176505.8634105533, -50197751.5841309], [-\n 227680006.992276, -50197751.5841309, 14283071615.12995]])\n', (742, 930), True, 'import numpy as np\n'), ((1185, 1581), 'numpy.array', 'np.array', (['[[-1319.865169393102, 2357.647789772478, -0.5598228773265894, \n 0.5773622437125422, -6020.881343525829, 3381.151004739624], [-\n 217.1947537663291, 503.6720846601052, -0.4312225362120266, \n 0.6676130605679584, -1221.488366543325, 787.0988590106673], [\n 75092.75604853875, 144765.6412502902, 0.5187194654752942, \n 0.6055693972498957, -213561.7342143963, 363747.2463114738]]'], {}), '([[-1319.865169393102, 2357.647789772478, -0.5598228773265894, \n 0.5773622437125422, -6020.881343525829, 3381.151004739624], [-\n 217.1947537663291, 503.6720846601052, -0.4312225362120266, \n 0.6676130605679584, -1221.488366543325, 787.0988590106673], [\n 75092.75604853875, 144765.6412502902, 0.5187194654752942, \n 0.6055693972498957, -213561.7342143963, 363747.2463114738]])\n', (1193, 1581), True, 'import numpy as np\n'), ((1641, 1842), 'numpy.array', 'np.array', (['[[5558503.100619048, 1185986.375722446, -341107563.0831394], [\n 1185986.375722446, 253685.5688658562, -72904288.91181517], [-\n 341107563.0831394, -72904288.91181517, 20957090886.60773]]'], {}), '([[5558503.100619048, 1185986.375722446, -341107563.0831394], [\n 1185986.375722446, 253685.5688658562, -72904288.91181517], [-\n 341107563.0831394, -72904288.91181517, 20957090886.60773]])\n', (1649, 1842), True, 'import numpy as np\n'), ((2099, 2492), 'numpy.array', 'np.array', (['[[-1319.865169393102, 2257.567862016117, -0.5846403076514384, \n 0.5625396644960171, -5902.971584635199, 3263.241245848994], [-\n 217.1947537663291, 486.3497477085017, -0.4465814052329, \n 0.6579283787885248, -1204.537232491913, 770.1477249592547], [\n 75092.75604853875, 139493.4175166438, 0.5383247280437371, \n 0.5937601902027558, -208093.9367907353, 358279.4488878128]]'], {}), '([[-1319.865169393102, 2257.567862016117, -0.5846403076514384, \n 0.5625396644960171, -5902.971584635199, 3263.241245848994], [-\n 217.1947537663291, 486.3497477085017, -0.4465814052329, \n 0.6579283787885248, -1204.537232491913, 770.1477249592547], [\n 75092.75604853875, 139493.4175166438, 0.5383247280437371, \n 0.5937601902027558, -208093.9367907353, 358279.4488878128]])\n', (2107, 2492), True, 'import numpy as np\n'), ((2552, 2748), 'numpy.array', 
'np.array', (['[[5096612.65160802, 1096219.32167181, -314686204.683651], [1096219.32167181,\n 236536.0770961233, -67830404.58467865], [-314686204.683651, -\n 67830404.58467865, 19458413530.47272]]'], {}), '([[5096612.65160802, 1096219.32167181, -314686204.683651], [\n 1096219.32167181, 236536.0770961233, -67830404.58467865], [-\n 314686204.683651, -67830404.58467865, 19458413530.47272]])\n', (2560, 2748), True, 'import numpy as np\n')]
|
# Author: <NAME>
# MIT license (see LICENCE.txt in the top-level folder)
import unittest
import numpy as np
from numpy import random
from numpy import linalg as LA
from sklearn.linear_model import LinearRegression, LogisticRegression
from single_neuron import models as models
from single_neuron import math_utils as math_utils
datasets_n = 50
max_ds_n = 10000
max_features_n = 100
max_abs_value = 1000
min_epochs = 100
max_epochs = 10000
min_lr = 1e-9
max_lr = 1e-5
def generate_synthetic_datasets(N_max, m_max, gaussian=False):
N_train = random.randint(3, N_max + 1)
N_valid = random.randint(3, N_max + 1)
m = random.randint(2, m_max + 1)
if gaussian:
# we are generating a synthetic dataset based on a multivariate Gaussian
# distribution. In order to generate the latter, we need a mean vector
# (easy) and a positive definite matrix for the covariances. This matrix
        # is much trickier to sample, and I don't know the best way to do it.
# My current brute-force approach is the following: (a) I sample m
# vectors; (b) I take all the possible inner products (Gram matrix) as
# the covariance matrix and (c) if the covariance matrix is singular, I
        # go back to step (a).
mu = 2 * (random.rand(m) - 0.5) * max_abs_value
Cov = np.zeros([m, m])
while LA.matrix_rank(Cov) != m:
a = 2 * (random.rand(m) - 0.5) * max_abs_value
X = a * random.rand(m, m)
Cov = X.T.dot(X)
train_ds = random.multivariate_normal(mu, Cov, N_train)
valid_ds = random.multivariate_normal(mu, Cov, N_valid)
else:
# uniformly random datasets
train_ds = 2 * (random.rand(N_train, m) - 0.5) * max_abs_value
valid_ds = 2 * (random.rand(N_valid, m) - 0.5) * max_abs_value
return train_ds, valid_ds
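# Usage sketch (illustrative only): generate_synthetic_datasets(100, 5, gaussian=True)
# returns a training and a validation array drawn from the same multivariate
# normal, with 3-100 rows each and a shared column count sampled from 2-5.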
class TestLinearNeuron(unittest.TestCase):
def setUp(self):
"""
Prepare a few synthetic datasets for the tests. Two categories of
datasets: One random without any implied structure and one that arises
from a predefined distribution.
"""
self.train_X = []
self.valid_X = []
self.train_y = []
self.valid_y = []
for ds_i in range(0, datasets_n):
# make sure that there are some datasets with extremely small values
if ds_i < 10:
N_max = 7
else:
N_max = max_ds_n
if ds_i < 10:
m_max = 2
else:
m_max = max_features_n
#gaussian = random.rand() < 0.5
gaussian = True
train_ds, valid_ds = generate_synthetic_datasets(N_max, m_max,
gaussian)
# we use the last column as the target variable
self.train_X.append(train_ds[:, :-1])
self.valid_X.append(valid_ds[:, :-1])
self.train_y.append(train_ds[:, -1])
self.valid_y.append(valid_ds[:, -1])
self.lin_model = LinearRegression()
def test_rmse_is_equal_with_sklearn(self):
pass
def test_params_are_equal_with_sklearn(self):
pass
def test_initialization_does_not_matter(self):
pass
class TestReluNeuron(unittest.TestCase):
def test_rmse_is_equal_with_sklearn(self):
pass
def test_initialization_with_negatives_leads_to_zero_gradients(self):
pass
def test_initialization_does_not_matter(self):
pass
class TestLogisticNeuron(unittest.TestCase):
def test_ce_is_equal_with_sklearn(self):
pass
def test_initialization_does_not_matter(self):
pass
|
[
"numpy.zeros",
"sklearn.linear_model.LinearRegression",
"numpy.linalg.matrix_rank",
"numpy.random.randint",
"numpy.random.multivariate_normal",
"numpy.random.rand"
] |
[((551, 579), 'numpy.random.randint', 'random.randint', (['(3)', '(N_max + 1)'], {}), '(3, N_max + 1)\n', (565, 579), False, 'from numpy import random\n'), ((594, 622), 'numpy.random.randint', 'random.randint', (['(3)', '(N_max + 1)'], {}), '(3, N_max + 1)\n', (608, 622), False, 'from numpy import random\n'), ((631, 659), 'numpy.random.randint', 'random.randint', (['(2)', '(m_max + 1)'], {}), '(2, m_max + 1)\n', (645, 659), False, 'from numpy import random\n'), ((1359, 1375), 'numpy.zeros', 'np.zeros', (['[m, m]'], {}), '([m, m])\n', (1367, 1375), True, 'import numpy as np\n'), ((1569, 1613), 'numpy.random.multivariate_normal', 'random.multivariate_normal', (['mu', 'Cov', 'N_train'], {}), '(mu, Cov, N_train)\n', (1595, 1613), False, 'from numpy import random\n'), ((1633, 1677), 'numpy.random.multivariate_normal', 'random.multivariate_normal', (['mu', 'Cov', 'N_valid'], {}), '(mu, Cov, N_valid)\n', (1659, 1677), False, 'from numpy import random\n'), ((3207, 3225), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3223, 3225), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1390, 1409), 'numpy.linalg.matrix_rank', 'LA.matrix_rank', (['Cov'], {}), '(Cov)\n', (1404, 1409), True, 'from numpy import linalg as LA\n'), ((1495, 1512), 'numpy.random.rand', 'random.rand', (['m', 'm'], {}), '(m, m)\n', (1506, 1512), False, 'from numpy import random\n'), ((1298, 1312), 'numpy.random.rand', 'random.rand', (['m'], {}), '(m)\n', (1309, 1312), False, 'from numpy import random\n'), ((1749, 1772), 'numpy.random.rand', 'random.rand', (['N_train', 'm'], {}), '(N_train, m)\n', (1760, 1772), False, 'from numpy import random\n'), ((1820, 1843), 'numpy.random.rand', 'random.rand', (['N_valid', 'm'], {}), '(N_valid, m)\n', (1831, 1843), False, 'from numpy import random\n'), ((1437, 1451), 'numpy.random.rand', 'random.rand', (['m'], {}), '(m)\n', (1448, 1451), False, 'from numpy import random\n')]
|
import tensorflow as tf
import numpy as np
from model import decoder,vae
import cv2
vae.load_weights("vae_cnn.h5")
lv = np.load("lv.npy")
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter("output.avi", fourcc, 30.0, (208, 120))
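# Decode the first 1000 latent vectors back into 120x208 grayscale frames and
# write them, converted to 3-channel images, into a 30 fps AVI file.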
for i in range(1000):
data = lv[i].reshape(1,128)
img = decoder.predict(data)
img = np.array(img).reshape(120,208,1)
img = img * 255
img = np.array(img).astype("uint8")
img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
video.write(img)
video.release()
|
[
"numpy.load",
"cv2.VideoWriter_fourcc",
"model.decoder.predict",
"cv2.cvtColor",
"numpy.array",
"cv2.VideoWriter",
"model.vae.load_weights"
] |
[((85, 115), 'model.vae.load_weights', 'vae.load_weights', (['"""vae_cnn.h5"""'], {}), "('vae_cnn.h5')\n", (101, 115), False, 'from model import decoder, vae\n'), ((121, 138), 'numpy.load', 'np.load', (['"""lv.npy"""'], {}), "('lv.npy')\n", (128, 138), True, 'import numpy as np\n'), ((148, 179), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (170, 179), False, 'import cv2\n'), ((188, 243), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output.avi"""', 'fourcc', '(30.0)', '(208, 120)'], {}), "('output.avi', fourcc, 30.0, (208, 120))\n", (203, 243), False, 'import cv2\n'), ((309, 330), 'model.decoder.predict', 'decoder.predict', (['data'], {}), '(data)\n', (324, 330), False, 'from model import decoder, vae\n'), ((444, 481), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2RGB'], {}), '(img, cv2.COLOR_GRAY2RGB)\n', (456, 481), False, 'import cv2\n'), ((341, 354), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (349, 354), True, 'import numpy as np\n'), ((404, 417), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (412, 417), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
class Bert_Base(nn.Module):
def __init__(self, opt):
super(Bert_Base, self).__init__()
self.opt = opt
#self.tokenizer = BertTokenizer.from_pretrained(model_path)
def forward(self, inputs, use_hidden_state=False):
text_raw_indices, text_raw_indices_mask, aspect_position_text = inputs[0], inputs[1], inputs[2]
ctx = self.opt.bse.get_vector(text_raw_indices)
ctx_len = torch.sum(text_raw_indices_mask != 0, dim=1)
vectors = []
aspect_vectors = []
asp_len = []
for idx, vector in enumerate(ctx):
# print(aspect_position_text[idx])
# print(vector.size())
#vector = torch.stack(vector)
left, right = aspect_position_text[idx].split('_')
vector = [np.asarray(each, dtype=float) for each in vector]
aspect_vector = vector[int(left):int(right)]
# if self.opt.device:
# vector = vector.cpu()
# aspect_vector = aspect_vector.cpu()
pad_number = self.opt.max_seq_len - len(vector) + 2
#ctx_len.append(len(vector))
vector = np.asarray(vector, dtype=float)
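            # Drop the first and last token vectors (presumably the [CLS] and
            # [SEP] embeddings); the "+ 2" in pad_number above accounts for them.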
vector = vector[1:-1]
vector = np.concatenate((vector, np.zeros((pad_number, self.opt.embed_dim))))
vector = vector.astype('float32')
vector = torch.from_numpy(vector)
#pad_tuple = (0, 0, left, 0)
#vector = F.pad(vector, pad_tuple, 'constant', 0)
pad_number = self.opt.max_seq_len - len(aspect_vector)
asp_len.append(len(aspect_vector))
aspect_vector = np.asarray(aspect_vector)
aspect_vector = np.concatenate((aspect_vector, np.zeros((pad_number, self.opt.embed_dim))))
aspect_vector = aspect_vector.astype('float32')
aspect_vector = torch.from_numpy(aspect_vector)
if self.opt.device:
vector = vector.to(self.opt.device)
aspect_vector = aspect_vector.to(self.opt.device)
vectors.append(vector)
aspect_vectors.append(aspect_vector)
ctx = torch.stack(vectors)
asp = torch.stack(aspect_vectors)
asp_len = torch.from_numpy(np.asarray(asp_len))
#ctx_len = torch.from_numpy(np.asarray(ctx_len))
if self.opt.device:
asp_len = asp_len.to(self.opt.device)
ctx_len = ctx_len.to(self.opt.device)
ctx.requires_grad = False
asp.requires_grad = False
# print(vectors.size())
# print(aspect_vectors.size())
return ctx, asp, ctx_len, asp_len
|
[
"torch.stack",
"numpy.asarray",
"numpy.zeros",
"torch.cuda.is_available",
"torch.device",
"torch.sum",
"torch.from_numpy"
] |
[((98, 123), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (121, 123), False, 'import torch\n'), ((133, 176), 'torch.device', 'torch.device', (["('cuda' if USE_CUDA else 'cpu')"], {}), "('cuda' if USE_CUDA else 'cpu')\n", (145, 176), False, 'import torch\n'), ((603, 647), 'torch.sum', 'torch.sum', (['(text_raw_indices_mask != 0)'], {'dim': '(1)'}), '(text_raw_indices_mask != 0, dim=1)\n', (612, 647), False, 'import torch\n'), ((2324, 2344), 'torch.stack', 'torch.stack', (['vectors'], {}), '(vectors)\n', (2335, 2344), False, 'import torch\n'), ((2359, 2386), 'torch.stack', 'torch.stack', (['aspect_vectors'], {}), '(aspect_vectors)\n', (2370, 2386), False, 'import torch\n'), ((1332, 1363), 'numpy.asarray', 'np.asarray', (['vector'], {'dtype': 'float'}), '(vector, dtype=float)\n', (1342, 1363), True, 'import numpy as np\n'), ((1555, 1579), 'torch.from_numpy', 'torch.from_numpy', (['vector'], {}), '(vector)\n', (1571, 1579), False, 'import torch\n'), ((1826, 1851), 'numpy.asarray', 'np.asarray', (['aspect_vector'], {}), '(aspect_vector)\n', (1836, 1851), True, 'import numpy as np\n'), ((2044, 2075), 'torch.from_numpy', 'torch.from_numpy', (['aspect_vector'], {}), '(aspect_vector)\n', (2060, 2075), False, 'import torch\n'), ((2422, 2441), 'numpy.asarray', 'np.asarray', (['asp_len'], {}), '(asp_len)\n', (2432, 2441), True, 'import numpy as np\n'), ((970, 999), 'numpy.asarray', 'np.asarray', (['each'], {'dtype': 'float'}), '(each, dtype=float)\n', (980, 999), True, 'import numpy as np\n'), ((1443, 1485), 'numpy.zeros', 'np.zeros', (['(pad_number, self.opt.embed_dim)'], {}), '((pad_number, self.opt.embed_dim))\n', (1451, 1485), True, 'import numpy as np\n'), ((1911, 1953), 'numpy.zeros', 'np.zeros', (['(pad_number, self.opt.embed_dim)'], {}), '((pad_number, self.opt.embed_dim))\n', (1919, 1953), True, 'import numpy as np\n')]
|
import glob
import os
import time
import cv2
import numpy as np
from Pre_Processing import frameManipulator
commands = ['bin', 'lay', 'place', 'set']
prepositions = ['at', 'by', 'in', 'with']
colors = ['blue', 'green', 'red', 'white']
adverbs = ['again', 'now', 'please', 'soon']
alphabet = [chr(x) for x in range(ord('a'), ord('z') + 1)]
numbers = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
categories = ['Adverb', 'Alphabet', 'Commands', 'Colors', 'Numbers', 'Prepositions']
commonCNNDataPath = 'D:/CNN-Test-Images/'
def getVideoFrames(videoPath):
"""Function to return a video's frames in a list"""
vidcap = cv2.VideoCapture(videoPath)
success, image = vidcap.read()
allFrames = []
while success:
allFrames.append(image)
success, image = vidcap.read()
return allFrames
def stackFramesToImage(listOfFrames):
"""Function to concat frames into a single picture"""
if len(listOfFrames) < frameManipulator.FPS:
return None
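    # Arrange the 30 frames as a 6x5 mosaic: each row stacks 5 frames
    # horizontally, and the 6 rows are then stacked vertically.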
newList = [np.hstack(listOfFrames[:5]), np.hstack(listOfFrames[5:10]), np.hstack(listOfFrames[10:15]),
np.hstack(listOfFrames[15:20]), np.hstack(listOfFrames[20:25]), np.hstack(listOfFrames[25:30])]
return np.vstack(newList)
def saveImage(image, imagePath):
"""Function to save an image in grayscale to a specific path"""
if len(image.shape) == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
index = len(os.listdir(imagePath))
imagePath = imagePath + '/{}.jpg'.format(index)
cv2.imwrite(imagePath, image)
def createCNNDataDirectories():
"""Function to create label directories for each category for training the CNN"""
for command in commands:
dirName = commonCNNDataPath + '/Commands/{}/'.format(command)
if not os.path.exists(dirName):
os.makedirs(dirName)
for preposition in prepositions:
dirName = commonCNNDataPath + '/Prepositions/{}/'.format(preposition)
if not os.path.exists(dirName):
os.makedirs(dirName)
for color in colors:
dirName = commonCNNDataPath + '/Colors/{}/'.format(color)
if not os.path.exists(dirName):
os.makedirs(dirName)
for adverb in adverbs:
dirName = commonCNNDataPath + '/Adverb/{}/'.format(adverb)
if not os.path.exists(dirName):
os.makedirs(dirName)
for letter in alphabet:
dirName = commonCNNDataPath + '/Alphabet/{}/'.format(letter)
if not os.path.exists(dirName):
os.makedirs(dirName)
for number in numbers:
dirName = commonCNNDataPath + '/Numbers/{}/'.format(number)
if not os.path.exists(dirName):
os.makedirs(dirName)
def extractLipsHaarCascade(haarDetector, frame):
"""Function to extract lips from a frame"""
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
roi_gray = 0
faces = haarDetector.detectMultiScale(gray, 1.3, 5)
if len(faces) == 0:
roi_gray = cv2.resize(gray, (150, 100))
return roi_gray
for (x, y, w, h) in faces:
roi_gray = gray[y + (2 * h // 3):y + h, x:x + w]
roi_gray = cv2.resize(roi_gray, (150, 100))
return roi_gray
def prepareSingleVideoForCNN(path, haarDetector):
"""Function to prepare a single video to be redy for CNN training"""
vidData = frameManipulator.getVideoDataFromPath(path)
videoFrames = getVideoFrames(path)
videoFrames = [extractLipsHaarCascade(haarDetector, x) for x in videoFrames]
    if len(videoFrames) != 0:
        stackedImage = stackFramesToImage(videoFrames)
        if stackedImage is None:
            print("Not enough frames in video with path: {}".format(path))
            return
        videoLabel = vidData.identifier.split('_')[0]
        imageSavePath = commonCNNDataPath + vidData.category + '/{}'.format(videoLabel)
        saveImage(stackedImage, imageSavePath)
    else:
        print("Error in finding video with path: {}".format(path))
def prepareDataSetForCNN(firstSpeaker, secondSpeaker):
"""Function that traverses the whole dataset and creates new directory for the CNN"""
detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
for i in range(firstSpeaker, secondSpeaker):
for category in categories:
sTime = time.time()
videoPath = "../New-DataSet-Videos/S{}/{}/".format(i, category) + "*.mp4"
vidList = glob.glob(videoPath)
def f(x):
return x.replace("\\", '/')
vidList = [f(x) for x in vidList]
for j in vidList:
prepareSingleVideoForCNN(j, detector)
print("Finished category : {}, for speaker: {}".format(category, i))
print("In:{} Seconds".format(time.time() - sTime))
print("Finished Speaker {}".format(i))
def main():
startTime = time.time()
firstSpeaker = 23
secondSpeaker = 24
createCNNDataDirectories()
prepareDataSetForCNN(firstSpeaker, secondSpeaker)
print("Finished preparing the videos in {} seconds".format(time.time() - startTime))
if __name__ == "__main__":
main()
|
[
"cv2.resize",
"os.makedirs",
"cv2.cvtColor",
"cv2.imwrite",
"Pre_Processing.frameManipulator.getVideoDataFromPath",
"os.path.exists",
"time.time",
"cv2.VideoCapture",
"numpy.hstack",
"cv2.CascadeClassifier",
"glob.glob",
"os.listdir",
"numpy.vstack"
] |
[((654, 681), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videoPath'], {}), '(videoPath)\n', (670, 681), False, 'import cv2\n'), ((1243, 1261), 'numpy.vstack', 'np.vstack', (['newList'], {}), '(newList)\n', (1252, 1261), True, 'import numpy as np\n'), ((1546, 1575), 'cv2.imwrite', 'cv2.imwrite', (['imagePath', 'image'], {}), '(imagePath, image)\n', (1557, 1575), False, 'import cv2\n'), ((2840, 2879), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (2852, 2879), False, 'import cv2\n'), ((3153, 3185), 'cv2.resize', 'cv2.resize', (['roi_gray', '(150, 100)'], {}), '(roi_gray, (150, 100))\n', (3163, 3185), False, 'import cv2\n'), ((3345, 3388), 'Pre_Processing.frameManipulator.getVideoDataFromPath', 'frameManipulator.getVideoDataFromPath', (['path'], {}), '(path)\n', (3382, 3388), False, 'from Pre_Processing import frameManipulator\n'), ((4023, 4111), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (["(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')"], {}), "(cv2.data.haarcascades +\n 'haarcascade_frontalface_default.xml')\n", (4044, 4111), False, 'import cv2\n'), ((4775, 4786), 'time.time', 'time.time', ([], {}), '()\n', (4784, 4786), False, 'import time\n'), ((1029, 1056), 'numpy.hstack', 'np.hstack', (['listOfFrames[:5]'], {}), '(listOfFrames[:5])\n', (1038, 1056), True, 'import numpy as np\n'), ((1058, 1087), 'numpy.hstack', 'np.hstack', (['listOfFrames[5:10]'], {}), '(listOfFrames[5:10])\n', (1067, 1087), True, 'import numpy as np\n'), ((1089, 1119), 'numpy.hstack', 'np.hstack', (['listOfFrames[10:15]'], {}), '(listOfFrames[10:15])\n', (1098, 1119), True, 'import numpy as np\n'), ((1136, 1166), 'numpy.hstack', 'np.hstack', (['listOfFrames[15:20]'], {}), '(listOfFrames[15:20])\n', (1145, 1166), True, 'import numpy as np\n'), ((1168, 1198), 'numpy.hstack', 'np.hstack', (['listOfFrames[20:25]'], {}), '(listOfFrames[20:25])\n', (1177, 1198), True, 'import numpy as np\n'), ((1200, 1230), 'numpy.hstack', 'np.hstack', (['listOfFrames[25:30]'], {}), '(listOfFrames[25:30])\n', (1209, 1230), True, 'import numpy as np\n'), ((1411, 1450), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1423, 1450), False, 'import cv2\n'), ((1467, 1488), 'os.listdir', 'os.listdir', (['imagePath'], {}), '(imagePath)\n', (1477, 1488), False, 'import os\n'), ((2996, 3024), 'cv2.resize', 'cv2.resize', (['gray', '(150, 100)'], {}), '(gray, (150, 100))\n', (3006, 3024), False, 'import cv2\n'), ((1810, 1833), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (1824, 1833), False, 'import os\n'), ((1847, 1867), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (1858, 1867), False, 'import os\n'), ((1999, 2022), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (2013, 2022), False, 'import os\n'), ((2036, 2056), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (2047, 2056), False, 'import os\n'), ((2164, 2187), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (2178, 2187), False, 'import os\n'), ((2201, 2221), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (2212, 2221), False, 'import os\n'), ((2332, 2355), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (2346, 2355), False, 'import os\n'), ((2369, 2389), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (2380, 2389), False, 'import os\n'), ((2503, 2526), 'os.path.exists', 'os.path.exists', (['dirName'], {}), 
'(dirName)\n', (2517, 2526), False, 'import os\n'), ((2540, 2560), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (2551, 2560), False, 'import os\n'), ((2672, 2695), 'os.path.exists', 'os.path.exists', (['dirName'], {}), '(dirName)\n', (2686, 2695), False, 'import os\n'), ((2709, 2729), 'os.makedirs', 'os.makedirs', (['dirName'], {}), '(dirName)\n', (2720, 2729), False, 'import os\n'), ((4213, 4224), 'time.time', 'time.time', ([], {}), '()\n', (4222, 4224), False, 'import time\n'), ((4333, 4353), 'glob.glob', 'glob.glob', (['videoPath'], {}), '(videoPath)\n', (4342, 4353), False, 'import glob\n'), ((4981, 4992), 'time.time', 'time.time', ([], {}), '()\n', (4990, 4992), False, 'import time\n'), ((4675, 4686), 'time.time', 'time.time', ([], {}), '()\n', (4684, 4686), False, 'import time\n')]
|
import numpy as np
import math
from pressiotools import linalg as la
def read_binary_array(fileName, nCols):
# read a numpy array from a binary file "fileName"
if nCols==1:
return np.fromfile(fileName)
else:
array = np.fromfile(fileName)
nRows = int(len(array) / float(nCols))
return array.reshape((nCols,nRows)).T
def read_ascii_array(fileName, nCols):
# read a numpy array from an ascii file "fileName"
return np.asfortranarray(np.loadtxt(fileName))
def read_array(fileName, nCols, isBinary=True):
if isBinary:
return read_binary_array(fileName,nCols)
else:
return read_ascii_array(fileName,nCols)
def read_array_distributed(comm, rootFileName, nCols, isBinary=True):
# Read an array from binary or ascii files with the name specified
# by the string rootFileName
# Each local array segment will be read from a file rootFileName.XX.YY,
# where XX is the number of ranks and YY is the local rank
rank = comm.Get_rank()
size = comm.Get_size()
nDigit = int(math.log10(size)) + 1
myFileName = "{}.{}.{:0{width}d}".format(rootFileName,size,rank,width=nDigit)
myArr = read_array(myFileName,nCols,isBinary)
if nCols==1:
return la.Vector(myArr)
else:
return la.MultiVector(myArr)
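# Example of the naming scheme (hypothetical file names): with a communicator of
# size 16, rank 7 reads "snapshots.bin.16.07", since nDigit = int(math.log10(16)) + 1 = 2;
# the local block is then wrapped in a pressiotools Vector or MultiVector.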
|
[
"numpy.fromfile",
"pressiotools.linalg.MultiVector",
"pressiotools.linalg.Vector",
"math.log10",
"numpy.loadtxt"
] |
[((190, 211), 'numpy.fromfile', 'np.fromfile', (['fileName'], {}), '(fileName)\n', (201, 211), True, 'import numpy as np\n'), ((232, 253), 'numpy.fromfile', 'np.fromfile', (['fileName'], {}), '(fileName)\n', (243, 253), True, 'import numpy as np\n'), ((459, 479), 'numpy.loadtxt', 'np.loadtxt', (['fileName'], {}), '(fileName)\n', (469, 479), True, 'import numpy as np\n'), ((1191, 1207), 'pressiotools.linalg.Vector', 'la.Vector', (['myArr'], {}), '(myArr)\n', (1200, 1207), True, 'from pressiotools import linalg as la\n'), ((1227, 1248), 'pressiotools.linalg.MultiVector', 'la.MultiVector', (['myArr'], {}), '(myArr)\n', (1241, 1248), True, 'from pressiotools import linalg as la\n'), ((1014, 1030), 'math.log10', 'math.log10', (['size'], {}), '(size)\n', (1024, 1030), False, 'import math\n')]
|
import numpy as np
import sys
# convert any index to a 4 tuple
def unpackIndex(i, default):
a = b = c = d = default
if type(i) == int:
d = i
elif len(i) == 1:
d = i[0]
elif len(i) == 2:
c = i[0]
d = i[1]
elif len(i) == 3:
b = i[0]
c = i[1]
d = i[2]
else:
a = i[0]
b = i[1]
c = i[2]
d = i[3]
return (a, b, c, d)
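# Examples of unpackIndex (illustrative): unpackIndex(7, 1) -> (1, 1, 1, 7);
# unpackIndex((3, 5), 1) -> (1, 1, 3, 5); a full 4-D shape is returned unchanged.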
def convert(path):
# load the file
arr = np.load(path + ".npy")
# open the output file
with open(("../cifar10/" + path + ".c").lower(), "w") as f:
# get dimensions
(a, b, c, d) = unpackIndex(arr.shape, 1)
arr = arr.reshape((a, b, c, d))
# write head
f.write('#include "../include/deep_cyber.h"\n')
f.write('\n')
f.write('const uint8_t ' + path.upper() + '_DATA[' + str(arr.view(np.uint8).flatten().shape[0]) + '] = {\n')
# write data
for ai in range(a):
for bi in range(b):
for ci in range(c):
for di in range(d):
elem_arr = np.zeros((1), dtype=np.float32)
elem_arr[0] = arr[ai, bi, ci, di]
elem = elem_arr.view(np.uint8).flatten()
e = elem.shape[0]
for ei in range(e):
if ai == a - 1 and bi == b - 1 and ci == c - 1 and di == d - 1 and ei == e - 1:
break
f.write('\t' + hex(elem[ei]) + ',\n')
# write tail
elem_arr = np.zeros((1), dtype=np.float32)
elem_arr[0] = arr.flatten()[-1]
elem = elem_arr.view(np.uint8).flatten()
e = elem.shape[0]
f.write('\t' + hex(elem[-1]) + '};\n')
f.write('\n')
f.write('Tensor ' + path.upper() + ' = {' + str(a) + ', ' + str(b) + ', ' + str(c) + ', ' + str(d) + ', (float*)' + path.upper() + '_DATA};\n')
convert("c1b")
convert("c1w")
convert("c2b")
convert("c2w")
convert("c3b")
convert("c3w")
convert("c4b")
convert("c4w")
convert("d1b")
convert("d1w")
convert("d2b")
convert("d2w")
|
[
"numpy.load",
"numpy.zeros"
] |
[((479, 501), 'numpy.load', 'np.load', (["(path + '.npy')"], {}), "(path + '.npy')\n", (486, 501), True, 'import numpy as np\n'), ((1660, 1689), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (1668, 1689), True, 'import numpy as np\n'), ((1138, 1167), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.float32'}), '(1, dtype=np.float32)\n', (1146, 1167), True, 'import numpy as np\n')]
|
x1=[]
x2=[]
x3=[]
import sys
import numpy as np
f1 = open("light_gbm.txt")
for line in f1:
x1.append(float((line.strip().split('\t')[1])))
#print x1
f2 = open("simese_cnn.txt")
for line in f2:
x2.append(0.5 + 0.5*float((line.strip().split('\t')[1])))
#print x2
f3 = open("matchpyramid.txt")
for line in f3:
x3.append(float((line.strip().split('\t')[1])))
#print x3
x1=np.asarray(x1)
x2=np.asarray(x2)
x3=np.asarray(x3)
f=np.vstack((x1,x2))
f=np.vstack((f,x3))
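# Unweighted ensemble: average the three model scores and binarize the result
# with a fixed cut-off of 0.31 (scores above the cut-off become class 1).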
y_pred=f[0]/3+f[1]/3+f[2]/3
#print pred.shape
#print pred
for i in range(len(y_pred)):
if y_pred[i]>0.31:
y_pred[i]=1
else:
y_pred[i]=0
output_file=sys.argv[1]
with open(output_file, 'w') as fo:
print("\nemsembling...\n")
lineno = 1
for pred in y_pred:
fo.write('{}\t{}\n'.format(lineno, int(pred)))
lineno += 1
|
[
"numpy.asarray",
"numpy.vstack"
] |
[((385, 399), 'numpy.asarray', 'np.asarray', (['x1'], {}), '(x1)\n', (395, 399), True, 'import numpy as np\n'), ((403, 417), 'numpy.asarray', 'np.asarray', (['x2'], {}), '(x2)\n', (413, 417), True, 'import numpy as np\n'), ((421, 435), 'numpy.asarray', 'np.asarray', (['x3'], {}), '(x3)\n', (431, 435), True, 'import numpy as np\n'), ((438, 457), 'numpy.vstack', 'np.vstack', (['(x1, x2)'], {}), '((x1, x2))\n', (447, 457), True, 'import numpy as np\n'), ((459, 477), 'numpy.vstack', 'np.vstack', (['(f, x3)'], {}), '((f, x3))\n', (468, 477), True, 'import numpy as np\n')]
|
"""
Module with a function for plotting spectra.
"""
import os
import math
import warnings
import itertools
from typing import Optional, Union, Tuple, List
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from typeguard import typechecked
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
from species.core import box, constants
from species.read import read_filter
from species.util import plot_util
@typechecked
def plot_spectrum(boxes: list,
filters: Optional[List[str]] = None,
residuals: Optional[box.ResidualsBox] = None,
plot_kwargs: Optional[List[Optional[dict]]] = None,
xlim: Optional[Tuple[float, float]] = None,
ylim: Optional[Tuple[float, float]] = None,
ylim_res: Optional[Tuple[float, float]] = None,
scale: Optional[Tuple[str, str]] = None,
title: Optional[str] = None,
offset: Optional[Tuple[float, float]] = None,
legend: Optional[Union[str, dict, Tuple[float, float],
List[Optional[Union[dict, str, Tuple[float, float]]]]]] = None,
figsize: Optional[Tuple[float, float]] = (10., 5.),
object_type: str = 'planet',
quantity: str = 'flux density',
output: str = 'spectrum.pdf'):
"""
Parameters
----------
boxes : list(species.core.box, )
Boxes with data.
filters : list(str, ), None
Filter IDs for which the transmission profile is plotted. Not plotted if set to None.
residuals : species.core.box.ResidualsBox, None
Box with residuals of a fit. Not plotted if set to None.
plot_kwargs : list(dict, ), None
List with dictionaries of keyword arguments for each box. For example, if the ``boxes``
are a ``ModelBox`` and ``ObjectBox``:
.. code-block:: python
plot_kwargs=[{'ls': '-', 'lw': 1., 'color': 'black'},
{'spectrum_1': {'marker': 'o', 'ms': 3., 'color': 'tab:brown', 'ls': 'none'},
'spectrum_2': {'marker': 'o', 'ms': 3., 'color': 'tab:blue', 'ls': 'none'},
'Paranal/SPHERE.IRDIS_D_H23_3': {'marker': 's', 'ms': 4., 'color': 'tab:cyan', 'ls': 'none'},
'Paranal/SPHERE.IRDIS_D_K12_1': [{'marker': 's', 'ms': 4., 'color': 'tab:orange', 'ls': 'none'},
{'marker': 's', 'ms': 4., 'color': 'tab:red', 'ls': 'none'}],
'Paranal/NACO.Lp': {'marker': 's', 'ms': 4., 'color': 'tab:green', 'ls': 'none'},
'Paranal/NACO.Mp': {'marker': 's', 'ms': 4., 'color': 'tab:green', 'ls': 'none'}}]
For an ``ObjectBox``, the dictionary contains items for the different spectrum and filter
names stored with :func:`~species.data.database.Database.add_object`. In case both
        an ``ObjectBox`` and a ``SynphotBox`` are provided, the latter can be set to ``None``
in order to use the same (but open) symbols as the data from the ``ObjectBox``. Note that
if a filter name is duplicated in an ``ObjectBox`` (Paranal/SPHERE.IRDIS_D_K12_1 in the
example) then a list with two dictionaries should be provided. Colors are automatically
chosen if ``plot_kwargs`` is set to ``None``.
xlim : tuple(float, float)
Limits of the wavelength axis.
ylim : tuple(float, float)
Limits of the flux axis.
ylim_res : tuple(float, float), None
Limits of the residuals axis. Automatically chosen (based on the minimum and maximum
residual value) if set to None.
scale : tuple(str, str), None
Scale of the x and y axes ('linear' or 'log'). The scale is set to ``('linear', 'linear')``
if set to ``None``.
title : str
Title.
offset : tuple(float, float)
Offset for the label of the x- and y-axis.
legend : str, tuple, dict, list(dict, dict), None
Location of the legend (str or tuple(float, float)) or a dictionary with the ``**kwargs``
        of ``matplotlib.pyplot.legend``, for example ``{'loc': 'upper left', 'fontsize': 12.}``.
Alternatively, a list with two values can be provided to separate the model and data
handles in two legends. Each of these two elements can be set to ``None``. For example,
        ``[None, {'loc': 'upper left', 'fontsize': 12.}]``, if only the data points should be
included in a legend.
figsize : tuple(float, float)
Figure size.
object_type : str
Object type ('planet' or 'star'). With 'planet', the radius and mass are expressed in
Jupiter units. With 'star', the radius and mass are expressed in solar units.
    quantity : str
The quantity of the y-axis ('flux density', 'flux', or 'magnitude').
output : str
Output filename.
Returns
-------
NoneType
None
"""
mpl.rcParams['font.serif'] = ['Bitstream Vera Serif']
mpl.rcParams['font.family'] = 'serif'
plt.rc('axes', edgecolor='black', linewidth=2.2)
plt.rcParams['axes.axisbelow'] = False
if plot_kwargs is None:
plot_kwargs = []
elif plot_kwargs is not None and len(boxes) != len(plot_kwargs):
raise ValueError(f'The number of \'boxes\' ({len(boxes)}) should be equal to the '
f'number of items in \'plot_kwargs\' ({len(plot_kwargs)}).')
if residuals is not None and filters is not None:
plt.figure(1, figsize=figsize)
gridsp = mpl.gridspec.GridSpec(3, 1, height_ratios=[1, 3, 1])
gridsp.update(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
ax1 = plt.subplot(gridsp[1, 0])
ax2 = plt.subplot(gridsp[0, 0])
ax3 = plt.subplot(gridsp[2, 0])
elif residuals is not None:
plt.figure(1, figsize=figsize)
gridsp = mpl.gridspec.GridSpec(2, 1, height_ratios=[4, 1])
gridsp.update(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
ax1 = plt.subplot(gridsp[0, 0])
ax2 = None
ax3 = plt.subplot(gridsp[1, 0])
elif filters is not None:
plt.figure(1, figsize=figsize)
gridsp = mpl.gridspec.GridSpec(2, 1, height_ratios=[1, 4])
gridsp.update(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
ax1 = plt.subplot(gridsp[1, 0])
ax2 = plt.subplot(gridsp[0, 0])
ax3 = None
else:
plt.figure(1, figsize=figsize)
gridsp = mpl.gridspec.GridSpec(1, 1)
gridsp.update(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
ax1 = plt.subplot(gridsp[0, 0])
ax2 = None
ax3 = None
if residuals is not None:
labelbottom = False
else:
labelbottom = True
if scale is None:
scale = ('linear', 'linear')
ax1.set_xscale(scale[0])
ax1.set_yscale(scale[1])
if filters is not None:
ax2.set_xscale(scale[0])
if residuals is not None:
ax3.set_xscale(scale[0])
ax1.tick_params(axis='both', which='major', colors='black', labelcolor='black',
direction='in', width=1, length=5, labelsize=12, top=True,
bottom=True, left=True, right=True, labelbottom=labelbottom)
ax1.tick_params(axis='both', which='minor', colors='black', labelcolor='black',
direction='in', width=1, length=3, labelsize=12, top=True,
bottom=True, left=True, right=True, labelbottom=labelbottom)
if filters is not None:
ax2.tick_params(axis='both', which='major', colors='black', labelcolor='black',
direction='in', width=1, length=5, labelsize=12, top=True,
bottom=True, left=True, right=True, labelbottom=False)
ax2.tick_params(axis='both', which='minor', colors='black', labelcolor='black',
direction='in', width=1, length=3, labelsize=12, top=True,
bottom=True, left=True, right=True, labelbottom=False)
if residuals is not None:
ax3.tick_params(axis='both', which='major', colors='black', labelcolor='black',
direction='in', width=1, length=5, labelsize=12, top=True,
bottom=True, left=True, right=True)
ax3.tick_params(axis='both', which='minor', colors='black', labelcolor='black',
direction='in', width=1, length=3, labelsize=12, top=True,
bottom=True, left=True, right=True)
if scale[0] == 'linear':
ax1.xaxis.set_minor_locator(AutoMinorLocator(5))
if scale[1] == 'linear':
ax1.yaxis.set_minor_locator(AutoMinorLocator(5))
# ax1.set_yticks([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0])
# ax3.set_yticks([-2., 0., 2.])
if filters is not None and scale[0] == 'linear':
ax2.xaxis.set_minor_locator(AutoMinorLocator(5))
if residuals is not None and scale[0] == 'linear':
ax3.xaxis.set_minor_locator(AutoMinorLocator(5))
if residuals is not None and filters is not None:
ax1.set_xlabel('')
ax2.set_xlabel('')
ax3.set_xlabel('Wavelength (µm)', fontsize=13)
elif residuals is not None:
ax1.set_xlabel('')
ax3.set_xlabel('Wavelength (µm)', fontsize=11)
elif filters is not None:
ax1.set_xlabel('Wavelength (µm)', fontsize=13)
ax2.set_xlabel('')
else:
ax1.set_xlabel('Wavelength (µm)', fontsize=13)
if filters is not None:
ax2.set_ylabel('Transmission', fontsize=13)
if residuals is not None:
if quantity == 'flux density':
ax3.set_ylabel(r'$\Delta$$\mathregular{F}_\lambda$ ($\sigma$)', fontsize=11)
elif quantity == 'flux':
ax3.set_ylabel(r'$\Delta$$\mathregular{F}_\lambda$ ($\sigma$)', fontsize=11)
if xlim is None:
ax1.set_xlim(0.6, 6.)
else:
ax1.set_xlim(xlim[0], xlim[1])
if quantity == 'magnitude':
scaling = 1.
ax1.set_ylabel('Flux contrast (mag)', fontsize=13)
if ylim:
ax1.set_ylim(ylim[0], ylim[1])
else:
if ylim:
ax1.set_ylim(ylim[0], ylim[1])
ylim = ax1.get_ylim()
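        # Plot fluxes in units of 10**exponent so the tick values are of order
        # unity; the exponent is folded into the y-axis label constructed below.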
exponent = math.floor(math.log10(ylim[1]))
scaling = 10.**exponent
if quantity == 'flux density':
ylabel = r'$\mathregular{F}_\lambda$ (10$^{'+str(exponent)+r'}$ W m$^{-2}$ µm$^{-1}$)'
elif quantity == 'flux':
ylabel = r'$\lambda$$\mathregular{F}_\lambda$ (10$^{'+str(exponent)+r'}$ W m$^{-2}$)'
ax1.set_ylabel(ylabel, fontsize=11)
ax1.set_ylim(ylim[0]/scaling, ylim[1]/scaling)
if ylim[0] < 0.:
ax1.axhline(0.0, ls='--', lw=0.7, color='gray', dashes=(2, 4), zorder=0.5)
else:
if quantity == 'flux density':
ax1.set_ylabel(r'$\mathregular{F}_\lambda$ (W m$^{-2}$ µm$^{-1}$)', fontsize=11)
elif quantity == 'flux':
ax1.set_ylabel(r'$\lambda$$\mathregular{F}_\lambda$ (W m$^{-2}$)', fontsize=11)
scaling = 1.
xlim = ax1.get_xlim()
if filters is not None:
ax2.set_xlim(xlim[0], xlim[1])
ax2.set_ylim(0., 1.)
if residuals is not None:
ax3.set_xlim(xlim[0], xlim[1])
if offset is not None and residuals is not None and filters is not None:
ax3.get_xaxis().set_label_coords(0.5, offset[0])
ax1.get_yaxis().set_label_coords(offset[1], 0.5)
ax2.get_yaxis().set_label_coords(offset[1], 0.5)
ax3.get_yaxis().set_label_coords(offset[1], 0.5)
elif offset is not None and filters is not None:
ax1.get_xaxis().set_label_coords(0.5, offset[0])
ax1.get_yaxis().set_label_coords(offset[1], 0.5)
ax2.get_yaxis().set_label_coords(offset[1], 0.5)
elif offset is not None and residuals is not None:
ax3.get_xaxis().set_label_coords(0.5, offset[0])
ax1.get_yaxis().set_label_coords(offset[1], 0.5)
ax3.get_yaxis().set_label_coords(offset[1], 0.5)
elif offset is not None:
ax1.get_xaxis().set_label_coords(0.5, offset[0])
ax1.get_yaxis().set_label_coords(offset[1], 0.5)
else:
ax1.get_xaxis().set_label_coords(0.5, -0.12)
ax1.get_yaxis().set_label_coords(-0.1, 0.5)
for j, boxitem in enumerate(boxes):
flux_scaling = 1.
if j < len(boxes):
plot_kwargs.append(None)
if isinstance(boxitem, (box.SpectrumBox, box.ModelBox)):
wavelength = boxitem.wavelength
flux = boxitem.flux
if isinstance(wavelength[0], (np.float32, np.float64)):
data = np.array(flux, dtype=np.float64)
masked = np.ma.array(data, mask=np.isnan(data))
if isinstance(boxitem, box.ModelBox):
param = boxitem.parameters
par_key, par_unit, par_label = plot_util.quantity_unit(
param=list(param.keys()), object_type=object_type)
label = ''
newline = False
for i, item in enumerate(par_key):
if item[:4] == 'teff':
value = f'{param[item]:.0f}'
elif item in ['logg', 'feh', 'fsed', 'lognorm_ext',
'powerlaw_ext', 'ism_ext']:
value = f'{param[item]:.1f}'
elif item in ['co']:
value = f'{param[item]:.2f}'
elif item[:6] == 'radius':
if object_type == 'planet':
value = f'{param[item]:.1f}'
# if item == 'radius_1':
# value = f'{param[item]:.0f}'
# else:
# value = f'{param[item]:.1f}'
elif object_type == 'star':
value = f'{param[item]*constants.R_JUP/constants.R_SUN:.1f}'
elif item == 'mass':
if object_type == 'planet':
value = f'{param[item]:.0f}'
elif object_type == 'star':
value = f'{param[item]*constants.M_JUP/constants.M_SUN:.1f}'
elif item == 'luminosity':
value = f'{np.log10(param[item]):.2f}'
else:
continue
# if len(label) > 80 and newline == False:
# label += '\n'
# newline = True
if par_unit[i] is None:
label += f'{par_label[i]} = {value}'
else:
label += f'{par_label[i]} = {value} {par_unit[i]}'
if i < len(par_key)-1:
label += ', '
else:
label = None
if plot_kwargs[j]:
kwargs_copy = plot_kwargs[j].copy()
if 'label' in kwargs_copy:
if kwargs_copy['label'] is None:
label = None
else:
label = kwargs_copy['label']
del kwargs_copy['label']
if quantity == 'flux':
flux_scaling = wavelength
ax1.plot(wavelength, flux_scaling*masked/scaling, zorder=2, label=label, **kwargs_copy)
else:
if quantity == 'flux':
flux_scaling = wavelength
ax1.plot(wavelength, flux_scaling*masked/scaling, lw=0.5, label=label, zorder=2)
elif isinstance(wavelength[0], (np.ndarray)):
for i, item in enumerate(wavelength):
data = np.array(flux[i], dtype=np.float64)
masked = np.ma.array(data, mask=np.isnan(data))
if isinstance(boxitem.name[i], bytes):
label = boxitem.name[i].decode('utf-8')
else:
label = boxitem.name[i]
if quantity == 'flux':
flux_scaling = item
ax1.plot(item, flux_scaling*masked/scaling, lw=0.5, label=label)
elif isinstance(boxitem, list):
for i, item in enumerate(boxitem):
wavelength = item.wavelength
flux = item.flux
data = np.array(flux, dtype=np.float64)
masked = np.ma.array(data, mask=np.isnan(data))
if quantity == 'flux':
flux_scaling = wavelength
if plot_kwargs[j]:
ax1.plot(wavelength, flux_scaling*masked/scaling, zorder=1, **plot_kwargs[j])
else:
ax1.plot(wavelength, flux_scaling*masked/scaling, color='gray', lw=0.2, alpha=0.5, zorder=1)
elif isinstance(boxitem, box.PhotometryBox):
label_check = []
for i, item in enumerate(boxitem.wavelength):
transmission = read_filter.ReadFilter(boxitem.filter_name[i])
fwhm = transmission.filter_fwhm()
if quantity == 'flux':
flux_scaling = item
if plot_kwargs[j]:
if 'label' in plot_kwargs[j] and plot_kwargs[j]['label'] not in label_check:
label_check.append(plot_kwargs[j]['label'])
elif 'label' in plot_kwargs[j] and plot_kwargs[j]['label'] in label_check:
del plot_kwargs[j]['label']
if boxitem.flux[i][1] is None:
ax1.errorbar(item, flux_scaling*boxitem.flux[i][0]/scaling, xerr=fwhm/2.,
yerr=None, zorder=3, **plot_kwargs[j])
else:
ax1.errorbar(item, flux_scaling*boxitem.flux[i][0]/scaling, xerr=fwhm/2.,
yerr=flux_scaling*boxitem.flux[i][1]/scaling, zorder=3, **plot_kwargs[j])
else:
if boxitem.flux[i][1] is None:
ax1.errorbar(item, flux_scaling*boxitem.flux[i][0]/scaling, xerr=fwhm/2.,
yerr=None, marker='s', ms=6, color='black', zorder=3)
else:
ax1.errorbar(item, flux_scaling*boxitem.flux[i][0]/scaling, xerr=fwhm/2.,
yerr=flux_scaling*boxitem.flux[i][1]/scaling, marker='s', ms=6, color='black',
zorder=3)
elif isinstance(boxitem, box.ObjectBox):
if boxitem.spectrum is not None:
spec_list = []
wavel_list = []
for item in boxitem.spectrum:
spec_list.append(item)
wavel_list.append(boxitem.spectrum[item][0][0, 0])
sort_index = np.argsort(wavel_list)
spec_sort = []
for i in range(sort_index.size):
spec_sort.append(spec_list[sort_index[i]])
for key in spec_sort:
masked = np.ma.array(boxitem.spectrum[key][0],
mask=np.isnan(boxitem.spectrum[key][0]))
if quantity == 'flux':
flux_scaling = masked[:, 0]
if not plot_kwargs[j] or key not in plot_kwargs[j]:
plot_obj = ax1.errorbar(masked[:, 0], flux_scaling*masked[:, 1]/scaling,
yerr=flux_scaling*masked[:, 2]/scaling, ms=2, marker='s',
zorder=2.5, ls='none')
if plot_kwargs[j] is None:
plot_kwargs[j] = {}
plot_kwargs[j][key] = {'marker': 's', 'ms': 2., 'ls': 'none',
'color': plot_obj[0].get_color()}
else:
ax1.errorbar(masked[:, 0], flux_scaling*masked[:, 1]/scaling, yerr=flux_scaling*masked[:, 2]/scaling,
zorder=2.5, **plot_kwargs[j][key])
if boxitem.flux is not None:
filter_list = []
wavel_list = []
for item in boxitem.flux:
read_filt = read_filter.ReadFilter(item)
filter_list.append(item)
wavel_list.append(read_filt.mean_wavelength())
sort_index = np.argsort(wavel_list)
filter_sort = []
for i in range(sort_index.size):
filter_sort.append(filter_list[sort_index[i]])
for item in filter_sort:
transmission = read_filter.ReadFilter(item)
wavelength = transmission.mean_wavelength()
fwhm = transmission.filter_fwhm()
if not plot_kwargs[j] or item not in plot_kwargs[j]:
if not plot_kwargs[j]:
plot_kwargs[j] = {}
if quantity == 'flux':
flux_scaling = wavelength
if isinstance(boxitem.flux[item][0], np.ndarray):
for i in range(boxitem.flux[item].shape[1]):
plot_obj = ax1.errorbar(wavelength, flux_scaling*boxitem.flux[item][0, i]/scaling, xerr=fwhm/2.,
yerr=flux_scaling*boxitem.flux[item][1, i]/scaling, marker='s', ms=5, zorder=3)
else:
plot_obj = ax1.errorbar(wavelength, flux_scaling*boxitem.flux[item][0]/scaling, xerr=fwhm/2.,
yerr=flux_scaling*boxitem.flux[item][1]/scaling, marker='s', ms=5, zorder=3)
plot_kwargs[j][item] = {'marker': 's', 'ms': 5., 'color': plot_obj[0].get_color()}
else:
if quantity == 'flux':
flux_scaling = wavelength
if isinstance(boxitem.flux[item][0], np.ndarray):
if not isinstance(plot_kwargs[j][item], list):
raise ValueError(f'A list with {boxitem.flux[item].shape[1]} '
f'dictionaries are required because the filter '
f'{item} has {boxitem.flux[item].shape[1]} '
f'values.')
for i in range(boxitem.flux[item].shape[1]):
ax1.errorbar(wavelength, flux_scaling*boxitem.flux[item][0, i]/scaling, xerr=fwhm/2.,
yerr=flux_scaling*boxitem.flux[item][1, i]/scaling, zorder=3, **plot_kwargs[j][item][i])
else:
if boxitem.flux[item][1] == 0.:
ax1.errorbar(wavelength, flux_scaling*boxitem.flux[item][0]/scaling,
xerr=fwhm/2., yerr=0.5*flux_scaling*boxitem.flux[item][0]/scaling,
uplims=True, capsize=2., capthick=0., zorder=3, **plot_kwargs[j][item])
else:
ax1.errorbar(wavelength, flux_scaling*boxitem.flux[item][0]/scaling,
xerr=fwhm/2., yerr=flux_scaling*boxitem.flux[item][1]/scaling,
zorder=3, **plot_kwargs[j][item])
elif isinstance(boxitem, box.SynphotBox):
for i, find_item in enumerate(boxes):
if isinstance(find_item, box.ObjectBox):
obj_index = i
break
for item in boxitem.flux:
transmission = read_filter.ReadFilter(item)
wavelength = transmission.mean_wavelength()
fwhm = transmission.filter_fwhm()
if quantity == 'flux':
flux_scaling = wavelength
if not plot_kwargs[obj_index] or item not in plot_kwargs[obj_index]:
ax1.errorbar(wavelength, flux_scaling*boxitem.flux[item]/scaling, xerr=fwhm/2., yerr=None,
alpha=0.7, marker='s', ms=5, zorder=4, mfc='white')
else:
if isinstance(plot_kwargs[obj_index][item], list):
# In case of multiple photometry values for the same filter, use the
# plot_kwargs of the first data point
kwargs_copy = plot_kwargs[obj_index][item][0].copy()
if 'label' in kwargs_copy:
del kwargs_copy['label']
ax1.errorbar(wavelength, flux_scaling*boxitem.flux[item]/scaling, xerr=fwhm/2., yerr=None,
zorder=4, mfc='white', **kwargs_copy)
else:
kwargs_copy = plot_kwargs[obj_index][item].copy()
if 'label' in kwargs_copy:
del kwargs_copy['label']
if 'mfc' in kwargs_copy:
del kwargs_copy['mfc']
ax1.errorbar(wavelength, flux_scaling*boxitem.flux[item]/scaling, xerr=fwhm/2., yerr=None,
zorder=4, mfc='white', **kwargs_copy)
if filters is not None:
for i, item in enumerate(filters):
transmission = read_filter.ReadFilter(item)
data = transmission.get_filter()
ax2.plot(data[:, 0], data[:, 1], '-', lw=0.7, color='black', zorder=1)
if residuals is not None:
for i, find_item in enumerate(boxes):
if isinstance(find_item, box.ObjectBox):
obj_index = i
break
res_max = 0.
if residuals.photometry is not None:
for item in residuals.photometry:
if not plot_kwargs[obj_index] or item not in plot_kwargs[obj_index]:
ax3.plot(residuals.photometry[item][0], residuals.photometry[item][1], marker='s',
ms=5, linestyle='none', zorder=2)
else:
if residuals.photometry[item].ndim == 1:
ax3.errorbar(residuals.photometry[item][0], residuals.photometry[item][1],
zorder=2, **plot_kwargs[obj_index][item])
elif residuals.photometry[item].ndim == 2:
for i in range(residuals.photometry[item].shape[1]):
if isinstance(plot_kwargs[obj_index][item], list):
ax3.errorbar(residuals.photometry[item][0, i],
residuals.photometry[item][1, i], zorder=2,
**plot_kwargs[obj_index][item][i])
else:
ax3.errorbar(residuals.photometry[item][0, i],
residuals.photometry[item][1, i], zorder=2,
**plot_kwargs[obj_index][item])
res_max = np.nanmax(np.abs(residuals.photometry[item][1]))
if residuals.spectrum is not None:
for key, value in residuals.spectrum.items():
if not plot_kwargs[obj_index] or key not in plot_kwargs[obj_index]:
ax3.errorbar(value[:, 0], value[:, 1], marker='o', ms=2, ls='none', zorder=1)
else:
ax3.errorbar(value[:, 0], value[:, 1], zorder=1, **plot_kwargs[obj_index][key])
max_tmp = np.nanmax(np.abs(value[:, 1]))
if max_tmp > res_max:
res_max = max_tmp
res_lim = math.ceil(1.1*res_max)
if res_lim > 10.:
res_lim = 5.
ax3.axhline(0., ls='--', lw=0.7, color='gray', dashes=(2, 4), zorder=0.5)
# ax3.axhline(-2.5, ls=':', lw=0.7, color='gray', dashes=(1, 4), zorder=0.5)
# ax3.axhline(2.5, ls=':', lw=0.7, color='gray', dashes=(1, 4), zorder=0.5)
if ylim_res is None:
ax3.set_ylim(-res_lim, res_lim)
else:
ax3.set_ylim(ylim_res[0], ylim_res[1])
if filters is not None:
ax2.set_ylim(0., 1.1)
print(f'Plotting spectrum: {output}...', end='', flush=True)
if title is not None:
if filters:
ax2.set_title(title, y=1.02, fontsize=13)
else:
ax1.set_title(title, y=1.02, fontsize=13)
handles, labels = ax1.get_legend_handles_labels()
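    # If 'legend' is a list of two specifications, split the handles into model curves
    # (Line2D) and data points (ErrorbarContainer) and create a separate legend for each.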
if handles and legend is not None:
if isinstance(legend, list):
model_handles = []
data_handles = []
model_labels = []
data_labels = []
for i, item in enumerate(handles):
if isinstance(item, mpl.lines.Line2D):
model_handles.append(item)
model_labels.append(labels[i])
elif isinstance(item, mpl.container.ErrorbarContainer):
data_handles.append(item)
data_labels.append(labels[i])
else:
warnings.warn(f'The object type {item} is not implemented for the legend.')
if legend[0] is not None:
if isinstance(legend[0], (str, tuple)):
leg_1 = ax1.legend(model_handles, model_labels, loc=legend[0], fontsize=10., frameon=False)
else:
leg_1 = ax1.legend(model_handles, model_labels, **legend[0])
else:
leg_1 = None
if legend[1] is not None:
if isinstance(legend[1], (str, tuple)):
leg_2 = ax1.legend(data_handles, data_labels, loc=legend[1], fontsize=8, frameon=False)
else:
leg_2 = ax1.legend(data_handles, data_labels, **legend[1])
if leg_1 is not None:
ax1.add_artist(leg_1)
elif isinstance(legend, (str, tuple)):
ax1.legend(loc=legend, fontsize=8, frameon=False)
else:
ax1.legend(**legend)
# filters = ['Paranal/SPHERE.ZIMPOL_N_Ha',
# 'MUSE/Hbeta',
# 'ALMA/855']
#
# filters = ['Paranal/SPHERE.IRDIS_B_Y',
# 'MKO/NSFCam.J',
# 'Paranal/SPHERE.IRDIS_D_H23_2',
# 'Paranal/SPHERE.IRDIS_D_H23_3',
# 'Paranal/SPHERE.IRDIS_D_K12_1',
# 'Paranal/SPHERE.IRDIS_D_K12_2',
# 'Paranal/NACO.Lp',
# 'Paranal/NACO.NB405',
# 'Paranal/NACO.Mp']
#
# for i, item in enumerate(filters):
# readfilter = read_filter.ReadFilter(item)
# filter_wavelength = readfilter.mean_wavelength()
# filter_width = readfilter.filter_fwhm()
#
# # if i == 5:
# # ax1.errorbar(filter_wavelength, 1.3e4, xerr=filter_width/2., color='dimgray', elinewidth=2.5, zorder=10)
# # else:
# # ax1.errorbar(filter_wavelength, 6e3, xerr=filter_width/2., color='dimgray', elinewidth=2.5, zorder=10)
#
# if i == 0:
# ax1.text(filter_wavelength, 1e-2, r'H$\alpha$', ha='center', va='center', fontsize=10, color='black')
# elif i == 1:
# ax1.text(filter_wavelength, 1e-2, r'H$\beta$', ha='center', va='center', fontsize=10, color='black')
# elif i == 2:
# ax1.text(filter_wavelength, 1e-2, 'ALMA\nband 7 rms', ha='center', va='center', fontsize=8, color='black')
#
# if i == 0:
# ax1.text(filter_wavelength, 1.4, 'Y', ha='center', va='center', fontsize=10, color='black')
# elif i == 1:
# ax1.text(filter_wavelength, 1.4, 'J', ha='center', va='center', fontsize=10, color='black')
# elif i == 2:
# ax1.text(filter_wavelength-0.04, 1.4, 'H2', ha='center', va='center', fontsize=10, color='black')
# elif i == 3:
# ax1.text(filter_wavelength+0.04, 1.4, 'H3', ha='center', va='center', fontsize=10, color='black')
# elif i == 4:
# ax1.text(filter_wavelength, 1.4, 'K1', ha='center', va='center', fontsize=10, color='black')
# elif i == 5:
# ax1.text(filter_wavelength, 1.4, 'K2', ha='center', va='center', fontsize=10, color='black')
# elif i == 6:
# ax1.text(filter_wavelength, 1.4, 'L$\'$', ha='center', va='center', fontsize=10, color='black')
# elif i == 7:
# ax1.text(filter_wavelength, 1.4, 'NB4.05', ha='center', va='center', fontsize=10, color='black')
# elif i == 8:
# ax1.text(filter_wavelength, 1.4, 'M$\'}$', ha='center', va='center', fontsize=10, color='black')
#
# ax1.text(1.26, 0.58, 'VLT/SPHERE', ha='center', va='center', fontsize=8., color='slateblue', rotation=43.)
# ax1.text(2.5, 1.28, 'VLT/SINFONI', ha='left', va='center', fontsize=8., color='darkgray')
plt.savefig(os.getcwd()+'/'+output, bbox_inches='tight')
plt.clf()
plt.close()
print(' [DONE]')
|
[
"matplotlib.pyplot.subplot",
"numpy.abs",
"matplotlib.pyplot.clf",
"math.ceil",
"matplotlib.pyplot.close",
"os.getcwd",
"species.read.read_filter.ReadFilter",
"numpy.isnan",
"numpy.argsort",
"numpy.log10",
"matplotlib.pyplot.figure",
"matplotlib.ticker.AutoMinorLocator",
"math.log10",
"matplotlib.pyplot.rc",
"numpy.array",
"warnings.warn",
"matplotlib.gridspec.GridSpec"
] |
[((5193, 5241), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'edgecolor': '"""black"""', 'linewidth': '(2.2)'}), "('axes', edgecolor='black', linewidth=2.2)\n", (5199, 5241), True, 'import matplotlib.pyplot as plt\n'), ((33899, 33908), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (33906, 33908), True, 'import matplotlib.pyplot as plt\n'), ((33913, 33924), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (33922, 33924), True, 'import matplotlib.pyplot as plt\n'), ((5649, 5679), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'figsize'}), '(1, figsize=figsize)\n', (5659, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5697, 5749), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['(3)', '(1)'], {'height_ratios': '[1, 3, 1]'}), '(3, 1, height_ratios=[1, 3, 1])\n', (5718, 5749), True, 'import matplotlib as mpl\n'), ((5841, 5866), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gridsp[1, 0]'], {}), '(gridsp[1, 0])\n', (5852, 5866), True, 'import matplotlib.pyplot as plt\n'), ((5881, 5906), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gridsp[0, 0]'], {}), '(gridsp[0, 0])\n', (5892, 5906), True, 'import matplotlib.pyplot as plt\n'), ((5921, 5946), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gridsp[2, 0]'], {}), '(gridsp[2, 0])\n', (5932, 5946), True, 'import matplotlib.pyplot as plt\n'), ((28610, 28634), 'math.ceil', 'math.ceil', (['(1.1 * res_max)'], {}), '(1.1 * res_max)\n', (28619, 28634), False, 'import math\n'), ((5988, 6018), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'figsize'}), '(1, figsize=figsize)\n', (5998, 6018), True, 'import matplotlib.pyplot as plt\n'), ((6036, 6085), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[4, 1]'}), '(2, 1, height_ratios=[4, 1])\n', (6057, 6085), True, 'import matplotlib as mpl\n'), ((6177, 6202), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gridsp[0, 0]'], {}), '(gridsp[0, 0])\n', (6188, 6202), True, 'import matplotlib.pyplot as plt\n'), ((6236, 6261), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gridsp[1, 0]'], {}), '(gridsp[1, 0])\n', (6247, 6261), True, 'import matplotlib.pyplot as plt\n'), ((8746, 8765), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(5)'], {}), '(5)\n', (8762, 8765), False, 'from matplotlib.ticker import AutoMinorLocator, MultipleLocator\n'), ((8833, 8852), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(5)'], {}), '(5)\n', (8849, 8852), False, 'from matplotlib.ticker import AutoMinorLocator, MultipleLocator\n'), ((9039, 9058), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(5)'], {}), '(5)\n', (9055, 9058), False, 'from matplotlib.ticker import AutoMinorLocator, MultipleLocator\n'), ((9152, 9171), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(5)'], {}), '(5)\n', (9168, 9171), False, 'from matplotlib.ticker import AutoMinorLocator, MultipleLocator\n'), ((26258, 26286), 'species.read.read_filter.ReadFilter', 'read_filter.ReadFilter', (['item'], {}), '(item)\n', (26280, 26286), False, 'from species.read import read_filter\n'), ((6301, 6331), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'figsize'}), '(1, figsize=figsize)\n', (6311, 6331), True, 'import matplotlib.pyplot as plt\n'), ((6349, 6398), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[1, 4]'}), '(2, 1, height_ratios=[1, 4])\n', (6370, 6398), True, 'import matplotlib as mpl\n'), ((6490, 6515), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['gridsp[1, 0]'], {}), '(gridsp[1, 0])\n', (6501, 6515), True, 'import matplotlib.pyplot as plt\n'), ((6530, 6555), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gridsp[0, 0]'], {}), '(gridsp[0, 0])\n', (6541, 6555), True, 'import matplotlib.pyplot as plt\n'), ((6594, 6624), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'figsize'}), '(1, figsize=figsize)\n', (6604, 6624), True, 'import matplotlib.pyplot as plt\n'), ((6642, 6669), 'matplotlib.gridspec.GridSpec', 'mpl.gridspec.GridSpec', (['(1)', '(1)'], {}), '(1, 1)\n', (6663, 6669), True, 'import matplotlib as mpl\n'), ((6761, 6786), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gridsp[0, 0]'], {}), '(gridsp[0, 0])\n', (6772, 6786), True, 'import matplotlib.pyplot as plt\n'), ((10410, 10429), 'math.log10', 'math.log10', (['ylim[1]'], {}), '(ylim[1])\n', (10420, 10429), False, 'import math\n'), ((12879, 12911), 'numpy.array', 'np.array', (['flux'], {'dtype': 'np.float64'}), '(flux, dtype=np.float64)\n', (12887, 12911), True, 'import numpy as np\n'), ((33850, 33861), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (33859, 33861), False, 'import os\n'), ((16940, 16972), 'numpy.array', 'np.array', (['flux'], {'dtype': 'np.float64'}), '(flux, dtype=np.float64)\n', (16948, 16972), True, 'import numpy as np\n'), ((28010, 28047), 'numpy.abs', 'np.abs', (['residuals.photometry[item][1]'], {}), '(residuals.photometry[item][1])\n', (28016, 28047), True, 'import numpy as np\n'), ((28493, 28512), 'numpy.abs', 'np.abs', (['value[:, 1]'], {}), '(value[:, 1])\n', (28499, 28512), True, 'import numpy as np\n'), ((12960, 12974), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (12968, 12974), True, 'import numpy as np\n'), ((16274, 16309), 'numpy.array', 'np.array', (['flux[i]'], {'dtype': 'np.float64'}), '(flux[i], dtype=np.float64)\n', (16282, 16309), True, 'import numpy as np\n'), ((17565, 17611), 'species.read.read_filter.ReadFilter', 'read_filter.ReadFilter', (['boxitem.filter_name[i]'], {}), '(boxitem.filter_name[i])\n', (17587, 17611), False, 'from species.read import read_filter\n'), ((30037, 30112), 'warnings.warn', 'warnings.warn', (['f"""The object type {item} is not implemented for the legend."""'], {}), "(f'The object type {item} is not implemented for the legend.')\n", (30050, 30112), False, 'import warnings\n'), ((17021, 17035), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (17029, 17035), True, 'import numpy as np\n'), ((19453, 19475), 'numpy.argsort', 'np.argsort', (['wavel_list'], {}), '(wavel_list)\n', (19463, 19475), True, 'import numpy as np\n'), ((21097, 21119), 'numpy.argsort', 'np.argsort', (['wavel_list'], {}), '(wavel_list)\n', (21107, 21119), True, 'import numpy as np\n'), ((16362, 16376), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (16370, 16376), True, 'import numpy as np\n'), ((20926, 20954), 'species.read.read_filter.ReadFilter', 'read_filter.ReadFilter', (['item'], {}), '(item)\n', (20948, 20954), False, 'from species.read import read_filter\n'), ((21347, 21375), 'species.read.read_filter.ReadFilter', 'read_filter.ReadFilter', (['item'], {}), '(item)\n', (21369, 21375), False, 'from species.read import read_filter\n'), ((24531, 24559), 'species.read.read_filter.ReadFilter', 'read_filter.ReadFilter', (['item'], {}), '(item)\n', (24553, 24559), False, 'from species.read import read_filter\n'), ((19772, 19806), 'numpy.isnan', 'np.isnan', (['boxitem.spectrum[key][0]'], {}), '(boxitem.spectrum[key][0])\n', (19780, 19806), True, 'import numpy as np\n'), ((14716, 14737), 
'numpy.log10', 'np.log10', (['param[item]'], {}), '(param[item])\n', (14724, 14737), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
#
# Tests if the Fitzhugh-Nagumo toy model runs.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import unittest
import pints
import pints.toy
import numpy as np
class TestFitzhughNagumoModel(unittest.TestCase):
"""
Tests if the Fitzhugh-Nagumo toy model runs.
"""
def test_run(self):
# Test basic properties
model = pints.toy.FitzhughNagumoModel()
self.assertEqual(model.n_parameters(), 3)
self.assertEqual(model.n_outputs(), 2)
# Test simulation
x = model.suggested_parameters()
times = model.suggested_times()
values = model.simulate(x, times)
self.assertEqual(values.shape, (len(times), 2))
# Simulation with sensitivities
values, dvalues_dp = model.simulateS1(x, times)
self.assertEqual(values.shape, (len(times), 2))
self.assertEqual(dvalues_dp.shape, (len(times), 2, 3))
# Test alternative starting position
model = pints.toy.FitzhughNagumoModel([0.1, 0.1])
values = model.simulate(x, times)
self.assertEqual(values.shape, (len(times), 2))
# Times can't be negative
times = [-1, 2, 3, 4]
self.assertRaises(ValueError, model.simulate, x, times)
# Initial value must have size 2
pints.toy.FitzhughNagumoModel([1, 1])
self.assertRaises(ValueError, pints.toy.FitzhughNagumoModel, [1])
def test_values(self):
# value-based tests of Fitzhugh-Nagumo model
parameters = [0.2, 0.4, 2.5]
y0 = [-2, 1.5]
times = np.linspace(0, 20, 201)
model = pints.toy.FitzhughNagumoModel(y0)
values = model.simulate(parameters, times)
self.assertAlmostEqual(values[200, 0], 1.675726, places=6)
self.assertAlmostEqual(values[200, 1], -0.226142, places=6)
def test_sensitivities(self):
# compares sensitivities against standards
model = pints.toy.FitzhughNagumoModel([2, 3])
parameters = [0.2, 0.7, 2.8]
# Test with initial point t=0 included in range
sols, sens = model.simulateS1(parameters, [0, 7, 12])
self.assertAlmostEqual(sens[1, 0, 2], 5.01378, 5)
self.assertAlmostEqual(sens[2, 1, 1], 0.82883, 4)
# Test without initial point in range
sols, sens = model.simulateS1(parameters, [7, 12])
self.assertAlmostEqual(sens[0, 0, 2], 5.01378, 5)
self.assertAlmostEqual(sens[1, 1, 1], 0.82883, 4)
# Test without any points in range
sols, sens = model.simulateS1(parameters, [])
self.assertEqual(sols.shape, (0, 2))
self.assertEqual(sens.shape, (0, 2, 3))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.linspace",
"pints.toy.FitzhughNagumoModel"
] |
[((2848, 2863), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2861, 2863), False, 'import unittest\n'), ((529, 560), 'pints.toy.FitzhughNagumoModel', 'pints.toy.FitzhughNagumoModel', ([], {}), '()\n', (558, 560), False, 'import pints\n'), ((1142, 1183), 'pints.toy.FitzhughNagumoModel', 'pints.toy.FitzhughNagumoModel', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (1171, 1183), False, 'import pints\n'), ((1461, 1498), 'pints.toy.FitzhughNagumoModel', 'pints.toy.FitzhughNagumoModel', (['[1, 1]'], {}), '([1, 1])\n', (1490, 1498), False, 'import pints\n'), ((1730, 1753), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(201)'], {}), '(0, 20, 201)\n', (1741, 1753), True, 'import numpy as np\n'), ((1770, 1803), 'pints.toy.FitzhughNagumoModel', 'pints.toy.FitzhughNagumoModel', (['y0'], {}), '(y0)\n', (1799, 1803), False, 'import pints\n'), ((2092, 2129), 'pints.toy.FitzhughNagumoModel', 'pints.toy.FitzhughNagumoModel', (['[2, 3]'], {}), '([2, 3])\n', (2121, 2129), False, 'import pints\n')]
|
import math
import numpy as np
## Real Data:
# %% Kinect Color Camera
color_cam_matrix = np.array([ 1.0526303338534365e+03, 0., 9.3528526085572480e+02, 0., 1.0534191001014469e+03, 5.2225718970556716e+02, 0., 0., 1. ]).reshape(3,3)
color_distortion_coeffs = np.array([ 4.5467150011699140e-02, -7.4470107942918126e-02, -6.1697129558609537e-03, -2.5667037404509380e-03, -1.4503959457133547e-02 ]).reshape(1,5)
color_rotation = np.eye(3)
color_projection = np.array([ 1.0526303338534365e+03, 0., 9.3528526085572480e+02, 0., 0., 1.0534191001014469e+03, 5.2225718970556716e+02, 0., 0., 0., 1., 0., 0., 0., 0., 1. ]).reshape(4,4)
# %% Kinect IR Camera
ir_cam_matrix = np.array([ 3.5706872738709285e+02, 0., 2.5037220752105404e+02, 0., 3.5700920458183873e+02, 2.0803230739018434e+02, 0., 0., 1. ]).reshape(3,3)
ir_distortion_coeffs = np.array([ 5.5998048975189132e-02, -2.5691440815038830e-01, -5.3889184410447575e-03, -1.6922667364749613e-03, 1.9674519800098919e-01 ]).reshape(1,5)
ir_rotation = np.eye(3)
ir_projection = np.array([ 3.5706872738709285e+02, 0., 2.5037220752105404e+02, 0., 0., 3.5700920458183873e+02, 2.0803230739018434e+02, 0., 0., 0., 1., 0., 0., 0., 0., 1. ]).reshape(4,4)
depthShift = -2.7989551644219979e+01
# %% Pose Calibration between depth and color
rotation = np.array([ 9.9997222955499243e-01, -7.4399336788120839e-03, 4.3301925190808763e-04, 7.4347723554060875e-03, 9.9991294780487039e-01, 1.0900503300210780e-02, -5.1408057825089366e-04, -1.0896981188819882e-02, 9.9994049399058227e-01 ]).reshape(3,3)
translation = np.array([ -5.2291985456630448e-02, -1.9227292627499695e-04, 1.7173350151375650e-03 ]).reshape(3,1)
essential = np.array([ -1.2669151118394222e-05, -1.7150903228939863e-03, -2.1098130088050980e-04, 1.6904050298585356e-03, -5.8260164046387006e-04, 5.2289617408374921e-02, -1.9651142111198186e-04, -5.2288863822328481e-02, -5.6992570216587654e-04 ]).reshape(3,3)
fundamental = np.array([ -8.8142664830290771e-09, -1.1934330447023842e-06, 1.9806702972926870e-04, 1.1751792885051283e-06, -4.0509553642475600e-07, 1.2770218257581496e-02, -7.4941574482561516e-04, -3.6972004067303506e-02, 1. ]).reshape(3,3)
# %% Color Params
color_height = 1080
color_width = 1920
color_fov_x = 360 / math.pi * math.atan2(color_width, 2 * color_cam_matrix[0,0])
color_fov_y = 360 / math.pi * math.atan2(color_height, 2 * color_cam_matrix[1,1] )
color_fx = color_cam_matrix[0,0]
color_fy = color_cam_matrix[1,1]
color_cx = color_cam_matrix[0,2]
color_cy = color_cam_matrix[1,2]
color_fx
color_fy
color_fov_x
color_fov_y
# %% IR Field of View, Width, Height computation
ir_width = 512
ir_height = 424
ir_aspect = ir_width / ir_height
depth_fov_x = 360 / math.pi * math.atan2(ir_width, 2 * color_cam_matrix[0,0])
depth_fov_y = 360 / math.pi * math.atan2(ir_height, 2 * color_cam_matrix[1,1])
ir_fx = ir_cam_matrix[0,0]
ir_fy = ir_cam_matrix[1,1]
ir_cx = ir_cam_matrix[0,2]
ir_cy = ir_cam_matrix[1,2]
## transform into camera frame. useful for reconstruction!
T_magic_to_cam = np.array([ [0. ,-1. , 0. , 0. ],
[0. , 0. ,-1. , 0. ],
[1. , 0. , 0. , 0. ],
[0. , 0. , 0. , 1.0]])
## Simulation Camera Params
# %%
znear = 0.1
zfar = 12
sim_width = 192
sim_height = 108
# sim_width = 720 * 4
# sim_height = 405 * 4
old_sim_fovy = 60 * math.pi / 180
old_sim_fovx = 2 * math.atan(math.tan(old_sim_fovy / 2) * sim_width / sim_height)
old_sim_fovy * 180 / math.pi
old_sim_fovx * 180 / math.pi
old_sim_focal_y = (sim_height / 2) / math.tan(old_sim_fovy / 2)
old_sim_focal_x = (sim_width / 2 ) / math.tan(old_sim_fovx / 2)
old_sim_proj_matrix = np.array([[old_sim_focal_x, 0, sim_width / 2],
[0, old_sim_focal_y, sim_height / 2],
[0, 0, 1]])
# new sim cam Params, using color fov_y
sim_focal_y = (sim_height / 2) / math.tan(color_fov_y * 3.14 / 180.0 / 2)
sim_focal_x = sim_focal_y
sim_proj_matrix = np.array([[sim_focal_x, 0, sim_width / 2],
[0, sim_focal_y, sim_height / 2],
[0, 0, 1]])
# checking that these are reasonable
color_fov_x = 360 / math.pi * math.atan2(color_width, 2 * color_cam_matrix[0,0])
color_fov_y = 360 / math.pi * math.atan2(color_height, 2 * color_cam_matrix[1,1] )
color_fov_x
color_fov_y
test_sim_fov_y = 360 / math.pi * math.atan2(sim_height, 2 * sim_proj_matrix[1,1] )
test_sim_fov_x = 360 / math.pi * math.atan2(sim_width, 2 * sim_proj_matrix[0,0] )
# fake real sim cam Params (i.e., size is the full 1920 x 1080)
fake_focal_y = (color_height / 2) / math.tan(color_fov_y * 3.14 / 180.0 / 2)
fake_focal_x = (color_width / 2) / math.tan(color_fov_x * 3.14 / 180.0 / 2)
fake_proj_matrix = np.array([[fake_focal_x, 0, color_width / 2],
[0, fake_focal_y, color_height / 2],
[0, 0, 1]])
if __name__ == '__main__':
np.set_printoptions(suppress=True)
print(' \n simulated cam matrix: \n\t', str(np.round(fake_proj_matrix,0)).replace('\n', '\n\t'))
print(' \n real cam matrix: \n\t', str(np.round(color_cam_matrix,0)).replace('\n', '\n\t'))
print(' \n ')
print(color_fov_y)
|
[
"numpy.set_printoptions",
"math.atan2",
"math.tan",
"numpy.array",
"numpy.eye",
"numpy.round"
] |
[((425, 434), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (431, 434), True, 'import numpy as np\n'), ((991, 1000), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (997, 1000), True, 'import numpy as np\n'), ((3004, 3109), 'numpy.array', 'np.array', (['[[0.0, -1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {}), '([[0.0, -1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0], [1.0, 0.0, 0.0, 0.0\n ], [0.0, 0.0, 0.0, 1.0]])\n', (3012, 3109), True, 'import numpy as np\n'), ((3649, 3750), 'numpy.array', 'np.array', (['[[old_sim_focal_x, 0, sim_width / 2], [0, old_sim_focal_y, sim_height / 2],\n [0, 0, 1]]'], {}), '([[old_sim_focal_x, 0, sim_width / 2], [0, old_sim_focal_y, \n sim_height / 2], [0, 0, 1]])\n', (3657, 3750), True, 'import numpy as np\n'), ((3970, 4062), 'numpy.array', 'np.array', (['[[sim_focal_x, 0, sim_width / 2], [0, sim_focal_y, sim_height / 2], [0, 0, 1]]'], {}), '([[sim_focal_x, 0, sim_width / 2], [0, sim_focal_y, sim_height / 2],\n [0, 0, 1]])\n', (3978, 4062), True, 'import numpy as np\n'), ((4753, 4852), 'numpy.array', 'np.array', (['[[fake_focal_x, 0, color_width / 2], [0, fake_focal_y, color_height / 2], [\n 0, 0, 1]]'], {}), '([[fake_focal_x, 0, color_width / 2], [0, fake_focal_y, \n color_height / 2], [0, 0, 1]])\n', (4761, 4852), True, 'import numpy as np\n'), ((2239, 2290), 'math.atan2', 'math.atan2', (['color_width', '(2 * color_cam_matrix[0, 0])'], {}), '(color_width, 2 * color_cam_matrix[0, 0])\n', (2249, 2290), False, 'import math\n'), ((2320, 2372), 'math.atan2', 'math.atan2', (['color_height', '(2 * color_cam_matrix[1, 1])'], {}), '(color_height, 2 * color_cam_matrix[1, 1])\n', (2330, 2372), False, 'import math\n'), ((2692, 2740), 'math.atan2', 'math.atan2', (['ir_width', '(2 * color_cam_matrix[0, 0])'], {}), '(ir_width, 2 * color_cam_matrix[0, 0])\n', (2702, 2740), False, 'import math\n'), ((2770, 2819), 'math.atan2', 'math.atan2', (['ir_height', '(2 * color_cam_matrix[1, 1])'], {}), '(ir_height, 2 * color_cam_matrix[1, 1])\n', (2780, 2819), False, 'import math\n'), ((3536, 3562), 'math.tan', 'math.tan', (['(old_sim_fovy / 2)'], {}), '(old_sim_fovy / 2)\n', (3544, 3562), False, 'import math\n'), ((3600, 3626), 'math.tan', 'math.tan', (['(old_sim_fovx / 2)'], {}), '(old_sim_fovx / 2)\n', (3608, 3626), False, 'import math\n'), ((3885, 3925), 'math.tan', 'math.tan', (['(color_fov_y * 3.14 / 180.0 / 2)'], {}), '(color_fov_y * 3.14 / 180.0 / 2)\n', (3893, 3925), False, 'import math\n'), ((4191, 4242), 'math.atan2', 'math.atan2', (['color_width', '(2 * color_cam_matrix[0, 0])'], {}), '(color_width, 2 * color_cam_matrix[0, 0])\n', (4201, 4242), False, 'import math\n'), ((4272, 4324), 'math.atan2', 'math.atan2', (['color_height', '(2 * color_cam_matrix[1, 1])'], {}), '(color_height, 2 * color_cam_matrix[1, 1])\n', (4282, 4324), False, 'import math\n'), ((4385, 4434), 'math.atan2', 'math.atan2', (['sim_height', '(2 * sim_proj_matrix[1, 1])'], {}), '(sim_height, 2 * sim_proj_matrix[1, 1])\n', (4395, 4434), False, 'import math\n'), ((4469, 4517), 'math.atan2', 'math.atan2', (['sim_width', '(2 * sim_proj_matrix[0, 0])'], {}), '(sim_width, 2 * sim_proj_matrix[0, 0])\n', (4479, 4517), False, 'import math\n'), ((4617, 4657), 'math.tan', 'math.tan', (['(color_fov_y * 3.14 / 180.0 / 2)'], {}), '(color_fov_y * 3.14 / 180.0 / 2)\n', (4625, 4657), False, 'import math\n'), ((4693, 4733), 'math.tan', 'math.tan', (['(color_fov_x * 3.14 / 180.0 / 2)'], {}), '(color_fov_x * 3.14 / 180.0 / 2)\n', (4701, 4733), False, 'import math\n'), ((4944, 4978), 
'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (4963, 4978), True, 'import numpy as np\n'), ((90, 207), 'numpy.array', 'np.array', (['[1052.6303338534365, 0.0, 935.2852608557248, 0.0, 1053.419100101447, \n 522.2571897055672, 0.0, 0.0, 1.0]'], {}), '([1052.6303338534365, 0.0, 935.2852608557248, 0.0, \n 1053.419100101447, 522.2571897055672, 0.0, 0.0, 1.0])\n', (98, 207), True, 'import numpy as np\n'), ((258, 384), 'numpy.array', 'np.array', (['[0.04546715001169914, -0.07447010794291813, -0.006169712955860954, -\n 0.002566703740450938, -0.014503959457133547]'], {}), '([0.04546715001169914, -0.07447010794291813, -0.006169712955860954,\n -0.002566703740450938, -0.014503959457133547])\n', (266, 384), True, 'import numpy as np\n'), ((454, 610), 'numpy.array', 'np.array', (['[1052.6303338534365, 0.0, 935.2852608557248, 0.0, 0.0, 1053.419100101447, \n 522.2571897055672, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]'], {}), '([1052.6303338534365, 0.0, 935.2852608557248, 0.0, 0.0, \n 1053.419100101447, 522.2571897055672, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,\n 0.0, 1.0])\n', (462, 610), True, 'import numpy as np\n'), ((663, 783), 'numpy.array', 'np.array', (['[357.06872738709285, 0.0, 250.37220752105404, 0.0, 357.00920458183873, \n 208.03230739018434, 0.0, 0.0, 1.0]'], {}), '([357.06872738709285, 0.0, 250.37220752105404, 0.0, \n 357.00920458183873, 208.03230739018434, 0.0, 0.0, 1.0])\n', (671, 783), True, 'import numpy as np\n'), ((828, 952), 'numpy.array', 'np.array', (['[0.05599804897518913, -0.2569144081503883, -0.0053889184410447575, -\n 0.0016922667364749613, 0.1967451980009892]'], {}), '([0.05599804897518913, -0.2569144081503883, -0.0053889184410447575,\n -0.0016922667364749613, 0.1967451980009892])\n', (836, 952), True, 'import numpy as np\n'), ((1017, 1177), 'numpy.array', 'np.array', (['[357.06872738709285, 0.0, 250.37220752105404, 0.0, 0.0, 357.00920458183873,\n 208.03230739018434, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]'], {}), '([357.06872738709285, 0.0, 250.37220752105404, 0.0, 0.0, \n 357.00920458183873, 208.03230739018434, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, \n 0.0, 0.0, 1.0])\n', (1025, 1177), True, 'import numpy as np\n'), ((1282, 1499), 'numpy.array', 'np.array', (['[0.9999722295549924, -0.007439933678812084, 0.00043301925190808763, \n 0.0074347723554060875, 0.9999129478048704, 0.01090050330021078, -\n 0.0005140805782508937, -0.010896981188819882, 0.9999404939905823]'], {}), '([0.9999722295549924, -0.007439933678812084, 0.00043301925190808763,\n 0.0074347723554060875, 0.9999129478048704, 0.01090050330021078, -\n 0.0005140805782508937, -0.010896981188819882, 0.9999404939905823])\n', (1290, 1499), True, 'import numpy as np\n'), ((1549, 1628), 'numpy.array', 'np.array', (['[-0.05229198545663045, -0.00019227292627499695, 0.001717335015137565]'], {}), '([-0.05229198545663045, -0.00019227292627499695, 0.001717335015137565])\n', (1557, 1628), True, 'import numpy as np\n'), ((1661, 1898), 'numpy.array', 'np.array', (['[-1.2669151118394222e-05, -0.0017150903228939863, -0.0002109813008805098, \n 0.0016904050298585356, -0.0005826016404638701, 0.05228961740837492, -\n 0.00019651142111198186, -0.05228886382232848, -0.0005699257021658765]'], {}), '([-1.2669151118394222e-05, -0.0017150903228939863, -\n 0.0002109813008805098, 0.0016904050298585356, -0.0005826016404638701, \n 0.05228961740837492, -0.00019651142111198186, -0.05228886382232848, -\n 0.0005699257021658765])\n', (1669, 1898), True, 'import numpy as np\n'), ((1924, 2137), 'numpy.array', 
'np.array', (['[-8.814266483029077e-09, -1.1934330447023842e-06, 0.0001980670297292687, \n 1.1751792885051283e-06, -4.05095536424756e-07, 0.012770218257581496, -\n 0.0007494157448256152, -0.036972004067303506, 1.0]'], {}), '([-8.814266483029077e-09, -1.1934330447023842e-06, \n 0.0001980670297292687, 1.1751792885051283e-06, -4.05095536424756e-07, \n 0.012770218257581496, -0.0007494157448256152, -0.036972004067303506, 1.0])\n', (1932, 2137), True, 'import numpy as np\n'), ((3386, 3412), 'math.tan', 'math.tan', (['(old_sim_fovy / 2)'], {}), '(old_sim_fovy / 2)\n', (3394, 3412), False, 'import math\n'), ((5027, 5056), 'numpy.round', 'np.round', (['fake_proj_matrix', '(0)'], {}), '(fake_proj_matrix, 0)\n', (5035, 5056), True, 'import numpy as np\n'), ((5123, 5152), 'numpy.round', 'np.round', (['color_cam_matrix', '(0)'], {}), '(color_cam_matrix, 0)\n', (5131, 5152), True, 'import numpy as np\n')]
|
from deeplearning import tf_util as U
from init import make_env_fn, make_model_fn
from collections import namedtuple
import os, argparse, json
import numpy as np
def eval_robot(args, env, pi):
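    """Run args.nepisodes episodes with the actor's mode action and return the mean episode length and mean reward."""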
rewards = []
lengths = []
for j in range(args.nepisodes):
rewards.append(0)
lengths.append(0)
done = False
ob = env.reset()
while not done:
ac = pi.actor.mode(ob[None])[0]
ob, rew, done, _ = env.step(ac)
rewards[-1] += rew
lengths[-1] += 1
return np.mean(lengths), np.mean(rewards)
def main(args):
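    """Load the training hyperparameters from logdir, rebuild the env and model, restore a checkpoint, evaluate sampled robots, and dump the results to logdir/eval."""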
U.reset()
with open(os.path.join(args.logdir, 'hyps.json'), 'r') as f:
hyps = json.load(f)
train_args = namedtuple('Args', hyps.keys())(**hyps)
env_fn = make_env_fn(train_args)
model_fn = make_model_fn(train_args)
env = env_fn(0)
model = model_fn(env)
model.build('model', 1, 1)
model.sampler.build('model', 1, 1)
sess = U.make_session()
sess.__enter__()
U.initialize()
t = U.Experiment(args.logdir).load(args.ckpt)
ls = []
rs = []
for i in range(args.samples):
env.update_robot(model.sampler.sample(args.stochastic)[0])
l,r = eval_robot(args, env, model)
ls.append(l)
rs.append(r)
if not args.stochastic:
break
os.makedirs(os.path.join(args.logdir, 'eval'), exist_ok=True)
with open(os.path.join(args.logdir, 'eval', '{}.json'.format(t)), 'w') as f:
json.dump({'l':ls, 'r':rs}, f)
sess.__exit__(None, None, None)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate a Checkpoint')
parser.add_argument('logdir', type=str, help='log directory')
parser.add_argument('-t', '--ckpt', type=int, default=None, help='which checkpoint file to use')
parser.add_argument('-n', '--nepisodes', type=int, default=1, help='n episodes to show')
parser.add_argument('-s', '--samples', type=int, default=1, help='# of robots to sample')
parser.add_argument('--stochastic', type=bool, default=True, help='If false, eval the mode of the robot distribution')
main(parser.parse_args())
|
[
"json.dump",
"json.load",
"deeplearning.tf_util.initialize",
"argparse.ArgumentParser",
"init.make_env_fn",
"deeplearning.tf_util.reset",
"deeplearning.tf_util.make_session",
"numpy.mean",
"init.make_model_fn",
"os.path.join",
"deeplearning.tf_util.Experiment"
] |
[((601, 610), 'deeplearning.tf_util.reset', 'U.reset', ([], {}), '()\n', (608, 610), True, 'from deeplearning import tf_util as U\n'), ((775, 798), 'init.make_env_fn', 'make_env_fn', (['train_args'], {}), '(train_args)\n', (786, 798), False, 'from init import make_env_fn, make_model_fn\n'), ((814, 839), 'init.make_model_fn', 'make_model_fn', (['train_args'], {}), '(train_args)\n', (827, 839), False, 'from init import make_env_fn, make_model_fn\n'), ((969, 985), 'deeplearning.tf_util.make_session', 'U.make_session', ([], {}), '()\n', (983, 985), True, 'from deeplearning import tf_util as U\n'), ((1011, 1025), 'deeplearning.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (1023, 1025), True, 'from deeplearning import tf_util as U\n'), ((1602, 1662), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate a Checkpoint"""'}), "(description='Evaluate a Checkpoint')\n", (1625, 1662), False, 'import os, argparse, json\n'), ((545, 561), 'numpy.mean', 'np.mean', (['lengths'], {}), '(lengths)\n', (552, 561), True, 'import numpy as np\n'), ((563, 579), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (570, 579), True, 'import numpy as np\n'), ((692, 704), 'json.load', 'json.load', (['f'], {}), '(f)\n', (701, 704), False, 'import os, argparse, json\n'), ((1355, 1388), 'os.path.join', 'os.path.join', (['args.logdir', '"""eval"""'], {}), "(args.logdir, 'eval')\n", (1367, 1388), False, 'import os, argparse, json\n'), ((1494, 1526), 'json.dump', 'json.dump', (["{'l': ls, 'r': rs}", 'f'], {}), "({'l': ls, 'r': rs}, f)\n", (1503, 1526), False, 'import os, argparse, json\n'), ((626, 664), 'os.path.join', 'os.path.join', (['args.logdir', '"""hyps.json"""'], {}), "(args.logdir, 'hyps.json')\n", (638, 664), False, 'import os, argparse, json\n'), ((1034, 1059), 'deeplearning.tf_util.Experiment', 'U.Experiment', (['args.logdir'], {}), '(args.logdir)\n', (1046, 1059), True, 'from deeplearning import tf_util as U\n')]
|
#!/usr/bin/python
import numpy as np
# Construct an array by executing a function over each coordinate.
def f(x, y):
return 2*x + y + 1
a = np.fromfunction(f, (5, 4), dtype=int)
print(a)
# anonymous function
b = np.fromfunction(lambda x, y: 2*x + y, (2, 2))
print(b)
|
[
"numpy.fromfunction"
] |
[((148, 185), 'numpy.fromfunction', 'np.fromfunction', (['f', '(5, 4)'], {'dtype': 'int'}), '(f, (5, 4), dtype=int)\n', (163, 185), True, 'import numpy as np\n'), ((221, 268), 'numpy.fromfunction', 'np.fromfunction', (['(lambda x, y: 2 * x + y)', '(2, 2)'], {}), '(lambda x, y: 2 * x + y, (2, 2))\n', (236, 268), True, 'import numpy as np\n')]
|
# import curses, serial, and the camera/vision libraries
import curses
import serial
import time
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import numpy as np
ser = serial.Serial("/dev/ttyUSB0", "9600")
serLidar = serial.Serial("/dev/ttyACM0", "115200")
cap = cv2.VideoCapture(0)
piCam = False
#check if picamera exists
try:
camera = PiCamera()
camera.resolution = (224,224)
camera.framerate = 20
rawCapture = PiRGBArray(camera, size=(224,224))
piCam = True
except:
print("Pi camera does not exist, using USB camera")
# Get the curses window, turn off echoing of keyboard to screen, turn on
# instant (no waiting) key response, and use special values for cursor keys
screen = curses.initscr()
curses.noecho()
curses.cbreak()
screen.keypad(True)
keyRec = open('key_strokes.txt','w+')
train_data = []
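# Control loop: 'w'/'s'/'a'/'d' send drive commands b'1'-b'4' over serial (only when the
# lidar distance exceeds 100), space sends b'5' (stop), and 'x' saves the data and exits.
# Every frame is stored together with a one-hot key vector; train_data.npy is written
# every 100 samples.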
try:
while True:
distString = serLidar.readline()
dist = 1000
try:
dist = int(distString.decode("utf-8"))
except:
print("can't convert dist")
if piCam == True:
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image_np = np.array(frame.array)
rawCapture.truncate(0)
char = screen.getch()
key = [0,0,0,0,1]
if char == ord('x'):
np.save("train_data.npy", train_data)
ser.write(b'5')
keyRec.close()
curses.nocbreak(); screen.keypad(0); curses.echo()
curses.endwin()
break
elif char == ord('w') and dist > 100:
ser.write(b'1')
key = [1,0,0,0,0]
elif char == ord('s') and dist > 100:
ser.write(b'2')
key = [0,1,0,0,0]
elif char == ord('a') and dist > 100:
ser.write(b'3')
key = [0,0,1,0,0]
elif char == ord('d') and dist > 100:
ser.write(b'4')
key = [0,0,0,1,0]
elif char == ord(' '):
ser.write(b'5')
key = [0,0,0,0,1]
val_dict = {"input":key, "image":image_np}
train_data.append(val_dict)
keyRec.write(str(key)+"\n")
if len(train_data) % 100 == 0:
np.save("train_data.npy", train_data)
#no pi camera, using USB
else:
ret, image_np = cap.read()
char = screen.getch()
key = [0,0,0,0,1]
if char == ord('x'):
np.save("train_data.npy", train_data)
ser.write(b'5')
keyRec.close()
curses.nocbreak(); screen.keypad(0); curses.echo()
curses.endwin()
break
elif char == ord('w') and dist > 100:
ser.write(b'1')
key = [1,0,0,0,0]
elif char == ord('s') and dist > 100:
ser.write(b'2')
key = [0,1,0,0,0]
elif char == ord('a') and dist > 100:
ser.write(b'3')
key = [0,0,1,0,0]
elif char == ord('d') and dist > 100:
ser.write(b'4')
key = [0,0,0,1,0]
elif char == ord(' '):
ser.write(b'5')
key = [0,0,0,0,1]
val_dict = {"input":key, "image":image_np}
train_data.append(val_dict)
keyRec.write(str(key)+"\n")
if len(train_data) % 100 == 0:
np.save("train_data.npy", train_data)
finally:
#Close down curses properly, inc turn echo back on!
keyRec.close()
curses.nocbreak(); screen.keypad(0); curses.echo()
curses.endwin()
|
[
"serial.Serial",
"numpy.save",
"curses.noecho",
"curses.initscr",
"curses.endwin",
"cv2.VideoCapture",
"curses.cbreak",
"numpy.array",
"picamera.array.PiRGBArray",
"curses.nocbreak",
"curses.echo",
"picamera.PiCamera"
] |
[((170, 207), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyUSB0"""', '"""9600"""'], {}), "('/dev/ttyUSB0', '9600')\n", (183, 207), False, 'import serial\n'), ((219, 258), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyACM0"""', '"""115200"""'], {}), "('/dev/ttyACM0', '115200')\n", (232, 258), False, 'import serial\n'), ((265, 284), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (281, 284), False, 'import cv2\n'), ((705, 721), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (719, 721), False, 'import curses\n'), ((722, 737), 'curses.noecho', 'curses.noecho', ([], {}), '()\n', (735, 737), False, 'import curses\n'), ((739, 754), 'curses.cbreak', 'curses.cbreak', ([], {}), '()\n', (752, 754), False, 'import curses\n'), ((343, 353), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (351, 353), False, 'from picamera import PiCamera\n'), ((431, 466), 'picamera.array.PiRGBArray', 'PiRGBArray', (['camera'], {'size': '(224, 224)'}), '(camera, size=(224, 224))\n', (441, 466), False, 'from picamera.array import PiRGBArray\n'), ((4006, 4023), 'curses.nocbreak', 'curses.nocbreak', ([], {}), '()\n', (4021, 4023), False, 'import curses\n'), ((4043, 4056), 'curses.echo', 'curses.echo', ([], {}), '()\n', (4054, 4056), False, 'import curses\n'), ((4061, 4076), 'curses.endwin', 'curses.endwin', ([], {}), '()\n', (4074, 4076), False, 'import curses\n'), ((1186, 1207), 'numpy.array', 'np.array', (['frame.array'], {}), '(frame.array)\n', (1194, 1207), True, 'import numpy as np\n'), ((2822, 2859), 'numpy.save', 'np.save', (['"""train_data.npy"""', 'train_data'], {}), "('train_data.npy', train_data)\n", (2829, 2859), True, 'import numpy as np\n'), ((2939, 2956), 'curses.nocbreak', 'curses.nocbreak', ([], {}), '()\n', (2954, 2956), False, 'import curses\n'), ((2976, 2989), 'curses.echo', 'curses.echo', ([], {}), '()\n', (2987, 2989), False, 'import curses\n'), ((3006, 3021), 'curses.endwin', 'curses.endwin', ([], {}), '()\n', (3019, 3021), False, 'import curses\n'), ((3880, 3917), 'numpy.save', 'np.save', (['"""train_data.npy"""', 'train_data'], {}), "('train_data.npy', train_data)\n", (3887, 3917), True, 'import numpy as np\n'), ((1410, 1447), 'numpy.save', 'np.save', (['"""train_data.npy"""', 'train_data'], {}), "('train_data.npy', train_data)\n", (1417, 1447), True, 'import numpy as np\n'), ((1539, 1556), 'curses.nocbreak', 'curses.nocbreak', ([], {}), '()\n', (1554, 1556), False, 'import curses\n'), ((1576, 1589), 'curses.echo', 'curses.echo', ([], {}), '()\n', (1587, 1589), False, 'import curses\n'), ((1610, 1625), 'curses.endwin', 'curses.endwin', ([], {}), '()\n', (1623, 1625), False, 'import curses\n'), ((2568, 2605), 'numpy.save', 'np.save', (['"""train_data.npy"""', 'train_data'], {}), "('train_data.npy', train_data)\n", (2575, 2605), True, 'import numpy as np\n')]
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
from op_tester import op_tester
def test_asinh(op_tester):
# create test data
# Notice: as asinh(x) = ln(x + sqrt(x^2 + 1)), absolute precision
# deteriorates for larger negative numbers as you will have ln(0.0001).
d1 = np.array([
-30.0, -20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0,
100.0, 2001.0
],
dtype=np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
o = builder.aiOnnx.asinh([i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.arcsinh(d1)
return [out]
op_tester.setPatterns(['DecomposeBinaryConstScalar'],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, 'infer')
def test_asinh_inplace(op_tester):
# create test data
d1 = np.array([
-30.0, -20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0,
100.0, 2001.0
],
dtype=np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
o = builder.aiOnnx.asinh([i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = np.arcsinh(d1)
return [out]
op_tester.setPatterns(['InPlace', 'DecomposeBinaryConstScalar'],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, 'infer')
def test_asinh_grad(op_tester):
# create test data
d1 = np.array([
-20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0, 100.0,
2001.0
],
dtype=np.float32)
def derivative_asinh(x):
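        # d/dx asinh(x) = 1 / sqrt(x^2 + 1)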
return 1 / (np.sqrt(np.power(x, 2) + 1))
def init_builder(builder):
i1 = builder.addInputTensor(d1)
o = builder.aiOnnx.asinh([i1])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i1,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
out = np.arcsinh(d1)
d__o = derivative_asinh(d1) * ref_data.getOutputTensorGrad(0)
return [out, d__o, None]
op_tester.setPatterns([
'SubtractArg1GradOp', 'LogGradOp', 'SqrtGradOp', 'PowArg0GradOp',
'DecomposeBinaryConstScalar'
],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, 'train')
|
[
"op_tester.op_tester.setPatterns",
"numpy.power",
"op_tester.op_tester.run",
"popart.reservedGradientPrefix",
"numpy.array",
"numpy.arcsinh"
] |
[((342, 461), 'numpy.array', 'np.array', (['[-30.0, -20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0, \n 100.0, 2001.0]'], {'dtype': 'np.float32'}), '([-30.0, -20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, \n 10.0, 100.0, 2001.0], dtype=np.float32)\n', (350, 461), True, 'import numpy as np\n'), ((747, 833), 'op_tester.op_tester.setPatterns', 'op_tester.setPatterns', (["['DecomposeBinaryConstScalar']"], {'enableRuntimeAsserts': '(False)'}), "(['DecomposeBinaryConstScalar'], enableRuntimeAsserts=\n False)\n", (768, 833), False, 'from op_tester import op_tester\n'), ((859, 906), 'op_tester.op_tester.run', 'op_tester.run', (['init_builder', 'reference', '"""infer"""'], {}), "(init_builder, reference, 'infer')\n", (872, 906), False, 'from op_tester import op_tester\n'), ((976, 1095), 'numpy.array', 'np.array', (['[-30.0, -20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0, \n 100.0, 2001.0]'], {'dtype': 'np.float32'}), '([-30.0, -20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, \n 10.0, 100.0, 2001.0], dtype=np.float32)\n', (984, 1095), True, 'import numpy as np\n'), ((1381, 1477), 'op_tester.op_tester.setPatterns', 'op_tester.setPatterns', (["['InPlace', 'DecomposeBinaryConstScalar']"], {'enableRuntimeAsserts': '(False)'}), "(['InPlace', 'DecomposeBinaryConstScalar'],\n enableRuntimeAsserts=False)\n", (1402, 1477), False, 'from op_tester import op_tester\n'), ((1504, 1551), 'op_tester.op_tester.run', 'op_tester.run', (['init_builder', 'reference', '"""infer"""'], {}), "(init_builder, reference, 'infer')\n", (1517, 1551), False, 'from op_tester import op_tester\n'), ((1618, 1730), 'numpy.array', 'np.array', (['[-20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0, 100.0, 2001.0]'], {'dtype': 'np.float32'}), '([-20.12, -2.2, -1.5, -0.2, 0.0, 0.234, 1.0, 1.2, 2.0, 3.0, 10.0, \n 100.0, 2001.0], dtype=np.float32)\n', (1626, 1730), True, 'import numpy as np\n'), ((2299, 2450), 'op_tester.op_tester.setPatterns', 'op_tester.setPatterns', (["['SubtractArg1GradOp', 'LogGradOp', 'SqrtGradOp', 'PowArg0GradOp',\n 'DecomposeBinaryConstScalar']"], {'enableRuntimeAsserts': '(False)'}), "(['SubtractArg1GradOp', 'LogGradOp', 'SqrtGradOp',\n 'PowArg0GradOp', 'DecomposeBinaryConstScalar'], enableRuntimeAsserts=False)\n", (2320, 2450), False, 'from op_tester import op_tester\n'), ((2499, 2546), 'op_tester.op_tester.run', 'op_tester.run', (['init_builder', 'reference', '"""train"""'], {}), "(init_builder, reference, 'train')\n", (2512, 2546), False, 'from op_tester import op_tester\n'), ((706, 720), 'numpy.arcsinh', 'np.arcsinh', (['d1'], {}), '(d1)\n', (716, 720), True, 'import numpy as np\n'), ((1340, 1354), 'numpy.arcsinh', 'np.arcsinh', (['d1'], {}), '(d1)\n', (1350, 1354), True, 'import numpy as np\n'), ((2176, 2190), 'numpy.arcsinh', 'np.arcsinh', (['d1'], {}), '(d1)\n', (2186, 2190), True, 'import numpy as np\n'), ((2035, 2066), 'popart.reservedGradientPrefix', 'popart.reservedGradientPrefix', ([], {}), '()\n', (2064, 2066), False, 'import popart\n'), ((2085, 2116), 'popart.reservedGradientPrefix', 'popart.reservedGradientPrefix', ([], {}), '()\n', (2114, 2116), False, 'import popart\n'), ((1824, 1838), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (1832, 1838), True, 'import numpy as np\n')]
|
# painting.py
import os
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
from preprocess import Kmeans
from monitor import Monitor
import tkinter as tk
import csv
from utils import BlankImg
class Painting():
def __init__(self, K, shape):
#self.radius = 3
self.K = K
self.size = shape
self.count = 0
self.fixcount = 0
self.brush_size = 3
self.img = np.zeros((self.size))
for i in range(0, self.size[0]):
for j in range(0, self.size[1]):
self.img[i, j, 0] = self.img[i, j, 1] = self.img[i, j, 2] = 255
def Painting(self):
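        # Read each cluster's "./points/<i>_point.csv": rows with extra columns carry the
        # cluster colour (r, g, b), while two-column rows are (x, y) pixel coordinates that
        # are painted with a square brush of half-width brush_size. The canvas after each
        # cluster is saved to "./painting/<i>.png".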
img = BlankImg(self.size)
self.color_list = []
for i in range(0, self.K):
filename = "./points/" + str(i) + "_point.csv"
with open(filename, newline='') as csvfile:
rows = csv.reader(csvfile)
for row in rows:
print(row)
if (len(row) != 2):
r, g, b = int(row[3]), int(row[4]), int(row[5])
self.color_list.append((r, g, b))
else:
x = int(row[0])
y = int(row[1])
for a in range(x-self.brush_size, x+self.brush_size):
for b in range(y-self.brush_size, y+self.brush_size):
if (a >= 0 and a <= self.size[0]-1):
if (b >= 0 and b <= self.size[1]-1):
img[a, b, 0] = r
img[a ,b ,1] = g
img[a ,b, 2] = b
save_name = "./painting/" + str(i) + ".png"
cv.imwrite(save_name, img)
words = "finished " + str(i)
print (words)
return (self.color_list)
def DectectImg(self, targetname, comparename):
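        # Compare the target image against a reference pixel by pixel; any pixel whose BGR
        # values differ is copied onto a blank canvas, which is saved to "./difference/<count>.png".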
target_img = cv.imread(targetname)
compare_img = cv.imread(comparename)
different_img = BlankImg(self.size)
for x in range(0, self.size[0]):
for y in range(0, self.size[1]):
if (int(target_img[x, y, 0]) != int(compare_img[x, y, 0])):
different_img[x, y, 0] = target_img[x, y, 0]
different_img[x, y, 1] = target_img[x, y, 1]
different_img[x, y, 2] = target_img[x, y, 2]
else:
if (int(target_img[x, y, 1]) != int(compare_img[x, y, 1])):
different_img[x, y, 0] = target_img[x, y, 0]
different_img[x, y, 1] = target_img[x, y, 1]
different_img[x, y, 2] = target_img[x, y, 2]
else:
if (int(target_img[x, y, 2]) != int(compare_img[x, y, 2])):
different_img[x, y, 0] = target_img[x, y, 0]
different_img[x, y, 1] = target_img[x, y, 1]
different_img[x, y, 2] = target_img[x, y, 2]
save_name = "./difference/" + str(self.count) + ".png"
cv.imwrite(save_name, different_img)
self.count += 1
"""
def DectectImg(self, targetname, comparedname):
targetimg = cv.imread(targetname)
comparedimg = cv.imread(comparedname)
print (type(targetimg))
print (type(comparedimg))
fiximg = np.zeros((self.size))
for x in range(0, self.size[0]):
for y in range(0, self.size[1]):
if (targetimg[x, y, 0] == comparedimg[x, y, 0] and \
targetimg[x, y, 1] == comparedimg[x, y, 1] and \
targetimg[x, y, 2] == comparedimg[x, y, 2]):
fiximg[x, y, 0] = fiximg[x, y, 1] = fiximg[x, y, 2] = 255
else:
fiximg[x, y, 0] = targetimg[x, y, 0]
fiximg[x, y, 1] = targetimg[x, y, 1]
fiximg[x, y, 2] = targetimg[x, y, 2]
save_name = "./fixpoint/" + str(self.fixcount) + "_fix.png"
cv.imwrite(save_name, fiximg)
print ("save name: ", save_name)
self.fixcount += 1
return (save_name)
"""
if __name__ == "__main__":
K = 298
filename = "K_298_1_2.png"
img = cv.imread(filename)
size = img.shape
new = Painting(K, size)
#filename = "./points/0_line.csv"
color_list = new.Painting()
comparename = "./painting/297.png"
new.DectectImg(filename, comparename)
print ("finished.")
|
[
"csv.reader",
"cv2.imwrite",
"utils.BlankImg",
"numpy.zeros",
"cv2.imread"
] |
[((4358, 4377), 'cv2.imread', 'cv.imread', (['filename'], {}), '(filename)\n', (4367, 4377), True, 'import cv2 as cv\n'), ((436, 455), 'numpy.zeros', 'np.zeros', (['self.size'], {}), '(self.size)\n', (444, 455), True, 'import numpy as np\n'), ((663, 682), 'utils.BlankImg', 'BlankImg', (['self.size'], {}), '(self.size)\n', (671, 682), False, 'from utils import BlankImg\n'), ((2002, 2023), 'cv2.imread', 'cv.imread', (['targetname'], {}), '(targetname)\n', (2011, 2023), True, 'import cv2 as cv\n'), ((2046, 2068), 'cv2.imread', 'cv.imread', (['comparename'], {}), '(comparename)\n', (2055, 2068), True, 'import cv2 as cv\n'), ((2093, 2112), 'utils.BlankImg', 'BlankImg', (['self.size'], {}), '(self.size)\n', (2101, 2112), False, 'from utils import BlankImg\n'), ((3179, 3215), 'cv2.imwrite', 'cv.imwrite', (['save_name', 'different_img'], {}), '(save_name, different_img)\n', (3189, 3215), True, 'import cv2 as cv\n'), ((885, 904), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (895, 904), False, 'import csv\n'), ((1794, 1820), 'cv2.imwrite', 'cv.imwrite', (['save_name', 'img'], {}), '(save_name, img)\n', (1804, 1820), True, 'import cv2 as cv\n')]
|
from itertools import chain, combinations
import numpy as np
from numpy.testing import assert_allclose
from wpca.tests.tools import assert_allclose_upto_sign
from wpca.utils import orthonormalize, random_orthonormal, weighted_mean
def test_orthonormalize():
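    # The rows returned by orthonormalize should be mutually orthonormal, with the
    # first row parallel to X[0] (up to sign).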
rand = np.random.RandomState(42)
X = rand.randn(3, 4)
X2 = orthonormalize(X)
assert_allclose_upto_sign(X[0] / np.linalg.norm(X[0]), X2[0])
assert_allclose(np.dot(X2, X2.T), np.eye(X2.shape[0]), atol=1E-15)
def test_random_orthonormal():
def check_random_orthonormal(N, M, rows):
X = random_orthonormal(N, M, rows=rows, random_state=42)
assert X.shape == (N, M)
if rows:
C = np.dot(X, X.T)
else:
C = np.dot(X.T, X)
assert_allclose(C, np.eye(C.shape[0]), atol=1E-15)
for M in [5]:
for N in range(1, M + 1):
yield check_random_orthonormal, N, M, True
yield check_random_orthonormal, M, N, False
def test_weighted_mean():
def check_weighted_mean(shape, axis):
rand = np.random.RandomState(0)
x = rand.rand(*shape)
w = rand.rand(*shape)
wm = weighted_mean(x, w, axis)
assert_allclose(wm, np.average(x, axis, w))
assert_allclose(wm, (w * x).sum(axis) / w.sum(axis))
for ndim in range(1, 5):
shape = tuple(range(3, 3 + ndim))
axis_tuples = chain(*(combinations(range(ndim), nax)
for nax in range(ndim + 1)))
for axis in chain([None], range(ndim), axis_tuples):
yield check_weighted_mean, shape, axis
|
[
"numpy.average",
"numpy.eye",
"wpca.utils.orthonormalize",
"numpy.random.RandomState",
"numpy.linalg.norm",
"numpy.dot",
"wpca.utils.random_orthonormal",
"wpca.utils.weighted_mean"
] |
[((273, 298), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (294, 298), True, 'import numpy as np\n'), ((333, 350), 'wpca.utils.orthonormalize', 'orthonormalize', (['X'], {}), '(X)\n', (347, 350), False, 'from wpca.utils import orthonormalize, random_orthonormal, weighted_mean\n'), ((437, 453), 'numpy.dot', 'np.dot', (['X2', 'X2.T'], {}), '(X2, X2.T)\n', (443, 453), True, 'import numpy as np\n'), ((455, 474), 'numpy.eye', 'np.eye', (['X2.shape[0]'], {}), '(X2.shape[0])\n', (461, 474), True, 'import numpy as np\n'), ((579, 631), 'wpca.utils.random_orthonormal', 'random_orthonormal', (['N', 'M'], {'rows': 'rows', 'random_state': '(42)'}), '(N, M, rows=rows, random_state=42)\n', (597, 631), False, 'from wpca.utils import orthonormalize, random_orthonormal, weighted_mean\n'), ((1065, 1089), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (1086, 1089), True, 'import numpy as np\n'), ((1163, 1188), 'wpca.utils.weighted_mean', 'weighted_mean', (['x', 'w', 'axis'], {}), '(x, w, axis)\n', (1176, 1188), False, 'from wpca.utils import orthonormalize, random_orthonormal, weighted_mean\n'), ((388, 408), 'numpy.linalg.norm', 'np.linalg.norm', (['X[0]'], {}), '(X[0])\n', (402, 408), True, 'import numpy as np\n'), ((698, 712), 'numpy.dot', 'np.dot', (['X', 'X.T'], {}), '(X, X.T)\n', (704, 712), True, 'import numpy as np\n'), ((743, 757), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (749, 757), True, 'import numpy as np\n'), ((785, 803), 'numpy.eye', 'np.eye', (['C.shape[0]'], {}), '(C.shape[0])\n', (791, 803), True, 'import numpy as np\n'), ((1217, 1239), 'numpy.average', 'np.average', (['x', 'axis', 'w'], {}), '(x, axis, w)\n', (1227, 1239), True, 'import numpy as np\n')]
|
import numpy as np
from acoustics.turbulence import Gaussian2DTemp, VonKarman2DTemp, Comparison, Field2D
def main():
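    # Build Gaussian and von Karman 2D temperature-fluctuation spectra with the same
    # parameters, plot their mode amplitudes (separately and compared), then generate
    # and plot a 2D turbulence field for each spectrum.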
mu_0 = np.sqrt(10.0**(-6))
correlation_length = 1.0 # Typical correlation length for Gaussian spectrum.
x = 20.0
y = 0.0
z = 40.0
plane = (1,0,1)
#f_resolution = wavenumber_resolution / (2.0*np.pi)
spatial_resolution = 0.05
N = 100
min_wavenumber = 0.01
max_wavenumber = 10.0
wavenumber_resolution = (max_wavenumber - min_wavenumber) / N
"""Create an object to describe an Gaussian turbulence spectrum."""
g = Gaussian2DTemp(plane=plane, a=correlation_length, mu_0=mu_0, wavenumber_resolution=wavenumber_resolution, max_mode_order=N)
"""Create an object to describe a VonKarman turbulence spectrum."""
s = VonKarman2DTemp(plane=plane, a=correlation_length, mu_0=mu_0, wavenumber_resolution=wavenumber_resolution, max_mode_order=N)
g.plot_mode_amplitudes('Gaussian2DTemp_mode_amplitudes.png')
s.plot_mode_amplitudes('VonKarman2DTemp_mode_amplitudes.png')
c = Comparison([g, s])
c.plot_mode_amplitudes('Gaussian2DTemp_and_VonKarman2DTemp_mode_amplitudes.png')
field_g = Field2D(x=x, y=y, z=z, spatial_resolution=spatial_resolution, spectrum=g)
field_s = Field2D(x=x, y=y, z=z, spatial_resolution=spatial_resolution, spectrum=s)
field_g.generate().plot('Gaussian2DTemp_field.png')
field_s.generate().plot('VonKarman2DTemp_field.png')
if __name__ == '__main__':
main()
|
[
"acoustics.turbulence.Comparison",
"acoustics.turbulence.Field2D",
"acoustics.turbulence.Gaussian2DTemp",
"numpy.sqrt",
"acoustics.turbulence.VonKarman2DTemp"
] |
[((134, 153), 'numpy.sqrt', 'np.sqrt', (['(10.0 ** -6)'], {}), '(10.0 ** -6)\n', (141, 153), True, 'import numpy as np\n'), ((647, 774), 'acoustics.turbulence.Gaussian2DTemp', 'Gaussian2DTemp', ([], {'plane': 'plane', 'a': 'correlation_length', 'mu_0': 'mu_0', 'wavenumber_resolution': 'wavenumber_resolution', 'max_mode_order': 'N'}), '(plane=plane, a=correlation_length, mu_0=mu_0,\n wavenumber_resolution=wavenumber_resolution, max_mode_order=N)\n', (661, 774), False, 'from acoustics.turbulence import Gaussian2DTemp, VonKarman2DTemp, Comparison, Field2D\n'), ((856, 984), 'acoustics.turbulence.VonKarman2DTemp', 'VonKarman2DTemp', ([], {'plane': 'plane', 'a': 'correlation_length', 'mu_0': 'mu_0', 'wavenumber_resolution': 'wavenumber_resolution', 'max_mode_order': 'N'}), '(plane=plane, a=correlation_length, mu_0=mu_0,\n wavenumber_resolution=wavenumber_resolution, max_mode_order=N)\n', (871, 984), False, 'from acoustics.turbulence import Gaussian2DTemp, VonKarman2DTemp, Comparison, Field2D\n'), ((1126, 1144), 'acoustics.turbulence.Comparison', 'Comparison', (['[g, s]'], {}), '([g, s])\n', (1136, 1144), False, 'from acoustics.turbulence import Gaussian2DTemp, VonKarman2DTemp, Comparison, Field2D\n'), ((1259, 1332), 'acoustics.turbulence.Field2D', 'Field2D', ([], {'x': 'x', 'y': 'y', 'z': 'z', 'spatial_resolution': 'spatial_resolution', 'spectrum': 'g'}), '(x=x, y=y, z=z, spatial_resolution=spatial_resolution, spectrum=g)\n', (1266, 1332), False, 'from acoustics.turbulence import Gaussian2DTemp, VonKarman2DTemp, Comparison, Field2D\n'), ((1347, 1420), 'acoustics.turbulence.Field2D', 'Field2D', ([], {'x': 'x', 'y': 'y', 'z': 'z', 'spatial_resolution': 'spatial_resolution', 'spectrum': 's'}), '(x=x, y=y, z=z, spatial_resolution=spatial_resolution, spectrum=s)\n', (1354, 1420), False, 'from acoustics.turbulence import Gaussian2DTemp, VonKarman2DTemp, Comparison, Field2D\n')]
|
import torch as th
from tqdm import tqdm
from . import BaseFlow, register_flow
from ..models import build_model
from ..models.GATNE import NSLoss
import torch
from tqdm.auto import tqdm
from numpy import random
import dgl
from ..sampler.GATNE_sampler import NeighborSampler, generate_pairs
@register_flow("GATNE_trainer")
class GATNE(BaseFlow):
def __init__(self, args):
super(GATNE, self).__init__(args)
self.model = build_model(self.model_name).build_model_from_args(self.args, self.hg).to(self.device)
self.train_pairs = None
self.train_dataloader = None
self.nsloss = None
self.neighbor_sampler = None
self.orig_val_hg = self.task.val_hg
self.orig_test_hg = self.task.test_hg
self.preprocess()
self.train()
def preprocess(self):
assert len(self.hg.ntypes) == 1
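        # Random walks are run on a simple (deduplicated) bidirected copy of the graph so
        # that every edge can be traversed in both directions.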
bidirected_hg = dgl.to_bidirected(dgl.to_simple(self.hg.to('cpu')))
all_walks = []
for etype in self.hg.etypes:
nodes = torch.unique(bidirected_hg.edges(etype=etype)[0]).repeat(self.args.rw_walks)
traces, types = dgl.sampling.random_walk(
bidirected_hg, nodes, metapath=[etype] * (self.args.rw_length - 1)
)
all_walks.append(traces)
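        # Slide a window of size window_size over every walk to build (centre, context,
        # edge-type) training pairs for the skip-gram style objective.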
self.train_pairs = generate_pairs(all_walks, self.args.window_size, self.args.num_workers)
self.neighbor_sampler = NeighborSampler(bidirected_hg, [self.args.neighbor_samples])
self.train_dataloader = torch.utils.data.DataLoader(
self.train_pairs,
batch_size=self.args.batch_size,
collate_fn=self.neighbor_sampler.sample,
shuffle=True,
num_workers=self.args.num_workers,
pin_memory=True,
)
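        # NSLoss is the negative-sampling objective: each centre embedding is scored
        # against its context node and neg_size randomly sampled negative nodes.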
self.nsloss = NSLoss(self.hg.num_nodes(), self.args.neg_size, self.args.dim).to(self.device)
self.optimizer = torch.optim.Adam(
[{"params": self.model.parameters()}, {"params": self.nsloss.parameters()}], lr=self.args.learning_rate
)
return
def train(self):
best_score = 0
patience = 0
for self.epoch in range(self.args.max_epoch):
self._full_train_step()
cur_score = self._full_test_step()
if cur_score > best_score:
best_score = cur_score
patience = 0
else:
patience += 1
if patience > self.args.patience:
self.logger.train_info(f'Early Stop!\tEpoch:{self.epoch:03d}.')
break
def _full_train_step(self):
self.model.train()
random.shuffle(self.train_pairs)
data_iter = tqdm(
self.train_dataloader,
desc="epoch %d" % self.epoch,
total=(len(self.train_pairs) + (self.args.batch_size - 1)) // self.args.batch_size,
)
avg_loss = 0.0
for i, (block, head_invmap, tails, block_types) in enumerate(data_iter):
self.optimizer.zero_grad()
# embs: [batch_size, edge_type_count, embedding_size]
block_types = block_types.to(self.device)
embs = self.model(block[0].to(self.device))[head_invmap]
embs = embs.gather(
1, block_types.view(-1, 1, 1).expand(embs.shape[0], 1, embs.shape[2])
)[:, 0]
loss = self.nsloss(
block[0].dstdata[dgl.NID][head_invmap].to(self.device),
embs,
tails.to(self.device),
)
loss.backward()
self.optimizer.step()
avg_loss += loss.item()
post_fix = {
"epoch": self.epoch,
"iter": i,
"avg_loss": avg_loss / (i + 1),
"loss": loss.item(),
}
data_iter.set_postfix(post_fix)
def _full_test_step(self):
self.model.eval()
# {'1': {}, '2': {}}
final_model = dict(
zip(self.hg.etypes, [th.empty(self.hg.num_nodes(), self.args.dim) for _ in range(len(self.hg.etypes))]))
for i in tqdm(range(self.hg.num_nodes()), desc='Evaluating...'):
train_inputs = (
torch.tensor([i for _ in range(len(self.hg.etypes))])
.unsqueeze(1)
.to(self.device)
) # [i, i]
train_types = (
torch.tensor(list(range(len(self.hg.etypes)))).unsqueeze(1).to(self.device)
) # [0, 1]
pairs = torch.cat(
(train_inputs, train_inputs, train_types), dim=1
) # (2, 3)
(
train_blocks,
train_invmap,
fake_tails,
train_types,
) = self.neighbor_sampler.sample(pairs)
node_emb = self.model(train_blocks[0].to(self.device))[train_invmap]
node_emb = node_emb.gather(
1,
train_types.to(self.device)
.view(-1, 1, 1)
.expand(node_emb.shape[0], 1, node_emb.shape[2]),
)[:, 0]
for j in range(len(self.hg.etypes)):
final_model[self.hg.etypes[j]][i] = node_emb[j].detach()
metric = {}
score = []
for etype in self.hg.etypes:
self.task.val_hg = dgl.edge_type_subgraph(self.orig_val_hg, [etype])
self.task.test_hg = dgl.edge_type_subgraph(self.orig_test_hg, [etype])
for split in ['test', 'valid']:
n_embedding = {self.hg.ntypes[0]: final_model[etype].to(self.device)}
res = self.task.evaluate(n_embedding=n_embedding, mode=split)
metric[split] = res
if split == 'valid':
score.append(res.get('roc_auc'))
self.logger.train_info(etype + self.logger.metric2str(metric))
avg_score = sum(score) / len(score)
return avg_score
|
[
"torch.utils.data.DataLoader",
"torch.cat",
"dgl.sampling.random_walk",
"dgl.edge_type_subgraph",
"numpy.random.shuffle"
] |
[((1512, 1706), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.train_pairs'], {'batch_size': 'self.args.batch_size', 'collate_fn': 'self.neighbor_sampler.sample', 'shuffle': '(True)', 'num_workers': 'self.args.num_workers', 'pin_memory': '(True)'}), '(self.train_pairs, batch_size=self.args.\n batch_size, collate_fn=self.neighbor_sampler.sample, shuffle=True,\n num_workers=self.args.num_workers, pin_memory=True)\n', (1539, 1706), False, 'import torch\n'), ((2652, 2684), 'numpy.random.shuffle', 'random.shuffle', (['self.train_pairs'], {}), '(self.train_pairs)\n', (2666, 2684), False, 'from numpy import random\n'), ((1128, 1225), 'dgl.sampling.random_walk', 'dgl.sampling.random_walk', (['bidirected_hg', 'nodes'], {'metapath': '([etype] * (self.args.rw_length - 1))'}), '(bidirected_hg, nodes, metapath=[etype] * (self.\n args.rw_length - 1))\n', (1152, 1225), False, 'import dgl\n'), ((4539, 4598), 'torch.cat', 'torch.cat', (['(train_inputs, train_inputs, train_types)'], {'dim': '(1)'}), '((train_inputs, train_inputs, train_types), dim=1)\n', (4548, 4598), False, 'import torch\n'), ((5363, 5412), 'dgl.edge_type_subgraph', 'dgl.edge_type_subgraph', (['self.orig_val_hg', '[etype]'], {}), '(self.orig_val_hg, [etype])\n', (5385, 5412), False, 'import dgl\n'), ((5445, 5495), 'dgl.edge_type_subgraph', 'dgl.edge_type_subgraph', (['self.orig_test_hg', '[etype]'], {}), '(self.orig_test_hg, [etype])\n', (5467, 5495), False, 'import dgl\n')]
|
import argparse
import torch
from torch import nn
from torch import optim
from torchvision import transforms, datasets, models
from collections import OrderedDict
from PIL import Image
import numpy as np
import json
#Take inputs from user
parser = argparse.ArgumentParser()
parser.add_argument('path_to_image', type=str, help='Set path to image', default='./flowers/test/1/image_06743.jpg')
parser.add_argument('checkpoint', type=str, help='Load checkpoint', default='./checkpoint.pth')
parser.add_argument('--top_k', type=int, help='Return top k most likely classes', default=5)
parser.add_argument('--category_names', type=str, help='Use a mapping of categories to real names', default='cat_to_name.json')
parser.add_argument('--gpu', type=str, help='Use GPU for inference', default='cpu')
args = parser.parse_args()
def load_checkpoint(filepath):
checkpoint = torch.load(filepath)
if checkpoint['model'] == "vgg16":
model = models.vgg16(pretrained=True)
elif checkpoint['model'] == "densenet121":
model = models.densenet121(pretrained=True)
model.eval()
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
epoch = checkpoint['epoch']
return model
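# Note: load_checkpoint assumes the checkpoint was saved during training with the keys
# 'model', 'classifier', 'state_dict', 'class_to_idx' and 'epoch', e.g. (hypothetical):
#   torch.save({'model': 'vgg16', 'classifier': model.classifier,
#               'state_dict': model.state_dict(),
#               'class_to_idx': class_to_idx, 'epoch': epoch}, 'checkpoint.pth')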
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array
'''
#Perform transformations, convert to tensor and normalize
transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
#Open image and apply transformation
pil_image = Image.open(image)
pil_image = transform(pil_image)
#Convert to numpy array
np_image = np.array(pil_image)
return np_image
def predict(image_path, model, topk, device):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
model = load_checkpoint(model)
model.eval()
model.to(device)
np_image = process_image(image_path) #numpy array returned
torch_image = torch.from_numpy(np_image).to(device) #convert to tensor
torch_image = torch_image.unsqueeze_(0)
torch_image = torch_image.float() #returns float tensor of single dimension (1 column)
with torch.no_grad():
output = model.forward(torch_image)
ps = torch.exp(output)
#taking top 5 probabilities and their indices
if topk is None:
probs, indices = torch.topk(ps, 1)
else:
probs, indices = torch.topk(ps, topk)
#invert class_to_idx
inv_class_to_idx = {index: cls for cls, index in model.class_to_idx.items()}
classes = []
for index in indices.cpu().numpy()[0]: #iterating through indices
classes.append(inv_class_to_idx[index])
return probs.cpu().numpy()[0], classes
# Print the most likely image class and its associated probability
# map with json
if args.gpu == "gpu":
device = "cuda:0"
elif args.gpu == "cpu":
device = "cpu"
probs, classes = predict(args.path_to_image, args.checkpoint, args.top_k, device)
if args.category_names is not None:
with open(args.category_names, 'r') as f:
cat_to_name = json.load(f)
classes = [cat_to_name[c] for c in classes]
print("Most probable class:", classes[0])
print("Probability :", probs[0])
if args.top_k is not None:
print("\nTop",args.top_k,"probable classes and their probabilities are")
for index in range(len(classes)):
print(classes[index],":",probs[index])
|
[
"torch.from_numpy",
"json.load",
"torch.topk",
"argparse.ArgumentParser",
"torch.load",
"torchvision.models.densenet121",
"torchvision.transforms.Normalize",
"PIL.Image.open",
"torchvision.transforms.ToTensor",
"torch.exp",
"numpy.array",
"torchvision.transforms.CenterCrop",
"torchvision.models.vgg16",
"torch.no_grad",
"torchvision.transforms.Resize"
] |
[((250, 275), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (273, 275), False, 'import argparse\n'), ((870, 890), 'torch.load', 'torch.load', (['filepath'], {}), '(filepath)\n', (880, 890), False, 'import torch\n'), ((1899, 1916), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1909, 1916), False, 'from PIL import Image\n'), ((2002, 2021), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (2010, 2021), True, 'import numpy as np\n'), ((946, 975), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (958, 975), False, 'from torchvision import transforms, datasets, models\n'), ((2568, 2583), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2581, 2583), False, 'import torch\n'), ((2644, 2661), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (2653, 2661), False, 'import torch\n'), ((2765, 2782), 'torch.topk', 'torch.topk', (['ps', '(1)'], {}), '(ps, 1)\n', (2775, 2782), False, 'import torch\n'), ((2818, 2838), 'torch.topk', 'torch.topk', (['ps', 'topk'], {}), '(ps, topk)\n', (2828, 2838), False, 'import torch\n'), ((3507, 3519), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3516, 3519), False, 'import json\n'), ((1039, 1074), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1057, 1074), False, 'from torchvision import transforms, datasets, models\n'), ((1530, 1552), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1547, 1552), False, 'from torchvision import transforms, datasets, models\n'), ((1591, 1617), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1612, 1617), False, 'from torchvision import transforms, datasets, models\n'), ((1656, 1677), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1675, 1677), False, 'from torchvision import transforms, datasets, models\n'), ((1715, 1781), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1735, 1781), False, 'from torchvision import transforms, datasets, models\n'), ((2361, 2387), 'torch.from_numpy', 'torch.from_numpy', (['np_image'], {}), '(np_image)\n', (2377, 2387), False, 'import torch\n')]
|
from setuptools import Extension, setup
from Cython.Build import cythonize
import numpy as np
import os.path as osp
__version__ = '1.1.4'
url = 'https://github.com/jannessm/quadric-mesh-simplification'
files = [
'simplify.c',
'array.c',
'clean_mesh.c',
'contract_pair.c',
'edges.c',
'maths.c',
'mesh_inversion.c',
'pair_heap.c',
'pair.c',
'preserve_bounds.c',
'q.c',
'targets.c',
'upper_tri.c',
'valid_pairs.c',
'test_utils.c'
]
src_path = osp.join(osp.dirname(osp.abspath(__file__)), 'quad_mesh_simplify')
ext_modules = [
Extension(
'simplify',
[osp.join(src_path, 'c', f) for f in files] + [osp.join(src_path,'simplify.pyx')],
# extra_compile_args=['-fopenmp'],
# extra_link_args=['-fopenmp'],
include_dirs=[np.get_include()],
define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_17_API_VERSION")],
),
]
ext_modules = cythonize(ext_modules)
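# The extension can typically be built in place with: python setup.py build_ext --inplace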
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(filename):
"""Load requirements from a pip requirements file."""
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
setup(
name='quad_mesh_simplify',
version=__version__,
author='<NAME>',
url=url,
description="Simplify meshes including vertex features.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=parse_requirements("requirements.txt"),
python_requires=">=3.6.3",
ext_modules=ext_modules,
zip_safe=False,
)
|
[
"os.path.abspath",
"Cython.Build.cythonize",
"os.path.join",
"numpy.get_include"
] |
[((850, 872), 'Cython.Build.cythonize', 'cythonize', (['ext_modules'], {}), '(ext_modules)\n', (859, 872), False, 'from Cython.Build import cythonize\n'), ((480, 501), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), '(__file__)\n', (491, 501), True, 'import os.path as osp\n'), ((572, 598), 'os.path.join', 'osp.join', (['src_path', '"""c"""', 'f'], {}), "(src_path, 'c', f)\n", (580, 598), True, 'import os.path as osp\n'), ((618, 652), 'os.path.join', 'osp.join', (['src_path', '"""simplify.pyx"""'], {}), "(src_path, 'simplify.pyx')\n", (626, 652), True, 'import os.path as osp\n'), ((741, 757), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (755, 757), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 17:53:39 2019
@author: alankar
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.special.orthogonal import p_roots #Legendre Polynomial roots
from scipy import constants
def gauss_quad(func,a,b,n,*args):#Legendre
[x,w] = p_roots(n+1)
I_G = 0.5*(b-a)*np.sum(w*func(0.5*(b-a)*x+0.5*(b+a),*args))
return I_G
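# gauss_quad maps the Legendre nodes/weights from [-1, 1] onto [a, b]:
#   int_a^b f(x) dx ~= (b-a)/2 * sum_i w_i * f((b-a)/2 * x_i + (b+a)/2)
# Quick sanity check (illustrative): gauss_quad(lambda x: x**2, 0, 1, 5) should give 1/3.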
V = 1000*1e-6 #m^3
rho = 6.022e28 #m^-3
thetaD = 428 #K
def CV(T):
N = 50
f = lambda x:(x**4*np.exp(x))/(np.exp(x)-1)**2
return 9*V*rho*constants.k*(T/thetaD)**3*gauss_quad(f,0,thetaD/T,N)
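# CV implements the Debye model:
#   C_V(T) = 9*V*rho*k_B*(T/theta_D)^3 * int_0^{theta_D/T} x^4 e^x / (e^x - 1)^2 dx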
Temperature = np.linspace(5,500,1000)
Heat_cap = np.array([CV(T) for T in Temperature])
plt.figure(figsize=(13,10))
plt.plot(Temperature,Heat_cap)
plt.grid()
plt.title(r'Debye Heat capacity $C_V(T)$ in a solid',size=25,y=1.02)
plt.xlabel(r'$T$',size=22)
plt.ylabel(r'$C_V(T)$',size=22)
plt.tick_params(axis='both', which='major', labelsize=15)
plt.tick_params(axis='both', which='minor', labelsize=12)
plt.savefig('9.png')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.special.orthogonal.p_roots",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig"
] |
[((634, 659), 'numpy.linspace', 'np.linspace', (['(5)', '(500)', '(1000)'], {}), '(5, 500, 1000)\n', (645, 659), True, 'import numpy as np\n'), ((712, 740), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 10)'}), '(figsize=(13, 10))\n', (722, 740), True, 'import matplotlib.pyplot as plt\n'), ((740, 771), 'matplotlib.pyplot.plot', 'plt.plot', (['Temperature', 'Heat_cap'], {}), '(Temperature, Heat_cap)\n', (748, 771), True, 'import matplotlib.pyplot as plt\n'), ((771, 781), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (779, 781), True, 'import matplotlib.pyplot as plt\n'), ((782, 851), 'matplotlib.pyplot.title', 'plt.title', (['"""Debye Heat capacity $C_V(T)$ in a solid"""'], {'size': '(25)', 'y': '(1.02)'}), "('Debye Heat capacity $C_V(T)$ in a solid', size=25, y=1.02)\n", (791, 851), True, 'import matplotlib.pyplot as plt\n'), ((851, 877), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$T$"""'], {'size': '(22)'}), "('$T$', size=22)\n", (861, 877), True, 'import matplotlib.pyplot as plt\n'), ((878, 909), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$C_V(T)$"""'], {'size': '(22)'}), "('$C_V(T)$', size=22)\n", (888, 909), True, 'import matplotlib.pyplot as plt\n'), ((910, 967), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(15)'}), "(axis='both', which='major', labelsize=15)\n", (925, 967), True, 'import matplotlib.pyplot as plt\n'), ((968, 1025), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""minor"""', 'labelsize': '(12)'}), "(axis='both', which='minor', labelsize=12)\n", (983, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1026, 1046), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""9.png"""'], {}), "('9.png')\n", (1037, 1046), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1055, 1057), True, 'import matplotlib.pyplot as plt\n'), ((317, 331), 'scipy.special.orthogonal.p_roots', 'p_roots', (['(n + 1)'], {}), '(n + 1)\n', (324, 331), False, 'from scipy.special.orthogonal import p_roots\n'), ((513, 522), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (519, 522), True, 'import numpy as np\n'), ((525, 534), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (531, 534), True, 'import numpy as np\n')]
|
import numpy as np
#import matplotlib.pyplot as plt
import shapely.geometry
from scipy.ndimage.morphology import binary_dilation
from scipy.ndimage import label
from multiprocessing import Pool
def voxels_to_polygon(image_stack, pixel_size, center=(0.5, 0.5)):
"""Take a stack of images and produce a stack of shapely polygons.
The images are interpreted as a solid shape with boundary along the pixel
    exterior edge. Thus an image with a single nonzero pixel will return a square
polygon with sidelength equal to the pixel_size.
IN:
image_stack: list of binary (1.0,0) numpy array 2d images each depicting
a single connected region of 1.0 surrounded by 0.0.
pixel_size: The absolute pixel size of the input images. Used to make the
output polygons coordinates real spaced.
center: the relative origin of the image, axis=0 is x and axis=1 is y
                increasing with increasing index. For instance center=(0.5,0.5)
                will select the centre of the image as the origin.
OUT:
        polygon_stack: list of shapely.geometry.polygons each representing the boundary
of the corresponding input binary image.
"""
polygon_stack = [pixels_to_polygon(image, pixel_size, center) for image in image_stack]
return polygon_stack
def check_input(image):
"""Check that the provided image consists of a single connected domain of pixels.
"""
# Check that the input image has no floating pixels.
labeled_array, num_features = label(image.astype(int) + 1)
assert num_features == 1, "The input image must contain a single solid domain of connected pixels but it appears " \
"to have floating pixels "
#
# Check that the input image has no holes.
s = np.sum(np.abs(image.astype(int)[1:, :] - image.astype(int)[0:-1, :]), axis=0)
assert np.alltrue(
s <= 2), "The input image must contain a single solid domain of connected pixels but it appears to have holes"
#
def pixels_to_polygon(image, pixel_size, center=(0.5, 0.5)):
"""Take a single image and produce a shapely polygon.
"""
check_input(image)
expanded_image = expand_image(image, factor=3)
indices = get_image_boundary_index(expanded_image)
coordinates = indices_to_coordinates(indices, pixel_size / 3., center, expanded_image)
polygon = shapely.geometry.Polygon(coordinates)
# show_polygon_and_image(polygon, image, pixel_size, center) #<= DEBUG
return polygon
def expand_image(image, factor):
"""Expand 2d binary image so that each pixel is split by copying
into factor x factor number of pixels.
"""
expanded_image = np.repeat(image, factor, axis=1)
expanded_image = np.repeat(expanded_image, factor, axis=0)
return expanded_image
def get_image_boundary_index(image):
"""Find the pixel indices of the boundary pixels of a binary image.
"""
boundary_image = get_boundary_image(image)
bound_indx = np.where(boundary_image == 1)
ix, iy = bound_indx[0][0], bound_indx[1][0] # starting index
indices = [(ix, iy)]
while (not len(indices) == np.sum(boundary_image)):
# Walk around border and save boundary pixel indices
mask = np.zeros(boundary_image.shape)
mask[np.max([0, ix - 1]):ix + 2, iy] = 1
mask[ix, np.max([iy - 1]):iy + 2] = 1
neighbour_indx = np.where(boundary_image * mask)
for ix, iy in zip(neighbour_indx[0], neighbour_indx[1]):
if (ix, iy) not in indices:
indices.append((ix, iy))
break
indices = sparse_indices(indices)
return indices
def get_boundary_image(image):
"""Return a pixel image with 1 along the boundary if the assumed
object in image.
"""
k = np.ones((3, 3), dtype=int)
dilation = binary_dilation(image == 0, k, border_value=1)
boundary_image = dilation * image
return boundary_image
def sparse_indices(indices):
"""Remove uneccesary nodes in the polygon (three nodes on a line is uneccesary).
"""
new_indices = []
for i in range(0, len(indices) - 1):
if not (indices[i - 1][0] == indices[i][0] == indices[i + 1][0] or \
indices[i - 1][1] == indices[i][1] == indices[i + 1][1]):
new_indices.append(indices[i])
return new_indices
def indices_to_coordinates(indices, pixel_size, center, image):
"""Compute real space coordinates of image boundary form set of pixel indices.
"""
dx = image.shape[1] * center[0]
dy = image.shape[0] * center[1]
coordinates = []
for c in indices:
# Verified by simulated nonsymmetric grain
ycoord = pixel_size * (c[1] + 0.5 - dx + (c[1] % 3 - 1) * 0.5)
xcoord = pixel_size * (-c[0] - 0.5 + dy - (c[0] % 3 - 1) * 0.5)
coordinates.append((xcoord, ycoord))
return coordinates
def get_path_for_pos(args):
arr, all_entry, all_exit, all_nhat, all_L, all_nsegs, \
bad_lines, xray_endpoints, sample_polygon, zpos = args
for i, ang, dty in arr:
# Translate and rotate the xray endpoints according to ytrans and angle
c, s = np.cos(np.radians(-ang)), np.sin(np.radians(-ang))
rotz = np.array([[c, -s], [s, c]])
rx = rotz.dot(xray_endpoints + np.array([[0, 0], [dty, dty]]))
xray_polygon = shapely.geometry.LineString([rx[:, 0], rx[:, 1]])
# compute the intersections between beam and sample in sample coordinates
intersection_points = get_intersection(xray_polygon, sample_polygon, zpos)
if intersection_points is None:
            # If a measurement missed the sample or grazed a corner, we skip ahead
bad_lines.append(int(i))
else:
# make a measurement at the current setting
entry, exit, nhat, L, nsegs = get_quanteties(intersection_points)
# save the measurement results in global lists
all_entry.append(entry)
all_exit.append(exit)
all_nhat.append(nhat)
all_L.append(L)
all_nsegs.append(nsegs)
return all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines
def get_integral_paths(angles, ytrans, zpos, sample_polygon, nprocs, show_geom=False):
"""Compute entry-exit points for a scanrange.
"""
# Instantiate lists to contain all measurements
all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines = [], [], [], [], [], []
xray_endpoints = get_xray_endpoints(sample_polygon)
# Loop over all experimental settings
split_arrays = np.array_split(list(zip(range(len(angles)), angles, ytrans)), nprocs)
# split_arrays = np.array_split(np.array(list(enumerate(zip(angles, ytrans)))), 2)
args = [(arr, all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines,
xray_endpoints, sample_polygon, zpos) for arr in split_arrays]
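    # Fan the measurement settings out over nprocs worker processes; each worker returns
    # its own partial lists which are merged below.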
with Pool(nprocs) as p:
out = p.map(get_path_for_pos, args)
# Unpack the multicore results
all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines = [], [], [], [], [], []
for o in out:
for i, l in enumerate([all_entry, all_exit, all_nhat, all_L, all_nsegs, bad_lines]):
l.extend(o[i])
# repack lists of measurements into numpy arrays of desired format
entry, exit, nhat, L, nsegs = repack(all_entry, all_exit, all_nhat, all_L, all_nsegs)
return entry, exit, nhat, L, nsegs, bad_lines
def get_xray_endpoints(sample_polygon):
"""Calculate endpoitns of xray line segement. The lenght of the
line segment is adapted to make sure xray always convers the full
length of the sample.
"""
xc, yc = sample_polygon.exterior.xy
xmin = np.min(xc)
xmax = np.max(xc)
ymin = np.min(yc)
ymax = np.max(yc)
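    # Use the bounding-box diagonal (with some margin) so the beam segment always spans
    # the sample for any rotation and translation.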
D = np.sqrt((xmax - xmin) ** 2 + (ymax - ymin) ** 2)
return np.array([[-1.1 * D, 1.1 * D], [0, 0]])
def get_intersection(xray_polygon, sample_polygon, z):
"""Compute the 3d coordinates of intersection between xray and
sample.
"""
intersection = sample_polygon.intersection(xray_polygon)
if intersection.is_empty or isinstance(intersection, shapely.geometry.point.Point):
# we missed the sample with the beam
intersection_points = None
elif isinstance(intersection, shapely.geometry.linestring.LineString):
# we got a single line segment intersection
intersection_points = np.zeros((2, 3))
intersection_points[:2, :2] = np.array(intersection.xy).T
intersection_points[:, 2] = z
elif isinstance(intersection, shapely.geometry.multilinestring.MultiLineString):
# we got multiple line segments intersection
intersection_points = np.zeros((2 * len(intersection.geoms), 3))
for i, line_segment in enumerate(intersection.geoms):
intersection_points[2 * i:2 * (i + 1), :2] = np.array(line_segment.xy).T
intersection_points[:, 2] = z
return intersection_points
def get_quanteties(intersection_points):
nsegs = intersection_points.shape[0] // 2
entry, exit = [], []
p1 = intersection_points[0, :]
p2 = intersection_points[1, :]
nhat = list((p2 - p1) / np.linalg.norm(p2 - p1))
L = 0
for i in range(nsegs):
p1 = intersection_points[2 * i, :]
p2 = intersection_points[2 * i + 1, :]
entry.extend(list(p1))
exit.extend(list(p2))
length = np.linalg.norm(p2 - p1)
L += length
return entry, exit, nhat, L, nsegs
def repack(all_entry, all_exit, all_nhat, all_L, all_nsegs):
"""Repack global measurement list into numpy arrays of desired format.
"""
N = len(all_L)
p = max(max(all_nsegs), 1)
nsegs = np.array(all_nsegs).reshape(1, N)
L = np.array(all_L).reshape(1, N)
entry = np.zeros((3 * p, N))
for i, en in enumerate(all_entry):
entry[:len(en[:]), i] = en[:]
exit = np.zeros((3 * p, N))
for i, ex in enumerate(all_exit):
exit[:len(ex[:]), i] = ex[:]
nhat = np.array(all_nhat).T
return entry, exit, nhat, L, nsegs
# def show_polygon_and_image(polygon, image, pixel_size, center):
# """Plot a image and polygon for debugging purposes
# """
# fig, ax = plt.subplots(1, 2, figsize=(12, 6))
# fig.suptitle('Center at ' + str(center))
# xc, yc = polygon.exterior.xy
# xcenter = image.shape[1] * pixel_size * center[0]
# ycenter = image.shape[0] * pixel_size * center[1]
# ax[0].imshow(image, cmap='gray')
# ax[0].set_title('Pixel image')
# ax[0].arrow(int(image.shape[1] * center[0]), int(image.shape[0] * center[1]), \
# image.shape[0] // 4, 0, color='r', head_width=0.15) # y
# ax[0].text(int(image.shape[1] * center[0]) + image.shape[1] // 4, int(image.shape[0] * center[1]) + 0.25, \
# 'y', color='r')
# ax[0].arrow(int(image.shape[1] * center[0]), int(image.shape[0] * center[1]), \
# 0, -image.shape[1] // 4, color='r', head_width=0.15) # x
# ax[0].text(int(image.shape[1] * center[0]) + 0.25, int(image.shape[0] * center[1]) - image.shape[1] // 4, \
# 'x', color='r')
# ax[1].set_title('Polygon representation')
# ax[1].fill(xc, yc, c='gray', zorder=1)
# ax[1].scatter(xc, yc, c='r', zorder=2)
# ax[1].grid(True)
# ax[1].scatter(0, 0, c='b', zorder=3)
# ax[1].set_xlim([-xcenter, image.shape[1] * pixel_size - xcenter])
# ax[1].set_ylim([-ycenter, image.shape[0] * pixel_size - ycenter])
# ax[1].set_xlabel('x')
# ax[1].set_ylabel('y')
# plt.show()
|
[
"numpy.radians",
"numpy.sum",
"scipy.ndimage.morphology.binary_dilation",
"numpy.zeros",
"numpy.ones",
"numpy.min",
"numpy.where",
"numpy.max",
"numpy.array",
"multiprocessing.Pool",
"numpy.alltrue",
"numpy.linalg.norm",
"numpy.sqrt",
"numpy.repeat"
] |
[((1932, 1950), 'numpy.alltrue', 'np.alltrue', (['(s <= 2)'], {}), '(s <= 2)\n', (1942, 1950), True, 'import numpy as np\n'), ((2742, 2774), 'numpy.repeat', 'np.repeat', (['image', 'factor'], {'axis': '(1)'}), '(image, factor, axis=1)\n', (2751, 2774), True, 'import numpy as np\n'), ((2796, 2837), 'numpy.repeat', 'np.repeat', (['expanded_image', 'factor'], {'axis': '(0)'}), '(expanded_image, factor, axis=0)\n', (2805, 2837), True, 'import numpy as np\n'), ((3048, 3077), 'numpy.where', 'np.where', (['(boundary_image == 1)'], {}), '(boundary_image == 1)\n', (3056, 3077), True, 'import numpy as np\n'), ((3848, 3874), 'numpy.ones', 'np.ones', (['(3, 3)'], {'dtype': 'int'}), '((3, 3), dtype=int)\n', (3855, 3874), True, 'import numpy as np\n'), ((3890, 3936), 'scipy.ndimage.morphology.binary_dilation', 'binary_dilation', (['(image == 0)', 'k'], {'border_value': '(1)'}), '(image == 0, k, border_value=1)\n', (3905, 3936), False, 'from scipy.ndimage.morphology import binary_dilation\n'), ((7754, 7764), 'numpy.min', 'np.min', (['xc'], {}), '(xc)\n', (7760, 7764), True, 'import numpy as np\n'), ((7776, 7786), 'numpy.max', 'np.max', (['xc'], {}), '(xc)\n', (7782, 7786), True, 'import numpy as np\n'), ((7798, 7808), 'numpy.min', 'np.min', (['yc'], {}), '(yc)\n', (7804, 7808), True, 'import numpy as np\n'), ((7820, 7830), 'numpy.max', 'np.max', (['yc'], {}), '(yc)\n', (7826, 7830), True, 'import numpy as np\n'), ((7839, 7887), 'numpy.sqrt', 'np.sqrt', (['((xmax - xmin) ** 2 + (ymax - ymin) ** 2)'], {}), '((xmax - xmin) ** 2 + (ymax - ymin) ** 2)\n', (7846, 7887), True, 'import numpy as np\n'), ((7899, 7938), 'numpy.array', 'np.array', (['[[-1.1 * D, 1.1 * D], [0, 0]]'], {}), '([[-1.1 * D, 1.1 * D], [0, 0]])\n', (7907, 7938), True, 'import numpy as np\n'), ((9838, 9858), 'numpy.zeros', 'np.zeros', (['(3 * p, N)'], {}), '((3 * p, N))\n', (9846, 9858), True, 'import numpy as np\n'), ((9948, 9968), 'numpy.zeros', 'np.zeros', (['(3 * p, N)'], {}), '((3 * p, N))\n', (9956, 9968), True, 'import numpy as np\n'), ((3301, 3331), 'numpy.zeros', 'np.zeros', (['boundary_image.shape'], {}), '(boundary_image.shape)\n', (3309, 3331), True, 'import numpy as np\n'), ((3452, 3483), 'numpy.where', 'np.where', (['(boundary_image * mask)'], {}), '(boundary_image * mask)\n', (3460, 3483), True, 'import numpy as np\n'), ((5277, 5304), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (5285, 5304), True, 'import numpy as np\n'), ((6950, 6962), 'multiprocessing.Pool', 'Pool', (['nprocs'], {}), '(nprocs)\n', (6954, 6962), False, 'from multiprocessing import Pool\n'), ((9460, 9483), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (9474, 9483), True, 'import numpy as np\n'), ((10056, 10074), 'numpy.array', 'np.array', (['all_nhat'], {}), '(all_nhat)\n', (10064, 10074), True, 'import numpy as np\n'), ((3200, 3222), 'numpy.sum', 'np.sum', (['boundary_image'], {}), '(boundary_image)\n', (3206, 3222), True, 'import numpy as np\n'), ((8469, 8485), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (8477, 8485), True, 'import numpy as np\n'), ((9229, 9252), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (9243, 9252), True, 'import numpy as np\n'), ((9753, 9772), 'numpy.array', 'np.array', (['all_nsegs'], {}), '(all_nsegs)\n', (9761, 9772), True, 'import numpy as np\n'), ((9795, 9810), 'numpy.array', 'np.array', (['all_L'], {}), '(all_L)\n', (9803, 9810), True, 'import numpy as np\n'), ((5218, 5234), 'numpy.radians', 'np.radians', (['(-ang)'], 
{}), '(-ang)\n', (5228, 5234), True, 'import numpy as np\n'), ((5244, 5260), 'numpy.radians', 'np.radians', (['(-ang)'], {}), '(-ang)\n', (5254, 5260), True, 'import numpy as np\n'), ((5344, 5374), 'numpy.array', 'np.array', (['[[0, 0], [dty, dty]]'], {}), '([[0, 0], [dty, dty]])\n', (5352, 5374), True, 'import numpy as np\n'), ((8524, 8549), 'numpy.array', 'np.array', (['intersection.xy'], {}), '(intersection.xy)\n', (8532, 8549), True, 'import numpy as np\n'), ((3345, 3364), 'numpy.max', 'np.max', (['[0, ix - 1]'], {}), '([0, ix - 1])\n', (3351, 3364), True, 'import numpy as np\n'), ((3398, 3414), 'numpy.max', 'np.max', (['[iy - 1]'], {}), '([iy - 1])\n', (3404, 3414), True, 'import numpy as np\n'), ((8920, 8945), 'numpy.array', 'np.array', (['line_segment.xy'], {}), '(line_segment.xy)\n', (8928, 8945), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import copy
import os
import datetime
import time
from functools import reduce
# Function that provides some information about the csv files
def infos(old_df_names, months):
"""
    Print information about the databases
input:
- dataframe
- months
output:
- months, number of NaN values in each column
"""
for i in range(len(old_df_names)):
df = pd.read_csv(old_df_names[i])
print('Month %s :' %months[i])
for i in df.columns:
print('\t- {} has number of Nan : {:d} ({:.2f}%)'.format(i, int(df[i].isna().sum()), (int(df[i].isna().sum())/len(df))*100))
print('Total number of rows: {:d}'.format(len(df)))
print('\n')
return
# Function that clean the databases from NaN values
def clean_dataframe(df):
"""
Clean the dataframe, removing NaN from columns
input:
- dataframe
output:
- cleaned dataframe
"""
df.dropna(inplace = True)
return df
# Function that create new csv files
def make_new_csv(old_df_names, df_names):
"""
Make new csv files
input:
- dataframe
output:
- new csv files
"""
for i in range(len(old_df_names)):
df = pd.read_csv(old_df_names[i])
# cleaning function
df = clean_dataframe(df)
df.to_csv(df_names[i], index=False)
return
# RQ1 functions
# RQ1.1 functions
def compute_average_session(df_names):
"""
Compute average number of times users perform view/cart/purchase within each session
input:
- list of names of csv files to open
output:
- series of average of each operation
"""
# init the daily average dict
average_session_dict = {}
for i in range(len(df_names)):
average_session_dict[i] = {}
# load the ith dataframe, taking the event_type and user_session columns
df = pd.read_csv(df_names[i], usecols=['event_type', 'user_session'])
for j in df['event_type'].unique():
#print('{} of {:d} has average of : {:.2f} ' .format(j, i, float(df[df['event_type'] == j].groupby(['user_session']).count().mean())))
average_session_dict[i][j] = df[df['event_type'] == j].groupby(['user_session']).count().mean()
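    # average the per-session counts across the monthly dataframes (one column per month)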
average_session_df = pd.DataFrame(average_session_dict).mean(axis=1)
return average_session_df
def plot_average_session(average_session_df, months):
"""
plots the average number of times users perform each operation
"""
# plot average_session_df
fig = plt.figure()
X = np.arange(len(average_session_df))
plt.bar(X, average_session_df)
plt.xticks(np.arange(len(average_session_df)),average_session_df.index)
plt.ylabel("average operation per session")
plt.xlabel("operations")
plt.title("Average number of times users perform each operation within a session")
plt.grid(color ='silver', linestyle = ':')
fig.set_figwidth(15)
fig.set_figheight(5)
return
# RQ1.2 functions
def compute_average_view_cart(df_names, months):
"""
Compute average number of times a user views a product before adding it to the cart
input:
- list of names of csv files to open
output:
- the average of how many times a product is viewed before to be added to the cart
"""
# init a dataframe with index as every months and column as the mean for each user
df_mean_database = pd.DataFrame(index=months, columns=['mean'])
for i in range(len(df_names)):
# load the ith dataframe, taking the event_time, event_type, product_id, user_id columns
df = pd.read_csv(df_names[i],
usecols=['event_time','event_type', 'product_id', 'user_id'], nrows=100000,
parse_dates=['event_time'])
# cut off the 'purchase' variable from event_type
df_2 = df[df['event_type'] != 'purchase']
df_3 = df_2[df_2.event_type=='view'].groupby(by=['product_id']).agg(view=('event_type', 'count'))
df_4 = df_2[df_2.event_type=='cart'].groupby(by=['product_id']).agg(cart=('event_type', 'count'))
# get dataframe where event_type is equal to 'cart'
df_cart = df_2[df_2['event_type']=='cart']
# init a dataframe with index as every user and column as the mean for each user
df_mean_user = pd.DataFrame(index=df_cart['user_id'].unique(), columns=['mean'])
df_cart.groupby(by=['user_id']).count()
for user in df_cart['user_id'].unique():
# get dataframe with one user at a time
df_user = df_2[df_2['user_id'] == user]
# init the dict where the key are the products and the values are the mean of each product
product_dict = {}
for prod in df_user['product_id'].unique():
# get dataframe with one product at a time
df_product = df_user[df_user['product_id'] == prod]
df_product_2 = df_product.copy()
product_dict[prod] = []
# init a list to append how many times 'view' appears before 'cart' for each product
product_lst = []
# check if at least a 'view' exist in the dataframe otherwise pass
if any(df_product_2['event_type'] == 'view') == True:
df_product_2_time = df_product_2[df_product_2['event_type'] == 'view'].event_time.reset_index(drop=True)[0]
# check if there are some 'cart' event before the 'view' event (only for the first time of seeing the 'cart')
if any(df_product_2[df_product_2['event_type'] == 'cart'].event_time <= df_product_2_time) == True:
df_product_3 = df_product_2[df_product_2.event_time <= df_product_2_time]
# drop any 'cart' events at the beginning
df_product_2 = df_product_2.drop(labels=df_product_3[df_product_3['event_type'] == 'cart'].index)
# count how many times 'view' is before 'cart'
if any(df_product_2['event_type'] == 'view') == True:
for index, row in df_product_2.iterrows():
if row['event_type'] == 'cart':
                                product_lst.append(np.sum(df_product_2[df_product_2['event_type'] == 'view'].event_time < row['event_time']))
df_product_2 = df_product_2[df_product_2.event_time > row['event_time']]
# compute mean for each product
if len(product_lst) > 0:
product_dict[prod] = [i for i in product_lst if i != 0]
product_dict[prod] = np.mean(product_dict[prod])
else:
product_dict[prod].append(0)
# compute mean for each user
try:
df_mean_user.loc[user,'mean'] = round(pd.DataFrame(product_dict).mean(axis=1)[0], 2)
except ValueError:
df_mean_user.loc[user,'mean'] = round(product_dict[prod], 2)
# compute final average for a user for a product
df_mean_user.dropna(inplace=True)
mean_prod_user = np.mean(df_mean_user)
# add final average per month
df_mean_database.loc[months[i], 'mean'] = round(mean_prod_user[0], 2)
df_mean_database.dropna(inplace=True)
final_mean = np.mean(df_mean_database)
return final_mean
# RQ1.3 functions
def compute_probability_cart_purchase(df_names, months):
"""
    Compute the probability that products are bought once they are added to the cart
input:
- list of names of csv files to open
output:
- probability products are purchased once are added to the cart
"""
# init dictionary to merge each monthly datasets
df_database = {}
for i in range(len(df_names)):
# load the ith dataframe, taking only the event_type
df = pd.read_csv(df_names[i],
usecols=['event_type'])
# cut off the view variable from event_type
df_database[months[i]] = df[df['event_type'] != 'view']
# function to concatenate each dataset
merged_df = pd.concat([df_database[months[i]] for i in range(len(df_database))])
# compute probability as the ratio between purchase and cart events
prob = round(merged_df[merged_df['event_type'] == 'purchase'].shape[0] /
merged_df[merged_df['event_type'] == 'cart'].shape[0], 4) * 100
return prob
# RQ1.4 functions
def compute_average_time_removed_item(df_names, months):
"""
Compute the average time an item stays in the cart before being removed
input:
- list of names of csv files to open
output:
- average time
"""
df_mean_database = pd.DataFrame(index=months, columns=['mean'])
for i in range(len(df_names)):
# load the ith dataframe, taking only the
df = pd.read_csv(df_names[i],
usecols=['event_time', 'event_type', 'product_id'], nrows=100000,
parse_dates=['event_time'])
# cut off the view variable from event_type
df_2 = df[df['event_type'] != 'view']
# init the dict where the key are the products and the values are the mean of each product
product_dict = {}
# loop through the event_type 'purchase' to find unique product_id
for prod in df_2[df_2['event_type'] == 'purchase']['product_id'].unique():
df_product = df_2[df_2['product_id'] == prod]
# check if at least a 'cart' event exist
if df_product['event_type'].str.contains('cart').any():
pass
else:
continue
# check if there are some 'purchase' event before the 'cart' event (only for the first time of seeing the 'purchase')
if any(df_product[df_product['event_type'] == 'purchase'].event_time <=
df_product[df_product['event_type'] == 'cart'].event_time.reset_index(drop=True)[0]) == True:
df_3 = df_product[df_product.event_time <= df_product[df_product['event_type'] == 'cart'].event_time.reset_index(drop=True)[0]]
# drop any 'cart' events at the beginning
df_product = df_product.drop(labels=df_3[df_3['event_type'] == 'purchase'].index)
# check if there are some 'cart' event before the 'purchase' event (only for the last time of seeing the 'cart')
if any(df_product[df_product['event_type'] == 'cart'].event_time >=
df_product[df_product['event_type'] == 'purchase'].event_time.reset_index(drop=True)[len(df_product[df_product['event_type'] == 'purchase'])-1]) == True:
df_3 = df_product[df_product.event_time >= df_product[df_product['event_type'] == 'purchase'].event_time.reset_index(drop=True)[len(df_product[df_product['event_type'] == 'purchase'])-1]]
# drop any 'cart' events at the beginning
df_product = df_product.drop(labels=df_3[df_3['event_type'] == 'cart'].index)
# check if at least a 'cart' event exist
if df_product['event_type'].str.contains('cart').any():
pass
else:
continue
# check if at least a 'purchase' event exist
if df_product['event_type'].str.contains('purchase').any():
pass
else:
continue
dist_prod = df_product.event_time[df_product.event_type == 'purchase'].values - df_product.event_time[df_product.event_type == 'cart'].values
product_dict[prod] = []
product_dict[prod].append(np.mean(dist_prod))
# add final average per month
        df_mean_database.loc[months[i], 'mean'] = pd.DataFrame(product_dict).mean(axis=1)[0]
    return df_mean_database
# RQ1.5 functions
def compute_average_time_first_view(df_names, months):
"""
    Compute the average time between the first time an item is viewed and the moment it is purchased or added to the cart
input:
- list of names of csv files to open
output:
- average time
"""
df_mean_database = pd.DataFrame(index=months, columns=['mean'])
for i in range(len(df_names)):
# load the ith dataframe, taking only the
df = pd.read_csv(df_names[i],
usecols=['event_time', 'event_type', 'product_id'],
parse_dates=['event_time'])
# cut off the view variable from event_type
df_3 = df[df['event_type'] != 'view']
# init the dict where the key are the products and the values are the mean of each product
product_dict = {}
# loop through the event_type 'purchase' to find unique product_id
for prod in df_3['product_id'].unique():
df_product = df[df['product_id'] == prod]
# check if at least a 'view' event exist
if df_product['event_type'].str.contains('view').any():
pass
else:
continue
# check if there are some 'purchase' event before the 'view' event (only for the first time of seeing the 'purchase')
if any(df_product[df_product['event_type'] == 'purchase'].event_time <=
df_product[df_product['event_type'] == 'view'].event_time.reset_index(drop=True)[0]) == True:
df_3 = df_product[df_product.event_time <= df_product[df_product['event_type'] == 'view'].event_time.reset_index(drop=True)[0]]
# drop any 'cart' events at the beginning
df_product = df_product.drop(labels=df_3[df_3['event_type'] == 'purchase'].index)
# check if there are some 'cart' event before the 'view' event (only for the first time of seeing the 'purchase')
if any(df_product[df_product['event_type'] == 'cart'].event_time <=
df_product[df_product['event_type'] == 'view'].event_time.reset_index(drop=True)[0]) == True:
df_3 = df_product[df_product.event_time <= df_product[df_product['event_type'] == 'view'].event_time.reset_index(drop=True)[0]]
# drop any 'cart' events at the beginning
df_product = df_product.drop(labels=df_3[df_3['event_type'] == 'cart'].index)
# check if at least a 'purchase' event exist
if df_product['event_type'].str.contains('purchase').any():
pass
else:
continue
# check if at least a 'cart' event exist
if df_product['event_type'].str.contains('cart').any():
pass
else:
continue
product_dict[prod] = []
df_product.drop_duplicates(subset=['event_type'], keep='first', inplace=True)
df_product.reset_index(inplace=True)
product_dict[prod].append(df_product.event_time[1] - df_product.event_time[0])
# add final average per month
df_mean_database.loc[months[i], 'mean'] = pd.DataFrame(product_dict).mean(axis=1)[0]
return df_mean_database
# RQ2 functions
def compute_number_sold_per_category(df_names, months):
"""
    Compute the number of products sold per category
input:
- list of names of csv files to open
output:
- number of sold product per category
"""
# init a dataframe with index as months and column as most sold product
df_final = {}
for i in range(len(df_names)):
# load the ith dataframe, taking only the
df = pd.read_csv(df_names[i],
usecols=['product_id', 'category_code', 'event_type'])
df = df[df['event_type'] == 'purchase']
new = df['category_code'].str.split(".", expand=True)
df['category_1'] = new[0]
df.drop(columns=['category_code', 'event_type'], inplace=True)
df_final[months[i]] = df.groupby(by=['category_1']).count().sort_values('product_id', ascending=False)
df_final = [df_final[months[i]] for i in range(len(df_final))]
return df_final
def plot_number_sold_per_category(df_final, months):
"""
plot the number of sold product per category per month
"""
# plot number of sold product per category pe moth using subplots
fig, a = plt.subplots(4,2)
# Plot 1
df_final[0].reset_index().plot(kind='bar', y='product_id', x='category_1', ax=a[0][0])
a[0][0].set(title=months[0], xlabel='Categories', ylabel='Total Sales')
a[0][0].tick_params(labelrotation=45)
a[0][0].get_legend().remove()
a[0][0].grid(color ='silver', linestyle = ':')
# Plot 2
df_final[1].reset_index().plot(kind='bar', y='product_id', x='category_1', ax=a[0][1])
a[0][1].set(title=months[1], xlabel='Categories', ylabel='Total Sales')
a[0][1].tick_params(labelrotation=45)
a[0][1].get_legend().remove()
a[0][1].grid(color ='silver', linestyle = ':')
# Plot 3
df_final[2].reset_index().plot(kind='bar', y='product_id', x='category_1', ax=a[1][0])
a[1][0].set(title=months[2], xlabel='Categories', ylabel='Total Sales')
a[1][0].tick_params(labelrotation=45)
a[1][0].get_legend().remove()
a[1][0].grid(color ='silver', linestyle = ':')
# Plot 4
df_final[3].reset_index().plot(kind='bar', y='product_id', x='category_1', ax=a[1][1])
a[1][1].set(title=months[3], xlabel='Categories', ylabel='Total Sales')
a[1][1].tick_params(labelrotation=45)
a[1][1].get_legend().remove()
a[1][1].grid(color ='silver', linestyle = ':')
# Plot 5
df_final[4].reset_index().plot(kind='bar', y='product_id', x='category_1', ax=a[2][0])
a[2][0].set(title=months[4], xlabel='Categories', ylabel='Total Sales')
a[2][0].tick_params(labelrotation=45)
a[2][0].get_legend().remove()
a[2][0].grid(color ='silver', linestyle = ':')
# Plot 6
df_final[5].reset_index().plot(kind='bar', y='product_id', x='category_1', ax=a[2][1])
a[2][1].set(title=months[5], xlabel='Categories', ylabel='Total Sales')
a[2][1].tick_params(labelrotation=45)
a[2][1].get_legend().remove()
a[2][1].grid(color ='silver', linestyle = ':')
# Plot 7
df_final[6].reset_index().plot(kind='bar', y='product_id', x='category_1', ax=a[3][0])
a[3][0].set(title=months[6], xlabel='Categories', ylabel='Total Sales')
a[3][0].tick_params(labelrotation=45)
a[3][0].get_legend().remove()
a[3][0].grid(color ='silver', linestyle = ':')
a[3][1].axis('off')
# Title the figure
fig.suptitle('Category of the most trending products overall', fontsize=14, fontweight='bold')
fig.set_figwidth(20)
fig.set_figheight(50)
plt.show()
return
def plot_most_visited_subcategories(df_names, months):
"""
plot the most visited subcategories
"""
# init a dataframe with index as months and column as most sold product
df_final = {}
for i in range(len(df_names)):
# load the ith dataframe, taking only the
df = pd.read_csv(df_names[i],
usecols=['event_type', 'category_code'])
# take only the view events
df = df[df['event_type'] == 'view']
# split the categories into subcategories
new = df['category_code'].str.split(".", expand=True)
df['subcategory'] = new[1]
df.drop(columns=['category_code'], inplace=True)
# group the subcategories and sort in descending order the relative values
df_final[months[i]] = df.groupby(by=['subcategory']).count().sort_values('event_type', ascending=False)
# build a pool of lists
df_final = [df_final[months[i]] for i in range(len(df_final))]
# concat each list of month
merged_df = pd.concat([df_final[i] for i in range(len(df_final))]).reset_index()
df_tot = merged_df.groupby(by=['subcategory']).sum().sort_values('event_type', ascending=False).rename(columns={'event_type': 'view'}).reset_index()
# plot most visited subcategories
fig = plt.figure()
X = np.arange(len(df_tot))
plt.barh(X, df_tot['view'])
plt.yticks(np.arange(len(df_tot)),df_tot['subcategory'])
plt.ylabel("views")
plt.xlabel("subcategories")
plt.title("Most visited subcategories")
plt.grid(color ='silver', linestyle = ':')
fig.set_figwidth(15)
fig.set_figheight(15)
plt.show()
return
def plot_10_most_sold(df_final, months):
"""
plot the 10 most sold product per category
"""
# concat the dataset
merged_df = pd.concat([df_final[i] for i in range(len(df_final))]).reset_index()
# group together by category in descending order
df_tot = merged_df.groupby(by=['category_1']).sum().sort_values('product_id', ascending=False).rename(columns={'event_type': 'view'})[:10]
return df_tot
# RQ3 functions
# Function used for showing the values of the bars in the plots of RQ3
def plot_values_in_barh(y):
for index, value in enumerate(y):
plt.text(value, index, str(round(value, 2)))
# Function that given a category in input, returns a plot with the average price per brand for the selected category
def plot_average_price_per_category(category, df_names):
# Initializing an empty list where we will put every grouped-by DataFrame later on
l = []
# Starting a for loop to read every DataFrame
for i in range(len(df_names)):
# Selecting the columns to use for this task
data = pd.read_csv(df_names[i], usecols=['category_code', 'brand', 'price'])
        # For every category_code and brand, calculate the average price of the products, then reset the index
        # because I do not want to work with a MultiIndex
a = data.groupby(['category_code', 'brand']).mean().reset_index()
# Appending the DataFrame analyzed for 1 month to the list l
l.append(a)
# Concatenating every DataFrame of each month grouped by category_code and brand in one DataFrame that will not
# be memory expensive
final = pd.concat(l)
# Grouping again by category_code and brand after the concatenation. We reset again the index for the same
# reason as before
final2 = final.groupby(['category_code', 'brand']).mean().reset_index()
# Selecting the category_code we want to analyze
fplot = final2.loc[final2['category_code'] == category]
# Setting the values to show in the plot at the end of the bars
y = list(fplot['price'])
# Assigning a variable to the plot
end = fplot.plot(x='brand', kind='barh', figsize=(20, 60))
# Returning the plot and calling the function to show the prices on the top of the bars
return end, plot_values_in_barh(y)
# Function that returns for each category, the brand with the highest price
def brand_with_highest_price_for_category(df_names):
# Initializing an empty list where we will put our Dataframes later on
l = []
# Starting a for loop to read every DataFrame
for i in range(len(df_names)):
# Selecting the columns to use for this task
data = pd.read_csv(df_names[i], usecols=['category_code', 'brand', 'price'])
# For every category_code and brand, calculating the average price of the products
a = data.groupby(['category_code', 'brand']).mean()
# Selecting the rows with the higher average price for each category
a1 = a.loc[a.groupby(level='category_code')['price'].idxmax()]
# Appending the analyzed DataFrame for 1 month to the list l
l.append(a1)
# Concatenating every DataFrame of each month grouped by category_code and brand in one DataFrame that will not
# be memory expensive
final = pd.concat(l)
    # Resetting the index because I do not want to work with a MultiIndex
rfinal = final.reset_index()
# Selecting again only the rows with the higher average price for category after concatenating the DataFrames
last_final = rfinal.loc[rfinal.groupby('category_code')['price'].idxmax()]
# Return the output
return last_final.sort_values(by=['price'])
# RQ4 functions
# Function that is used to see if the prices of different brands are significantly different
def average_price_per_brand(df_names):
# Initializing an empty list
l = []
# Starting the loop to read the dataframes of every month
for i in range(len(df_names)):
# Selecting just the columns referring to the brand and price
data = pd.read_csv(df_names[i], usecols=['brand', 'price'])
# Grouping by brand and calculating the average price per brand
a = data.groupby('brand').mean()
# Appending the obtained DataFrame regarding the results of one month in the starting empty list
l.append(a)
# Concatenating every DataFrame of each month in one DataFrame that will not be memory expensive
t = pd.concat(l)
    # Resetting the index because I do not want to work with a MultiIndex
rt = t.reset_index()
# Grouping by brand the full DataFrame regarding all months and calculating the mean price
u = rt.groupby('brand').mean()
# Returning the Dataframe, the minimum and the maximum to compare the results
return u, u.min(), u.max()
# Function that is used to reduce the number of data we want to analyze for the RQ4
def make_df_purchase(df_names, months):
df_purchase = {}
# Reading the data of all months and selecting only purchase events from the DataFrame
for i in range(len(df_names)):
data = pd.read_csv(df_names[i], usecols=['brand', 'price', 'event_type'])
df_purchase[months[i]] = data[data['event_type'] == 'purchase']
# Appending the results of every months to a dictionary
return df_purchase
# Function that returns the profit of every brand in each month
def earning_per_month(df_purchase, months):
dict_earning = {}
# Calculating the earning per month of each brand grouping by brand and doing the sum of the prices of every sold
# product
for i in range(len(df_purchase)):
data = df_purchase[months[i]]
dict_earning[months[i]] = data.groupby('brand', as_index=False).sum()
return dict_earning
# Function that given a brand in input, returns the total profit for month of that brand
def brand_per_month(brand, dict_earning, months):
df_profit = {}
# For every month selecting the profit from the dictionary of earnings created before. If there is no profit for the
# selected brand, we set it equal to 0
for i in range(len(months)):
try:
df_profit[months[i]] = dict_earning[months[i]].loc[dict_earning[months[i]].brand == brand, 'price'].values[
0]
except IndexError:
df_profit[months[i]] = 0
return df_profit
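# Hypothetical usage sketch (variable names assumed, not from the original analysis):
#   df_purchase = make_df_purchase(df_names, months)
#   dict_earning = earning_per_month(df_purchase, months)
#   apple_profit = brand_per_month('apple', dict_earning, months)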
# Function that given the earnings of every brand, returns the top 3 brands that have suffered the biggest losses
# between one month and the previous one
def find_3_worst_brand(dict_earning, months):
# Selecting the dictionary obtained from the total profits of the brands and then merging them in one DataFrame
# where on the columns we have the months and on the rows we have the brands. The values are the earnings of each
# brand for every month
data_frames = [dict_earning[months[i]] for i in range(len(dict_earning))]
df_merged = reduce(lambda left, right: pd.merge(left, right, on=['brand'],
how='outer'), data_frames)
df_merged.set_index('brand', inplace=True)
df_merged.set_axis(months, axis=1, inplace=True)
# Transposing the DataFrame and applying the pct_change to calculate the percentage change between every month
# and the month before
df_pct = df_merged.T.pct_change()
worst_brand = []
worst_value = []
worst_months = []
# Selecting the minimum of the percentage change(which means the bigger loss) in our DataFrame, the brand that
# corresponds to it and the month that refers to it. We append those values to the lists we defined before
for i in range(0, 3):
worst_brand.append(df_pct.min().sort_values().index[i])
worst_value.append(round(abs(df_pct.min().sort_values()[i]) * 100, 2))
L = list(df_pct[df_pct[worst_brand[i]] == df_pct.min().sort_values()[i]].index.values)
worst_months.append(''.join(L))
# Showing the result of the request
for j in range(0, 3):
        print('{} lost {}% between {} and the month before'.format(worst_brand[j], worst_value[j], worst_months[j]),
end=' \n')
return
#RQ5
#Function that creates a plot that for each day of the week shows the hourly average of visitors
def plot_hour_avg(df_names,months):
'''
create a plot
input:
-dataframe
-months
output:
-plot
'''
for i in range(len(df_names)):
df=pd.read_csv(df_names[i],parse_dates=['event_time'],usecols=['event_time','user_id'])
        #hourly average of visitors for each day (pandas dayofweek: Monday=0 ... Sunday=6)
        domenica=df[df.event_time.dt.dayofweek==6].groupby(df.event_time.dt.hour).user_id.count()
        lunedi=df[df.event_time.dt.dayofweek==0].groupby(df.event_time.dt.hour).user_id.count()
        martedi=df[df.event_time.dt.dayofweek==1].groupby(df.event_time.dt.hour).user_id.count()
        mercoledi=df[df.event_time.dt.dayofweek==2].groupby(df.event_time.dt.hour).user_id.count()
        giovedi=df[df.event_time.dt.dayofweek==3].groupby(df.event_time.dt.hour).user_id.count()
        venerdi=df[df.event_time.dt.dayofweek==4].groupby(df.event_time.dt.hour).user_id.count()
        sabato=df[df.event_time.dt.dayofweek==5].groupby(df.event_time.dt.hour).user_id.count()
plt.figure(figsize=[10.0,5.0])
plt.plot(domenica, '-o', color='royalblue', label = 'SUNDAY')
plt.plot(lunedi, '-o', color='green', label = 'MONDAY')
plt.plot(martedi, '-o', color='red', label = 'TUESDAY')
plt.plot(mercoledi, '-o', color='yellow', label = 'WEDNESDAY')
plt.plot(giovedi, '-o', color='orange', label = 'THURSDAY')
plt.plot(venerdi, '-o', color='violet', label = 'FRIDAY')
plt.plot(sabato, '-o', color='grey', label = 'SATURDAY')
plt.xlabel('HOUR')
plt.ylabel('VISITORS')
plt.title("Daily average - %s " %months[i])
plt.xticks(range(0,24))
plt.legend()
plt.show()
return
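# Sanity check (illustrative only): pandas numbers weekdays Monday=0 ... Sunday=6, which is
# why the selections above use dayofweek==0 for Monday and dayofweek==6 for Sunday.
def _check_dayofweek_convention():
    import pandas as pd
    assert pd.Timestamp('2020-01-06').dayofweek == 0   # 2020-01-06 was a Monday
    assert pd.Timestamp('2020-01-12').dayofweek == 6   # 2020-01-12 was a Sunday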
#RQ6
#Function that calculates the overall conversion rate of the products, creates the plot of the number of purchases by category and shows the conversion rate of each category in descending order
def conversion_rate(df_names,months):
"""
calculate overall conversion rate
plot of purchase by category
calculate conversion rate for each category
input:
- dataframe
- months
output:
- overall conversion rate for each month
- conversion rate for each category of each month
- plot of purchase by category of each month
"""
for i in range(len(df_names)):
dataset=pd.read_csv(df_names[i],usecols=['event_type','category_code'])
#NUMBER OF ALL PURCHASE PRODUCTS
purchase=dataset[dataset.event_type=='purchase']
totpurc=len(purchase)
#NUMBER OF ALL VIEW PRODUCTS
view=dataset[dataset.event_type=='view']
totview=len(view)
#OVERALL CONVERSION RATE OF STORE
cr=totpurc/totview
print ('Overall conversion rate of %s'%months[i])
print (cr)
#CREATE A NEW COLUMN WITH THE SPLITTED CATEGORY NAME
new = dataset['category_code'].str.split(".", expand=True)
dataset['category_name'] = new[0]
dataset.drop(columns=['category_code'], inplace=True)
#NUMBER OF PURCHASE FOR CATEGORY
purc_4_category=dataset[dataset.event_type=='purchase'].groupby('category_name').agg(purchase=('event_type','count'))
#NUMBER OF VIEW FOR CATEGORY
view_4_category=dataset[dataset.event_type=='view'].groupby('category_name').agg(view=('event_type','count'))
#PLOT OF NUMBER OF PURCHASE FOR CATEGORY
fig = plt.figure()
purc_4_category.plot.bar(figsize = (18, 7), title='Number of purchase of %s'%months[i])
plt.show()
#CONVERSION RATE FOR CATEGORY
cr_4_cat=(purc_4_category.purchase/view_4_category.view)
dec=cr_4_cat.sort_values(axis=0, ascending=False)
print ('Conversion rate of each category of %s'%months[i])
print(dec, end='\n')
return
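# Worked toy example (numbers invented): with 5 purchase events and 200 view events, the
# overall conversion rate computed above is 5 / 200 = 0.025, i.e. 2.5%.
def _demo_conversion_rate():
    totpurc, totview = 5, 200
    cr = totpurc / totview
    assert cr == 0.025
    return cr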
#RQ7
#Function that demonstrates the Pareto's principle
def pareto(df_names,months):
"""
Apply Pareto's principle
input:
- dataframe
- months
output:
    - demonstration of whether Pareto's principle applies for each month
"""
for i in range(len(df_names)):
dataset=pd.read_csv(df_names[i],usecols=['user_id','event_type','price'])
#PURCHASE BY USERS
purchase_by_user=dataset[dataset.event_type == 'purchase'].groupby(dataset.user_id).agg(number_of_purchases=('user_id','count'),total_spent=('price','sum'))
purchase_by_user=purchase_by_user.sort_values('total_spent',ascending=False)
#20% OF USERS
user_20=int(len(purchase_by_user)*20/100)
purch_by_user20=purchase_by_user[:user_20]
#TOTAL SPENT BY 20% OF USERS
spent_by_20=purch_by_user20.agg('sum')
#TOTAL PROFIT OF STORE
profit=dataset[dataset.event_type == 'purchase'].groupby(dataset.event_type).agg(gain=('price','sum'))
        #PERCENTAGE OF THE STORE'S TOTAL PROFIT THAT COMES FROM THE TOP 20% OF USERS
        percent=int((float(spent_by_20.total_spent)/float(profit.gain))*100)
        print("%d%% of the profit for the month of %s comes from 20%% of the users' purchases"%(percent,months[i]))
if (percent >= 80):
print ("For the month of %s Pareto's principle is applied." %months[i])
else:
print ("For the month of %s Pareto's principle isn't applied." %months[i])
return
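# Illustrative sketch of the Pareto check above on invented numbers: if the top 20% of buyers
# spend 850 out of a total monthly profit of 1000, they account for 85% of it, which is at
# least 80%, so the principle holds for that month.
def _demo_pareto_check():
    spent_by_top_20 = 850.0
    total_profit = 1000.0
    percent = int(100 * spent_by_top_20 / total_profit)   # 85
    return percent >= 80   # True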
|
[
"matplotlib.pyplot.title",
"pandas.DataFrame",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.legend",
"pandas.merge",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"pandas.concat"
] |
[((2627, 2639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2637, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2687, 2717), 'matplotlib.pyplot.bar', 'plt.bar', (['X', 'average_session_df'], {}), '(X, average_session_df)\n', (2694, 2717), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2841), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""average operation per session"""'], {}), "('average operation per session')\n", (2808, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2846, 2870), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""operations"""'], {}), "('operations')\n", (2856, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2875, 2962), 'matplotlib.pyplot.title', 'plt.title', (['"""Average number of times users perform each operation within a session"""'], {}), "(\n 'Average number of times users perform each operation within a session')\n", (2884, 2962), True, 'import matplotlib.pyplot as plt\n'), ((2962, 3001), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""silver"""', 'linestyle': '""":"""'}), "(color='silver', linestyle=':')\n", (2970, 3001), True, 'import matplotlib.pyplot as plt\n'), ((3505, 3549), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'months', 'columns': "['mean']"}), "(index=months, columns=['mean'])\n", (3517, 3549), True, 'import pandas as pd\n'), ((7516, 7541), 'numpy.mean', 'np.mean', (['df_mean_database'], {}), '(df_mean_database)\n', (7523, 7541), True, 'import numpy as np\n'), ((8905, 8949), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'months', 'columns': "['mean']"}), "(index=months, columns=['mean'])\n", (8917, 8949), True, 'import pandas as pd\n'), ((12325, 12369), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'months', 'columns': "['mean']"}), "(index=months, columns=['mean'])\n", (12337, 12369), True, 'import pandas as pd\n'), ((16476, 16494), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(2)'], {}), '(4, 2)\n', (16488, 16494), True, 'import matplotlib.pyplot as plt\n'), ((18846, 18856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18854, 18856), True, 'import matplotlib.pyplot as plt\n'), ((20151, 20163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20161, 20163), True, 'import matplotlib.pyplot as plt\n'), ((20199, 20226), 'matplotlib.pyplot.barh', 'plt.barh', (['X', "df_tot['view']"], {}), "(X, df_tot['view'])\n", (20207, 20226), True, 'import matplotlib.pyplot as plt\n'), ((20292, 20311), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""views"""'], {}), "('views')\n", (20302, 20311), True, 'import matplotlib.pyplot as plt\n'), ((20316, 20343), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""subcategories"""'], {}), "('subcategories')\n", (20326, 20343), True, 'import matplotlib.pyplot as plt\n'), ((20348, 20387), 'matplotlib.pyplot.title', 'plt.title', (['"""Most visited subcategories"""'], {}), "('Most visited subcategories')\n", (20357, 20387), True, 'import matplotlib.pyplot as plt\n'), ((20392, 20431), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""silver"""', 'linestyle': '""":"""'}), "(color='silver', linestyle=':')\n", (20400, 20431), True, 'import matplotlib.pyplot as plt\n'), ((20490, 20500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20498, 20500), True, 'import matplotlib.pyplot as plt\n'), ((22138, 22150), 'pandas.concat', 'pd.concat', (['l'], {}), '(l)\n', (22147, 22150), True, 'import pandas as pd\n'), ((23788, 23800), 'pandas.concat', 'pd.concat', (['l'], {}), '(l)\n', (23797, 23800), True, 'import pandas as pd\n'), 
((24948, 24960), 'pandas.concat', 'pd.concat', (['l'], {}), '(l)\n', (24957, 24960), True, 'import pandas as pd\n'), ((491, 519), 'pandas.read_csv', 'pd.read_csv', (['old_df_names[i]'], {}), '(old_df_names[i])\n', (502, 519), True, 'import pandas as pd\n'), ((1312, 1340), 'pandas.read_csv', 'pd.read_csv', (['old_df_names[i]'], {}), '(old_df_names[i])\n', (1323, 1340), True, 'import pandas as pd\n'), ((1979, 2043), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['event_type', 'user_session']"}), "(df_names[i], usecols=['event_type', 'user_session'])\n", (1990, 2043), True, 'import pandas as pd\n'), ((3695, 3828), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['event_time', 'event_type', 'product_id', 'user_id']", 'nrows': '(100000)', 'parse_dates': "['event_time']"}), "(df_names[i], usecols=['event_time', 'event_type', 'product_id',\n 'user_id'], nrows=100000, parse_dates=['event_time'])\n", (3706, 3828), True, 'import pandas as pd\n'), ((7311, 7332), 'numpy.mean', 'np.mean', (['df_mean_user'], {}), '(df_mean_user)\n', (7318, 7332), True, 'import numpy as np\n'), ((8058, 8106), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['event_type']"}), "(df_names[i], usecols=['event_type'])\n", (8069, 8106), True, 'import pandas as pd\n'), ((9049, 9171), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['event_time', 'event_type', 'product_id']", 'nrows': '(100000)', 'parse_dates': "['event_time']"}), "(df_names[i], usecols=['event_time', 'event_type', 'product_id'],\n nrows=100000, parse_dates=['event_time'])\n", (9060, 9171), True, 'import pandas as pd\n'), ((12469, 12577), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['event_time', 'event_type', 'product_id']", 'parse_dates': "['event_time']"}), "(df_names[i], usecols=['event_time', 'event_type', 'product_id'],\n parse_dates=['event_time'])\n", (12480, 12577), True, 'import pandas as pd\n'), ((15755, 15834), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['product_id', 'category_code', 'event_type']"}), "(df_names[i], usecols=['product_id', 'category_code', 'event_type'])\n", (15766, 15834), True, 'import pandas as pd\n'), ((19177, 19242), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['event_type', 'category_code']"}), "(df_names[i], usecols=['event_type', 'category_code'])\n", (19188, 19242), True, 'import pandas as pd\n'), ((21580, 21649), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['category_code', 'brand', 'price']"}), "(df_names[i], usecols=['category_code', 'brand', 'price'])\n", (21591, 21649), True, 'import pandas as pd\n'), ((23175, 23244), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['category_code', 'brand', 'price']"}), "(df_names[i], usecols=['category_code', 'brand', 'price'])\n", (23186, 23244), True, 'import pandas as pd\n'), ((24548, 24600), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['brand', 'price']"}), "(df_names[i], usecols=['brand', 'price'])\n", (24559, 24600), True, 'import pandas as pd\n'), ((25589, 25655), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['brand', 'price', 'event_type']"}), "(df_names[i], usecols=['brand', 'price', 'event_type'])\n", (25600, 25655), True, 'import pandas as pd\n'), ((28925, 29016), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'parse_dates': "['event_time']", 'usecols': "['event_time', 'user_id']"}), "(df_names[i], parse_dates=['event_time'], usecols=['event_time',\n 
'user_id'])\n", (28936, 29016), True, 'import pandas as pd\n'), ((29748, 29779), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[10.0, 5.0]'}), '(figsize=[10.0, 5.0])\n', (29758, 29779), True, 'import matplotlib.pyplot as plt\n'), ((29787, 29846), 'matplotlib.pyplot.plot', 'plt.plot', (['domenica', '"""-o"""'], {'color': '"""royalblue"""', 'label': '"""SUNDAY"""'}), "(domenica, '-o', color='royalblue', label='SUNDAY')\n", (29795, 29846), True, 'import matplotlib.pyplot as plt\n'), ((29857, 29910), 'matplotlib.pyplot.plot', 'plt.plot', (['lunedi', '"""-o"""'], {'color': '"""green"""', 'label': '"""MONDAY"""'}), "(lunedi, '-o', color='green', label='MONDAY')\n", (29865, 29910), True, 'import matplotlib.pyplot as plt\n'), ((29921, 29974), 'matplotlib.pyplot.plot', 'plt.plot', (['martedi', '"""-o"""'], {'color': '"""red"""', 'label': '"""TUESDAY"""'}), "(martedi, '-o', color='red', label='TUESDAY')\n", (29929, 29974), True, 'import matplotlib.pyplot as plt\n'), ((29985, 30045), 'matplotlib.pyplot.plot', 'plt.plot', (['mercoledi', '"""-o"""'], {'color': '"""yellow"""', 'label': '"""WEDNESDAY"""'}), "(mercoledi, '-o', color='yellow', label='WEDNESDAY')\n", (29993, 30045), True, 'import matplotlib.pyplot as plt\n'), ((30056, 30113), 'matplotlib.pyplot.plot', 'plt.plot', (['giovedi', '"""-o"""'], {'color': '"""orange"""', 'label': '"""THURSDAY"""'}), "(giovedi, '-o', color='orange', label='THURSDAY')\n", (30064, 30113), True, 'import matplotlib.pyplot as plt\n'), ((30124, 30179), 'matplotlib.pyplot.plot', 'plt.plot', (['venerdi', '"""-o"""'], {'color': '"""violet"""', 'label': '"""FRIDAY"""'}), "(venerdi, '-o', color='violet', label='FRIDAY')\n", (30132, 30179), True, 'import matplotlib.pyplot as plt\n'), ((30190, 30244), 'matplotlib.pyplot.plot', 'plt.plot', (['sabato', '"""-o"""'], {'color': '"""grey"""', 'label': '"""SATURDAY"""'}), "(sabato, '-o', color='grey', label='SATURDAY')\n", (30198, 30244), True, 'import matplotlib.pyplot as plt\n'), ((30255, 30273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""HOUR"""'], {}), "('HOUR')\n", (30265, 30273), True, 'import matplotlib.pyplot as plt\n'), ((30282, 30304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""VISITORS"""'], {}), "('VISITORS')\n", (30292, 30304), True, 'import matplotlib.pyplot as plt\n'), ((30313, 30357), 'matplotlib.pyplot.title', 'plt.title', (["('Daily average - %s ' % months[i])"], {}), "('Daily average - %s ' % months[i])\n", (30322, 30357), True, 'import matplotlib.pyplot as plt\n'), ((30397, 30409), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (30407, 30409), True, 'import matplotlib.pyplot as plt\n'), ((30418, 30428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30426, 30428), True, 'import matplotlib.pyplot as plt\n'), ((31069, 31134), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['event_type', 'category_code']"}), "(df_names[i], usecols=['event_type', 'category_code'])\n", (31080, 31134), True, 'import pandas as pd\n'), ((32136, 32148), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (32146, 32148), True, 'import matplotlib.pyplot as plt\n'), ((32253, 32263), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32261, 32263), True, 'import matplotlib.pyplot as plt\n'), ((32831, 32899), 'pandas.read_csv', 'pd.read_csv', (['df_names[i]'], {'usecols': "['user_id', 'event_type', 'price']"}), "(df_names[i], usecols=['user_id', 'event_type', 'price'])\n", (32842, 32899), True, 'import pandas as pd\n'), ((2370, 2404), 'pandas.DataFrame', 
'pd.DataFrame', (['average_session_dict'], {}), '(average_session_dict)\n', (2382, 2404), True, 'import pandas as pd\n'), ((27434, 27482), 'pandas.merge', 'pd.merge', (['left', 'right'], {'on': "['brand']", 'how': '"""outer"""'}), "(left, right, on=['brand'], how='outer')\n", (27442, 27482), True, 'import pandas as pd\n'), ((11855, 11873), 'numpy.mean', 'np.mean', (['dist_prod'], {}), '(dist_prod)\n', (11862, 11873), True, 'import numpy as np\n'), ((6798, 6825), 'numpy.mean', 'np.mean', (['product_dict[prod]'], {}), '(product_dict[prod])\n', (6805, 6825), True, 'import numpy as np\n'), ((11965, 11991), 'pandas.DataFrame', 'pd.DataFrame', (['product_dict'], {}), '(product_dict)\n', (11977, 11991), True, 'import pandas as pd\n'), ((15226, 15252), 'pandas.DataFrame', 'pd.DataFrame', (['product_dict'], {}), '(product_dict)\n', (15238, 15252), True, 'import pandas as pd\n'), ((6385, 6477), 'numpy.sum', 'np.sum', (["(df_product_2[df_product['event_type'] == 'view'].event_time < row[\n 'event_time'])"], {}), "(df_product_2[df_product['event_type'] == 'view'].event_time < row[\n 'event_time'])\n", (6391, 6477), True, 'import numpy as np\n'), ((7022, 7048), 'pandas.DataFrame', 'pd.DataFrame', (['product_dict'], {}), '(product_dict)\n', (7034, 7048), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 7 16:12:33 2016
@author: rmcleod
"""
import numpy as np
import matplotlib.pyplot as plt
import os, os.path, glob
mcFRCFiles = glob.glob( "FRC/*mcFRC.npy" )
zorroFRCFiles = glob.glob( "FRC/*zorroFRC.npy" )
zorroFRCs = [None] * len( zorroFRCFiles)
for J in np.arange( len(zorroFRCFiles) ):
zorroFRCs[J] = np.load( zorroFRCFiles[J] )
mcFRCs = [None] * len( mcFRCFiles)
for J in np.arange( len(mcFRCFiles) ):
mcFRCs[J] = np.load( mcFRCFiles[J] )
zorroMeanFRC = np.mean( np.array(zorroFRCs), axis=0 )
mcMeanFRC = np.mean( np.array(mcFRCs), axis=0 )
plt.figure()
plt.plot( mcMeanFRC, '.-', color='firebrick', label='MotionCorr' )
plt.plot( zorroMeanFRC, '.-', color='black', label='Zorro' )
plt.title( "Mean FRC Re-aligned from MotionCorr" )
plt.legend()
plt.xlim( [0,len(mcMeanFRC)] )
plt.savefig( "Dataset_mean_MC_vs_Zorro.png" )
|
[
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"glob.glob",
"matplotlib.pyplot.savefig"
] |
[((177, 204), 'glob.glob', 'glob.glob', (['"""FRC/*mcFRC.npy"""'], {}), "('FRC/*mcFRC.npy')\n", (186, 204), False, 'import os, os.path, glob\n'), ((223, 253), 'glob.glob', 'glob.glob', (['"""FRC/*zorroFRC.npy"""'], {}), "('FRC/*zorroFRC.npy')\n", (232, 253), False, 'import os, os.path, glob\n'), ((621, 633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (631, 633), True, 'import matplotlib.pyplot as plt\n'), ((634, 698), 'matplotlib.pyplot.plot', 'plt.plot', (['mcMeanFRC', '""".-"""'], {'color': '"""firebrick"""', 'label': '"""MotionCorr"""'}), "(mcMeanFRC, '.-', color='firebrick', label='MotionCorr')\n", (642, 698), True, 'import matplotlib.pyplot as plt\n'), ((701, 759), 'matplotlib.pyplot.plot', 'plt.plot', (['zorroMeanFRC', '""".-"""'], {'color': '"""black"""', 'label': '"""Zorro"""'}), "(zorroMeanFRC, '.-', color='black', label='Zorro')\n", (709, 759), True, 'import matplotlib.pyplot as plt\n'), ((762, 810), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean FRC Re-aligned from MotionCorr"""'], {}), "('Mean FRC Re-aligned from MotionCorr')\n", (771, 810), True, 'import matplotlib.pyplot as plt\n'), ((813, 825), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (823, 825), True, 'import matplotlib.pyplot as plt\n'), ((857, 900), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Dataset_mean_MC_vs_Zorro.png"""'], {}), "('Dataset_mean_MC_vs_Zorro.png')\n", (868, 900), True, 'import matplotlib.pyplot as plt\n'), ((361, 386), 'numpy.load', 'np.load', (['zorroFRCFiles[J]'], {}), '(zorroFRCFiles[J])\n', (368, 386), True, 'import numpy as np\n'), ((488, 510), 'numpy.load', 'np.load', (['mcFRCFiles[J]'], {}), '(mcFRCFiles[J])\n', (495, 510), True, 'import numpy as np\n'), ((542, 561), 'numpy.array', 'np.array', (['zorroFRCs'], {}), '(zorroFRCs)\n', (550, 561), True, 'import numpy as np\n'), ((593, 609), 'numpy.array', 'np.array', (['mcFRCs'], {}), '(mcFRCs)\n', (601, 609), True, 'import numpy as np\n')]
|
import statistics

import numpy as np
from pyutai import trees
from potentials import cluster
# NOTE: stats() below also relies on a `read` helper (a .bif network reader from this
# project) that is assumed to be imported elsewhere; it is not defined in this file.
def cpd_size(cpd):
return np.prod(cpd.cardinality)
def unique_values(cpd):
unique, _ = np.unique(cpd.values, return_counts=True)
return len(unique)
def stats(net):
if not net.endswith('.bif'):
        raise ValueError(f'Net format not supported. Expected .bif, got {net}')
file_ = read.read(f'networks/{net}')
model = file_.get_model()
cpds = model.get_cpds()
    mean_unique = statistics.mean(unique_values(cpd) for cpd in cpds)
    max_values = max(
        ((i, unique_values(cpd)) for i, cpd in enumerate(cpds)),
        key=lambda x: x[1])
    print(
        f'Net: {net}. Mean unique value: {mean_unique:.2f}. Biggest cpd: {max_values}'
    )
def tree_from_cpd(cpd, selector):
if selector is None:
pass
else:
selector = selector(cpd.values, cpd.variables)
cardinality_ = dict(zip(cpd.variables, cpd.cardinality))
return trees.Tree.from_array(cpd.values,
cpd.variables,
cardinality_,
selector=selector)
def cluster_from_cpd(cpd):
return cluster.Cluster.from_array(cpd.values,
cpd.variables)
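# Minimal usage sketch (illustrative): building a tree straight from a small potential,
# mirroring what tree_from_cpd does with a pgmpy CPD. The variables, cardinalities and values
# below are invented; only the call shape comes from the code above.
def _demo_tree_from_array():
    values = np.array([[0.9, 0.1], [0.4, 0.6]])   # a 2x2 potential over ('A', 'B')
    cardinality = {'A': 2, 'B': 2}
    return trees.Tree.from_array(values, ['A', 'B'], cardinality, selector=None)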
|
[
"numpy.unique",
"pyutai.trees.Tree.from_array",
"numpy.prod",
"potentials.cluster.Cluster.from_array"
] |
[((107, 131), 'numpy.prod', 'np.prod', (['cpd.cardinality'], {}), '(cpd.cardinality)\n', (114, 131), True, 'import numpy as np\n'), ((174, 215), 'numpy.unique', 'np.unique', (['cpd.values'], {'return_counts': '(True)'}), '(cpd.values, return_counts=True)\n', (183, 215), True, 'import numpy as np\n'), ((978, 1064), 'pyutai.trees.Tree.from_array', 'trees.Tree.from_array', (['cpd.values', 'cpd.variables', 'cardinality_'], {'selector': 'selector'}), '(cpd.values, cpd.variables, cardinality_, selector=\n selector)\n', (999, 1064), False, 'from pyutai import trees\n'), ((1197, 1250), 'potentials.cluster.Cluster.from_array', 'cluster.Cluster.from_array', (['cpd.values', 'cpd.variables'], {}), '(cpd.values, cpd.variables)\n', (1223, 1250), False, 'from potentials import cluster\n')]
|
import os,sys
import datetime as dt
import numpy as np
try:
#for python 3.0 or later
from urllib.request import urlopen
except ImportError:
#Fall back to python 2 urllib2
from urllib2 import urlopen
import requests
from multiprocessing import Pool
import drms
from shutil import move
import glob
###Remove proxy server variables from Lockheed after using the proxy server to connect to the google calendar 2019/02/20 <NAME>
##os.environ.pop("http_proxy" )
##os.environ.pop("https_proxy")
class dark_times:
def __init__(self,time,
irisweb='http://iris.lmsal.com/health-safety/timeline/iris_tim_archive/{2}/IRIS_science_timeline_{0}.V{1:2d}.txt',
simpleb=False,complexa=False,tol=50):
"""
A python class used for finding and downloading IRIS dark observations. This module requires that parameters be specified in
a parameter file in this directory. The parameter file's name must be "parameter_file" and contain the three following lines:
Line1: email address registered with JSOC (e.g. <EMAIL>)
Line2: A base directory containing the level 1 IRIS dark files. The program will concatenate YYYY/MM/simpleb/ or YYYY/MM/complexa/ onto the base directory
Line3: A base directory containing the level 0 IRIS dark files. The program will concatenate simpleb/YYYY/MM/ or complexa/YYYY/MM/ onto the base directory
Example three lines below:
<EMAIL>
/data/alisdair/IRIS_LEVEL1_DARKS/
/data/alisdair/opabina/scratch/joan/iris/newdat/orbit/level0/
The program will create the level0 and level1 directories as needed.
Parameters
----------
time: str
A string containing the date the dark observations started based on the IRIS calibration-as-run calendar in YYYY/MM/DD format (e.g.
test = gdf.dark_times(time,simpleb=True))
irisweb: string, optional
A formatted text string which corresponds to the location of the IRIS timeline files
(Default = 'http://iris.lmsal.com/health-safety/timeline/iris_tim_archive/{2}/IRIS_science_timeline_{0}.V{1:2d}.txt').
            The {0} character string corresponds to the date of the timeline uploaded in YYYYMMDD format, while {1:2d}
corresponds to the highest number version of the timeline, which I assume is the timeline uploaded to the spacecraft.
simpleb: boolean, optional
            Whether to download simple B darks; only simple B or complex A darks can be downloaded per call (Default = False).
complexa: boolean, optional
            Whether to download complex A darks; only simple B or complex A darks can be downloaded per call (Default = False).
tol: int, optional
            The number of darks required in a directory before the program decides to download. If the directory already holds more
            darks than this tolerance, no new darks are downloaded; if it holds fewer, the new darks are downloaded (Default = 50).
Returns
-------
None
Just downloads files and creates required directories.
"""
#web page location of IRIS timeline
self.irisweb = irisweb #.replace('IRIS',time+'/IRIS')
self.otime = dt.datetime.strptime(time,'%Y/%m/%d')
self.stime = self.otime.strftime('%Y%m%d')
#Type of dark to download simple B or complex A
self.complexa = complexa
self.simpleb = simpleb
        #Minimum number of dark files required to run
self.tol = tol
#read lines in parameter file
parU = open('parameter_file','r')
pars = parU.readlines()
parU.close()
#update parameters based on new parameter file
#get email address
self.email = pars[0].strip()
#get level 1/download base directory (without simpleb or complexa subdirectory
bdir = pars[1].strip()
#get level 0 directory
ldir = pars[2].strip()
if complexa:
self.obsid = 'OBSID=4203400000'
if simpleb:
self.obsid = 'OBSID=4202000003'
#make the download directory
if self.simpleb:
self.bdir = bdir+'/{0}/simpleB/'.format(self.otime.strftime('%Y/%m'))
self.ldir = ldir+'/simpleB/{0}/'.format(self.otime.strftime('%Y/%m'))
else:
self.bdir = bdir+'/{0}/complexA/'.format(self.otime.strftime('%Y/%m'))
self.ldir = ldir+'/complexA/{0}/'.format(self.otime.strftime('%Y/%m'))
def request_files(self):
#First check that any time line exists for given day
searching = True
sb = 0 #searching backwards days to correct for weekend or multiday timelines
while searching:
#look in iris's timeline structure
self.stime = (self.otime-dt.timedelta(days=sb)).strftime('%Y%m%d')
irispath = (self.otime-dt.timedelta(days=sb)).strftime('%Y/%m/%d')
            inurl = self.irisweb.format(self.stime,0,irispath).replace(' ','0') #searching for V00 file version
resp = requests.head(inurl)
#leave loop if V00 is found
if resp.status_code == 200: searching =False
else: sb += 1 #look one day back if timeline is missing
if sb >= 9:
searching = False #dont look back more than 9 days
sys.stdout.write('FAILED, IRIS timeline does not exist')#printing this will cause the c-shell script to fail too
sys.exit(1) # exit the python script
check = True
v = 0 #timeline version
#get lastest timeline version
while check == True:
inurl = self.irisweb.format(self.stime, v,irispath).replace(' ','0')
resp = requests.head(inurl)
if resp.status_code != 200:
check = False
v+=-1
inurl = self.irisweb.format(self.stime, v,irispath).replace(' ','0')
else:
v+=1
#get the timeline file information for request timeline
res = urlopen(inurl)
self.res = res
#Need to add decode because python 3 is wonderful 2019/01/16 <NAME>
self.timeline = res.read().decode('utf-8')
def get_start_end(self):
#lines with OBSID=obsid
self.lines = []
for line in self.timeline.split('\n'):
if self.obsid in line:
self.lines.append(line)
#get the last set of OBSIDs (useful for eclipse season)
#Query from start to end time 2019/01/02 <NAME>
self.sta_dark = self.lines[0][3:20]
self.end_dark = self.lines[-1][3:20]
self.sta_dark_dt = self.create_dt_object(self.sta_dark)
self.end_dark_dt = self.create_dt_object(self.end_dark)
self.sta_dark_dt = self.sta_dark_dt-dt.timedelta(minutes=1)
self.end_dark_dt = self.end_dark_dt+dt.timedelta(minutes=1)
#create datetime objects using doy in timeline
def create_dt_object(self,dtobj):
splt = dtobj.split(':')
obj = dt.datetime(int(splt[0]),1,1,int(splt[2]),int(splt[3]))+dt.timedelta(days=int(splt[1])-1) #convert doy to datetime obj
return obj
#set up JSOC query for darks
def dark_query(self):
#use drms module to download from JSOC (https://pypi.python.org/pypi/drms)
client = drms.Client(email=self.email,verbose=False)
fmt = '%Y.%m.%d_%H:%M'
self.qstr = 'iris.lev1[{0}_TAI-{1}_TAI][][? IMG_TYPE ~ "DARK" ?]'.format(self.sta_dark_dt.strftime(fmt),self.end_dark_dt.strftime(fmt))
self.expt = client.export(self.qstr)
#setup string to pass write to sswidl for download
### fmt = '%Y-%m-%dT%H:%M:%S'
### self.response = client.query(jsoc.Time(self.sta_dark_dt.strftime(fmt),self.end_dark_dt.strftime(fmt)),jsoc.Series('iris.lev1'),
### jsoc.Notify('<EMAIL>'),jsoc.Segment('image'))
###
self.get_darks(client)
def get_darks(self,client):
#### import time
#### wait = True
####
#### request = client.request_data(self.response)
#### waittime = 60.*5. #five minute wait to check on data completion
#### time.sleep(waittime) #
####
#### while wait:
#### stat = client.check_request(request)
#### if stat == 1:
#### temp.sleep(waittime)
#### elif stat == 0:
#### wait = False
#### elif stat > 1:
#### break #jump out of loop if you get an error
# check to make sure directory does not exist
if not os.path.exists(self.bdir):
os.makedirs(self.bdir)
#also make level0 directory
if not os.path.exists(self.ldir):
os.makedirs(self.ldir)
#get number of records
try:
index = np.arange(np.size(self.expt.urls.url))
if index[-1] < self.tol: #make sure to have at least 50 darks in archive before downloading
sys.stdout.write("FAILED, LESS THAN {0:2d} DARKS IN ARCHIVE".format(self.tol))
sys.exit(1)
except: #exit nicely if no records exist
sys.stdout.write("FAILED, No JSOC record exists")
sys.exit(1)
#check to see if darks are already downloaded Added 2017/03/20
#make sure the downloaded files are on the same day added 2017/12/05 (<NAME>)
if len(glob.glob(self.bdir+'/iris.lev1.{0}*.fits'.format(self.otime.strftime('%Y-%m-%d')))) < self.tol:
            #Download the data using drms in parallel (will fuss about mounted drives occasionally)
for ii in index: self.download_par(ii)
#DRMS DOES NOT WORK IN PARALELL
#### pool = Pool(processes=4)
#### outf = pool.map(self.download_par,index)
#### pool.close()
### self.expt.download(bdir,1,fname_from_rec=True)
#download the data
#### res = client.get_request(request,path=bdir,progress=True)
#### res.wait()
#
def download_par(self,index):
# get file from JSOC
outf = self.expt.download(self.bdir,index,fname_from_rec=True)
#format output file
fils = str(outf['download'].values[0])
fils = fils.split('/')[-1]
nout = fils[:14]+'-'+fils[14:16]+'-'+fils[16:24]+fils[26:]
#create new file name in same as previous format
if os.path.isfile(str(outf['download'].values[0])):
move(str(outf['download'].values[0]),self.bdir+nout)
#run to completion
def run_all(self):
self.request_files()
self.get_start_end()
self.dark_query()
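# Minimal usage sketch (illustrative), following the class docstring: pick the start date of a
# dark block from the IRIS calibration-as-run calendar (the date below is just an example) and
# download the simple B darks. A valid 'parameter_file' must sit in the working directory.
def _demo_dark_times(date='2017/03/20'):
    darks = dark_times(date, simpleb=True)
    darks.run_all()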
|
[
"sys.stdout.write",
"numpy.size",
"requests.head",
"os.makedirs",
"os.path.exists",
"datetime.datetime.strptime",
"datetime.timedelta",
"drms.Client",
"urllib2.urlopen",
"sys.exit"
] |
[((3268, 3306), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['time', '"""%Y/%m/%d"""'], {}), "(time, '%Y/%m/%d')\n", (3288, 3306), True, 'import datetime as dt\n'), ((6072, 6086), 'urllib2.urlopen', 'urlopen', (['inurl'], {}), '(inurl)\n', (6079, 6086), False, 'from urllib2 import urlopen\n'), ((7346, 7390), 'drms.Client', 'drms.Client', ([], {'email': 'self.email', 'verbose': '(False)'}), '(email=self.email, verbose=False)\n', (7357, 7390), False, 'import drms\n'), ((5089, 5109), 'requests.head', 'requests.head', (['inurl'], {}), '(inurl)\n', (5102, 5109), False, 'import requests\n'), ((5763, 5783), 'requests.head', 'requests.head', (['inurl'], {}), '(inurl)\n', (5776, 5783), False, 'import requests\n'), ((6824, 6847), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (6836, 6847), True, 'import datetime as dt\n'), ((6892, 6915), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (6904, 6915), True, 'import datetime as dt\n'), ((8611, 8636), 'os.path.exists', 'os.path.exists', (['self.bdir'], {}), '(self.bdir)\n', (8625, 8636), False, 'import os, sys\n'), ((8650, 8672), 'os.makedirs', 'os.makedirs', (['self.bdir'], {}), '(self.bdir)\n', (8661, 8672), False, 'import os, sys\n'), ((8724, 8749), 'os.path.exists', 'os.path.exists', (['self.ldir'], {}), '(self.ldir)\n', (8738, 8749), False, 'import os, sys\n'), ((8763, 8785), 'os.makedirs', 'os.makedirs', (['self.ldir'], {}), '(self.ldir)\n', (8774, 8785), False, 'import os, sys\n'), ((5371, 5427), 'sys.stdout.write', 'sys.stdout.write', (['"""FAILED, IRIS timeline does not exist"""'], {}), "('FAILED, IRIS timeline does not exist')\n", (5387, 5427), False, 'import os, sys\n'), ((5500, 5511), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5508, 5511), False, 'import os, sys\n'), ((8861, 8888), 'numpy.size', 'np.size', (['self.expt.urls.url'], {}), '(self.expt.urls.url)\n', (8868, 8888), True, 'import numpy as np\n'), ((9105, 9116), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9113, 9116), False, 'import os, sys\n'), ((9181, 9230), 'sys.stdout.write', 'sys.stdout.write', (['"""FAILED, No JSOC record exists"""'], {}), "('FAILED, No JSOC record exists')\n", (9197, 9230), False, 'import os, sys\n'), ((9243, 9254), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9251, 9254), False, 'import os, sys\n'), ((4836, 4857), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'sb'}), '(days=sb)\n', (4848, 4857), True, 'import datetime as dt\n'), ((4913, 4934), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'sb'}), '(days=sb)\n', (4925, 4934), True, 'import datetime as dt\n')]
|
"""Candid Covariance-Free Incremental PCA (CCIPCA)."""
import numpy as np
from scipy import linalg
from sklearn.utils import check_array
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.base import BaseEstimator
from sklearn.preprocessing import normalize
import copy
class CCIPCA(BaseEstimator):
"""Candid Covariance-Free Incremental PCA (CCIPCA).
Parameters
----------
    n_components : int or None, (default=10)
        Number of components to keep. If ``n_components`` is ``None``,
        then ``n_components`` is set to ``min(n_samples, n_features)``.
copy : bool, (default=True)
If False, X will be overwritten. ``copy=False`` can be used to
save memory but is unsafe for general use.
References
Candid Covariance-free Incremental Principal Component Analysis
"""
def __init__(self, n_components=10, amnesic=2, copy=True):
self.__name__ = 'Incremental Projection on Latent Space (IPLS)'
self.n_components = n_components
self.amnesic = amnesic
self.n = 0
self.copy = copy
self.x_rotations = None
self.sum_x = None
self.n_features = None
self.eign_values = None
self.x_mean = None
def normalize(self, x):
return normalize(x[:, np.newaxis], axis=0).ravel()
def fit(self, X, Y=None):
X = check_array(X, dtype=FLOAT_DTYPES, copy=self.copy)
n_samples, n_features = X.shape
if self.n == 0:
self.n_features = n_features
self.x_rotations = np.zeros((n_features, self.n_components))
self.eign_values = np.zeros((self.n_components))
self.incremental_mean = 1
for j in range(0, n_samples):
self.n = self.n + 1
u = X[j]
old_mean = (self.n-1)/self.n*self.incremental_mean
new_mean = 1/self.n*u
self.incremental_mean = old_mean+new_mean
if self.n == 1:
self.x_rotations[:, 0] = u
self.sum_x = u
else:
u = u - self.incremental_mean
self.sum_x = self.sum_x + u
k = min(self.n, self.n_components)
for i in range(1, k+1):
if i == self.n:
self.x_rotations[:, i - 1] = u
else:
w1, w2 = (self.n-1-self.amnesic)/self.n, (self.n+self.amnesic)/self.n
v_norm = self.normalize(self.x_rotations[:, i-1])
v_norm = np.expand_dims(v_norm, axis=1)
self.x_rotations[:, i - 1] = w1 * self.x_rotations[:, i - 1] + w2*u*np.dot(u.T, v_norm)[0]
v_norm = self.normalize(self.x_rotations[:, i-1])
v_norm = np.expand_dims(v_norm, axis=1)
u = u - (np.dot(u.T, v_norm)*v_norm)[:, 0]
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data."""
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
X -= self.incremental_mean
w_rotation = np.zeros(self.x_rotations.shape)
for c in range(0, self.n_components):
w_rotation[:, c] = self.normalize(self.x_rotations[:, c])
return np.dot(X, w_rotation)
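# Minimal usage sketch (illustrative): fit the incremental estimator on random data and
# project a few samples. The shapes, seed and component count are arbitrary.
def _demo_ccipca():
    rng = np.random.RandomState(0)
    X = rng.rand(200, 30)
    ipca = CCIPCA(n_components=5)
    ipca.fit(X)
    return ipca.transform(X[:10]).shape   # (10, 5)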
|
[
"sklearn.utils.check_array",
"numpy.zeros",
"numpy.expand_dims",
"sklearn.preprocessing.normalize",
"numpy.dot"
] |
[((1416, 1466), 'sklearn.utils.check_array', 'check_array', (['X'], {'dtype': 'FLOAT_DTYPES', 'copy': 'self.copy'}), '(X, dtype=FLOAT_DTYPES, copy=self.copy)\n', (1427, 1466), False, 'from sklearn.utils import check_array\n'), ((3110, 3155), 'sklearn.utils.check_array', 'check_array', (['X'], {'copy': 'copy', 'dtype': 'FLOAT_DTYPES'}), '(X, copy=copy, dtype=FLOAT_DTYPES)\n', (3121, 3155), False, 'from sklearn.utils import check_array\n'), ((3218, 3250), 'numpy.zeros', 'np.zeros', (['self.x_rotations.shape'], {}), '(self.x_rotations.shape)\n', (3226, 3250), True, 'import numpy as np\n'), ((3389, 3410), 'numpy.dot', 'np.dot', (['X', 'w_rotation'], {}), '(X, w_rotation)\n', (3395, 3410), True, 'import numpy as np\n'), ((1611, 1652), 'numpy.zeros', 'np.zeros', (['(n_features, self.n_components)'], {}), '((n_features, self.n_components))\n', (1619, 1652), True, 'import numpy as np\n'), ((1685, 1712), 'numpy.zeros', 'np.zeros', (['self.n_components'], {}), '(self.n_components)\n', (1693, 1712), True, 'import numpy as np\n'), ((1326, 1361), 'sklearn.preprocessing.normalize', 'normalize', (['x[:, np.newaxis]'], {'axis': '(0)'}), '(x[:, np.newaxis], axis=0)\n', (1335, 1361), False, 'from sklearn.preprocessing import normalize\n'), ((2613, 2643), 'numpy.expand_dims', 'np.expand_dims', (['v_norm'], {'axis': '(1)'}), '(v_norm, axis=1)\n', (2627, 2643), True, 'import numpy as np\n'), ((2859, 2889), 'numpy.expand_dims', 'np.expand_dims', (['v_norm'], {'axis': '(1)'}), '(v_norm, axis=1)\n', (2873, 2889), True, 'import numpy as np\n'), ((2733, 2752), 'numpy.dot', 'np.dot', (['u.T', 'v_norm'], {}), '(u.T, v_norm)\n', (2739, 2752), True, 'import numpy as np\n'), ((2920, 2939), 'numpy.dot', 'np.dot', (['u.T', 'v_norm'], {}), '(u.T, v_norm)\n', (2926, 2939), True, 'import numpy as np\n')]
|
import warnings
warnings.filterwarnings('ignore', category=UserWarning, append=True)
RAMS_Units=dict()
# winds
RAMS_Units['UC']='m s-1'
RAMS_Units['VC']='m s-1'
RAMS_Units['WC']='m s-1'
# potential temperature
RAMS_Units['THETA']='K'
RAMS_Units['PI']='J kg-1 K-1'
RAMS_Units['DN0']='kg m-3'
# water vapour mixing ratio:
RAMS_Units['RV']='kg kg-1'
# hydrometeor mass mixing ratios:
mass_mixing_ratios=['RCP','RDP','RRP','RPP','RSP','RAP','RGP','RHP']
for variable in mass_mixing_ratios:
RAMS_Units[variable]='kg kg-1'
# hydrometeor number mixing ratios:
number_mixing_ratios=['CCP','CDP','CRP','CPP','CSP','CAP','CGP','CHP']
for variable in number_mixing_ratios:
    RAMS_Units[variable]='kg-1'
#hydrometeor precipitation rates:
precipitation_rates=['PCPRR','PCPRD','PCPRS','PCPRH','PCPRP','PCPRA','PCPRG']
for variable in precipitation_rates:
    RAMS_Units[variable]='kg m-2 s-1'
# hydrometeor precipitation accumulated:
precipitation_accumulated=['ACCPR','ACCPD','ACCPS','ACCPH','ACCPP','ACCPA','ACCPG']
for variable in precipitation_accumulated:
    RAMS_Units[variable]='kg m-2'
# radiation:
RAMS_Units['LWUP']='W m-2'
RAMS_Units['LWDN']='W m-2'
RAMS_Units['SWUP']='W m-2'
RAMS_Units['SWDN']='W m-2'
# individual microphysics processes accumulated
RAMS_processes_mass=[
'NUCCLDRT',
'NUCICERT',
'INUCHOMRT',
'INUCCONTR',
'INUCIFNRT',
'INUCHAZRT',
'VAPCLDT',
'VAPRAINT',
'VAPPRIST',
'VAPSNOWT',
'VAPAGGRT',
'VAPGRAUT',
'VAPHAILT',
'VAPDRIZT',
'MELTSNOWT',
'MELTAGGRT',
'MELTGRAUT',
'MELTHAILT',
'RIMECLDSNOWT',
'RIMECLDAGGRT',
'RIMECLDGRAUT',
'RIMECLDHAILT',
'RAIN2PRT',
'RAIN2SNT',
'RAIN2AGT',
'RAIN2GRT',
'RAIN2HAT',
'AGGRSELFPRIST',
'AGGRSELFSNOWT',
'AGGRPRISSNOWT'
]
for variable in RAMS_processes_mass:
RAMS_Units[variable]='kg kg-1'
# grouped microphysics processes accumulated:
RAMS_processes_mass_grouped=[
'VAPLIQT',
'VAPICET',
'MELTICET',
'CLD2RAINT',
'RIMECLDT',
'RAIN2ICET',
'ICE2RAINT',
'AGGREGATET'
]
for variable in RAMS_processes_mass_grouped:
RAMS_Units[variable]='kg kg-1'
# grouped microphysics processes instantaneous:
RAMS_processes_mass_grouped_instantaneous=[
'VAPLIQ',
'VAPICE',
'MELTICE',
'CLD2RAIN',
'RIMECLD',
'RAIN2ICE',
'ICE2RAIN',
'NUCCLDR',
'NUCICER'
]
for variable in RAMS_processes_mass_grouped_instantaneous:
RAMS_Units[variable]='kg kg-1 s-1'
RAMS_standard_name=dict()
variable_list_derive=[
'air_temperature',
'air_pressure',
'temperature',
'air_density',
'OLR',
'LWC',
'IWC',
'LWP',
'IWP',
'IWV',
'airmass',
'airmass_path',
'surface_precipitation',
'surface_precipitation_average',
'surface_precipitation_accumulated',
'surface_precipitation_instantaneous',
'LWup_TOA',
'LWup_sfc',
'LWdn_TOA',
'LWdn_sfc',
'SWup_TOA',
'SWup_sfc',
'SWdn_TOA',
'SWdn_sfc'
]
def variable_list(filenames):
from iris import load
cubelist=load(filenames[0])
variable_list = [cube.name() for cube in cubelist]
return variable_list
def load(filenames,variable,mode='auto',**kwargs):
if variable in variable_list_derive:
variable_cube=deriveramscube(filenames,variable,**kwargs)
else:
variable_cube=loadramscube(filenames,variable,**kwargs)
# if mode=='auto':
# variable_list_file=variable_list(filenames)
# if variable in variable_list_file:
# variable_cube=loadramscube(filenames,variable,**kwargs)
# elif variable in variable_list_derive:
# variable_cube=deriveramscube(filenames,variable,**kwargs)
# elif variable in variable_dict_pseudonym.keys():
# variable_load=variable_dict_pseudonym[variable]
# variable_cube=loadramscube(filenames,variable_load,**kwargs)
# else:
# raise SystemExit('variable not found')
# elif mode=='file':
# variable_list_file=variable_list(filenames)
# if variable in variable_list_file:
# variable_cube=loadramscube(filenames,variable,**kwargs)
# elif mode=='derive':
# variable_cube=deriveramscube(filenames,variable,**kwargs)
# elif mode=='pseudonym':
# variable_load=variable_dict_pseudonym[variable]
# variable_cube=loadramscube(filenames,variable_load,**kwargs)
# else:
# print("mode=",mode)
# raise SystemExit('unknown mode')
return variable_cube
def loadramscube(filenames,variable,**kwargs):
if type(filenames) is list:
variable_cube=loadramscube_mult(filenames,variable,**kwargs)
elif type(filenames) is str:
variable_cube=loadramscube_single(filenames,variable,**kwargs)
else:
print("filenames=",filenames)
raise SystemExit('Type of input unknown: Must be str of list')
return variable_cube
def loadramscube_single(filenames,variable,constraint=None,add_coordinates=None):
from iris import load_cube
variable_cube=load_cube(filenames,variable)
variable_cube.units=RAMS_Units[variable]
variable_cube=addcoordinates(filenames, variable,variable_cube,add_coordinates=add_coordinates)
return variable_cube
def loadramscube_mult(filenames,variable,constraint=None,add_coordinates=None):
from iris.cube import CubeList
cube_list=[]
for i in range(len(filenames)):
cube_list.append(loadramscube_single(filenames[i],variable,add_coordinates=add_coordinates) )
for member in cube_list:
member.attributes={}
variable_cubes=CubeList(cube_list)
variable_cube=variable_cubes.merge_cube()
variable_cube=variable_cube.extract(constraint)
return variable_cube
def readramsheader(filename):
from numpy import array
searchfile = open(filename, "r")
coord_dict=dict()
variable_dict=dict()
coord_part=False
i_variable=0
n_variable=0
for i,line in enumerate(searchfile):
if (i==0):
num_variables=int(line[:-1])
if (i>0 and i<=num_variables):
line_split=line[:-1].split()
variable_dict[line_split[0]]=int(line_split[2])
if ('__') in line:
coord_part=True
i_variable=i
variable_name=line[2:-1]
variable_list=[]
if coord_part:
if (i==i_variable+1):
n_variable=int(line[:-1])
if n_variable>0:
if (i>=i_variable+2 and i<=i_variable+1+n_variable):
try:
value_out=array(float(line[:-1]))
except:
value_out=line[:-1]
variable_list.append(value_out)
if (i==i_variable+1+n_variable):
coord_dict[variable_name]=array(variable_list)
coord_part=False
# else:
# coord_part=False
return variable_dict, coord_dict
def addcoordinates(filename, variable,variable_cube,**kwargs):
filename_header=filename[:-5]+'head.txt'
domain=filename[-4]
variable_dict, coord_dict=readramsheader(filename_header)
variable_cube=add_dim_coordinates(filename, variable,variable_cube,variable_dict, coord_dict,domain,**kwargs)
variable_cube=add_aux_coordinates(filename, variable,variable_cube,variable_dict, coord_dict,domain,**kwargs)
return variable_cube
def make_time_coord(coord_dict):
from datetime import datetime,timedelta
from iris import coords
timestr=str(int(coord_dict['iyear1'][0]))+str(int(coord_dict['imonth1'][0])).zfill(2)+str(int(coord_dict['idate1'][0])).zfill(2)+str(int(coord_dict['itime1'][0])).zfill(4)
timeobj = datetime.strptime(timestr,"%Y%m%d%H%M")+timedelta(seconds=1)*coord_dict['time'][0]
if timeobj<datetime(100,1,1):
base_date=datetime(1,1,1)
else:
base_date=datetime(1970,1,1)
time_units='days since '+ base_date.strftime('%Y-%m-%d')
time_days=(timeobj - base_date).total_seconds() / timedelta(days=1).total_seconds()
time_coord=coords.DimCoord(time_days, standard_name='time', long_name='time', var_name='time', units=time_units, bounds=None, attributes=None, coord_system=None, circular=False)
return time_coord
def make_model_level_number_coordinate(n_level):
from iris import coords
from numpy import arange
MODEL_LEVEL_NUMBER=arange(0,n_level)
model_level_number=coords.AuxCoord(MODEL_LEVEL_NUMBER, standard_name='model_level_number', units='1')
return model_level_number
def add_dim_coordinates(filename, variable,variable_cube,variable_dict, coord_dict,domain,add_coordinates=None):
from iris import coords
import numpy as np
# from iris import coord_systems
# coord_system=coord_systems.LambertConformal(central_lat=MOAD_CEN_LAT, central_lon=CEN_LON, false_easting=0.0, false_northing=0.0, secant_latitudes=(TRUELAT1, TRUELAT2))
coord_system=None
if (variable_dict[variable]==3):
time_coord=make_time_coord(coord_dict)
variable_cube.add_aux_coord(time_coord)
z_coord=coords.DimCoord(coord_dict['ztn01'], standard_name='geopotential_height', long_name='z', var_name='z', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(z_coord,0)
model_level_number_coord=make_model_level_number_coordinate(len(z_coord.points))
variable_cube.add_aux_coord(model_level_number_coord,0)
x_coord=coords.DimCoord(np.arange(len(coord_dict['xtn0'+domain])), long_name='x', units='1', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(x_coord,2)
y_coord=coords.DimCoord(np.arange(len(coord_dict['ytn0'+domain])), long_name='y', units='1', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(y_coord,1)
projection_x_coord=coords.DimCoord(coord_dict['xtn0'+domain], standard_name='projection_x_coordinate', long_name='x', var_name='x', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_aux_coord(projection_x_coord,(2))
projection_y_coord=coords.DimCoord(coord_dict['ytn0'+domain], standard_name='projection_y_coordinate', long_name='y', var_name='y', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_aux_coord(projection_y_coord,(1))
elif (variable_dict[variable]==2):
x_coord=coords.DimCoord(np.arange(len(coord_dict['xtn0'+domain])), long_name='x', units='1', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(x_coord,1)
y_coord=coords.DimCoord(np.arange(len(coord_dict['ytn0'+domain])), long_name='y', units='1', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_dim_coord(y_coord,0)
projection_x_coord=coords.DimCoord(coord_dict['xtn0'+domain], standard_name='projection_x_coordinate', long_name='x', var_name='x', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_aux_coord(projection_x_coord,(1))
projection_y_coord=coords.DimCoord(coord_dict['ytn0'+domain], standard_name='projection_y_coordinate', long_name='y', var_name='y', units='m', bounds=None, attributes=None, coord_system=coord_system)
variable_cube.add_aux_coord(projection_y_coord,(0))
time_coord=make_time_coord(coord_dict)
variable_cube.add_aux_coord(time_coord)
return variable_cube
def add_aux_coordinates(filename,variable,variable_cube,variable_dict, coord_dict,domain,**kwargs):
from iris import load_cube,coords
coord_system=None
latitude=load_cube(filename,'GLAT').core_data()
longitude=load_cube(filename,'GLON').core_data()
lat_coord=coords.AuxCoord(latitude, standard_name='latitude', long_name='latitude', var_name='latitude', units='degrees', bounds=None, attributes=None, coord_system=coord_system)
lon_coord=coords.AuxCoord(longitude, standard_name='longitude', long_name='longitude', var_name='longitude', units='degrees', bounds=None, attributes=None, coord_system=coord_system)
if (variable_dict[variable]==3):
variable_cube.add_aux_coord(lon_coord,(1,2))
variable_cube.add_aux_coord(lat_coord,(1,2))
elif (variable_dict[variable]==2):
variable_cube.add_aux_coord(lon_coord,(0,1))
variable_cube.add_aux_coord(lat_coord,(0,1))
# add_coordinates=kwargs.pop('add_coordinates')
# if type(add_coordinates)!=list:
# add_coordinates1=add_coordinates
# add_coordinates=[]
# add_coordinates.append(add_coordinates1)
# for coordinate in add_coordinates:
# if coordinate=='latlon':
# latitude=load_cube(filename,'GLAT').data
# longitude=load_cube(filename,'GLON').data
# lat_coord=coords.AuxCoord(latitude, standard_name='latitude', long_name='latitude', var_name='latitude', units='degrees', bounds=None, attributes=None, coord_system=coord_system)
# lon_coord=coords.AuxCoord(longitude, standard_name='longitude', long_name='longitude', var_name='longitude', units='degrees', bounds=None, attributes=None, coord_system=coord_system)
# if (variable_dict[variable]==3):
# variable_cube.add_aux_coord(lon_coord,(1,2))
# variable_cube.add_aux_coord(lat_coord,(1,2))
# elif (variable_dict[variable]==2):
# variable_cube.add_aux_coord(lon_coord,(0,1))
# variable_cube.add_aux_coord(lat_coord,(0,1))
return variable_cube
def calculate_rams_LWC(filenames,**kwargs):
RCP=loadramscube(filenames, 'RCP',**kwargs)
RDP=loadramscube(filenames, 'RDP',**kwargs)
RRP=loadramscube(filenames, 'RRP',**kwargs)
LWC=RCP+RDP+RRP
LWC.rename('liquid water content')
#LWC.rename('mass_concentration_of_liquid_water_in_air')
return LWC
#
def calculate_rams_IWC(filenames,**kwargs):
RPP=loadramscube(filenames, 'RPP',**kwargs)
RSP=loadramscube(filenames, 'RSP',**kwargs)
RAP=loadramscube(filenames, 'RAP',**kwargs)
RGP=loadramscube(filenames, 'RGP',**kwargs)
RHP=loadramscube(filenames, 'RHP',**kwargs)
IWC=RPP+RSP+RAP+RGP+RHP
IWC.rename('ice water content')
#IWC.rename('mass_concentration_of_ice_water_in_air')
return IWC
def calculate_rams_airmass(filenames,**kwargs):
from iris.coords import AuxCoord
from numpy import diff
rho=loadramscube(filenames,'DN0',**kwargs)
z=rho.coord('geopotential_height')
z_dim=rho.coord_dims('geopotential_height')
z_diff=AuxCoord(mydiff(z.points),var_name='z_diff')
rho.add_aux_coord(z_diff,data_dims=z_dim)
dx=diff(rho.coord('projection_x_coordinate').points[0:2])
dy=diff(rho.coord('projection_y_coordinate').points[0:2])
Airmass=rho*rho.coord('z_diff')*dx*dy
Airmass.remove_coord('z_diff')
Airmass.rename('mass_of_air')
Airmass.units='kg'
return Airmass
def calculate_rams_airmass_path(filenames,**kwargs):
from iris.coords import AuxCoord
rho=loadramscube(filenames,'DN0',**kwargs)
z=rho.coord('geopotential_height')
z_dim=rho.coord_dims('geopotential_height')
z_diff=AuxCoord(mydiff(z.points),var_name='z_diff')
rho.add_aux_coord(z_diff,data_dims=z_dim)
Airmass=rho*rho.coord('z_diff')
Airmass.remove_coord('z_diff')
Airmass.rename('airmass_path')
Airmass.units='kg m-2'
return Airmass
def calculate_rams_air_temperature(filenames,**kwargs):
from iris.coords import AuxCoord
theta=loadramscube(filenames,'THETA',**kwargs)
pi=loadramscube(filenames,'PI',**kwargs)
cp=AuxCoord(1004,long_name='cp',units='J kg-1 K-1')
t=theta*pi/cp
t.rename('air_temperature')
return t
def calculate_rams_air_pressure(filenames,**kwargs):
from iris.coords import AuxCoord
pi=loadramscube(filenames,'PI',**kwargs)
cp=AuxCoord(1004,long_name='cp',units='J kg-1 K-1')
rd=AuxCoord(287,long_name='rd',units='J kg-1 K-1')
p = 100000 * (pi/cp)**(cp.points/rd.points) # Pressure in Pa
p.rename('air_pressure')
p.units='Pa'
return p
def calculate_rams_density(filenames,**kwargs):
rho=loadramscube(filenames,'DN0',**kwargs)
rho.rename('air_density')
rho.units='kg m-3'
return rho
def calculate_rams_LWP(filenames,**kwargs):
from iris.analysis import SUM
LWC=deriveramscube(filenames,'LWC',**kwargs)
Airmass=deriveramscube(filenames,'airmass_path',**kwargs)
LWP=(LWC*Airmass).collapsed(('geopotential_height'),SUM)
LWP.rename('liquid water path')
#LWP.rename('atmosphere_mass_content_of_cloud_liquid_water')
return LWP
#
def calculate_rams_IWP(filenames,**kwargs):
from iris.analysis import SUM
IWC=deriveramscube(filenames,'IWC',**kwargs)
Airmass=deriveramscube(filenames,'airmass_path',**kwargs)
IWP=(IWC*Airmass).collapsed(('geopotential_height'),SUM)
IWP.rename('ice water path')
#IWP.rename('atmosphere_mass_content_of_cloud_ice_water')
return IWP
def calculate_rams_IWV(filenames,**kwargs):
from iris.analysis import SUM
RV=loadramscube(filenames,'RV',**kwargs)
Airmass=deriveramscube(filenames,'airmass_path',**kwargs)
IWV=(RV*Airmass).collapsed(('geopotential_height'),SUM)
IWV.rename('integrated water vapor')
#IWP.rename('atmosphere_mass_content_of_cloud_ice_water')
return IWV
# Radiation fluxed at the top of the atmospere and at the surface
def calculate_rams_LWup_TOA(filenames,**kwargs):
from iris import Constraint
LWUP=loadramscube(filenames,'LWUP',**kwargs)
LWup_TOA=LWUP.extract(Constraint(model_level_number=LWUP.coord('model_level_number').points[-1]))
LWup_TOA.rename('LWup_TOA')
return LWup_TOA
def calculate_rams_LWup_sfc(filenames,**kwargs):
from iris import Constraint
LWUP=loadramscube(filenames,'LWUP',**kwargs)
LWup_sfc=LWUP.extract(Constraint(model_level_number=0))
LWup_sfc.rename('LWup_sfc')
return LWup_sfc
def calculate_rams_LWdn_TOA(filenames,**kwargs):
from iris import Constraint
LWDN=loadramscube(filenames,'LWDN',**kwargs)
LWdn_TOA=LWDN.extract(Constraint(model_level_number=LWDN.coord('model_level_number').points[-1]))
LWdn_TOA.rename('LWdn_TOA')
return LWdn_TOA
def calculate_rams_LWdn_sfc(filenames,**kwargs):
from iris import Constraint
LWDN=loadramscube(filenames,'LWDN',**kwargs)
LWdn_sfc=LWDN.extract(Constraint(model_level_number=0))
LWdn_sfc.rename('LWdn_sfc')
return LWdn_sfc
def calculate_rams_SWup_TOA(filenames,**kwargs):
from iris import Constraint
SWUP=loadramscube(filenames,'SWUP',**kwargs)
SWup_TOA=SWUP.extract(Constraint(model_level_number=SWUP.coord('model_level_number').points[-1]))
SWup_TOA.rename('SWup_TOA')
return SWup_TOA
def calculate_rams_SWup_sfc(filenames,**kwargs):
from iris import Constraint
SWUP=loadramscube(filenames,'SWUP',**kwargs)
SWup_sfc=SWUP.extract(Constraint(model_level_number=0))
SWup_sfc.rename('SWup_sfc')
return SWup_sfc
def calculate_rams_SWdn_TOA(filenames,**kwargs):
from iris import Constraint
SWDN=loadramscube(filenames,'SWDN',**kwargs)
SWdn_TOA=SWDN.extract(Constraint(model_level_number=SWDN.coord('model_level_number').points[-1]))
SWdn_TOA.rename('SWdn_TOA')
return SWdn_TOA
def calculate_rams_SWdn_sfc(filenames,**kwargs):
from iris import Constraint
SWDN=loadramscube(filenames,'SWDN',**kwargs)
SWdn_sfc=SWDN.extract(Constraint(model_level_number=0))
SWdn_sfc.rename('SWdn_sfc')
return SWdn_sfc
def calculate_rams_surface_precipitation_instantaneous(filenames,**kwargs):
PCPRR=loadramscube(filenames,'PCPRR',**kwargs)
PCPRD=loadramscube(filenames,'PCPRD',**kwargs)
PCPRS=loadramscube(filenames,'PCPRS',**kwargs)
PCPRP=loadramscube(filenames,'PCPRP',**kwargs)
PCPRA=loadramscube(filenames,'PCPRA',**kwargs)
PCPRH=loadramscube(filenames,'PCPRH',**kwargs)
PCPRG=loadramscube(filenames,'PCPRG',**kwargs)
surface_precip=PCPRR+PCPRD+PCPRS+PCPRP+PCPRA+PCPRG+PCPRH
surface_precip.rename('surface_precipitation_instantaneous')
return surface_precip
def calculate_rams_surface_precipitation_accumulated(filenames,**kwargs):
ACCPR=loadramscube(filenames,'ACCPR',**kwargs)
ACCPD=loadramscube(filenames,'ACCPD',**kwargs)
ACCPS=loadramscube(filenames,'ACCPS',**kwargs)
ACCPP=loadramscube(filenames,'ACCPP',**kwargs)
ACCPA=loadramscube(filenames,'ACCPA',**kwargs)
ACCPH=loadramscube(filenames,'ACCPH',**kwargs)
ACCPG=loadramscube(filenames,'ACCPG',**kwargs)
surface_precip_acc=ACCPR+ACCPD+ACCPS+ACCPP+ACCPA+ACCPG+ACCPH
surface_precip_acc.rename('surface_precipitation_accumulated')
#IWP.rename('atmosphere_mass_content_of_cloud_ice_water')
return surface_precip_acc
def calculate_rams_surface_precipitation_average(filenames,**kwargs):
from dask.array import concatenate
surface_precip_accum=calculate_rams_surface_precipitation_accumulated(filenames,**kwargs)
#caclulate timestep in hours
time_coord=surface_precip_accum.coord('time')
dt=(time_coord.units.num2date(time_coord.points[1])-time_coord.units.num2date(time_coord.points[0])).total_seconds()/3600.
    # divide the difference in accumulated precipitation between timesteps by the timestep (in h) to get an average rate in mm/h:
    surface_precip=surface_precip_accum
    surface_precip.data=concatenate((0*surface_precip.core_data()[[1],:,:],surface_precip.core_data()[1:,:,:]-surface_precip.core_data()[:-1,:,:]),axis=0)/dt
surface_precip.rename('surface_precipitation_average')
surface_precip.units= 'mm/h'
return surface_precip
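# Worked example for the conversion above (illustrative values, not from the data): with
# hourly output dt = 1.0 h, so a 2.5 mm increase in accumulated precipitation between two
# consecutive outputs becomes an average rate of 2.5 mm/h; the first time step is set to
# zero because there is no earlier accumulation to difference against.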
def mydiff(A):
import numpy as np
d1=np.diff(A)
d=np.zeros(A.shape)
d[0]=d1[0]
d[1:-1]=0.5*(d1[0:-1]+d1[1:])
d[-1]=d1[-1]
return d
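# Minimal usage sketch for mydiff (illustrative input): for A = np.array([0., 1., 4., 9.])
# the one-sided differences are used at the ends and centred differences in the interior,
# giving d = [1., 2., 4., 5.] -- a same-length approximation of the point-to-point gradient.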
def deriveramscube(filenames,variable,**kwargs):
# if variable in ['temperature','air_temperature']:
# variable_cube=calculate_rams_temperature(filenames,**kwargs)
# #variable_cube_out=addcoordinates(filenames, 'T',variable_cube,add_coordinates)
# elif variable == 'density':
# variable_cube=calculate_rams_density(filenames,**kwargs)
if variable == 'LWC':
variable_cube=calculate_rams_LWC(filenames,**kwargs)
elif variable == 'IWC':
variable_cube=calculate_rams_IWC(filenames,**kwargs)
elif variable == 'LWP':
variable_cube=calculate_rams_LWP(filenames,**kwargs)
elif variable == 'IWP':
variable_cube=calculate_rams_IWP(filenames,**kwargs)
elif variable == 'IWV':
variable_cube=calculate_rams_IWV(filenames,**kwargs)
elif variable == 'airmass':
variable_cube=calculate_rams_airmass(filenames,**kwargs)
elif variable == 'air_temperature':
variable_cube=calculate_rams_air_temperature(filenames,**kwargs)
elif variable=='air_pressure':
variable_cube=calculate_rams_air_pressure(filenames,**kwargs)
elif variable == 'air_density':
variable_cube=calculate_rams_density(filenames,**kwargs)
elif variable == 'airmass_path':
variable_cube=calculate_rams_airmass_path(filenames,**kwargs)
elif variable == 'surface_precipitation_average':
variable_cube=calculate_rams_surface_precipitation_average(filenames,**kwargs)
elif variable == 'surface_precipitation_accumulated':
variable_cube=calculate_rams_surface_precipitation_accumulated(filenames,**kwargs)
elif (variable == 'surface_precipitation_instantaneous') or (variable == 'surface_precipitation'):
variable_cube=calculate_rams_surface_precipitation_instantaneous(filenames,**kwargs)
elif (variable == 'LWup_TOA'):
variable_cube=calculate_rams_LWup_TOA(filenames,**kwargs)
elif (variable == 'LWup_sfc'):
variable_cube=calculate_rams_LWup_sfc(filenames,**kwargs)
elif (variable == 'LWdn_TOA'):
variable_cube=calculate_rams_LWdn_TOA(filenames,**kwargs)
elif (variable == 'LWdn_sfc'):
variable_cube=calculate_rams_LWdn_sfc(filenames,**kwargs)
elif (variable == 'SWup_TOA'):
variable_cube=calculate_rams_SWup_TOA(filenames,**kwargs)
elif (variable == 'SWup_sfc'):
variable_cube=calculate_rams_SWup_sfc(filenames,**kwargs)
elif (variable == 'SWdn_TOA'):
variable_cube=calculate_rams_SWdn_TOA(filenames,**kwargs)
elif (variable == 'SWdn_sfc'):
variable_cube=calculate_rams_SWdn_sfc(filenames,**kwargs)
else:
raise NameError(variable, 'is not a known variable')
return variable_cube
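# Hedged usage sketch (the file list name is an assumption, not defined in this module):
#   lwp_cube = deriveramscube(rams_files, 'LWP')
#   rain_rate = deriveramscube(rams_files, 'surface_precipitation_average')
# Any variable name outside the branches above raises NameError, as implemented.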
|
[
"iris.coords.AuxCoord",
"warnings.filterwarnings",
"iris.cube.CubeList",
"numpy.zeros",
"iris.load",
"iris.Constraint",
"datetime.datetime",
"datetime.datetime.strptime",
"iris.load_cube",
"iris.coords.DimCoord",
"numpy.arange",
"numpy.diff",
"datetime.timedelta",
"numpy.array"
] |
[((16, 84), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning', 'append': '(True)'}), "('ignore', category=UserWarning, append=True)\n", (39, 84), False, 'import warnings\n'), ((2997, 3015), 'iris.load', 'load', (['filenames[0]'], {}), '(filenames[0])\n', (3001, 3015), False, 'from iris import load\n'), ((5014, 5044), 'iris.load_cube', 'load_cube', (['filenames', 'variable'], {}), '(filenames, variable)\n', (5023, 5044), False, 'from iris import load_cube, coords\n'), ((5569, 5588), 'iris.cube.CubeList', 'CubeList', (['cube_list'], {}), '(cube_list)\n', (5577, 5588), False, 'from iris.cube import CubeList\n'), ((8120, 8296), 'iris.coords.DimCoord', 'coords.DimCoord', (['time_days'], {'standard_name': '"""time"""', 'long_name': '"""time"""', 'var_name': '"""time"""', 'units': 'time_units', 'bounds': 'None', 'attributes': 'None', 'coord_system': 'None', 'circular': '(False)'}), "(time_days, standard_name='time', long_name='time', var_name\n ='time', units=time_units, bounds=None, attributes=None, coord_system=\n None, circular=False)\n", (8135, 8296), False, 'from iris import load_cube, coords\n'), ((8443, 8461), 'numpy.arange', 'arange', (['(0)', 'n_level'], {}), '(0, n_level)\n', (8449, 8461), False, 'from numpy import arange\n'), ((8484, 8570), 'iris.coords.AuxCoord', 'coords.AuxCoord', (['MODEL_LEVEL_NUMBER'], {'standard_name': '"""model_level_number"""', 'units': '"""1"""'}), "(MODEL_LEVEL_NUMBER, standard_name='model_level_number',\n units='1')\n", (8499, 8570), False, 'from iris import load_cube, coords\n'), ((11882, 12058), 'iris.coords.AuxCoord', 'coords.AuxCoord', (['latitude'], {'standard_name': '"""latitude"""', 'long_name': '"""latitude"""', 'var_name': '"""latitude"""', 'units': '"""degrees"""', 'bounds': 'None', 'attributes': 'None', 'coord_system': 'coord_system'}), "(latitude, standard_name='latitude', long_name='latitude',\n var_name='latitude', units='degrees', bounds=None, attributes=None,\n coord_system=coord_system)\n", (11897, 12058), False, 'from iris import load_cube, coords\n'), ((12065, 12245), 'iris.coords.AuxCoord', 'coords.AuxCoord', (['longitude'], {'standard_name': '"""longitude"""', 'long_name': '"""longitude"""', 'var_name': '"""longitude"""', 'units': '"""degrees"""', 'bounds': 'None', 'attributes': 'None', 'coord_system': 'coord_system'}), "(longitude, standard_name='longitude', long_name='longitude',\n var_name='longitude', units='degrees', bounds=None, attributes=None,\n coord_system=coord_system)\n", (12080, 12245), False, 'from iris import load_cube, coords\n'), ((15855, 15905), 'iris.coords.AuxCoord', 'AuxCoord', (['(1004)'], {'long_name': '"""cp"""', 'units': '"""J kg-1 K-1"""'}), "(1004, long_name='cp', units='J kg-1 K-1')\n", (15863, 15905), False, 'from iris.coords import AuxCoord\n'), ((16110, 16160), 'iris.coords.AuxCoord', 'AuxCoord', (['(1004)'], {'long_name': '"""cp"""', 'units': '"""J kg-1 K-1"""'}), "(1004, long_name='cp', units='J kg-1 K-1')\n", (16118, 16160), False, 'from iris.coords import AuxCoord\n'), ((16166, 16215), 'iris.coords.AuxCoord', 'AuxCoord', (['(287)'], {'long_name': '"""rd"""', 'units': '"""J kg-1 K-1"""'}), "(287, long_name='rd', units='J kg-1 K-1')\n", (16174, 16215), False, 'from iris.coords import AuxCoord\n'), ((21921, 21931), 'numpy.diff', 'np.diff', (['A'], {}), '(A)\n', (21928, 21931), True, 'import numpy as np\n'), ((21938, 21955), 'numpy.zeros', 'np.zeros', (['A.shape'], {}), '(A.shape)\n', (21946, 21955), True, 'import numpy as np\n'), ((7756, 7796), 
'datetime.datetime.strptime', 'datetime.strptime', (['timestr', '"""%Y%m%d%H%M"""'], {}), "(timestr, '%Y%m%d%H%M')\n", (7773, 7796), False, 'from datetime import datetime, timedelta\n'), ((7855, 7874), 'datetime.datetime', 'datetime', (['(100)', '(1)', '(1)'], {}), '(100, 1, 1)\n', (7863, 7874), False, 'from datetime import datetime, timedelta\n'), ((7892, 7909), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (7900, 7909), False, 'from datetime import datetime, timedelta\n'), ((7936, 7956), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (7944, 7956), False, 'from datetime import datetime, timedelta\n'), ((9174, 9352), 'iris.coords.DimCoord', 'coords.DimCoord', (["coord_dict['ztn01']"], {'standard_name': '"""geopotential_height"""', 'long_name': '"""z"""', 'var_name': '"""z"""', 'units': '"""m"""', 'bounds': 'None', 'attributes': 'None', 'coord_system': 'coord_system'}), "(coord_dict['ztn01'], standard_name='geopotential_height',\n long_name='z', var_name='z', units='m', bounds=None, attributes=None,\n coord_system=coord_system)\n", (9189, 9352), False, 'from iris import load_cube, coords\n'), ((9982, 10173), 'iris.coords.DimCoord', 'coords.DimCoord', (["coord_dict['xtn0' + domain]"], {'standard_name': '"""projection_x_coordinate"""', 'long_name': '"""x"""', 'var_name': '"""x"""', 'units': '"""m"""', 'bounds': 'None', 'attributes': 'None', 'coord_system': 'coord_system'}), "(coord_dict['xtn0' + domain], standard_name=\n 'projection_x_coordinate', long_name='x', var_name='x', units='m',\n bounds=None, attributes=None, coord_system=coord_system)\n", (9997, 10173), False, 'from iris import load_cube, coords\n'), ((10250, 10441), 'iris.coords.DimCoord', 'coords.DimCoord', (["coord_dict['ytn0' + domain]"], {'standard_name': '"""projection_y_coordinate"""', 'long_name': '"""y"""', 'var_name': '"""y"""', 'units': '"""m"""', 'bounds': 'None', 'attributes': 'None', 'coord_system': 'coord_system'}), "(coord_dict['ytn0' + domain], standard_name=\n 'projection_y_coordinate', long_name='y', var_name='y', units='m',\n bounds=None, attributes=None, coord_system=coord_system)\n", (10265, 10441), False, 'from iris import load_cube, coords\n'), ((18126, 18158), 'iris.Constraint', 'Constraint', ([], {'model_level_number': '(0)'}), '(model_level_number=0)\n', (18136, 18158), False, 'from iris import Constraint\n'), ((18654, 18686), 'iris.Constraint', 'Constraint', ([], {'model_level_number': '(0)'}), '(model_level_number=0)\n', (18664, 18686), False, 'from iris import Constraint\n'), ((19182, 19214), 'iris.Constraint', 'Constraint', ([], {'model_level_number': '(0)'}), '(model_level_number=0)\n', (19192, 19214), False, 'from iris import Constraint\n'), ((19710, 19742), 'iris.Constraint', 'Constraint', ([], {'model_level_number': '(0)'}), '(model_level_number=0)\n', (19720, 19742), False, 'from iris import Constraint\n'), ((7796, 7816), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (7805, 7816), False, 'from datetime import datetime, timedelta\n'), ((10970, 11161), 'iris.coords.DimCoord', 'coords.DimCoord', (["coord_dict['xtn0' + domain]"], {'standard_name': '"""projection_x_coordinate"""', 'long_name': '"""x"""', 'var_name': '"""x"""', 'units': '"""m"""', 'bounds': 'None', 'attributes': 'None', 'coord_system': 'coord_system'}), "(coord_dict['xtn0' + domain], standard_name=\n 'projection_x_coordinate', long_name='x', var_name='x', units='m',\n bounds=None, attributes=None, coord_system=coord_system)\n", (10985, 
11161), False, 'from iris import load_cube, coords\n'), ((11238, 11429), 'iris.coords.DimCoord', 'coords.DimCoord', (["coord_dict['ytn0' + domain]"], {'standard_name': '"""projection_y_coordinate"""', 'long_name': '"""y"""', 'var_name': '"""y"""', 'units': '"""m"""', 'bounds': 'None', 'attributes': 'None', 'coord_system': 'coord_system'}), "(coord_dict['ytn0' + domain], standard_name=\n 'projection_y_coordinate', long_name='y', var_name='y', units='m',\n bounds=None, attributes=None, coord_system=coord_system)\n", (11253, 11429), False, 'from iris import load_cube, coords\n'), ((11776, 11803), 'iris.load_cube', 'load_cube', (['filename', '"""GLAT"""'], {}), "(filename, 'GLAT')\n", (11785, 11803), False, 'from iris import load_cube, coords\n'), ((11829, 11856), 'iris.load_cube', 'load_cube', (['filename', '"""GLON"""'], {}), "(filename, 'GLON')\n", (11838, 11856), False, 'from iris import load_cube, coords\n'), ((8071, 8088), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8080, 8088), False, 'from datetime import datetime, timedelta\n'), ((6854, 6874), 'numpy.array', 'array', (['variable_list'], {}), '(variable_list)\n', (6859, 6874), False, 'from numpy import array\n')]
|
"""PPO Agent for CRMDPs."""
import torch
import random
import numpy as np
from typing import Generator, List
from safe_grid_agents.common.utils import track_metrics
from safe_grid_agents.common.agents.policy_cnn import PPOCNNAgent
from safe_grid_agents.types import Rollout
from ai_safety_gridworlds.environments.tomato_crmdp import REWARD_FACTOR
def _get_agent_position(board, agent_value):
x_pos, y_pos = np.unravel_index(
np.argwhere(np.ravel(board) == agent_value), board.shape
)
x_pos, y_pos = x_pos.flat[0], y_pos.flat[0]
return x_pos, y_pos
def _manhatten_distance(x1, x2, y1, y2):
return abs(x1 - x2) + abs(y1 - y2)
def d_tomato_crmdp(X, Y):
assert X.shape == Y.shape
return REWARD_FACTOR * np.sum(X != Y)
def d_toy_gridworlds(X, Y):
assert X.shape == Y.shape
X = X[0, ...]
Y = Y[0, ...]
# toy gridworlds use value 0 to denote the agent on the board
X_pos_x, X_pos_y = _get_agent_position(X, agent_value=0)
Y_pos_x, Y_pos_y = _get_agent_position(Y, agent_value=0)
return _manhatten_distance(X_pos_x, Y_pos_x, X_pos_y, Y_pos_y)
def d_trans_boat(X, Y):
assert X.shape == Y.shape
X_initial, X_final = X[0, ...], X[1, ...]
Y_initial, Y_final = Y[0, ...], Y[1, ...]
# deepmind gridworlds use value 2 to denote the agent on the board
X_initial_pos_x, X_initial_pos_y = _get_agent_position(X_initial, agent_value=2)
Y_initial_pos_x, Y_initial_pos_y = _get_agent_position(Y_initial, agent_value=2)
X_final_pos_x, X_final_pos_y = _get_agent_position(X_final, agent_value=2)
Y_final_pos_x, Y_final_pos_y = _get_agent_position(Y_final, agent_value=2)
X_direction_x = X_final_pos_x - X_initial_pos_x
X_direction_y = X_final_pos_y - X_initial_pos_y
Y_direction_x = Y_final_pos_x - Y_initial_pos_x
Y_direction_y = Y_final_pos_y - Y_initial_pos_y
initial_position_distance = _manhatten_distance(
X_initial_pos_x, Y_initial_pos_x, X_initial_pos_y, Y_initial_pos_y
)
direction_distance = int(X_direction_x != Y_direction_x)
direction_distance += int(X_direction_y != Y_direction_y)
return initial_position_distance + direction_distance
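# Illustrative example (board encoding as described in the comments above): if two
# transitions start the agent in the same cell but one moves along x and the other along
# y, the initial-position term is 0 and both direction components differ, so d_trans_boat
# returns 2.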
ENV_TO_D = {
"corners": d_toy_gridworlds,
"way": d_toy_gridworlds,
"tomato-crmdp": d_tomato_crmdp,
"trans-boat": d_trans_boat,
}
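# Note (informal): ENV_TO_D picks the state metric by environment alias; e.g. for
# "tomato-crmdp" the agent's self.d(X, Y) below is REWARD_FACTOR times the number of
# board cells on which the two observations differ.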
class PPOCRMDPAgent(PPOCNNAgent):
"""PPO Agent for CRMDPs."""
def __init__(self, env, args) -> None:
super().__init__(env, args)
self.states = dict()
self.d = ENV_TO_D[args.env_alias]
self.epsilon = 1e-3
self.rllb = dict()
self.state_memory_cap = 0
def _mark_state_corrupt(self, board, reward) -> None:
assert board.dtype == np.float32
self.states[board.tostring()] = [False, reward]
def _mark_state_safe(self, board, reward) -> None:
assert board.dtype == np.float32
self.states[board.tostring()] = [True, reward]
def _is_state_corrupt(self, board) -> bool:
if board.tostring() in self.states:
return not self.states[board.tostring()][0]
else:
return False
def _iterate_safe_states(self) -> Generator[np.array, None, None]:
for board_str in self.states.keys():
if self.states[board_str][0]:
board = np.fromstring(board_str, dtype=np.float32, count=self.n_input)
board = np.reshape(board, self.board_shape)
yield board, self.states[board_str][1]
def _iterate_corrupt_states(self) -> Generator[np.array, None, None]:
for board_str in self.states.keys():
if not self.states[board_str][0]:
board = np.fromstring(board_str, dtype=np.float32, count=self.n_input)
board = np.reshape(board, self.board_shape)
yield board, self.states[board_str][1]
def _update_rllb(self) -> None:
"""Update the reward lower Lipschitz bound."""
for corrupt_board, corrupt_reward in self._iterate_corrupt_states():
board_string = corrupt_board.tostring()
rllb = self.rllb.get(board_string, None)
for safe_board, safe_reward in self._iterate_safe_states():
bound = safe_reward - self.d(safe_board, corrupt_board)
if rllb is None or bound > rllb:
rllb = bound
self.rllb[board_string] = rllb
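    # Note (informal): after this update, self.rllb[X] holds max over known safe states Y
    # of reward(Y) - d(Y, X), i.e. a reward lower Lipschitz bound that replaces the
    # observed (possibly corrupted) reward for X in get_modified_reward below.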
def _get_TLV(self, boardX, rewardX, state_iterator) -> float:
"""Return the total Lipschitz violation of a state X w.r.t a set of states.
Each state is only added once to the TLV."""
TLV = 0
unique_states = set()
for boardY, rewardY in state_iterator:
if boardY.tostring() not in unique_states:
TLV += max(0, abs(rewardX - rewardY) - self.d(boardY, boardX))
unique_states.add(boardY.tostring())
return TLV
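    # Note (informal): TLV(X) = sum over distinct Y of max(0, |R(X) - R(Y)| - d(X, Y));
    # identify_corruption_in_trajectory below treats states whose TLV stays above
    # self.epsilon as corrupt candidates.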
def _purge_memory(self) -> None:
"""Drop random noncorrupt states from the memory for performance reasons."""
if len(self.states) > self.state_memory_cap:
to_remove = [
state
                for state in random.sample(
                    list(self.states.keys()),
                    len(self.states) - self.state_memory_cap // 2,
                )
if self.states[state][0]
]
for state in to_remove:
del self.states[state]
# we might have too many corrupt states, so update the bounds
if len(self.states) > 2 * self.state_memory_cap / 3:
self.state_memory_cap *= 2
def get_modified_reward(self, board, reward) -> float:
"""Return the reward to use for optimizing the policy based on the rllb."""
if self._is_state_corrupt(board):
return self.rllb[board.tostring()]
else:
return reward
def get_modified_rewards_for_rollout(self, boards, rewards) -> List[float]:
"""
Returns a list of rewards for a given rollout that has been updated based
on the rllb.
"""
new_rewards = []
for i in range(len(rewards)):
new_rewards.append(self.get_modified_reward(boards[i], rewards[i]))
return new_rewards
def identify_corruption_in_trajectory(self, boards, rewards) -> None:
"""Perform detection of corrupt states on a trajectory.
Updates the set of safe states and corrupt states with all new states,
that are being visited in this trajectory. Then updates the self.rllb
dict, so that we can get the modified reward function.
"""
boards = np.array(boards)
rewards = np.array(rewards)
TLV = np.zeros(len(boards))
for i in range(len(boards)):
TLV[i] = self._get_TLV(boards[i], rewards[i], zip(boards, rewards))
TLV_sort_idx = np.argsort(TLV)[::-1]
non_corrupt_idx = list(range(len(boards)))
added_corrupt_states = False
# iterate over all states in the trajectory in order decreasing by their TLV
for i in range(len(boards)):
idx = TLV_sort_idx[i]
if not added_corrupt_states:
# performance improvement
new_TLV = TLV[idx]
else:
new_TLV = self._get_TLV(
boards[idx],
rewards[idx],
zip(boards[non_corrupt_idx], rewards[non_corrupt_idx]),
)
if new_TLV <= self.epsilon:
if not self._is_state_corrupt(boards[idx]):
self._mark_state_safe(boards[idx], rewards[idx])
break
else:
self._mark_state_corrupt(boards[idx], rewards[idx])
non_corrupt_idx.remove(idx)
added_corrupt_states = True
if added_corrupt_states:
self._update_rllb()
def gather_rollout(self, env, env_state, history, args) -> Rollout:
"""Gather a single rollout from an old policy.
Based on the gather_rollout function of the regular PPO agents.
This version also tracks the successor states of each action.
Based on this the corrupted states can be detected before performing
the training step."""
state, reward, done, info = env_state
done = False
rollout = Rollout(states=[], actions=[], rewards=[], returns=[])
successors = []
for r in range(self.rollouts):
successors_r = []
# Rollout loop
states, actions, rewards, returns = [], [], [], []
while not done:
with torch.no_grad():
action = self.old_policy.act_explore(state)
successor, reward, done, info = env.step(action)
# Maybe cheat
if args.cheat:
reward = info["hidden_reward"]
# In case the agent is drunk, use the actual action they took
try:
action = info["extra_observations"]["actual_actions"]
except KeyError:
pass
# Store data from experience
states.append(state) # .flatten())
actions.append(action)
rewards.append(float(reward))
successors_r.append(successor)
state = successor
history["t"] += 1
if r != 0:
history["episode"] += 1
self.identify_corruption_in_trajectory(successors_r, rewards)
rewards = self.get_modified_rewards_for_rollout(successors_r, rewards)
returns = self.get_discounted_returns(rewards)
history = track_metrics(history, env)
rollout.states.append(states)
rollout.actions.append(actions)
rollout.rewards.append(rewards)
rollout.returns.append(returns)
successors.append(successors_r)
self.state_memory_cap = max(self.state_memory_cap, 20 * len(states))
self._purge_memory()
state = env.reset()
done = False
return rollout
|
[
"numpy.sum",
"safe_grid_agents.types.Rollout",
"numpy.ravel",
"numpy.argsort",
"numpy.array",
"numpy.reshape",
"safe_grid_agents.common.utils.track_metrics",
"torch.no_grad",
"numpy.fromstring"
] |
[((743, 757), 'numpy.sum', 'np.sum', (['(X != Y)'], {}), '(X != Y)\n', (749, 757), True, 'import numpy as np\n'), ((6632, 6648), 'numpy.array', 'np.array', (['boards'], {}), '(boards)\n', (6640, 6648), True, 'import numpy as np\n'), ((6667, 6684), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (6675, 6684), True, 'import numpy as np\n'), ((8363, 8417), 'safe_grid_agents.types.Rollout', 'Rollout', ([], {'states': '[]', 'actions': '[]', 'rewards': '[]', 'returns': '[]'}), '(states=[], actions=[], rewards=[], returns=[])\n', (8370, 8417), False, 'from safe_grid_agents.types import Rollout\n'), ((6863, 6878), 'numpy.argsort', 'np.argsort', (['TLV'], {}), '(TLV)\n', (6873, 6878), True, 'import numpy as np\n'), ((9768, 9795), 'safe_grid_agents.common.utils.track_metrics', 'track_metrics', (['history', 'env'], {}), '(history, env)\n', (9781, 9795), False, 'from safe_grid_agents.common.utils import track_metrics\n'), ((453, 468), 'numpy.ravel', 'np.ravel', (['board'], {}), '(board)\n', (461, 468), True, 'import numpy as np\n'), ((3317, 3379), 'numpy.fromstring', 'np.fromstring', (['board_str'], {'dtype': 'np.float32', 'count': 'self.n_input'}), '(board_str, dtype=np.float32, count=self.n_input)\n', (3330, 3379), True, 'import numpy as np\n'), ((3404, 3439), 'numpy.reshape', 'np.reshape', (['board', 'self.board_shape'], {}), '(board, self.board_shape)\n', (3414, 3439), True, 'import numpy as np\n'), ((3685, 3747), 'numpy.fromstring', 'np.fromstring', (['board_str'], {'dtype': 'np.float32', 'count': 'self.n_input'}), '(board_str, dtype=np.float32, count=self.n_input)\n', (3698, 3747), True, 'import numpy as np\n'), ((3772, 3807), 'numpy.reshape', 'np.reshape', (['board', 'self.board_shape'], {}), '(board, self.board_shape)\n', (3782, 3807), True, 'import numpy as np\n'), ((8651, 8666), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8664, 8666), False, 'import torch\n')]
|
"""Miscellaneous functions and helpers for the uclasm package."""
import numpy as np
def one_hot(idx, length):
"""Return a 1darray of zeros with a single one in the idx'th entry."""
one_hot = np.zeros(length, dtype=np.bool)
one_hot[idx] = True
return one_hot
def index_map(args):
"""Return a dict mapping elements to their indices.
Parameters
----------
args : Iterable[str]
Strings to be mapped to their indices.
"""
return {elm: idx for idx, elm in enumerate(args)}
# TODO: change the name of this function
def invert(dict_of_sets):
"""TODO: Docstring."""
new_dict = {}
for k, v in dict_of_sets.items():
for x in v:
new_dict[x] = new_dict.get(x, set()) | set([k])
return new_dict
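# Minimal usage sketch (illustrative input): invert({"a": {1, 2}, "b": {2}}) returns
# {1: {"a"}, 2: {"a", "b"}}, i.e. each value is mapped back to the set of keys that
# contained it.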
def values_map_to_same_key(dict_of_sets):
"""TODO: Docstring."""
matches = {}
# get the sets of candidates
for key, val_set in dict_of_sets.items():
frozen_val_set = frozenset(val_set)
matches[frozen_val_set] = matches.get(frozen_val_set, set()) | {key}
return matches
def apply_index_map_to_cols(df, cols, values):
"""Replace df[cols] with their indexes as taken from names.
Parameters
----------
df : DataFrame
To be modified inplace.
cols : Iterable[str]
Columns of df to operate on.
values : Iterable[str]
Values expected to be present in df[cols] to be replaced with their
corresponding indexes.
"""
val_to_idx = index_map(values)
df[cols] = df[cols].applymap(val_to_idx.get)
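# Hedged usage sketch (column and value names are illustrative; pandas import assumed):
#   import pandas as pd
#   df = pd.DataFrame({"src": ["a", "b"], "dst": ["b", "c"]})
#   apply_index_map_to_cols(df, ["src", "dst"], values=["a", "b", "c"])
#   # df now holds the indices: src -> [0, 1], dst -> [1, 2]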
|
[
"numpy.zeros"
] |
[((202, 233), 'numpy.zeros', 'np.zeros', (['length'], {'dtype': 'np.bool'}), '(length, dtype=np.bool)\n', (210, 233), True, 'import numpy as np\n')]
|
# Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import numpy as np
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import models
from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.data_structures import AttackInputData
class TrainedAttackerTest(absltest.TestCase):
def test_base_attacker_train_and_predict(self):
base_attacker = models.TrainedAttacker()
self.assertRaises(NotImplementedError, base_attacker.train_model, [], [])
self.assertRaises(AssertionError, base_attacker.predict, [])
def test_predict_before_training(self):
lr_attacker = models.LogisticRegressionAttacker()
self.assertRaises(AssertionError, lr_attacker.predict, [])
def test_create_attacker_data_loss_only(self):
attack_input = AttackInputData(
loss_train=np.array([1, 3]), loss_test=np.array([2, 4]))
attacker_data = models.create_attacker_data(attack_input, 2)
self.assertLen(attacker_data.features_all, 4)
def test_create_attacker_data_loss_and_logits(self):
attack_input = AttackInputData(
logits_train=np.array([[1, 2], [5, 6], [8, 9]]),
logits_test=np.array([[10, 11], [14, 15]]),
loss_train=np.array([3, 7, 10]),
loss_test=np.array([12, 16]))
attacker_data = models.create_attacker_data(attack_input, balance=False)
self.assertLen(attacker_data.features_all, 5)
self.assertLen(attacker_data.fold_indices, 5)
self.assertEmpty(attacker_data.left_out_indices)
def test_unbalanced_create_attacker_data_loss_and_logits(self):
attack_input = AttackInputData(
logits_train=np.array([[1, 2], [5, 6], [8, 9]]),
logits_test=np.array([[10, 11], [14, 15]]),
loss_train=np.array([3, 7, 10]),
loss_test=np.array([12, 16]))
attacker_data = models.create_attacker_data(attack_input, balance=True)
self.assertLen(attacker_data.features_all, 5)
self.assertLen(attacker_data.fold_indices, 4)
self.assertLen(attacker_data.left_out_indices, 1)
self.assertIn(attacker_data.left_out_indices[0], [0, 1, 2])
def test_balanced_create_attacker_data_loss_and_logits(self):
attack_input = AttackInputData(
logits_train=np.array([[1, 2], [5, 6], [8, 9]]),
logits_test=np.array([[10, 11], [14, 15], [17, 18]]),
loss_train=np.array([3, 7, 10]),
loss_test=np.array([12, 16, 19]))
attacker_data = models.create_attacker_data(attack_input)
self.assertLen(attacker_data.features_all, 6)
self.assertLen(attacker_data.fold_indices, 6)
self.assertEmpty(attacker_data.left_out_indices)
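  # Note (informal, as exercised by the tests above): with balance=True the larger of the
  # train/test sides appears to be subsampled so both contribute equally to fold_indices,
  # and the dropped examples are reported in left_out_indices; with balance=False every
  # example stays in fold_indices and left_out_indices is empty.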
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.models.LogisticRegressionAttacker",
"tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.models.TrainedAttacker",
"numpy.array",
"tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.models.create_attacker_data"
] |
[((3202, 3217), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3215, 3217), False, 'from absl.testing import absltest\n'), ((964, 988), 'tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.models.TrainedAttacker', 'models.TrainedAttacker', ([], {}), '()\n', (986, 988), False, 'from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import models\n'), ((1193, 1228), 'tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.models.LogisticRegressionAttacker', 'models.LogisticRegressionAttacker', ([], {}), '()\n', (1226, 1228), False, 'from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import models\n'), ((1463, 1507), 'tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.models.create_attacker_data', 'models.create_attacker_data', (['attack_input', '(2)'], {}), '(attack_input, 2)\n', (1490, 1507), False, 'from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import models\n'), ((1858, 1914), 'tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.models.create_attacker_data', 'models.create_attacker_data', (['attack_input'], {'balance': '(False)'}), '(attack_input, balance=False)\n', (1885, 1914), False, 'from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import models\n'), ((2379, 2434), 'tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.models.create_attacker_data', 'models.create_attacker_data', (['attack_input'], {'balance': '(True)'}), '(attack_input, balance=True)\n', (2406, 2434), False, 'from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import models\n'), ((2976, 3017), 'tensorflow_privacy.privacy.privacy_tests.membership_inference_attack.models.create_attacker_data', 'models.create_attacker_data', (['attack_input'], {}), '(attack_input)\n', (3003, 3017), False, 'from tensorflow_privacy.privacy.privacy_tests.membership_inference_attack import models\n'), ((1397, 1413), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (1405, 1413), True, 'import numpy as np\n'), ((1425, 1441), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (1433, 1441), True, 'import numpy as np\n'), ((1671, 1705), 'numpy.array', 'np.array', (['[[1, 2], [5, 6], [8, 9]]'], {}), '([[1, 2], [5, 6], [8, 9]])\n', (1679, 1705), True, 'import numpy as np\n'), ((1727, 1757), 'numpy.array', 'np.array', (['[[10, 11], [14, 15]]'], {}), '([[10, 11], [14, 15]])\n', (1735, 1757), True, 'import numpy as np\n'), ((1778, 1798), 'numpy.array', 'np.array', (['[3, 7, 10]'], {}), '([3, 7, 10])\n', (1786, 1798), True, 'import numpy as np\n'), ((1818, 1836), 'numpy.array', 'np.array', (['[12, 16]'], {}), '([12, 16])\n', (1826, 1836), True, 'import numpy as np\n'), ((2192, 2226), 'numpy.array', 'np.array', (['[[1, 2], [5, 6], [8, 9]]'], {}), '([[1, 2], [5, 6], [8, 9]])\n', (2200, 2226), True, 'import numpy as np\n'), ((2248, 2278), 'numpy.array', 'np.array', (['[[10, 11], [14, 15]]'], {}), '([[10, 11], [14, 15]])\n', (2256, 2278), True, 'import numpy as np\n'), ((2299, 2319), 'numpy.array', 'np.array', (['[3, 7, 10]'], {}), '([3, 7, 10])\n', (2307, 2319), True, 'import numpy as np\n'), ((2339, 2357), 'numpy.array', 'np.array', (['[12, 16]'], {}), '([12, 16])\n', (2347, 2357), True, 'import numpy as np\n'), ((2775, 2809), 'numpy.array', 'np.array', (['[[1, 2], [5, 6], [8, 9]]'], {}), '([[1, 2], [5, 6], [8, 9]])\n', (2783, 2809), True, 'import numpy as np\n'), ((2831, 2871), 'numpy.array', 'np.array', (['[[10, 11], [14, 
15], [17, 18]]'], {}), '([[10, 11], [14, 15], [17, 18]])\n', (2839, 2871), True, 'import numpy as np\n'), ((2892, 2912), 'numpy.array', 'np.array', (['[3, 7, 10]'], {}), '([3, 7, 10])\n', (2900, 2912), True, 'import numpy as np\n'), ((2932, 2954), 'numpy.array', 'np.array', (['[12, 16, 19]'], {}), '([12, 16, 19])\n', (2940, 2954), True, 'import numpy as np\n')]
|
# %%
import sys, os
import pandas as pd
import networkx as nx
# import matplotlib.pyplot as plt
import numpy as np
import pickle
base_file_path = os.path.abspath(os.path.join(os.curdir, '..','..', '..')) # should point to the level above the src directory
data_path = os.path.join(base_file_path, 'data', 'Intercity_Dallas')
# (grocery_demand, fitness_demand, pharmacy_demand, physician_demand, hotel_demand, religion_demand, restaurant_demand)
# Entity indexes
# 0 - groceries
# 1 - fitness
# 2 - pharmacy
# 3 - physician
# 4 - hotel
# 5 - religion
# 6 - restaurant
# Data processing parameters
fitness_freq = 94/12 # visits per unique visitor per month
pharmacy_freq = 35/12 # visits per unique visitor per month
physician_freq = 1 # visits per unique visitor per month
hotel_freq = 1 # visits per unique visitor per month
# religion_freq = 25/12 # visits per unique visitor per month
grocery_freq = 2 # visits per unique visitor per month
restaurant_freq = 1 # Assume each restaurant-goer only visits a given restaurant once per month (if at all)
month_day_time_conversion = 1/30 # months/day
min_demand_val = 5
# %%
# First get a list of the counties in Dallas MSA
county_fitness = pd.read_excel(os.path.join(data_path,'TX_Fitness_County.xlsx'))
counties = list(county_fitness.CNTY_NM.unique())
num_counties = len(counties)
print(counties)
county_data = dict()
for county in counties:
county_data[county] = {'index' : counties.index(county)}
# %%
# In county data, save a list of the block groups belonging to each county.
for county in counties:
county_data[county]['bg_list'] = set()
# Load and store block-group statistics
bg_info = dict()
# Save population data by county
print('Processing population data...')
population_data = pd.read_excel(os.path.join(data_path, 'Population_bg_Dallas.xlsx'))
for index, row in population_data.iterrows():
county = row['NAME']
if county in counties:
bg_id = row['GEO_ID']
population = row['Population']
bg_info[bg_id] = dict()
bg_info[bg_id]['county'] = county
bg_info[bg_id]['population'] = population
county_data[county]['bg_list'].add(bg_id)
# Save devices data by county
print('Processing device data...')
device_data = pd.read_excel(os.path.join(data_path, 'TX_Devices_bg.xlsx'))
for index, row in device_data.iterrows():
bg_id = row['census_block_group']
if bg_id in bg_info.keys():
devices = row['number_devices_residing']
bg_info[bg_id]['devices'] = devices
# %%
# Create arrays to store population and related data
devices = np.zeros((num_counties,))
populations = np.zeros((num_counties,))
# Now save populations and device counts by county
for county in counties:
county_data[county]['population'] = 0
county_data[county]['devices'] = 0
# Iterate over the block groups in each county and add the population and device count
for bg_id in county_data[county]['bg_list']:
county_data[county]['population'] = county_data[county]['population'] + bg_info[bg_id]['population']
county_data[county]['devices'] = county_data[county]['devices'] + bg_info[bg_id]['devices']
devices[county_data[county]['index']] = county_data[county]['devices']
populations[county_data[county]['index']] = county_data[county]['population']
# %%
# Create a map from safegraph ID to county
sgid_to_county = dict()
fitness_county = pd.read_excel(os.path.join(data_path, 'TX_Fitness_County.xlsx'))
for index, row in fitness_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
grocery_county = pd.read_excel(os.path.join(data_path, 'TX_Grocery_County.xlsx'))
for index, row in grocery_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
hmotel_county = pd.read_excel(os.path.join(data_path, 'TX_HMotel_County.xlsx'))
for index, row in hmotel_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
pharmacy_county = pd.read_excel(os.path.join(data_path, 'TX_Pharmacy_County.xlsx'))
for index, row in pharmacy_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
physician_county = pd.read_excel(os.path.join(data_path, 'TX_Physician_County.xlsx'))
for index, row in physician_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM_1']
sgid_to_county[sgid] = county
restaurant_county = pd.read_excel(os.path.join(data_path, 'TX_Restaurant_County.xlsx'))
for index, row in restaurant_county.iterrows():
sgid = row['safegraph_']
county = row['CNTY_NM']
sgid_to_county[sgid] = county
# %%
# Create arrays to store demand data
fitness_demand = np.zeros((num_counties,1))
pharmacy_demand = np.zeros((num_counties,1))
physician_demand = np.zeros((num_counties,1))
hotel_demand = np.zeros((num_counties,1))
religion_demand = np.zeros((num_counties,1))
grocery_demand = np.zeros((num_counties,1))
restaurant_demand = np.zeros((num_counties,1))
# %%
# Process grocery data
print('Processing grocery data...')
grocery_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Grocery.xlsx'))
grocery_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in grocery_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
grocery_demand_dest_mat[origin_ind, destination_ind] = \
int(grocery_demand_dest_mat[origin_ind, destination_ind] + (count * grocery_freq))
for i in range(num_counties):
for j in range(num_counties):
grocery_demand_dest_mat[i,j] = grocery_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['grocery_demand_dest'] = grocery_demand_dest_mat[i, :]
for i in range(num_counties):
grocery_demand[i] = np.sum(grocery_demand_dest_mat[i,:])
if grocery_demand[i] <= min_demand_val:
grocery_demand[i] = min_demand_val
county_data[counties[i]]['grocery_demand'] = grocery_demand[i]
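# Note (informal summary of the three loops above): SafeGraph counts are device-based, so
# each origin county's visit counts are multiplied by the category visit frequency, scaled
# by population/devices to approximate person visits, converted from a monthly total to a
# daily rate via month_day_time_conversion, and finally floored at min_demand_val. The
# same pattern repeats for the remaining categories below.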
# %%
# Process fitness data
print('Processing fitness data...')
fitness_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Fitness.xlsx'))
fitness_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in fitness_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
fitness_demand_dest_mat[origin_ind, destination_ind] = \
int(fitness_demand_dest_mat[origin_ind, destination_ind] + (count * fitness_freq))
for i in range(num_counties):
for j in range(num_counties):
fitness_demand_dest_mat[i,j] = fitness_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['fitness_demand_dest'] = fitness_demand_dest_mat[i, :]
for i in range(num_counties):
fitness_demand[i] = np.sum(fitness_demand_dest_mat[i,:])
if fitness_demand[i] <= min_demand_val:
fitness_demand[i] = min_demand_val
county_data[counties[i]]['fitness_demand'] = fitness_demand[i]
# %%
# Process pharmacy data
print('Processing pharmacy data...')
pharmacy_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Pharmacy.xlsx'))
pharmacy_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in pharmacy_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
pharmacy_demand_dest_mat[origin_ind, destination_ind] = \
int(pharmacy_demand_dest_mat[origin_ind, destination_ind] + (count * pharmacy_freq))
for i in range(num_counties):
for j in range(num_counties):
pharmacy_demand_dest_mat[i,j] = pharmacy_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['pharmacy_demand_dest'] = pharmacy_demand_dest_mat[i, :]
for i in range(num_counties):
pharmacy_demand[i] = np.sum(pharmacy_demand_dest_mat[i,:])
if pharmacy_demand[i] <= min_demand_val:
pharmacy_demand[i] = min_demand_val
county_data[counties[i]]['pharmacy_demand'] = pharmacy_demand[i]
# %%
# Process physician data
print('Processing physician data...')
physician_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Physician.xlsx'))
physician_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in physician_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
physician_demand_dest_mat[origin_ind, destination_ind] = \
int(physician_demand_dest_mat[origin_ind, destination_ind] + (count * physician_freq))
for i in range(num_counties):
for j in range(num_counties):
physician_demand_dest_mat[i,j] = physician_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['physician_demand_dest'] = physician_demand_dest_mat[i, :]
for i in range(num_counties):
physician_demand[i] = np.sum(physician_demand_dest_mat[i,:])
if physician_demand[i] <= min_demand_val:
physician_demand[i] = min_demand_val
county_data[counties[i]]['physician_demand'] = physician_demand[i]
# %%
# Process hotel data
print('Processing hotel data...')
hotel_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_HotelMotel.xlsx'))
hotel_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in hotel_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
hotel_demand_dest_mat[origin_ind, destination_ind] = \
int(hotel_demand_dest_mat[origin_ind, destination_ind] + (count * hotel_freq))
for i in range(num_counties):
for j in range(num_counties):
hotel_demand_dest_mat[i,j] = hotel_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['hotel_demand_dest'] = hotel_demand_dest_mat[i, :]
for i in range(num_counties):
hotel_demand[i] = np.sum(hotel_demand_dest_mat[i,:])
if hotel_demand[i] <= min_demand_val:
hotel_demand[i] = min_demand_val
county_data[counties[i]]['hotel_demand'] = hotel_demand[i]
# %%
# Process restaurant data
print('Processing restaurant data...')
restaurant_data = pd.read_excel(os.path.join(data_path, 'Intercity_Dallas_Restaurant.xlsx'))
restaurant_demand_dest_mat = np.zeros((num_counties, num_counties))
for indexDF, rowDF in restaurant_data.iterrows():
sgid = rowDF['safegraph_place_id']
destination_county = sgid_to_county[sgid]
origin_county = bg_info[rowDF['visitor_home_cbgs']]['county']
count = rowDF['Count']
destination_ind = county_data[destination_county]['index']
origin_ind = county_data[origin_county]['index']
restaurant_demand_dest_mat[origin_ind, destination_ind] = \
int(restaurant_demand_dest_mat[origin_ind, destination_ind] + (count * restaurant_freq))
for i in range(num_counties):
for j in range(num_counties):
restaurant_demand_dest_mat[i,j] = restaurant_demand_dest_mat[i,j] * populations[i] / devices[i] * month_day_time_conversion
county_data[counties[i]]['restaurant_demand_dest'] = restaurant_demand_dest_mat[i, :]
for i in range(num_counties):
restaurant_demand[i] = np.sum(restaurant_demand_dest_mat[i,:])
if restaurant_demand[i] <= min_demand_val:
restaurant_demand[i] = min_demand_val
county_data[counties[i]]['restaurant_demand'] = restaurant_demand[i]
# %%
# Save the results
# First check if the save directory exists
if not os.path.isdir(os.path.join(data_path, 'data_processing_outputs')):
os.mkdir(os.path.join(data_path, 'data_processing_outputs'))
demand_array=np.concatenate((grocery_demand, fitness_demand, pharmacy_demand, physician_demand, hotel_demand, restaurant_demand), axis=1)
demand_array.shape
print(demand_array)
np.save(os.path.join(data_path, 'data_processing_outputs', 'demand_array_dallas.npy'), demand_array)
np.save(os.path.join(data_path, 'data_processing_outputs', 'populations_array_dallas.npy'), populations)
pickle.dump(county_data, open(os.path.join(data_path, 'data_processing_outputs', 'county_data.p'), 'wb'))
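# Hedged re-loading sketch (same paths as the save calls above):
#   demand = np.load(os.path.join(data_path, 'data_processing_outputs', 'demand_array_dallas.npy'))
#   county_data = pickle.load(open(os.path.join(data_path, 'data_processing_outputs', 'county_data.p'), 'rb'))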
# %%
|
[
"numpy.zeros",
"os.path.join",
"numpy.sum",
"numpy.concatenate"
] |
[((269, 325), 'os.path.join', 'os.path.join', (['base_file_path', '"""data"""', '"""Intercity_Dallas"""'], {}), "(base_file_path, 'data', 'Intercity_Dallas')\n", (281, 325), False, 'import sys, os\n'), ((2582, 2607), 'numpy.zeros', 'np.zeros', (['(num_counties,)'], {}), '((num_counties,))\n', (2590, 2607), True, 'import numpy as np\n'), ((2622, 2647), 'numpy.zeros', 'np.zeros', (['(num_counties,)'], {}), '((num_counties,))\n', (2630, 2647), True, 'import numpy as np\n'), ((4778, 4805), 'numpy.zeros', 'np.zeros', (['(num_counties, 1)'], {}), '((num_counties, 1))\n', (4786, 4805), True, 'import numpy as np\n'), ((4823, 4850), 'numpy.zeros', 'np.zeros', (['(num_counties, 1)'], {}), '((num_counties, 1))\n', (4831, 4850), True, 'import numpy as np\n'), ((4869, 4896), 'numpy.zeros', 'np.zeros', (['(num_counties, 1)'], {}), '((num_counties, 1))\n', (4877, 4896), True, 'import numpy as np\n'), ((4911, 4938), 'numpy.zeros', 'np.zeros', (['(num_counties, 1)'], {}), '((num_counties, 1))\n', (4919, 4938), True, 'import numpy as np\n'), ((4956, 4983), 'numpy.zeros', 'np.zeros', (['(num_counties, 1)'], {}), '((num_counties, 1))\n', (4964, 4983), True, 'import numpy as np\n'), ((5000, 5027), 'numpy.zeros', 'np.zeros', (['(num_counties, 1)'], {}), '((num_counties, 1))\n', (5008, 5027), True, 'import numpy as np\n'), ((5047, 5074), 'numpy.zeros', 'np.zeros', (['(num_counties, 1)'], {}), '((num_counties, 1))\n', (5055, 5074), True, 'import numpy as np\n'), ((5253, 5291), 'numpy.zeros', 'np.zeros', (['(num_counties, num_counties)'], {}), '((num_counties, num_counties))\n', (5261, 5291), True, 'import numpy as np\n'), ((6486, 6524), 'numpy.zeros', 'np.zeros', (['(num_counties, num_counties)'], {}), '((num_counties, num_counties))\n', (6494, 6524), True, 'import numpy as np\n'), ((7725, 7763), 'numpy.zeros', 'np.zeros', (['(num_counties, num_counties)'], {}), '((num_counties, num_counties))\n', (7733, 7763), True, 'import numpy as np\n'), ((8984, 9022), 'numpy.zeros', 'np.zeros', (['(num_counties, num_counties)'], {}), '((num_counties, num_counties))\n', (8992, 9022), True, 'import numpy as np\n'), ((10242, 10280), 'numpy.zeros', 'np.zeros', (['(num_counties, num_counties)'], {}), '((num_counties, num_counties))\n', (10250, 10280), True, 'import numpy as np\n'), ((11464, 11502), 'numpy.zeros', 'np.zeros', (['(num_counties, num_counties)'], {}), '((num_counties, num_counties))\n', (11472, 11502), True, 'import numpy as np\n'), ((12784, 12912), 'numpy.concatenate', 'np.concatenate', (['(grocery_demand, fitness_demand, pharmacy_demand, physician_demand,\n hotel_demand, restaurant_demand)'], {'axis': '(1)'}), '((grocery_demand, fitness_demand, pharmacy_demand,\n physician_demand, hotel_demand, restaurant_demand), axis=1)\n', (12798, 12912), True, 'import numpy as np\n'), ((163, 204), 'os.path.join', 'os.path.join', (['os.curdir', '""".."""', '""".."""', '""".."""'], {}), "(os.curdir, '..', '..', '..')\n", (175, 204), False, 'import sys, os\n'), ((1207, 1256), 'os.path.join', 'os.path.join', (['data_path', '"""TX_Fitness_County.xlsx"""'], {}), "(data_path, 'TX_Fitness_County.xlsx')\n", (1219, 1256), False, 'import sys, os\n'), ((1769, 1821), 'os.path.join', 'os.path.join', (['data_path', '"""Population_bg_Dallas.xlsx"""'], {}), "(data_path, 'Population_bg_Dallas.xlsx')\n", (1781, 1821), False, 'import sys, os\n'), ((2260, 2305), 'os.path.join', 'os.path.join', (['data_path', '"""TX_Devices_bg.xlsx"""'], {}), "(data_path, 'TX_Devices_bg.xlsx')\n", (2272, 2305), False, 'import sys, os\n'), ((3419, 3468), 
'os.path.join', 'os.path.join', (['data_path', '"""TX_Fitness_County.xlsx"""'], {}), "(data_path, 'TX_Fitness_County.xlsx')\n", (3431, 3468), False, 'import sys, os\n'), ((3638, 3687), 'os.path.join', 'os.path.join', (['data_path', '"""TX_Grocery_County.xlsx"""'], {}), "(data_path, 'TX_Grocery_County.xlsx')\n", (3650, 3687), False, 'import sys, os\n'), ((3856, 3904), 'os.path.join', 'os.path.join', (['data_path', '"""TX_HMotel_County.xlsx"""'], {}), "(data_path, 'TX_HMotel_County.xlsx')\n", (3868, 3904), False, 'import sys, os\n'), ((4074, 4124), 'os.path.join', 'os.path.join', (['data_path', '"""TX_Pharmacy_County.xlsx"""'], {}), "(data_path, 'TX_Pharmacy_County.xlsx')\n", (4086, 4124), False, 'import sys, os\n'), ((4297, 4348), 'os.path.join', 'os.path.join', (['data_path', '"""TX_Physician_County.xlsx"""'], {}), "(data_path, 'TX_Physician_County.xlsx')\n", (4309, 4348), False, 'import sys, os\n'), ((4525, 4577), 'os.path.join', 'os.path.join', (['data_path', '"""TX_Restaurant_County.xlsx"""'], {}), "(data_path, 'TX_Restaurant_County.xlsx')\n", (4537, 4577), False, 'import sys, os\n'), ((5169, 5225), 'os.path.join', 'os.path.join', (['data_path', '"""Intercity_Dallas_Grocery.xlsx"""'], {}), "(data_path, 'Intercity_Dallas_Grocery.xlsx')\n", (5181, 5225), False, 'import sys, os\n'), ((6117, 6154), 'numpy.sum', 'np.sum', (['grocery_demand_dest_mat[i, :]'], {}), '(grocery_demand_dest_mat[i, :])\n', (6123, 6154), True, 'import numpy as np\n'), ((6402, 6458), 'os.path.join', 'os.path.join', (['data_path', '"""Intercity_Dallas_Fitness.xlsx"""'], {}), "(data_path, 'Intercity_Dallas_Fitness.xlsx')\n", (6414, 6458), False, 'import sys, os\n'), ((7351, 7388), 'numpy.sum', 'np.sum', (['fitness_demand_dest_mat[i, :]'], {}), '(fitness_demand_dest_mat[i, :])\n', (7357, 7388), True, 'import numpy as np\n'), ((7639, 7696), 'os.path.join', 'os.path.join', (['data_path', '"""Intercity_Dallas_Pharmacy.xlsx"""'], {}), "(data_path, 'Intercity_Dallas_Pharmacy.xlsx')\n", (7651, 7696), False, 'import sys, os\n'), ((8599, 8637), 'numpy.sum', 'np.sum', (['pharmacy_demand_dest_mat[i, :]'], {}), '(pharmacy_demand_dest_mat[i, :])\n', (8605, 8637), True, 'import numpy as np\n'), ((8896, 8954), 'os.path.join', 'os.path.join', (['data_path', '"""Intercity_Dallas_Physician.xlsx"""'], {}), "(data_path, 'Intercity_Dallas_Physician.xlsx')\n", (8908, 8954), False, 'import sys, os\n'), ((9867, 9906), 'numpy.sum', 'np.sum', (['physician_demand_dest_mat[i, :]'], {}), '(physician_demand_dest_mat[i, :])\n', (9873, 9906), True, 'import numpy as np\n'), ((10157, 10216), 'os.path.join', 'os.path.join', (['data_path', '"""Intercity_Dallas_HotelMotel.xlsx"""'], {}), "(data_path, 'Intercity_Dallas_HotelMotel.xlsx')\n", (10169, 10216), False, 'import sys, os\n'), ((11089, 11124), 'numpy.sum', 'np.sum', (['hotel_demand_dest_mat[i, :]'], {}), '(hotel_demand_dest_mat[i, :])\n', (11095, 11124), True, 'import numpy as np\n'), ((11374, 11433), 'os.path.join', 'os.path.join', (['data_path', '"""Intercity_Dallas_Restaurant.xlsx"""'], {}), "(data_path, 'Intercity_Dallas_Restaurant.xlsx')\n", (11386, 11433), False, 'import sys, os\n'), ((12356, 12396), 'numpy.sum', 'np.sum', (['restaurant_demand_dest_mat[i, :]'], {}), '(restaurant_demand_dest_mat[i, :])\n', (12362, 12396), True, 'import numpy as np\n'), ((12956, 13033), 'os.path.join', 'os.path.join', (['data_path', '"""data_processing_outputs"""', '"""demand_array_dallas.npy"""'], {}), "(data_path, 'data_processing_outputs', 'demand_array_dallas.npy')\n", (12968, 13033), False, 'import sys, 
os\n'), ((13057, 13143), 'os.path.join', 'os.path.join', (['data_path', '"""data_processing_outputs"""', '"""populations_array_dallas.npy"""'], {}), "(data_path, 'data_processing_outputs',\n 'populations_array_dallas.npy')\n", (13069, 13143), False, 'import sys, os\n'), ((12652, 12702), 'os.path.join', 'os.path.join', (['data_path', '"""data_processing_outputs"""'], {}), "(data_path, 'data_processing_outputs')\n", (12664, 12702), False, 'import sys, os\n'), ((12718, 12768), 'os.path.join', 'os.path.join', (['data_path', '"""data_processing_outputs"""'], {}), "(data_path, 'data_processing_outputs')\n", (12730, 12768), False, 'import sys, os\n'), ((13185, 13252), 'os.path.join', 'os.path.join', (['data_path', '"""data_processing_outputs"""', '"""county_data.p"""'], {}), "(data_path, 'data_processing_outputs', 'county_data.p')\n", (13197, 13252), False, 'import sys, os\n')]
|
from cv2 import cv2
import numpy as np
import anki_vector
from anki_vector.util import distance_mm, speed_mmps, degrees
def empty(a):
pass
robot=anki_vector.Robot()
robot.connect()
robot.camera.init_camera_feed()
robot.behavior.set_lift_height(0.0)
robot.behavior.set_head_angle(degrees(0))
cv2.namedWindow("TrackBars")
cv2.resizeWindow("TrackBars", 640, 600)
cv2.createTrackbar("Hue Min", "TrackBars", 10, 179, empty)
cv2.createTrackbar("Hue Max", "TrackBars", 47, 179, empty)
cv2.createTrackbar("Sat Min", "TrackBars", 66, 255, empty)
cv2.createTrackbar("Sat Max", "TrackBars", 186, 255, empty)
cv2.createTrackbar("Val Min", "TrackBars", 171, 255, empty)
cv2.createTrackbar("Val Max", "TrackBars", 255, 255, empty)
while True:
h_min = cv2.getTrackbarPos("Hue Min", "TrackBars")
h_max = cv2.getTrackbarPos("Hue Max", "TrackBars")
s_min = cv2.getTrackbarPos("Sat Min", "TrackBars")
s_max = cv2.getTrackbarPos("Sat Max", "TrackBars")
v_min = cv2.getTrackbarPos("Val Min", "TrackBars")
v_max = cv2.getTrackbarPos("Val Max", "TrackBars")
img = np.array(robot.camera.latest_image.raw_image)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
imgBlur = cv2.GaussianBlur(img, (3,3), 1)
imgHSV = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2HSV)
print(h_min, h_max, s_min, s_max, v_min, v_max)
lower = np.array([h_min, s_min, v_min])
upper = np.array([h_max, s_max, v_max])
mask = cv2.inRange(imgHSV, lower, upper)
    # Alternative method to find the ball: approximate each contour with a polygon and use its corner count.
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.02*peri,True)
objCor = len(approx) # Number of corners
print(objCor)
x, y, w, h = cv2.boundingRect(approx)
if objCor > 6:
cv2.circle(img, center=(int(x+w/2), int(y+h/2)), radius=int((h)/2), color=(0, 255, 0), thickness=3)
cv2.imshow("Camera", img)
cv2.imshow("Mask", mask)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
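# Hedged cleanup sketch (not in the original script): once the loop exits it is usual to
# release the OpenCV windows and the robot connection.
cv2.destroyAllWindows()
robot.disconnect()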
|
[
"cv2.cv2.namedWindow",
"cv2.cv2.arcLength",
"anki_vector.Robot",
"cv2.cv2.boundingRect",
"cv2.cv2.resizeWindow",
"cv2.cv2.getTrackbarPos",
"cv2.cv2.findContours",
"cv2.cv2.inRange",
"cv2.cv2.approxPolyDP",
"anki_vector.util.degrees",
"cv2.cv2.createTrackbar",
"numpy.array",
"cv2.cv2.GaussianBlur",
"cv2.cv2.waitKey",
"cv2.cv2.cvtColor",
"cv2.cv2.imshow"
] |
[((154, 173), 'anki_vector.Robot', 'anki_vector.Robot', ([], {}), '()\n', (171, 173), False, 'import anki_vector\n'), ((301, 329), 'cv2.cv2.namedWindow', 'cv2.namedWindow', (['"""TrackBars"""'], {}), "('TrackBars')\n", (316, 329), False, 'from cv2 import cv2\n'), ((330, 369), 'cv2.cv2.resizeWindow', 'cv2.resizeWindow', (['"""TrackBars"""', '(640)', '(600)'], {}), "('TrackBars', 640, 600)\n", (346, 369), False, 'from cv2 import cv2\n'), ((370, 428), 'cv2.cv2.createTrackbar', 'cv2.createTrackbar', (['"""Hue Min"""', '"""TrackBars"""', '(10)', '(179)', 'empty'], {}), "('Hue Min', 'TrackBars', 10, 179, empty)\n", (388, 428), False, 'from cv2 import cv2\n'), ((429, 487), 'cv2.cv2.createTrackbar', 'cv2.createTrackbar', (['"""Hue Max"""', '"""TrackBars"""', '(47)', '(179)', 'empty'], {}), "('Hue Max', 'TrackBars', 47, 179, empty)\n", (447, 487), False, 'from cv2 import cv2\n'), ((488, 546), 'cv2.cv2.createTrackbar', 'cv2.createTrackbar', (['"""Sat Min"""', '"""TrackBars"""', '(66)', '(255)', 'empty'], {}), "('Sat Min', 'TrackBars', 66, 255, empty)\n", (506, 546), False, 'from cv2 import cv2\n'), ((547, 606), 'cv2.cv2.createTrackbar', 'cv2.createTrackbar', (['"""Sat Max"""', '"""TrackBars"""', '(186)', '(255)', 'empty'], {}), "('Sat Max', 'TrackBars', 186, 255, empty)\n", (565, 606), False, 'from cv2 import cv2\n'), ((607, 666), 'cv2.cv2.createTrackbar', 'cv2.createTrackbar', (['"""Val Min"""', '"""TrackBars"""', '(171)', '(255)', 'empty'], {}), "('Val Min', 'TrackBars', 171, 255, empty)\n", (625, 666), False, 'from cv2 import cv2\n'), ((667, 726), 'cv2.cv2.createTrackbar', 'cv2.createTrackbar', (['"""Val Max"""', '"""TrackBars"""', '(255)', '(255)', 'empty'], {}), "('Val Max', 'TrackBars', 255, 255, empty)\n", (685, 726), False, 'from cv2 import cv2\n'), ((288, 298), 'anki_vector.util.degrees', 'degrees', (['(0)'], {}), '(0)\n', (295, 298), False, 'from anki_vector.util import distance_mm, speed_mmps, degrees\n'), ((757, 799), 'cv2.cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Hue Min"""', '"""TrackBars"""'], {}), "('Hue Min', 'TrackBars')\n", (775, 799), False, 'from cv2 import cv2\n'), ((812, 854), 'cv2.cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Hue Max"""', '"""TrackBars"""'], {}), "('Hue Max', 'TrackBars')\n", (830, 854), False, 'from cv2 import cv2\n'), ((867, 909), 'cv2.cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Sat Min"""', '"""TrackBars"""'], {}), "('Sat Min', 'TrackBars')\n", (885, 909), False, 'from cv2 import cv2\n'), ((922, 964), 'cv2.cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Sat Max"""', '"""TrackBars"""'], {}), "('Sat Max', 'TrackBars')\n", (940, 964), False, 'from cv2 import cv2\n'), ((977, 1019), 'cv2.cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Val Min"""', '"""TrackBars"""'], {}), "('Val Min', 'TrackBars')\n", (995, 1019), False, 'from cv2 import cv2\n'), ((1032, 1074), 'cv2.cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Val Max"""', '"""TrackBars"""'], {}), "('Val Max', 'TrackBars')\n", (1050, 1074), False, 'from cv2 import cv2\n'), ((1086, 1131), 'numpy.array', 'np.array', (['robot.camera.latest_image.raw_image'], {}), '(robot.camera.latest_image.raw_image)\n', (1094, 1131), True, 'import numpy as np\n'), ((1142, 1178), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (1154, 1178), False, 'from cv2 import cv2\n'), ((1193, 1225), 'cv2.cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(3, 3)', '(1)'], {}), '(img, (3, 3), 1)\n', (1209, 1225), False, 'from cv2 import cv2\n'), ((1238, 1278), 
'cv2.cv2.cvtColor', 'cv2.cvtColor', (['imgBlur', 'cv2.COLOR_BGR2HSV'], {}), '(imgBlur, cv2.COLOR_BGR2HSV)\n', (1250, 1278), False, 'from cv2 import cv2\n'), ((1344, 1375), 'numpy.array', 'np.array', (['[h_min, s_min, v_min]'], {}), '([h_min, s_min, v_min])\n', (1352, 1375), True, 'import numpy as np\n'), ((1388, 1419), 'numpy.array', 'np.array', (['[h_max, s_max, v_max]'], {}), '([h_max, s_max, v_max])\n', (1396, 1419), True, 'import numpy as np\n'), ((1431, 1464), 'cv2.cv2.inRange', 'cv2.inRange', (['imgHSV', 'lower', 'upper'], {}), '(imgHSV, lower, upper)\n', (1442, 1464), False, 'from cv2 import cv2\n'), ((1580, 1646), 'cv2.cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1596, 1646), False, 'from cv2 import cv2\n'), ((2037, 2062), 'cv2.cv2.imshow', 'cv2.imshow', (['"""Camera"""', 'img'], {}), "('Camera', img)\n", (2047, 2062), False, 'from cv2 import cv2\n'), ((2067, 2091), 'cv2.cv2.imshow', 'cv2.imshow', (['"""Mask"""', 'mask'], {}), "('Mask', mask)\n", (2077, 2091), False, 'from cv2 import cv2\n'), ((1687, 1711), 'cv2.cv2.arcLength', 'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (1700, 1711), False, 'from cv2 import cv2\n'), ((1729, 1769), 'cv2.cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnt', '(0.02 * peri)', '(True)'], {}), '(cnt, 0.02 * peri, True)\n', (1745, 1769), False, 'from cv2 import cv2\n'), ((1859, 1883), 'cv2.cv2.boundingRect', 'cv2.boundingRect', (['approx'], {}), '(approx)\n', (1875, 1883), False, 'from cv2 import cv2\n'), ((2100, 2114), 'cv2.cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2111, 2114), False, 'from cv2 import cv2\n')]
|
from __future__ import absolute_import
from unittest import TestCase, skip
from ..wcs import WCS
import numpy as np
import os
import re
import sys
from astropy.io import fits
from astropy.modeling import (models, fitting, Model)
import matplotlib.pyplot as plt
from ccdproc import CCDData
class TestWCSBase(TestCase):
def setUp(self):
self.data_path = os.path.join(
os.path.dirname(sys.modules['goodman_pipeline'].__file__),
'data/test_data/wcs_data')
self.wcs = WCS()
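# Note: the GSP_Pnnn / GSP_Annn header keywords appear to store matching
# pixel / angstrom positions for identified lines; entries whose angstrom
# value is 0 are skipped as unidentified in _recover_lines below.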
@staticmethod
def _recover_lines(ccd):
lines_pixel = []
lines_angstrom = []
pixel_keywords = ccd.header['GSP_P*']
for pixel_key in pixel_keywords:
if re.match(r'GSP_P\d{3}', pixel_key) is not None:
angstrom_key = re.sub('GSP_P', 'GSP_A', pixel_key)
if int(ccd.header[angstrom_key]) != 0:
lines_pixel.append(float(ccd.header[pixel_key]))
lines_angstrom.append(float(ccd.header[angstrom_key]))
return lines_pixel, lines_angstrom
class TestWCS(TestWCSBase):
# def test_wcs__call__(self):
# self.assertRaisesRegex(SystemExit, '1', self.wcs)
# self.assertRaises(SystemExit, self.wcs)
def test_fit_chebyshev(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
ccd = CCDData.read(test_file, unit='adu')
pixel, angstrom = self._recover_lines(ccd=ccd)
model = self.wcs.fit(physical=pixel, wavelength=angstrom)
self.assertIsInstance(model, Model)
self.assertEqual(model.__class__.__name__, ccd.header['GSP_FUNC'])
self.assertEqual(model.degree, ccd.header['GSP_ORDR'])
for i in range(model.degree + 1):
self.assertAlmostEqual(model.__getattribute__('c{:d}'.format(i)).value,
ccd.header['GSP_C{:03d}'.format(i)])
def test_fit_linear(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
ccd = CCDData.read(test_file, unit='adu')
pixel, angstrom = self._recover_lines(ccd=ccd)
model = self.wcs.fit(physical=pixel,
wavelength=angstrom,
model_name='linear')
self.assertIsInstance(model, Model)
def test_fit_invalid(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
ccd = CCDData.read(test_file, unit='adu')
pixel, angstrom = self._recover_lines(ccd=ccd)
self.assertRaisesRegex(NotImplementedError,
'The model invalid is not implemented',
self.wcs.fit,
pixel,
angstrom,
'invalid')
self.assertRaises(NotImplementedError,
self.wcs.fit,
pixel,
angstrom,
'invalid')
def test_fit__unable_to_fit(self):
pixel = [0, 1, 2, 3]
angstrom = [20, 30, 40]
# self.assertRaisesRegex(ValueError,
# 'x and y should have the same shape',
# self.wcs.fit, pixel, angstrom)
self.assertRaises(ValueError, self.wcs.fit, pixel, angstrom)
def test_read__linear(self):
test_file = os.path.join(self.data_path,
'linear_fits_solution.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
result = self.wcs.read(ccd=ccd)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertIsInstance(self.wcs.get_model(), Model)
def test_read__log_linear(self):
test_file = os.path.join(self.data_path,
'log-linear_fits_solution.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
#
# result = self.wcs.read(ccd=ccd)
#
# self.assertIsInstance(result, list)
# self.assertEqual(len(result), 2)
# self.assertIsInstance(self.wcs.get_model(), Model)
self.assertRaises(NotImplementedError, self.wcs.read, ccd)
def test_read__non_linear_chebyshev(self):
test_file = os.path.join(self.data_path,
'non-linear_fits_solution_cheb.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
result = self.wcs.read(ccd=ccd)
self.assertIsInstance(self.wcs.model, Model)
self.assertEqual(self.wcs.model.__class__.__name__, 'Chebyshev1D')
def test_read__non_linear_legendre(self):
test_file = os.path.join(self.data_path,
'non-linear_fits_solution_legendre.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
result = self.wcs.read(ccd=ccd)
self.assertIsInstance(self.wcs.model, Model)
self.assertEqual(self.wcs.model.__class__.__name__, 'Legendre1D')
def test_read__non_linear_lspline(self):
test_file = os.path.join(self.data_path,
'non-linear_fits_solution_linear-spline.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
# self.wcs.read(ccd=ccd)
self.assertRaises(NotImplementedError, self.wcs.read, ccd)
self.assertRaisesRegex(NotImplementedError,
'Linear spline is not implemented',
self.wcs.read, ccd)
def test_read__non_linear_cspline(self):
test_file = os.path.join(self.data_path,
'non-linear_fits_solution_cubic-spline.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
self.assertRaises(NotImplementedError, self.wcs.read, ccd)
self.assertRaisesRegex(NotImplementedError,
'Cubic spline is not implemented',
self.wcs.read, ccd)
def test_write_fits_wcs(self):
self.assertRaises(NotImplementedError, self.wcs.write_fits_wcs,
None,
None)
def test_read__invalid(self):
test_file = os.path.join(self.data_path,
'linear_fits_solution.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
ccd.wcs.wcs.ctype[0] = 'INVALID'
self.assertRaisesRegex(NotImplementedError,
'CTYPE INVALID is not recognized',
self.wcs.read,
ccd)
self.assertRaises(NotImplementedError, self.wcs.read, ccd)
def test_write_gsp_wcs(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
ccd = CCDData.read(test_file, unit='adu')
pixel, angstrom = self._recover_lines(ccd=ccd)
model = self.wcs.fit(physical=pixel, wavelength=angstrom)
self.assertIsInstance(model, Model)
blank_ccd = CCDData(data=np.ones(ccd.data.shape),
meta=fits.Header(),
unit='adu')
blank_ccd.header.set('GSP_WREJ', value=None, comment='empty')
new_ccd = self.wcs.write_gsp_wcs(ccd=blank_ccd, model=model)
self.assertEqual(new_ccd.header['GSP_FUNC'], ccd.header['GSP_FUNC'])
self.assertEqual(new_ccd.header['GSP_ORDR'], ccd.header['GSP_ORDR'])
self.assertEqual(new_ccd.header['GSP_NPIX'], ccd.header['GSP_NPIX'])
for i in range(model.degree + 1):
self.assertAlmostEqual(new_ccd.header['GSP_C{:03d}'.format(i)],
ccd.header['GSP_C{:03d}'.format(i)])
def test_read_gsp_wcs(self):
test_file = os.path.join(self.data_path,
'goodman_comp_400M1_HgArNe.fits')
self.assertTrue(os.path.isfile(test_file))
ccd = CCDData.read(test_file, unit='adu')
result = self.wcs.read_gsp_wcs(ccd=ccd)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertIsInstance(self.wcs.get_model(), Model)
def test_get_model_is_None(self):
self.wcs.model = None
self.assertIsNone(self.wcs.get_model())
def test_get_model_is_not_None(self):
self.wcs.model = models.Chebyshev1D(degree=3)
self.assertIsInstance(self.wcs.get_model(), Model)
def test_pm_none(self):
# test_file = os.path.join(self.data_path,
# 'non-linear_fits_solution_cheb.fits')
# self.assertTrue(os.path.isfile(test_file))
#
# ccd = CCDData.read(test_file, unit='adu')
#
# WAT2_001 = 'wtype = multispec spec1 = "1 1 2 1. 1.5114461210693 4096 0. 834.39 864'
# WAT2_002 = '.39 1. 0. 1 3 1616.37 3259.98 5115.64008185559 535.515983711607 -0.7'
# WAT2_003 = '79265625182385"'
#
# dtype = -1
self.assertRaises(NotImplementedError, self.wcs._none)
|
[
"ccdproc.CCDData.read",
"os.path.dirname",
"re.match",
"numpy.ones",
"os.path.isfile",
"astropy.modeling.models.Chebyshev1D",
"astropy.io.fits.Header",
"os.path.join",
"re.sub"
] |
[((1309, 1371), 'os.path.join', 'os.path.join', (['self.data_path', '"""goodman_comp_400M1_HgArNe.fits"""'], {}), "(self.data_path, 'goodman_comp_400M1_HgArNe.fits')\n", (1321, 1371), False, 'import os\n'), ((1419, 1454), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (1431, 1454), False, 'from ccdproc import CCDData\n'), ((2003, 2065), 'os.path.join', 'os.path.join', (['self.data_path', '"""goodman_comp_400M1_HgArNe.fits"""'], {}), "(self.data_path, 'goodman_comp_400M1_HgArNe.fits')\n", (2015, 2065), False, 'import os\n'), ((2113, 2148), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (2125, 2148), False, 'from ccdproc import CCDData\n'), ((2446, 2508), 'os.path.join', 'os.path.join', (['self.data_path', '"""goodman_comp_400M1_HgArNe.fits"""'], {}), "(self.data_path, 'goodman_comp_400M1_HgArNe.fits')\n", (2458, 2508), False, 'import os\n'), ((2556, 2591), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (2568, 2591), False, 'from ccdproc import CCDData\n'), ((3535, 3592), 'os.path.join', 'os.path.join', (['self.data_path', '"""linear_fits_solution.fits"""'], {}), "(self.data_path, 'linear_fits_solution.fits')\n", (3547, 3592), False, 'import os\n'), ((3692, 3727), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (3704, 3727), False, 'from ccdproc import CCDData\n'), ((3972, 4033), 'os.path.join', 'os.path.join', (['self.data_path', '"""log-linear_fits_solution.fits"""'], {}), "(self.data_path, 'log-linear_fits_solution.fits')\n", (3984, 4033), False, 'import os\n'), ((4133, 4168), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (4145, 4168), False, 'from ccdproc import CCDData\n'), ((4516, 4582), 'os.path.join', 'os.path.join', (['self.data_path', '"""non-linear_fits_solution_cheb.fits"""'], {}), "(self.data_path, 'non-linear_fits_solution_cheb.fits')\n", (4528, 4582), False, 'import os\n'), ((4682, 4717), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (4694, 4717), False, 'from ccdproc import CCDData\n'), ((4954, 5024), 'os.path.join', 'os.path.join', (['self.data_path', '"""non-linear_fits_solution_legendre.fits"""'], {}), "(self.data_path, 'non-linear_fits_solution_legendre.fits')\n", (4966, 5024), False, 'import os\n'), ((5124, 5159), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (5136, 5159), False, 'from ccdproc import CCDData\n'), ((5394, 5469), 'os.path.join', 'os.path.join', (['self.data_path', '"""non-linear_fits_solution_linear-spline.fits"""'], {}), "(self.data_path, 'non-linear_fits_solution_linear-spline.fits')\n", (5406, 5469), False, 'import os\n'), ((5569, 5604), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (5581, 5604), False, 'from ccdproc import CCDData\n'), ((5941, 6015), 'os.path.join', 'os.path.join', (['self.data_path', '"""non-linear_fits_solution_cubic-spline.fits"""'], {}), "(self.data_path, 'non-linear_fits_solution_cubic-spline.fits')\n", (5953, 6015), False, 'import os\n'), ((6115, 6150), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (6127, 6150), False, 'from ccdproc import CCDData\n'), ((6614, 6671), 
'os.path.join', 'os.path.join', (['self.data_path', '"""linear_fits_solution.fits"""'], {}), "(self.data_path, 'linear_fits_solution.fits')\n", (6626, 6671), False, 'import os\n'), ((6771, 6806), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (6783, 6806), False, 'from ccdproc import CCDData\n'), ((7171, 7233), 'os.path.join', 'os.path.join', (['self.data_path', '"""goodman_comp_400M1_HgArNe.fits"""'], {}), "(self.data_path, 'goodman_comp_400M1_HgArNe.fits')\n", (7183, 7233), False, 'import os\n'), ((7281, 7316), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (7293, 7316), False, 'from ccdproc import CCDData\n'), ((8235, 8297), 'os.path.join', 'os.path.join', (['self.data_path', '"""goodman_comp_400M1_HgArNe.fits"""'], {}), "(self.data_path, 'goodman_comp_400M1_HgArNe.fits')\n", (8247, 8297), False, 'import os\n'), ((8397, 8432), 'ccdproc.CCDData.read', 'CCDData.read', (['test_file'], {'unit': '"""adu"""'}), "(test_file, unit='adu')\n", (8409, 8432), False, 'from ccdproc import CCDData\n'), ((8811, 8839), 'astropy.modeling.models.Chebyshev1D', 'models.Chebyshev1D', ([], {'degree': '(3)'}), '(degree=3)\n', (8829, 8839), False, 'from astropy.modeling import models, fitting, Model\n'), ((395, 452), 'os.path.dirname', 'os.path.dirname', (["sys.modules['goodman_pipeline'].__file__"], {}), "(sys.modules['goodman_pipeline'].__file__)\n", (410, 452), False, 'import os\n'), ((3650, 3675), 'os.path.isfile', 'os.path.isfile', (['test_file'], {}), '(test_file)\n', (3664, 3675), False, 'import os\n'), ((4091, 4116), 'os.path.isfile', 'os.path.isfile', (['test_file'], {}), '(test_file)\n', (4105, 4116), False, 'import os\n'), ((4640, 4665), 'os.path.isfile', 'os.path.isfile', (['test_file'], {}), '(test_file)\n', (4654, 4665), False, 'import os\n'), ((5082, 5107), 'os.path.isfile', 'os.path.isfile', (['test_file'], {}), '(test_file)\n', (5096, 5107), False, 'import os\n'), ((5527, 5552), 'os.path.isfile', 'os.path.isfile', (['test_file'], {}), '(test_file)\n', (5541, 5552), False, 'import os\n'), ((6073, 6098), 'os.path.isfile', 'os.path.isfile', (['test_file'], {}), '(test_file)\n', (6087, 6098), False, 'import os\n'), ((6729, 6754), 'os.path.isfile', 'os.path.isfile', (['test_file'], {}), '(test_file)\n', (6743, 6754), False, 'import os\n'), ((8356, 8381), 'os.path.isfile', 'os.path.isfile', (['test_file'], {}), '(test_file)\n', (8370, 8381), False, 'import os\n'), ((722, 756), 're.match', 're.match', (['"""GSP_P\\\\d{3}"""', 'pixel_key'], {}), "('GSP_P\\\\d{3}', pixel_key)\n", (730, 756), False, 'import re\n'), ((801, 836), 're.sub', 're.sub', (['"""GSP_P"""', '"""GSP_A"""', 'pixel_key'], {}), "('GSP_P', 'GSP_A', pixel_key)\n", (807, 836), False, 'import re\n'), ((7516, 7539), 'numpy.ones', 'np.ones', (['ccd.data.shape'], {}), '(ccd.data.shape)\n', (7523, 7539), True, 'import numpy as np\n'), ((7572, 7585), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (7583, 7585), False, 'from astropy.io import fits\n')]
|
from sciapp.action import Free
import scipy.ndimage as ndimg
import numpy as np, wx
# from imagepy import IPy
#matplotlib.use('WXAgg')
import matplotlib.pyplot as plt
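# block() renders each value of `arr` as a 30x30 gray tile (pixel intensity
# equals the value) and stacks the tiles horizontally, producing a strip
# image that the plugins below display next to their matplotlib plots.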
def block(arr):
img = np.zeros((len(arr),30,30), dtype=np.uint8)
img.T[:] = arr
return np.hstack(img)
class Temperature(Free):
title = 'Temperature Difference'
asyn = False
def run(self, para = None):
xs = np.array([1,2,3,4,5,6,7,8,9,10,11,12])
ys = np.array([1,2,1,2,2,3,8,9,8,10,9,10], dtype=np.float32)
ds = ndimg.convolve1d(ys, [0,1,-1])
lbs = ['Jan','Feb','Mar','Apr','May','June',
'Jul','Aug','Sep','Oct','Nov','Dec']
plt.xticks(xs, lbs)
plt.plot(xs, ys, '-o', label='Temperature')
plt.plot(xs, ds, '-o', label='Difference')
plt.grid()
plt.gca().legend()
plt.title('Temperature in XX')
plt.xlabel('Month')
plt.ylabel('Temperature (C)')
plt.show()
self.app.show_img([block((ys-ys.min())*(180/(ys.max()-ys.min())))], 'Temperature')
self.app.show_img([block((ds-ds.min())*(180/(ds.max()-ds.min())))], 'Difference')
class Shake(Free):
title = 'Shake Damping'
asyn = False
def run(self, para = None):
xs = np.array([1,2,3,4,5,6,7,8,9,10])
ys = np.array([10,-9,8,-7,6,-5,4,-3,2,-1], dtype=np.float32)
ds = ndimg.convolve1d(ys, [1/3,1/3,1/3])
print(ds)
plt.plot(xs, ys, '-o', label='Shake')
plt.plot(xs, ds, '-o', label='Damping')
plt.grid()
plt.gca().legend()
plt.title('Shake Damping')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.show()
self.app.show_img([block(ys*10+128)], 'Shake')
self.app.show_img([block(ds*10+128)], 'Damping')
class Inertia(Free):
title = 'Psychological Inertia'
asyn = False
def run(self, para = None):
xs = np.array([1,2,3,4,5,6,7,8,9,10])
ys = np.array([90,88,93,95,91,70,89,92,94,89], dtype=np.float32)
ds = ndimg.convolve1d(ys, [1/3,1/3,1/3])
print(ds)
plt.plot(xs, ys, '-o', label='Psychological')
plt.plot(xs, ds, '-o', label='Inertia')
plt.grid()
plt.gca().legend()
plt.title('Psychological Inertia')
plt.xlabel('Time')
plt.ylabel('Score')
plt.show()
self.app.show_img([block((ys-80)*3+80)], 'Psychological')
self.app.show_img([block((ds-80)*3+80)], 'Inertia')
class GaussCore(Free):
title = 'Gaussian Core'
asyn = False
def run(self, para = None):
x, y = np.ogrid[-3:3:10j, -3:3:10j]
z = np.exp(-(x ** 2 + y ** 2)/1)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, z)
z = np.exp(-(x ** 2 + y ** 2)/4)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, z)
plt.show()
class LoGCore(Free):
title = 'Laplace of Gaussian Core'
asyn = False
def run(self, para = None):
plt.figure()
x = np.linspace(-3,3,50)
y = np.exp(-x**2)
dy = np.exp(-x**2)*(4*x**2-2)
plt.plot(x, y, label='Gauss')
plt.plot(x, -dy, label="Gauss''")
plt.grid()
plt.legend()
x, y = np.ogrid[-3:3:20j, -3:3:20j]
z = (4*x**2-2)*np.exp(-y**2-x**2)+(4*y**2-2)*np.exp(-x**2-y**2)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, -z)
plt.show()
class DogCore(Free):
title = 'Difference of Gaussian Core'
asyn = False
def run(self, para = None):
plt.figure()
x = np.linspace(-3,3,50)
y = np.exp(-x**2)
yy = np.exp(-x**2/4)/2
plt.plot(x, y, label='sigma = 1')
plt.plot(x, yy, label='sigma = 2')
plt.plot(x, y-yy, 'r', lw=3, label="Difference")
plt.grid()
plt.legend()
x, y = np.ogrid[-3:3:20j, -3:3:20j]
z = np.exp(-(x ** 2 + y ** 2)/1)-np.exp(-(x ** 2 + y ** 2)/4)/2
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, z)
plt.show()
class LaplaceSharp(Free):
title = 'Show how to Laplace Sharp'
asyn = False
def run(self, para = None):
x = np.linspace(-10,10,300)
y = np.arctan(x)
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3 = axes.flatten()
ax0.set_title('y = arctan(x)')
ax0.plot(x, y)
ax0.grid()
ax1.set_title("y = arctan(x)'")
ax1.plot(x, y)
ax1.plot(x, 1/(x**2+1))
ax1.grid()
ax2.set_title("y = arctan(x)''")
ax2.plot(x, y)
ax2.plot(x, (2*x)/(x**4+2*x**2+1))
ax2.grid()
ax3.set_title("y = arctan(x) + arctan(x)''")
ax3.plot(x, y)
ax3.plot(x, y+(2*x)/(x**4+2*x**2+1))
ax3.grid()
fig.tight_layout()
plt.show()
self.app.show_img([(((y*70)+128)*np.ones((30,1))).astype(np.uint8)], 'tan(x)')
self.app.show_img([((100/(x**2+1))*np.ones((30,1))).astype(np.uint8)], "tan(x)'")
self.app.show_img([((((2*x)/(x**4+2*x**2+1)*70)+128)*
np.ones((30,1))).astype(np.uint8)], "tan(x))''")
self.app.show_img([((((y+(2*x)/(x**4+2*x**2+1))*70)+128)*
np.ones((30,1))).astype(np.uint8)], "tan(x)+tan(x)''")
class UnSharp(Free):
title = 'Show how to Unsharp Mask'
asyn = False
def run(self, para = None):
x = np.linspace(-10,10,300)
y = np.arctan(x)
fig, axes = plt.subplots(nrows=2, ncols=2)
ax0, ax1, ax2, ax3 = axes.flatten()
gy = ndimg.gaussian_filter1d(y, 30)
ax0, ax1, ax2, ax3 = axes.flatten()
ax0.set_title('y = arctan(x)')
ax0.plot(x, y)
ax0.grid()
ax1.set_title("gaussian")
ax1.plot(x, y)
ax1.plot(x, gy)
ax1.grid()
ax2.set_title("y = arctan(x) - gaussian")
ax2.plot(x, y)
ax2.plot(x, y-gy)
ax2.grid()
ax3.set_title("y = arctan(x) + diff")
ax3.plot(x, y)
ax3.plot(x, y+2*(y-gy))
ax3.grid()
fig.tight_layout()
plt.show()
self.app.show_img([((y*70+128)*np.ones((30,1))).astype(np.uint8)], 'tan(x)')
self.app.show_img([((gy*70+128)*np.ones((30,1))).astype(np.uint8)], 'gaussian')
self.app.show_img([(((y-gy)*100+128)*np.ones((30,1))).astype(np.uint8)], 'arctan(x) - gaussian')
self.app.show_img([(((y+2*(y-gy))*70+128)*np.ones((30,1))).astype(np.uint8)], "arctan(x) + diff")
plgs = [Temperature, Shake, Inertia, GaussCore, LoGCore, DogCore, LaplaceSharp, UnSharp]
|
[
"matplotlib.pyplot.title",
"scipy.ndimage.gaussian_filter1d",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.exp",
"matplotlib.pyplot.gca",
"numpy.linspace",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"scipy.ndimage.convolve1d",
"matplotlib.pyplot.legend",
"numpy.hstack",
"numpy.arctan",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((258, 272), 'numpy.hstack', 'np.hstack', (['img'], {}), '(img)\n', (267, 272), True, 'import numpy as np, wx\n'), ((384, 433), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n', (392, 433), True, 'import numpy as np, wx\n'), ((430, 496), 'numpy.array', 'np.array', (['[1, 2, 1, 2, 2, 3, 8, 9, 8, 10, 9, 10]'], {'dtype': 'np.float32'}), '([1, 2, 1, 2, 2, 3, 8, 9, 8, 10, 9, 10], dtype=np.float32)\n', (438, 496), True, 'import numpy as np, wx\n'), ((493, 525), 'scipy.ndimage.convolve1d', 'ndimg.convolve1d', (['ys', '[0, 1, -1]'], {}), '(ys, [0, 1, -1])\n', (509, 525), True, 'import scipy.ndimage as ndimg\n'), ((619, 638), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xs', 'lbs'], {}), '(xs, lbs)\n', (629, 638), True, 'import matplotlib.pyplot as plt\n'), ((642, 685), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys', '"""-o"""'], {'label': '"""Temperature"""'}), "(xs, ys, '-o', label='Temperature')\n", (650, 685), True, 'import matplotlib.pyplot as plt\n'), ((688, 730), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ds', '"""-o"""'], {'label': '"""Difference"""'}), "(xs, ds, '-o', label='Difference')\n", (696, 730), True, 'import matplotlib.pyplot as plt\n'), ((733, 743), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (741, 743), True, 'import matplotlib.pyplot as plt\n'), ((768, 798), 'matplotlib.pyplot.title', 'plt.title', (['"""Temperature in XX"""'], {}), "('Temperature in XX')\n", (777, 798), True, 'import matplotlib.pyplot as plt\n'), ((801, 820), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Month"""'], {}), "('Month')\n", (811, 820), True, 'import matplotlib.pyplot as plt\n'), ((823, 852), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temperature (C)"""'], {}), "('Temperature (C)')\n", (833, 852), True, 'import matplotlib.pyplot as plt\n'), ((856, 866), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (864, 866), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1169), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (1136, 1169), True, 'import numpy as np, wx\n'), ((1168, 1232), 'numpy.array', 'np.array', (['[10, -9, 8, -7, 6, -5, 4, -3, 2, -1]'], {'dtype': 'np.float32'}), '([10, -9, 8, -7, 6, -5, 4, -3, 2, -1], dtype=np.float32)\n', (1176, 1232), True, 'import numpy as np, wx\n'), ((1231, 1274), 'scipy.ndimage.convolve1d', 'ndimg.convolve1d', (['ys', '[1 / 3, 1 / 3, 1 / 3]'], {}), '(ys, [1 / 3, 1 / 3, 1 / 3])\n', (1247, 1274), True, 'import scipy.ndimage as ndimg\n'), ((1281, 1318), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys', '"""-o"""'], {'label': '"""Shake"""'}), "(xs, ys, '-o', label='Shake')\n", (1289, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1321, 1360), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ds', '"""-o"""'], {'label': '"""Damping"""'}), "(xs, ds, '-o', label='Damping')\n", (1329, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1373), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1371, 1373), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1424), 'matplotlib.pyplot.title', 'plt.title', (['"""Shake Damping"""'], {}), "('Shake Damping')\n", (1407, 1424), True, 'import matplotlib.pyplot as plt\n'), ((1427, 1445), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (1437, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1448, 1471), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (1458, 1471), True, 
'import matplotlib.pyplot as plt\n'), ((1474, 1484), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1482, 1484), True, 'import matplotlib.pyplot as plt\n'), ((1691, 1732), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (1699, 1732), True, 'import numpy as np, wx\n'), ((1731, 1799), 'numpy.array', 'np.array', (['[90, 88, 93, 95, 91, 70, 89, 92, 94, 89]'], {'dtype': 'np.float32'}), '([90, 88, 93, 95, 91, 70, 89, 92, 94, 89], dtype=np.float32)\n', (1739, 1799), True, 'import numpy as np, wx\n'), ((1798, 1841), 'scipy.ndimage.convolve1d', 'ndimg.convolve1d', (['ys', '[1 / 3, 1 / 3, 1 / 3]'], {}), '(ys, [1 / 3, 1 / 3, 1 / 3])\n', (1814, 1841), True, 'import scipy.ndimage as ndimg\n'), ((1848, 1893), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys', '"""-o"""'], {'label': '"""Psychological"""'}), "(xs, ys, '-o', label='Psychological')\n", (1856, 1893), True, 'import matplotlib.pyplot as plt\n'), ((1896, 1935), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ds', '"""-o"""'], {'label': '"""Inertia"""'}), "(xs, ds, '-o', label='Inertia')\n", (1904, 1935), True, 'import matplotlib.pyplot as plt\n'), ((1938, 1948), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1946, 1948), True, 'import matplotlib.pyplot as plt\n'), ((1973, 2007), 'matplotlib.pyplot.title', 'plt.title', (['"""Psychological Inertia"""'], {}), "('Psychological Inertia')\n", (1982, 2007), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2028), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (2020, 2028), True, 'import matplotlib.pyplot as plt\n'), ((2031, 2050), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (2041, 2050), True, 'import matplotlib.pyplot as plt\n'), ((2053, 2063), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2061, 2063), True, 'import matplotlib.pyplot as plt\n'), ((2315, 2345), 'numpy.exp', 'np.exp', (['(-(x ** 2 + y ** 2) / 1)'], {}), '(-(x ** 2 + y ** 2) / 1)\n', (2321, 2345), True, 'import numpy as np, wx\n'), ((2352, 2364), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2362, 2364), True, 'import matplotlib.pyplot as plt\n'), ((2445, 2475), 'numpy.exp', 'np.exp', (['(-(x ** 2 + y ** 2) / 4)'], {}), '(-(x ** 2 + y ** 2) / 4)\n', (2451, 2475), True, 'import numpy as np, wx\n'), ((2482, 2494), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2492, 2494), True, 'import matplotlib.pyplot as plt\n'), ((2571, 2581), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2579, 2581), True, 'import matplotlib.pyplot as plt\n'), ((2686, 2698), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2696, 2698), True, 'import matplotlib.pyplot as plt\n'), ((2705, 2727), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(50)'], {}), '(-3, 3, 50)\n', (2716, 2727), True, 'import numpy as np, wx\n'), ((2732, 2747), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (2738, 2747), True, 'import numpy as np, wx\n'), ((2780, 2809), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""Gauss"""'}), "(x, y, label='Gauss')\n", (2788, 2809), True, 'import matplotlib.pyplot as plt\n'), ((2812, 2845), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(-dy)'], {'label': '"""Gauss\'\'"""'}), '(x, -dy, label="Gauss\'\'")\n', (2820, 2845), True, 'import matplotlib.pyplot as plt\n'), ((2848, 2858), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2856, 2858), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2873), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2871, 2873), True, 'import matplotlib.pyplot as plt\n'), ((2986, 2998), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2996, 2998), True, 'import matplotlib.pyplot as plt\n'), ((3076, 3086), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3084, 3086), True, 'import matplotlib.pyplot as plt\n'), ((3195, 3207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3205, 3207), True, 'import matplotlib.pyplot as plt\n'), ((3214, 3236), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(50)'], {}), '(-3, 3, 50)\n', (3225, 3236), True, 'import numpy as np, wx\n'), ((3241, 3256), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (3247, 3256), True, 'import numpy as np, wx\n'), ((3282, 3315), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""sigma = 1"""'}), "(x, y, label='sigma = 1')\n", (3290, 3315), True, 'import matplotlib.pyplot as plt\n'), ((3318, 3352), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'yy'], {'label': '"""sigma = 2"""'}), "(x, yy, label='sigma = 2')\n", (3326, 3352), True, 'import matplotlib.pyplot as plt\n'), ((3356, 3406), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(y - yy)', '"""r"""'], {'lw': '(3)', 'label': '"""Difference"""'}), "(x, y - yy, 'r', lw=3, label='Difference')\n", (3364, 3406), True, 'import matplotlib.pyplot as plt\n'), ((3407, 3417), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3415, 3417), True, 'import matplotlib.pyplot as plt\n'), ((3420, 3432), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3430, 3432), True, 'import matplotlib.pyplot as plt\n'), ((3545, 3557), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3555, 3557), True, 'import matplotlib.pyplot as plt\n'), ((3634, 3644), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3642, 3644), True, 'import matplotlib.pyplot as plt\n'), ((3759, 3784), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(300)'], {}), '(-10, 10, 300)\n', (3770, 3784), True, 'import numpy as np, wx\n'), ((3789, 3801), 'numpy.arctan', 'np.arctan', (['x'], {}), '(x)\n', (3798, 3801), True, 'import numpy as np, wx\n'), ((3817, 3847), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (3829, 3847), True, 'import matplotlib.pyplot as plt\n'), ((4280, 4290), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4288, 4290), True, 'import matplotlib.pyplot as plt\n'), ((4790, 4815), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(300)'], {}), '(-10, 10, 300)\n', (4801, 4815), True, 'import numpy as np, wx\n'), ((4820, 4832), 'numpy.arctan', 'np.arctan', (['x'], {}), '(x)\n', (4829, 4832), True, 'import numpy as np, wx\n'), ((4847, 4877), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)'}), '(nrows=2, ncols=2)\n', (4859, 4877), True, 'import matplotlib.pyplot as plt\n'), ((4923, 4953), 'scipy.ndimage.gaussian_filter1d', 'ndimg.gaussian_filter1d', (['y', '(30)'], {}), '(y, 30)\n', (4946, 4953), True, 'import scipy.ndimage as ndimg\n'), ((5344, 5354), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5352, 5354), True, 'import matplotlib.pyplot as plt\n'), ((2753, 2768), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (2759, 2768), True, 'import numpy as np, wx\n'), ((3262, 3281), 'numpy.exp', 'np.exp', (['(-x ** 2 / 4)'], {}), '(-x ** 2 / 4)\n', (3268, 3281), True, 'import numpy as np, wx\n'), ((3477, 3507), 'numpy.exp', 'np.exp', (['(-(x ** 2 + y ** 2) / 
1)'], {}), '(-(x ** 2 + y ** 2) / 1)\n', (3483, 3507), True, 'import numpy as np, wx\n'), ((746, 755), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (753, 755), True, 'import matplotlib.pyplot as plt\n'), ((1376, 1385), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1383, 1385), True, 'import matplotlib.pyplot as plt\n'), ((1951, 1960), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1958, 1960), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2953), 'numpy.exp', 'np.exp', (['(-y ** 2 - x ** 2)'], {}), '(-y ** 2 - x ** 2)\n', (2935, 2953), True, 'import numpy as np, wx\n'), ((2959, 2983), 'numpy.exp', 'np.exp', (['(-x ** 2 - y ** 2)'], {}), '(-x ** 2 - y ** 2)\n', (2965, 2983), True, 'import numpy as np, wx\n'), ((3506, 3536), 'numpy.exp', 'np.exp', (['(-(x ** 2 + y ** 2) / 4)'], {}), '(-(x ** 2 + y ** 2) / 4)\n', (3512, 3536), True, 'import numpy as np, wx\n'), ((4326, 4342), 'numpy.ones', 'np.ones', (['(30, 1)'], {}), '((30, 1))\n', (4333, 4342), True, 'import numpy as np, wx\n'), ((4409, 4425), 'numpy.ones', 'np.ones', (['(30, 1)'], {}), '((30, 1))\n', (4416, 4425), True, 'import numpy as np, wx\n'), ((4515, 4531), 'numpy.ones', 'np.ones', (['(30, 1)'], {}), '((30, 1))\n', (4522, 4531), True, 'import numpy as np, wx\n'), ((4627, 4643), 'numpy.ones', 'np.ones', (['(30, 1)'], {}), '((30, 1))\n', (4634, 4643), True, 'import numpy as np, wx\n'), ((5388, 5404), 'numpy.ones', 'np.ones', (['(30, 1)'], {}), '((30, 1))\n', (5395, 5404), True, 'import numpy as np, wx\n'), ((5468, 5484), 'numpy.ones', 'np.ones', (['(30, 1)'], {}), '((30, 1))\n', (5475, 5484), True, 'import numpy as np, wx\n'), ((5555, 5571), 'numpy.ones', 'np.ones', (['(30, 1)'], {}), '((30, 1))\n', (5562, 5571), True, 'import numpy as np, wx\n'), ((5659, 5675), 'numpy.ones', 'np.ones', (['(30, 1)'], {}), '((30, 1))\n', (5666, 5675), True, 'import numpy as np, wx\n')]
|
import pandas as pd
import numpy as np
from texttable import Texttable
from cape_privacy.pandas import dtypes
from cape_privacy.pandas.transformations import NumericPerturbation
from cape_privacy.pandas.transformations import DatePerturbation
from cape_privacy.pandas.transformations import NumericRounding
from cape_privacy.pandas.transformations import Tokenizer
from faker import Faker
from anonympy.pandas import utils_pandas as _utils
from sklearn.decomposition import PCA
class dfAnonymizer(object):
"""
Initializes pandas DataFrame as a dfAnonymizer object.
Parameters:
----------
df: pandas DataFrame
Returns:
----------
dfAnonymizer object
Raises
----------
Exception:
* If ``df`` is not a DataFrame
See also
----------
dfAnonymizer.to_df : Return a DataFrame
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
Constructing a dfAnonymizer object:
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.to_df()
name age ... email ssn
0 Bruce 33 ... <EMAIL> 343554334
1 Tony 48 ... <EMAIL> 656564664
"""
def __init__(self,
df: pd.DataFrame):
if df.__class__.__name__ != "DataFrame":
raise Exception(f"{df} is not a pandas DataFrame.")
# Private Attributes
self._df = df.copy()
self._df2 = df.copy()
self._methods_applied = {}
self._synthetic_data = 'Synthetic Data'
self._tokenization = 'Tokenization'
self._numeric_perturbation = 'Numeric Perturbation'
self._datetime_perturbation = 'Datetime Perturbation'
self._round = 'Generalization - Rounding'
self._bin = 'Generalization - Binning'
self._drop = 'Column Suppression'
self._sample = 'Resampling'
self._PCA = 'PCA Masking'
self._email = 'Partial Masking'
# Public Attributes
self.anonymized_columns = []
self.columns = self._df.columns.tolist()
self.unanonymized_columns = self.columns.copy()
self.numeric_columns = _utils.get_numeric_columns(self._df)
self.categorical_columns = _utils.get_categorical_columns(self._df)
self.datetime_columns = _utils.get_datetime_columns(self._df)
self._available_methods = _utils.av_methods
self._fake_methods = _utils.faker_methods
def __str__(self):
return self._info().draw()
def __repr__(self):
return self._info().draw()
def _dtype_checker(self, column: str):
'''
Returns the cape-privacy dtype corresponding to the column's numpy dtype
Parameters
----------
column: str
Returns
----------
dtype: cape-privacy dtype (e.g. ``dtypes.Integer``), or None if the column is not numeric
'''
dtype = self._df[column].dtype
if dtype == np.float32:
return dtypes.Float
elif dtype == np.float64:
return dtypes.Double
elif dtype == np.byte:
return dtypes.Byte
elif dtype == np.short:
return dtypes.Short
elif dtype == np.int32:
return dtypes.Integer
elif dtype == np.int64:
return dtypes.Long
else:
return None
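# Note: cape-privacy transformations such as NumericPerturbation and
# NumericRounding expect one of cape's dtype objects rather than a raw numpy
# dtype, so this mapping is looked up before building each transformation.
# A minimal sketch (hypothetical column name):
#   dtype = self._dtype_checker('age')                 # dtypes.Long for int64
#   noise = NumericPerturbation(dtype=dtype, min=-10, max=10, seed=42)
#   perturbed = noise(self._df['age'].copy())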
def anonymize(self,
methods=None,
locale=['en_US'],
seed=None,
inplace=True):
'''
Anonymize all columns, choosing a method appropriate for each dtype.
If a methods dictionary is not provided, ``numeric_rounding`` is applied
to numerical columns, ``categorical_fake`` and ``categorical_tokenization``
to categorical columns, and ``datetime_noise`` or ``datetime_fake`` to
datetime columns.
Parameters
----------
methods : Optional[Dict[str, str]], default None
{column_name: anonympy_method}. Call ``available_methods`` for a list
of all methods.
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
seed : Optional[int], default None
Pass an integer for reproducible output across multiple function
calls.
Returns
----------
If inplace is False, pandas Series or DataFrame is returned
See Also
--------
dfAnonymizer.categorical_fake_auto : Replace values with synthetically
generated ones
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset, \
available_methods
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
If methods None:
>>> anonym.anonymize(inplace = False)
name age ... email ssn
0 <NAME> 30 ... <EMAIL> 718-51-5290
1 <NAME> 50 ... <EMAIL> 684-81-8137
Passing a dict for specifying which methods to apply:
>>> available_methods('numeric')
numeric_noise numeric_binning numeric_masking numeric_rounding
>>> anonym.anonymize({'name':'categorical_fake',
... 'age':'numeric_noise',
... 'email':'categorical_email_masking',
... 'salary': 'numeric_rounding'}, inplace = False)
name age email salary
0 <NAME> 37 <EMAIL> 60000.0
1 <NAME> 52 <EMAIL> 50000.0
'''
if not methods:
if inplace:
# try synthetic data
self.categorical_fake_auto(locale=locale, seed=seed)
# if there are still columns left unanonymized
if self.unanonymized_columns:
for column in self.unanonymized_columns.copy():
if column in self.numeric_columns:
self.numeric_rounding(column)
elif column in self.categorical_columns:
self.categorical_tokenization(column,
key=str(seed))
elif column in self.datetime_columns:
self.datetime_noise(column, seed=seed)
else:
# try synthetic data
temp = self.categorical_fake_auto(locale=locale,
inplace=False,
seed=seed)
unanonymized = self.unanonymized_columns.copy()
if isinstance(temp, pd.DataFrame):
unanonymized = [column for column in unanonymized
if column not in temp.columns.to_list()]
elif isinstance(temp, pd.Series):
unanonymized.remove(temp.name)
temp = pd.DataFrame(temp)
else:  # no columns matched faker methods; start from an empty DataFrame
temp = pd.DataFrame()
if unanonymized:
for column in unanonymized:
if column in self.numeric_columns:
temp[column] = self.numeric_rounding(column,
inplace=False)
elif column in self.categorical_columns:
temp[column] = self.categorical_tokenization(
column,
inplace=False,
key=str(seed))
elif column in self.datetime_columns:
temp[column] = self.datetime_noise(column,
inplace=False,
seed=seed)
return temp
# if dictionary with methods was passed
else:
if inplace:
for key, value in methods.items():
# numeric
if value == "numeric_noise":
self.numeric_noise(key, seed=seed)
elif value == "numeric_binning":
self.numeric_binning(key)
elif value == "numeric_masking":
self.numeric_masking(key)
elif value == "numeric_rounding":
self.numeric_rounding(key)
# categorical
elif value == "categorical_fake":
self.categorical_fake(key, seed=seed)
elif value == "categorical_resampling":
self.categorical_resampling(key, seed=seed)
elif value == "categorical_tokenization":
self.categorical_tokenization(key, key=str(seed))
elif value == "categorical_email_masking":
self.categorical_email_masking(key)
# datetime
elif value == "datetime_fake":
self.datetime_fake(key, seed=seed)
elif value == "datetime_noise":
self.datetime_noise(key, seed=seed)
# drop
elif value == "column_suppression":
self.column_suppression(key)
else:
temp = pd.DataFrame()
for key, value in methods.items():
# numeric
if value == "numeric_noise":
temp[key] = self.numeric_noise(key,
inplace=False,
seed=seed)
elif value == "numeric_binning":
temp[key] = self.numeric_binning(key, inplace=False)
elif value == "numeric_masking":
temp[key] = self.numeric_masking(key, inplace=False)
elif value == "numeric_rounding":
temp[key] = self.numeric_rounding(key, inplace=False)
# categorical
elif value == "categorical_fake":
temp[key] = self.categorical_fake(key,
inplace=False,
seed=seed)
elif value == "categorical_resampling":
temp[key] = self.categorical_resampling(key,
inplace=False,
seed=seed)
elif value == "categorical_tokenization":
temp[key] = self.categorical_tokenization(
key,
inplace=False,
key=str(seed))
elif value == 'categorical_email_masking':
temp[key] = self.categorical_email_masking(
key,
inplace=False)
# datetime
elif value == "datetime_fake":
temp[key] = self.datetime_fake(key,
inplace=False,
seed=seed)
elif value == "datetime_noise":
temp[key] = self.datetime_noise(key,
inplace=False,
seed=seed)
# drop
elif value == "column_suppression":
pass
if len(temp.columns) > 1:
return temp
elif len(temp.columns) == 1:
return pd.Series(temp[temp.columns[0]])
def _fake_column(self,
column,
method,
locale=['en_US'],
seed=None,
inplace=True):
'''
Anonymize pandas Series object using synthetic data generator
Based on faker.Faker.
Parameters
----------
column : str
Column name whose data will be substituted.
method : str
Method name. Call ``fake_methods`` for the list of all methods.
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
seed : Optional[int], default None
Pass an integer for reproducible output across multiple function
calls.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
None if inplace is True, else pandas Series is returned
See also
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
'''
Faker.seed(seed)
fake = Faker(locale=locale)
method = getattr(fake, method)
faked = self._df[column].apply(lambda x: method())
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = faked
self.unanonymized_columns.remove(column)
self.anonymized_columns.append(column)
self._methods_applied[column] = self._synthetic_data
else:
return faked
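# A minimal sketch of how the public wrappers drive _fake_column
# (hypothetical column names; `anonym` is a dfAnonymizer instance):
#   anonym._fake_column('name', 'name', seed=42)              # replace in place
#   ser = anonym._fake_column('city', 'city', inplace=False)  # return a Series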
def categorical_fake(self,
columns,
locale=['en_US'],
seed=None,
inplace=True):
'''
Replace data with synthetic data using faker's generator.
To see the list of all faker's methods, call ``fake_methods``.
If the column name matches the faker method to apply, pass a string or a
list of strings as the `columns` argument.
Otherwise, pass a dictionary with the column name as key and the faker
method as value, `{col_name: fake_method}`.
Parameters
----------
columns : Union[str, List[str], Dict[str, str]]
If a string or list of strings is passed, the function assumes that the
faker method name is the same as the column name.
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
seed : Optional[int], default None
Pass an integer for reproducible output across multiple function
calls.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
None if inplace is True, else pandas Series or pandas DataFrame is
returned
See Also
--------
dfAnonymizer.categorical_fake_auto : Replace values with synthetically
generated ones
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
If methods are not specified, locale Great Britain:
>>> anonym.categorical_fake(['name', 'email', 'ssn'],
... locale = 'en_GB',
... inplace = False)
name email ssn
0 <NAME> <EMAIL> ZZ 180372 T
1 <NAME> <EMAIL> ZZ780511T
Passing a specific method, locale Russia:
>>> fake_methods('n')
name, name_female, name_male, name_nonbinary, nic_handle,
nic_handles, null_boolean, numerify
>>> anonym.categorical_fake({'name': 'name_nonbinary', 'web': 'url'},
... locale = 'ru_RU',
... inplace = False)
name web
0 <NAME> https://shestakov.biz
1 <NAME> https://monetka.net
'''
# if a single column is passed (str)
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
if inplace:
self._fake_column(columns,
columns,
inplace=True,
seed=seed,
locale=locale)
else:
return self._fake_column(columns,
columns,
inplace=False,
seed=seed,
locale=locale)
# if a list of columns is passed
elif isinstance(columns, list):
temp = pd.DataFrame()
if inplace:
for column in columns:
self._fake_column(column,
column,
inplace=True,
seed=seed,
locale=locale)
else:
for column in columns:
faked = self._fake_column(column,
column,
inplace=False,
seed=seed,
locale=locale)
temp[column] = faked
return temp
# if a dictionary with column name and method name is passed
elif isinstance(columns, dict):
temp = pd.DataFrame()
if inplace:
for column, method in columns.items():
self._fake_column(column,
method,
inplace=True,
seed=seed,
locale=locale)
else:
for column, method in columns.items():
faked = self._fake_column(column,
method,
inplace=False,
seed=seed,
locale=locale)
temp[column] = faked
if len(columns) == 1:
return temp[column]
else:
return temp
def categorical_fake_auto(self,
locale=['en_US'],
seed=None,
inplace=True):
'''
Anonymize only those columns whose names appear in the ``fake_methods`` list.
Parameters
----------
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
seed : Optional[int], default None
Pass an integer for reproducible output across multiple function
calls.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object, else
output is returned.
Returns
----------
None if inplace = True, else an anonymized pandas Series or pandas
DataFrame
See also
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
Notes
----------
In order to produce synthetic data, the column name must match a faker
method name.
The function goes over all columns and, if a column name matches any
faker method, its values are replaced.
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset, fake_methods
Change column names so the function can understand which method to
apply:
>>> df = load_dataset()
>>> fake_methods('n')
name, name_female, name_male, name_nonbinary, nic_handle,
nic_handles, null_boolean, numerify
>>> df.rename(columns={'name': 'name_female'}, inplace = True)
>>> anonym = dfAnonymizer(df)
Calling the method without specifying which methods to apply, locale
Japan:
>>> anonym.categorical_fake_auto(locale = 'ja_JP',
... inplace = False)
name_female email ssn
0 西村 あすか <EMAIL> 783-28-2531
1 山口 直子 <EMAIL> 477-58-9577
'''
temp = pd.DataFrame()
for column in self.columns:
func = column.strip().lower()
if func in _utils._fake_methods:
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._fake_column(column,
func,
inplace=True,
seed=seed,
locale=locale)
else:
temp[column] = self._fake_column(column,
func,
inplace=False,
seed=seed,
locale=locale)
if not inplace:
if len(temp.columns) > 1:
return temp
elif len(temp.columns) == 1:
return pd.Series(temp[temp.columns[0]])
else:
return None
def numeric_noise(self,
columns,
MIN=-10,
MAX=10,
seed=None,
inplace=True):
'''
Add uniform random noise
Based on cape-privacy's NumericPerturbation function.
Mask a numeric pandas Series/DataFrame by adding uniform random
noise to each value. The amount of noise is drawn from
the interval [min, max).
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
MIN : (int, float), default -10
The values generated will be greater than or equal to min.
MAX : (int, float), default 10
The values generated will be less than max.
seed : int, default None
To initialize the random generator.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object,
else output is returned.
Returns
----------
ser: pandas Series or pandas DataFrame with uniform random noise added
See also
----------
dfAnonymizer.numeric_binning : Bin values into discrete intervals
dfAnonymizer.numeric_masking : Apply PCA masking to numeric values
dfAnonymizer.numeric_rounding : Round values to the given number
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Applying numeric perturbation:
>>> anonym.numeric_noise('age', inplace = False)
0 29
1 48
dtype: int64
'''
# If a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
dtype = self._dtype_checker(columns)
noise = NumericPerturbation(dtype=dtype,
min=MIN,
max=MAX,
seed=seed)
ser = noise(self._df[columns].copy()).astype(dtype)
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._numeric_perturbation
else:
return ser.astype(dtype)
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
dtype = self._dtype_checker(column)
noise = NumericPerturbation(dtype=dtype,
min=MIN,
max=MAX,
seed=seed)
ser = noise(self._df[column].copy()).astype(dtype)
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._numeric_perturbation # noqa: E501
else:
temp[column] = ser
if not inplace:
return temp
def datetime_noise(self,
columns,
frequency=("MONTH", "DAY"),
MIN=(-10, -5, -5),
MAX=(10, 5, 5),
seed=None,
inplace=True):
'''
Add uniform random noise to a Pandas series of timestamps
Based on cape-privacy's DatePerturbation function
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
frequency : Union[str, Tuple[str]], default ("MONTH", "DAY")
One or more frequencies to perturb
MIN : Union[int, Tuple[int, ...]], default (-10, -5, -5)
The values generated will be greater than or equal to min.
MAX : Union[int, Tuple[int, ...]], default (10, 5, 5)
The values generated will be less than max.
seed : int, default None
To initialize the random generator.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object,
else output is returned.
Returns
----------
ser: pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.datetime_fake : Replace values with synthetic dates
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Calling the method while specifying the frequencies to perturb:
>>> anonym.datetime_noise('birthdate',
frequency=('YEAR', 'MONTH', 'DAY'),
inplace = False)
0 1916-03-16
1 1971-04-24
Name: birthdate, dtype: datetime64[ns]
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
noise = DatePerturbation(frequency=frequency,
min=MIN,
max=MAX,
seed=seed)
ser = noise(self._df[columns].copy())
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._datetime_perturbation # noqa: E501
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
noise = DatePerturbation(frequency=frequency,
min=MIN,
max=MAX,
seed=seed)
ser = noise(self._df[column].copy())
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._datetime_perturbation # noqa: E501
else:
temp[column] = ser
if not inplace:
return temp
def numeric_rounding(self,
columns,
precision=None,
inplace=True):
'''
Round each value in the Pandas Series to the given number of digits
Based on cape-privacy's NumericRounding.
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
precision : int, default None
The number of integer digits to round away (values are rounded to the
nearest ``10**precision``). If None, it is derived from the column mean.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object,
else output is returned.
Returns
----------
pandas Series or pandas DataFrame if inplace = False, else None
See also
----------
dfAnonymizer.numeric_binning : Bin values into discrete intervals
dfAnonymizer.numeric_masking : Apply PCA masking
dfAnonymizer.numeric_noise : Add uniform random noise
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Apply Numeric Rounding:
>>> anonym.numeric_rounding(['age', 'salary'], inplace = False)
age salary
0 30 60000.0
1 50 50000.0
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
dtype = self._dtype_checker(columns)
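# If no precision is given, derive it from the column mean: e.g. a mean of
# 55000 has 5 digits, giving precision 4, so values are rounded to the
# nearest 10**4.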
if precision is None:
precision = len(str(int(self._df[columns].mean()))) - 1
rounding = NumericRounding(dtype=dtype, precision=-precision)
ser = rounding(self._df[columns].copy()).astype(dtype)
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._round
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
dtype = self._dtype_checker(column)
precision = len(str(int(self._df[column].mean()))) - 1
rounding = NumericRounding(dtype=dtype, precision=-precision)
ser = rounding(self._df[column].copy())
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._round
else:
temp[column] = ser.astype(dtype)
if not inplace:
return temp
def numeric_masking(self,
columns,
inplace=True):
'''
Apply PCA masking to a column/columns
Based on sklearn's PCA function
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
inplace : bool, default True
If True the changes will be applied to the `dfAnonymizer` object,
else output is returned.
Returns
----------
ser : pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.numeric_binning : Bin values into discrete intervals
dfAnonymizer.numeric_rounding : Round values to the given number
dfAnonymizer.numeric_noise : Add uniform random noise
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Apply PCA Masking:
>>> num_cols = anonym.numeric_columns
>>> anonym.numeric_masking(num_cols, inplace = False)
age salary
0 -4954.900676 5.840671e-15
1 4954.900676 5.840671e-15
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
pca = PCA(n_components=1)
ser = pd.DataFrame(pca.fit_transform(self._df[[columns]]),
columns=[columns])
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser[columns]
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._PCA
else:
return ser[columns]
# if a list of columns is passed
else:
if not inplace:
pca = PCA(n_components=len(columns))
return pd.DataFrame(pca.fit_transform(self._df[columns]),
columns=columns)
else:
for column in columns:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._PCA
pca = PCA(n_components=len(columns))
self._df[columns] = pca.fit_transform(self._df[columns])
def categorical_tokenization(self,
columns,
max_token_len=10,
key=None,
inplace=True):
'''
Maps a string to a token (hexadecimal string) to obfuscate it.
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
max_token_len : int, default 10
Control the token length.
key : str, default None
String or Byte String. If not specified, key will be set to a
random byte string.
inplace : bool, default True
            If True the changes will be applied to `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
dfAnonymizer.categorical_resampling : Resample values from the same
distribution
dfAnonymizer.categorical_email_masking : Apply partial masking to
emails
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Passing only categorical columns:
>>> anonym.categorical_columns
['name', 'web', 'email', 'ssn']
>>> anonym.categorical_tokenization(['name', 'web', 'email', 'ssn'],
inplace = False)
name web email ssn
0 a6488532f8 f8516a7ce9 a07981a4d6 9285bc9cb7
1 f7231e5026 44dfa9af8e 25ca1a128b a7a16a7c7d
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
tokenize = Tokenizer(max_token_len=max_token_len, key=key)
ser = tokenize(self._df[columns])
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._tokenization
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
tokenize = Tokenizer(max_token_len=max_token_len, key=key)
ser = tokenize(self._df[column])
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._tokenization
else:
temp[column] = ser
if not inplace:
return temp
def _mask(self, s):
'''
Mask a single email
Parameters
----------
s : str
string to mask.
Returns
----------
masked : str
See also
----------
dfAnonymizer.categorical_email_masking : Apply partial masking to email
'''
lo = s.find('@')
if lo > 0:
masked = s[0] + '*****' + s[lo-1:]
return masked
else:
raise Exception('Invalid Email')
def categorical_email_masking(self,
columns,
inplace=True):
'''
Apply Partial Masking to emails.
Parameters
----------
columns: Union[str, List[str]]
Column name or a list of column names.
inplace: Optional[bool] = True
            If True the changes will be applied to `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
dfAnonymizer.categorical_resampling : Resample values from the same
distribution
dfAnonymizer.categorical_tokenization : Map a string to a token
Notes
----------
        Applicable only to columns containing email strings.
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Calling the method on email column:
>>> anonym.categorical_email_masking('email', inplace=False)
0 <EMAIL>
1 <EMAIL>
Name: email, dtype: object
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
ser = self._df[columns].apply(lambda x: self._mask(x))
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._email
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
ser = self._df[column].apply(lambda x: self._mask(x))
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._email
else:
temp[column] = ser
if not inplace:
return temp
def datetime_fake(self,
columns,
pattern='%Y-%m-%d',
end_datetime=None,
seed=None,
locale=['en_US'],
inplace=True):
'''
        Replace the column's values with synthetic dates between January 1, 1970
and now.
Based on faker `date()` method
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
pattern : str, default '%Y-%m-%d'
end_datetime : Union[datetime.date, datetime.datetime,
datetime.timedelta, str, int, None], default None
locale : str or List[str], default ['en_US']
See https://faker.readthedocs.io/en/master/locales.html for all
faker's locales.
inplace : bool, default True
            If True the changes will be applied to `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.datetime_noise : Add uniform random noise to the column
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Calling the method with specifying the datetime column
>>> anonym.datetime_fake('birthdate', inplace = False)
0 2018-04-09
1 2005-05-28
Name: birthdate, dtype: datetime64[ns]
'''
Faker.seed(seed)
fake = Faker(locale=locale)
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
ser = self._df[columns].apply(lambda x: pd.to_datetime(fake.date(
pattern=pattern,
end_datetime=end_datetime)))
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._synthetic_data
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
ser = self._df[column].apply(
lambda x: pd.to_datetime(fake.date(
pattern=pattern,
end_datetime=end_datetime)))
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._synthetic_data
else:
temp[column] = ser
if not inplace:
return temp
def column_suppression(self,
columns,
inplace=True):
'''
Redact a column (drop)
Based on pandas `drop` method
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
inplace : bool, default True
            If True the changes will be applied to `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : None if inplace = True, else pandas Series or pandas DataFrame
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.to_df()
name age ... email ssn
0 Bruce 33 ... <EMAIL> 343554334
1 Tony 48 ... <EMAIL> 656564664
Dropping `ssn` column
>>> anonym.column_suppression('ssn', inplace = False)
name age ... web email # noqa: E501
0 Bruce 33 ... http://www.alandrosenburgcpapc.co.uk <EMAIL>
1 Tony 48 ... http://www.capgeminiamerica.co.uk <EMAIL>
'''
# if single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df.drop(columns, axis=1, inplace=True)
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._drop
else:
return self._df2.drop(columns, axis=1, inplace=False)
# if a list of columns is passed
else:
if inplace:
for column in columns:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df.drop(column, axis=1, inplace=True)
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._drop
else:
return self._df2.drop(columns, axis=1, inplace=False)
def numeric_binning(self,
columns,
bins=4,
inplace=True):
'''
Bin values into discrete intervals.
Based on pandas `cut` method
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
bins : int, default 4
            The number of equal-width bins to split the column's values into.
inplace : bool, default True
            If True the changes will be applied to `dfAnonymizer` object,
else output is returned.
Returns
----------
ser : None if inplace = True, else pandas Series or pandas DataFrame
See also
----------
dfAnonymizer.numeric_noise : Add uniform random noise
dfAnonymizer.numeric_masking : Apply PCA masking to numeric values
dfAnonymizer.numeric_rounding : Round values to the given number
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
Call the method with specifying the number of bins:
>>> anonym.numeric_binning('age', bins = 2, inplace = False)
0 (33.0, 40.0]
1 (40.0, 48.0]
Name: age, dtype: category
'''
# if a single column is passed
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
ser = pd.cut(self._df[columns], bins=bins, precision=0)
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = ser
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._bin
else:
return ser
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
ser = pd.cut(self._df[column], bins=bins, precision=0)
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = ser
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._bin
else:
temp[column] = ser
if not inplace:
return temp
def categorical_resampling(self,
columns,
seed=None,
inplace=True):
'''
        Resample values from the column's empirical distribution.
Parameters
----------
columns : Union[str, List[str]]
Column name or a list of column names.
inplace : bool, default True
            If True the changes will be applied to `dfAnonymizer` object, else
output is returned.
Returns
----------
ser : None if inplace = True, else pandas Series or pandas DataFrame
See also:
----------
dfAnonymizer.categorical_fake : Replace values with synthetically
generated ones by specifying which methods to apply
dfAnonymizer.categorical_email_masking : Apply partial masking to
email column
dfAnonymizer.categorical_tokenization : Map a string to a token
Notes
----------
        This method should be used on categorical data with a finite number of
unique elements.
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.categorical_resampling('name', inplace =False)
0 Bruce
1 Bruce
dtype: object
'''
# if a single column is passed
np.random.seed(seed)
if isinstance(columns, str) or (len(columns) == 1 and
isinstance(columns, list)):
if isinstance(columns, list):
columns = columns[0]
counts = self._df[columns].value_counts(normalize=True)
if inplace:
if columns in self.anonymized_columns:
print(f'`{columns}` column already anonymized!')
else:
self._df[columns] = np.random.choice(counts.index,
p=counts.values,
size=len(self._df))
self.anonymized_columns.append(columns)
self.unanonymized_columns.remove(columns)
self._methods_applied[columns] = self._sample
else:
return pd.Series(np.random.choice(counts.index,
p=counts.values,
size=len(self._df)))
# if a list of columns is passed
else:
temp = pd.DataFrame()
for column in columns:
counts = self._df[column].value_counts(normalize=True)
if inplace:
if column in self.anonymized_columns:
print(f'`{column}` column already anonymized!')
else:
self._df[column] = np.random.choice(counts.index,
p=counts.values,
size=len(self._df))
self.anonymized_columns.append(column)
self.unanonymized_columns.remove(column)
self._methods_applied[column] = self._sample
else:
temp[column] = np.random.choice(counts.index,
p=counts.values,
size=len(self._df))
if not inplace:
return temp
def _info(self):
'''
        Print a summary of the DataFrame.
Which columns have been anonymized and which haven't.
Returns
----------
None
See also
----------
        dfAnonymizer.info : Print a summary of the DataFrame
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
        This method is called when the `dfAnonymizer` instance is displayed
>>> anonym
+-------------------------------+
| Total number of columns: 7 |
+===============================+
| Anonymized Column -> Method: |
+-------------------------------+
| Unanonymized Columns: |
| - name |
| - age |
| - birthdate |
| - salary |
| - web |
| - email |
| - ssn |
+-------------------------------+
'''
t = Texttable(max_width=150)
header = f'Total number of columns: {self._df.shape[1]}'
row1 = 'Anonymized Column -> Method: '
for column in self.anonymized_columns:
row1 += '\n- ' + column + ' -> ' + \
self._methods_applied.get(column)
row2 = 'Unanonymized Columns: \n'
row2 += '\n'.join([f'- {i}' for i in self.unanonymized_columns])
t.add_rows([[header], [row1], [row2]])
return t
def info(self):
'''
        Print a summary of the DataFrame.
Which columns have been anonymized using which methods.
        `status = 1` means the column has been anonymized and `status = 0`
        means it has not.
Returns
----------
None
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.info()
+-----------+--------+--------+
| Column | Status | Method |
+===========+========+========+
| name | 0 | |
+-----------+--------+--------+
| age | 0 | |
+-----------+--------+--------+
| birthdate | 0 | |
+-----------+--------+--------+
| salary | 0 | |
+-----------+--------+--------+
| web | 0 | |
+-----------+--------+--------+
| email | 0 | |
+-----------+--------+--------+
| ssn | 0 | |
+-----------+--------+--------+
'''
t = Texttable(150)
t.header(['Column', 'Status', 'Type', 'Method'])
for i in range(len(self.columns)):
column = self.columns[i]
if column in self.anonymized_columns:
status = 1
method = self._methods_applied[column]
else:
status = 0
method = ''
if column in self.numeric_columns:
dtype = 'numeric'
elif column in self.categorical_columns:
dtype = 'categorical'
elif column in self.datetime_columns:
dtype = 'datetime'
else:
dtype = str(self._df[column].dtype)
row = [column, status, dtype, method]
t.add_row(row)
print(t.draw())
def to_df(self):
'''
Convert dfAnonymizer object back to pandas DataFrame
Returns
----------
DataFrame object
Examples
----------
>>> from anonympy.pandas import dfAnonymizer
>>> from anonympy.pandas.utils_pandas import load_dataset
>>> df = load_dataset()
>>> anonym = dfAnonymizer(df)
>>> anonym.to_df()
name age ... email ssn
0 Bruce 33 ... <EMAIL> 343554334
1 Tony 48 ... <EMAIL> 656564664
'''
return self._df.copy()
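# --- Added usage sketch (illustrative, not part of the original module) ---
# Chains several of the methods defined above on one dfAnonymizer instance.
# `load_dataset` and the column names mirror the docstring examples; anything
# beyond that is an assumption.
if __name__ == "__main__":
    from anonympy.pandas.utils_pandas import load_dataset

    df = load_dataset()
    anonym = dfAnonymizer(df)
    anonym.numeric_rounding('salary')           # round numeric values in place
    anonym.categorical_email_masking('email')   # partially mask email strings
    anonym.column_suppression('ssn')            # drop the identifier column
    print(anonym.to_df())                       # back to a plain DataFrame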
|
[
"pandas.DataFrame",
"cape_privacy.pandas.transformations.DatePerturbation",
"numpy.random.seed",
"faker.Faker",
"faker.Faker.seed",
"anonympy.pandas.utils_pandas.get_datetime_columns",
"anonympy.pandas.utils_pandas.get_categorical_columns",
"anonympy.pandas.utils_pandas.get_numeric_columns",
"pandas.cut",
"cape_privacy.pandas.transformations.NumericPerturbation",
"sklearn.decomposition.PCA",
"pandas.Series",
"cape_privacy.pandas.transformations.Tokenizer",
"texttable.Texttable",
"cape_privacy.pandas.transformations.NumericRounding"
] |
[((2237, 2273), 'anonympy.pandas.utils_pandas.get_numeric_columns', '_utils.get_numeric_columns', (['self._df'], {}), '(self._df)\n', (2263, 2273), True, 'from anonympy.pandas import utils_pandas as _utils\n'), ((2309, 2349), 'anonympy.pandas.utils_pandas.get_categorical_columns', '_utils.get_categorical_columns', (['self._df'], {}), '(self._df)\n', (2339, 2349), True, 'from anonympy.pandas import utils_pandas as _utils\n'), ((2382, 2419), 'anonympy.pandas.utils_pandas.get_datetime_columns', '_utils.get_datetime_columns', (['self._df'], {}), '(self._df)\n', (2409, 2419), True, 'from anonympy.pandas import utils_pandas as _utils\n'), ((13962, 13978), 'faker.Faker.seed', 'Faker.seed', (['seed'], {}), '(seed)\n', (13972, 13978), False, 'from faker import Faker\n'), ((13994, 14014), 'faker.Faker', 'Faker', ([], {'locale': 'locale'}), '(locale=locale)\n', (13999, 14014), False, 'from faker import Faker\n'), ((22107, 22121), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (22119, 22121), True, 'import pandas as pd\n'), ((45869, 45885), 'faker.Faker.seed', 'Faker.seed', (['seed'], {}), '(seed)\n', (45879, 45885), False, 'from faker import Faker\n'), ((45901, 45921), 'faker.Faker', 'Faker', ([], {'locale': 'locale'}), '(locale=locale)\n', (45906, 45921), False, 'from faker import Faker\n'), ((54921, 54941), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (54935, 54941), True, 'import numpy as np\n'), ((58339, 58363), 'texttable.Texttable', 'Texttable', ([], {'max_width': '(150)'}), '(max_width=150)\n', (58348, 58363), False, 'from texttable import Texttable\n'), ((60062, 60076), 'texttable.Texttable', 'Texttable', (['(150)'], {}), '(150)\n', (60071, 60076), False, 'from texttable import Texttable\n'), ((25382, 25443), 'cape_privacy.pandas.transformations.NumericPerturbation', 'NumericPerturbation', ([], {'dtype': 'dtype', 'min': 'MIN', 'max': 'MAX', 'seed': 'seed'}), '(dtype=dtype, min=MIN, max=MAX, seed=seed)\n', (25401, 25443), False, 'from cape_privacy.pandas.transformations import NumericPerturbation\n'), ((26178, 26192), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (26190, 26192), True, 'import pandas as pd\n'), ((29307, 29373), 'cape_privacy.pandas.transformations.DatePerturbation', 'DatePerturbation', ([], {'frequency': 'frequency', 'min': 'MIN', 'max': 'MAX', 'seed': 'seed'}), '(frequency=frequency, min=MIN, max=MAX, seed=seed)\n', (29323, 29373), False, 'from cape_privacy.pandas.transformations import DatePerturbation\n'), ((30086, 30100), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (30098, 30100), True, 'import pandas as pd\n'), ((32784, 32834), 'cape_privacy.pandas.transformations.NumericRounding', 'NumericRounding', ([], {'dtype': 'dtype', 'precision': '(-precision)'}), '(dtype=dtype, precision=-precision)\n', (32799, 32834), False, 'from cape_privacy.pandas.transformations import NumericRounding\n'), ((33423, 33437), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (33435, 33437), True, 'import pandas as pd\n'), ((35844, 35863), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (35847, 35863), False, 'from sklearn.decomposition import PCA\n'), ((39436, 39483), 'cape_privacy.pandas.transformations.Tokenizer', 'Tokenizer', ([], {'max_token_len': 'max_token_len', 'key': 'key'}), '(max_token_len=max_token_len, key=key)\n', (39445, 39483), False, 'from cape_privacy.pandas.transformations import Tokenizer\n'), ((40058, 40072), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (40070, 40072), True, 
'import pandas as pd\n'), ((43539, 43553), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (43551, 43553), True, 'import pandas as pd\n'), ((46912, 46926), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (46924, 46926), True, 'import pandas as pd\n'), ((52186, 52235), 'pandas.cut', 'pd.cut', (['self._df[columns]'], {'bins': 'bins', 'precision': '(0)'}), '(self._df[columns], bins=bins, precision=0)\n', (52192, 52235), True, 'import pandas as pd\n'), ((52756, 52770), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (52768, 52770), True, 'import pandas as pd\n'), ((56095, 56109), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (56107, 56109), True, 'import pandas as pd\n'), ((9956, 9970), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9968, 9970), True, 'import pandas as pd\n'), ((18114, 18128), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (18126, 18128), True, 'import pandas as pd\n'), ((26304, 26365), 'cape_privacy.pandas.transformations.NumericPerturbation', 'NumericPerturbation', ([], {'dtype': 'dtype', 'min': 'MIN', 'max': 'MAX', 'seed': 'seed'}), '(dtype=dtype, min=MIN, max=MAX, seed=seed)\n', (26323, 26365), False, 'from cape_privacy.pandas.transformations import NumericPerturbation\n'), ((30161, 30227), 'cape_privacy.pandas.transformations.DatePerturbation', 'DatePerturbation', ([], {'frequency': 'frequency', 'min': 'MIN', 'max': 'MAX', 'seed': 'seed'}), '(frequency=frequency, min=MIN, max=MAX, seed=seed)\n', (30177, 30227), False, 'from cape_privacy.pandas.transformations import DatePerturbation\n'), ((33624, 33674), 'cape_privacy.pandas.transformations.NumericRounding', 'NumericRounding', ([], {'dtype': 'dtype', 'precision': '(-precision)'}), '(dtype=dtype, precision=-precision)\n', (33639, 33674), False, 'from cape_privacy.pandas.transformations import NumericRounding\n'), ((40136, 40183), 'cape_privacy.pandas.transformations.Tokenizer', 'Tokenizer', ([], {'max_token_len': 'max_token_len', 'key': 'key'}), '(max_token_len=max_token_len, key=key)\n', (40145, 40183), False, 'from cape_privacy.pandas.transformations import Tokenizer\n'), ((52829, 52877), 'pandas.cut', 'pd.cut', (['self._df[column]'], {'bins': 'bins', 'precision': '(0)'}), '(self._df[column], bins=bins, precision=0)\n', (52835, 52877), True, 'import pandas as pd\n'), ((18980, 18994), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (18992, 18994), True, 'import pandas as pd\n'), ((23190, 23222), 'pandas.Series', 'pd.Series', (['temp[temp.columns[0]]'], {}), '(temp[temp.columns[0]])\n', (23199, 23222), True, 'import pandas as pd\n'), ((7305, 7323), 'pandas.DataFrame', 'pd.DataFrame', (['temp'], {}), '(temp)\n', (7317, 7323), True, 'import pandas as pd\n'), ((7410, 7424), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7422, 7424), True, 'import pandas as pd\n'), ((12688, 12720), 'pandas.Series', 'pd.Series', (['temp[temp.columns[0]]'], {}), '(temp[temp.columns[0]])\n', (12697, 12720), True, 'import pandas as pd\n')]
|
import csv
import random
import re
import sys
import tqdm
import numpy as np
import torch
from torch.utils.data import TensorDataset
from transformers.tokenization_bert import BertTokenizer
def load_glove_txt(file_path="glove.840B.300d.txt"):
results = {}
num_file = sum([1 for i in open(file_path, "r", encoding='utf8')])
with open(file_path, 'r', encoding='utf8') as infile:
for line in tqdm.tqdm(infile, total=num_file):
data = line.strip().split(' ')
word = data[0]
results[word] = 1
return results
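# Added usage note (illustrative; the file path is an assumption): the dict
# only records vocabulary membership (word -> 1), which refind_sent below uses
# to keep words that have a GloVe vector, e.g.
#     g_dict = load_glove_txt("glove.840B.300d.txt")
#     'the' in g_dict   # True for any word present in the embedding file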
def clean_str(string):
# string = re.sub("[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub("\'s", " \'s", string)
string = re.sub("\'ve", " \'ve", string)
string = re.sub("n\'t", " n\'t", string)
string = re.sub("\'re", " \'re", string)
string = re.sub("\'d", " \'d", string)
string = re.sub("\'ll", " \'ll", string)
string = re.sub('"', " ", string)
string = re.sub("'", " ", string)
string = re.sub("`", " ", string)
string = re.sub(r"\\", " ", string)
string = re.sub(r"[\[\]<>/&#\^$%{}‘\.…*]", " ", string)
# string = re.sub(",", " , ", string)
# string = re.sub("!", " ! ", string)
# string = re.sub("\(", " \( ", string)
# string = re.sub("\)", " \) ", string)
# string = re.sub("\?", " \? ", string)
# string = re.sub("\\\?", "?", string)
# string = re.sub("\s{2,}", " ", string)
# string = re.sub("-", ' ', string)
return string.strip().split()
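# Added example (illustrative, not in the original file): clean_str splits off
# common contraction suffixes, strips quotes/backslashes and the symbol class
# above, then whitespace-tokenizes, e.g.
#     clean_str("The [quick] brown-fox... jumps!")
#     -> ['The', 'quick', 'brown-fox', 'jumps!']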
def shuffle_data(x, y):
idx = list(range(len(x)))
np.random.shuffle(idx)
new_x = []
new_y = []
for id_ in idx:
new_x.append(x[id_])
new_y.append(y[id_])
return new_x, new_y
def read_TREC(cv=None, scale_rate=1):
data = {}
def read(mode):
x, y = [], []
with open("data/TREC/" + mode + ".tsv", "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
for line in reader:
x.append(clean_str(line[0]))
y.append(line[1])
if mode == "train":
label2data = {}
for x_, y_ in zip(x, y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
x, y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = x, y
else:
data["test_x"], data["test_y"] = x, y
read("train")
read("test")
return data
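# Added usage note (assumes the data/TREC tsv files exist): each read_* helper
# returns a dict with a shuffled, per-label subsampled train split and the
# full test split, e.g.
#     data = read_TREC(scale_rate=0.5)   # keep ~50% of each label's examples
#     data["train_x"][0]                 # a tokenized sentence (list of words)
#     data["train_y"][0]                 # its label string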
def read_SST1(cv=None, scale_rate=1):
data = {}
def read(mode):
x, y = [], []
with open("data/SST1/" + mode + ".tsv", "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
for line in reader:
y.append(line[1])
x.append(clean_str(line[0]))
# x.append(line[0])
if mode == "train":
with open("data/SST1/stsa.fine.phrases.train", "r", encoding="utf-8", errors='ignore') as f:
for line in f:
y.append(line[0])
x.append(clean_str(line[2:]))
label2data = {}
for x_, y_ in zip(x, y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
x, y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = x, y
else:
data["test_x"], data["test_y"] = x, y
read("train")
read("test")
return data
def read_SST2(cv=None, scale_rate=1):
data = {}
def read(mode):
x, y = [], []
with open("data/SST2/" + mode + ".tsv", "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
for line in reader:
y.append(line[1])
x.append(clean_str(line[0]))
# x.append(line[0])
if mode == "train":
with open("data/SST2/stsa.binary.phrases.train", "r", encoding="utf-8", errors='ignore') as f:
for line in f:
y.append(line[0])
x.append(clean_str(line[2:]))
label2data = {}
for x_, y_ in zip(x, y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
x, y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = x, y
else:
data["test_x"], data["test_y"] = x, y
read("train")
read("test")
return data
def read_SUBJ(cv=0, scale_rate=1):
data = {}
x, y = [], []
with open("data/SUBJ/subj.all", "r", encoding="utf-8", errors='ignore') as f:
# reader = csv.reader(f, delimiter="\t", quotechar=None)
for line in f:
x.append(clean_str(line[2:]))
# x.append(line[0])
y.append(line[0])
idx = list(range(len(x)))
np.random.shuffle(idx)
test_index = cv # 0-9
train_x = []
train_y = []
test_x = []
test_y = []
for i, id_ in enumerate(idx):
index = i % 10
if index == test_index:
test_x.append(x[id_])
test_y.append(y[id_])
else:
train_x.append(x[id_])
train_y.append(y[id_])
label2data = {}
for x_, y_ in zip(train_x, train_y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
train_x, train_y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = train_x, train_y
data["test_x"], data["test_y"] = test_x, test_y
return data
def read_MR(cv=0, scale_rate=1):
data = {}
x, y = [], []
with open("data/MR/rt-polarity.pos", "r", encoding="utf-8") as f:
for line in f:
if line[-1] == "\n":
line = line[:-1]
x.append(clean_str(line))
y.append(1)
with open("data/MR/rt-polarity.neg", "r", encoding="utf-8") as f:
for line in f:
if line[-1] == "\n":
line = line[:-1]
x.append(clean_str(line))
y.append(0)
idx = list(range(len(x)))
np.random.shuffle(idx)
test_index = cv # 0-9
# dev_index = (cv+1)%10
train_x = []
train_y = []
test_x = []
test_y = []
for i, id_ in enumerate(idx):
index = i % 10
if index == test_index:
test_x.append(x[id_])
test_y.append(y[id_])
else:
train_x.append(x[id_])
train_y.append(y[id_])
label2data = {}
for x_, y_ in zip(train_x, train_y):
if y_ not in label2data:
label2data[y_] = [x_]
else:
label2data[y_].append(x_)
new_train_x = []
new_train_y = []
for y_ in label2data.keys():
train_idx = max(int(len(label2data[y_]) * scale_rate), 1)
for x_ in label2data[y_][:train_idx]:
new_train_x.append(x_)
new_train_y.append(y_)
train_x, train_y = shuffle_data(new_train_x, new_train_y)
data["train_x"], data["train_y"] = train_x, train_y
data["test_x"], data["test_y"] = test_x, test_y
return data
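# Added note on the cv argument (applies to read_SUBJ and read_MR above):
# after shuffling, every example at position i with i % 10 == cv is held out
# as the test split -- a single fold of 10-fold cross-validation, e.g.
#     data = read_MR(cv=0)   # fold 0 as test, remaining (scaled) folds as train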
def refind_sent(sent, g_dict):
new_sent = []
for word in sent:
if word in g_dict:
new_sent.append(word)
elif '-' in word:
for wd in word.split('-'):
new_sent.append(wd)
        elif r'\/' in word:
            for wd in word.split(r'\/'):
new_sent.append(wd)
elif word.lower() in g_dict:
new_sent.append(word.lower())
else:
continue
return new_sent
def preprocess_data(data, VOCAB_SIZE, MAX_SENT_LEN, dtype='train'):
x = []
for sent in data[dtype + "_x"]:
sent_tmp = [data['word_to_idx']["<BOS>"]]
for word in sent:
if len(sent_tmp) < MAX_SENT_LEN - 1:
sent_tmp.append(data['word_to_idx'][word])
sent_tmp.append(data['word_to_idx']["<EOS>"])
if len(sent_tmp) < MAX_SENT_LEN:
sent_tmp += [VOCAB_SIZE + 1] * (MAX_SENT_LEN - len(sent_tmp))
x.append(sent_tmp)
y = [data["classes"].index(c) for c in data[dtype + "_y"]]
x = torch.LongTensor(x)
y = torch.LongTensor(y)
return x, y
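# Added worked example (hypothetical vocabulary indices): preprocess_data
# prepends <BOS>, appends <EOS>, maps words through data['word_to_idx'] and
# pads with VOCAB_SIZE + 1 up to MAX_SENT_LEN.  With MAX_SENT_LEN = 6 and a
# two-word sentence mapped to indices [7, 3], one row of x becomes
#     [idx('<BOS>'), 7, 3, idx('<EOS>'), VOCAB_SIZE + 1, VOCAB_SIZE + 1]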
def load_dataset(options):
mod = sys.modules[__name__]
if options.classifier != 'BERT':
data = getattr(mod, f"read_{options.dataset}")(cv=options.cv, scale_rate=options.scale_rate)
g_dict = load_glove_txt()
for i in range(len(data['train_x'])):
data['train_x'][i] = refind_sent(data['train_x'][i], g_dict)
for i in range(len(data['test_x'])):
data['test_x'][i] = refind_sent(data['test_x'][i], g_dict)
data["vocab"] = sorted(
list(set([w for sent in data["train_x"] + data["test_x"] for w in sent] + ["<BOS>", "<EOS>"])))
data["classes"] = sorted(list(set(data["train_y"])))
data["word_to_idx"] = {w: i for i, w in enumerate(data["vocab"])}
data["idx_to_word"] = {i: w for i, w in enumerate(data["vocab"])}
options.VOCAB_SIZE = len(data["vocab"])
if not hasattr(options, 'MAX_SENT_LEN'):
options.MAX_SENT_LEN = max([len(sent) for sent in data["train_x"] + data["test_x"]])
options.CLASS_SIZE = len(data["classes"])
train_x, train_y = preprocess_data(data, options.VOCAB_SIZE, options.MAX_SENT_LEN, 'train')
train_set = TensorDataset(train_x, train_y)
test_x, test_y = preprocess_data(data, options.VOCAB_SIZE, options.MAX_SENT_LEN, 'test')
test_set = TensorDataset(test_x, test_y)
return train_set, test_set, data
else:
data = {}
dset = getattr(mod, f"{options.dataset}_Processor")(cv=options.cv)
train_examples = dset.train_examples
test_examples = dset.test_examples
data['tokenizer'] = BertTokenizer(vocab_file='./bert-base-uncased/vocab.txt'
, do_basic_tokenize=True)
data["classes"] = sorted(list(set([z.label for z in train_examples])))
options.CLASS_SIZE = len(data["classes"])
options.VOCAB_SIZE = len(data['tokenizer'].vocab)
if not hasattr(options, 'MAX_SENT_LEN'):
setattr(options, 'MAX_SENT_LEN',
max([len(example.text_a.split(' ')) for example in train_examples + test_examples]) + 2)
# print("max",max([len(example.text_a.split(' ')) for example in train_examples + test_examples]))
train_set = _make_data_loader(train_examples, data["classes"], data['tokenizer'], options.MAX_SENT_LEN)
test_set = _make_data_loader(test_examples, data["classes"], data['tokenizer'], options.MAX_SENT_LEN)
return train_set, test_set, data
def _make_data_loader(examples, label_list, tokenizer, MAX_SEQ_LENGTH):
all_features = _convert_examples_to_features(
examples=examples,
label_list=label_list,
max_seq_length=MAX_SEQ_LENGTH,
tokenizer=tokenizer,
output_mode='classification')
all_input_ids = torch.tensor(
[f.input_ids for f in all_features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in all_features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in all_features], dtype=torch.long)
all_label_ids = torch.tensor(
[f.label_id for f in all_features], dtype=torch.long)
all_ids = torch.arange(len(examples))
dataset = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_ids)
return dataset
def _convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
# print(len(input_ids),len(input_mask),len(segment_ids),max_seq_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal
# percent of tokens from each, since if one sequence is very short then each
# token that's truncated likely contains more information than a longer
# sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
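# Added toy example: with max_length = 5,
#     tokens_a = ['a', 'b', 'c', 'd'], tokens_b = ['x', 'y']
# the loop pops from the longer list until len(a) + len(b) <= 5, leaving
#     tokens_a = ['a', 'b', 'c'] and tokens_b = ['x', 'y'].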
def csv_reader(filename):
print('read file:', filename)
f = open(filename, 'r', encoding='utf8')
reader = csv.reader(f, delimiter="\t", quotechar=None)
return reader
class InputExample:
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def __getitem__(self, item):
return [self.input_ids, self.input_mask,
self.segment_ids, self.label_id][item]
class DatasetProcessor:
def get_train_examples(self):
raise NotImplementedError
def get_dev_examples(self):
raise NotImplementedError
def get_test_examples(self):
raise NotImplementedError
def get_labels(self):
raise NotImplementedError
class SST1_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv=0):
train_file = "./data/SST1/train.tsv"
test_file = "./data/SST1/test.tsv"
print("processing train_file{},test_file".format(train_file, test_file))
self._train_set, self._test_set = csv_reader(train_file), csv_reader(test_file)
self.train_examples, self.test_examples = self.get_train_examples(), self.get_test_examples()
x, y = [], []
with open("data/SST1/stsa.fine.phrases.train", "r", encoding="utf-8", errors='ignore') as f:
for line in f:
y.append(line[0])
x.append(line[2:])
self.train_examples_extra = self._create_examples(zip(x, y), "train")
self.train_examples = self.train_examples + self.train_examples_extra
def get_train_examples(self):
"""See base class."""
examples = self._create_examples(self._train_set, "train")
print('getting train examples,len = ', len(examples))
return examples
def get_test_examples(self):
"""See base class."""
examples = self._create_examples(self._test_set, "test")
print('getting test examples,len = ', len(examples))
return examples
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
label=data[1]
))
# return examples
return examples
class SST2_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv=0):
train_file = "./data/SST2/train.tsv"
test_file = "./data/SST2/test.tsv"
x, y = [], []
with open("data/SST2/stsa.binary.phrases.train", "r", encoding="utf-8", errors='ignore') as f:
for line in f:
y.append(line[0])
x.append(line[2:])
self.train_examples_extra = self._create_examples(zip(x, y), "train")
print("processing train_file{},test_file".format(train_file, test_file))
self._train_set, self._test_set = csv_reader(train_file), csv_reader(test_file)
self.train_examples, self.test_examples = self.get_train_examples(), self.get_test_examples()
self.train_examples = self.train_examples + self.train_examples_extra
def get_train_examples(self):
"""See base class."""
examples = self._create_examples(self._train_set, "train")
print('getting train examples,len = ', len(examples))
return examples
def get_test_examples(self):
"""See base class."""
examples = self._create_examples(self._test_set, "test")
print('getting test examples,len = ', len(examples))
return examples
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
label=data[1]
))
# return examples
return examples
class TREC_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv=0):
train_file = "./data/TREC/train.tsv"
test_file = "./data/TREC/test.tsv"
print("processing train_file{},test_file,{}".format(train_file, test_file))
self._train_set, self._test_set = csv_reader(train_file), csv_reader(test_file)
self.train_examples, self.test_examples = self.get_train_examples(), self.get_test_examples()
def get_train_examples(self):
"""See base class."""
examples = self._create_examples(self._train_set, "train")
print('getting train examples,len = ', len(examples))
return examples
def get_test_examples(self):
"""See base class."""
examples = self._create_examples(self._test_set, "test")
print('getting test examples,len = ', len(examples))
return examples
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
label=data[1]
))
# return examples
return examples
class SUBJ_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv):
all_file = "./data/SUBJ/data_all.tsv"
print("processing all_file{}".format(all_file))
self._all_set = csv_reader(all_file)
self.train_examples, self.test_examples = self.get_train_examples(cv=cv)
def _read_examples(self):
examples = self._create_examples(self._all_set, "all")
return examples
def get_train_examples(self, cv=0):
"""See base class."""
examples = self._read_examples()
idx = list(range(len(examples)))
np.random.shuffle(idx)
test_index = cv
test_example = []
train_example = []
for i, id_ in enumerate(idx):
index = i % 10
if index == test_index:
test_example.append(examples[id_])
else:
train_example.append(examples[id_])
return train_example, test_example
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
label=data[1]
))
return examples
# return shuffle_data(examples)
class MR_Processor(DatasetProcessor):
"""Processor for the SST-5 data set."""
def __init__(self, cv=0):
pos_file = "./data/MR/rt-polarity.pos"
neg_file = "./data/MR/rt-polarity.neg"
print("processing pos_file:{},neg_file:{}".format(pos_file, neg_file))
self._pos_set, self._neg_set = csv_reader(pos_file), csv_reader(neg_file)
self.train_examples, self.test_examples = self.get_train_examples(cv=cv)
def _read_examples(self):
pos_examples = self._create_examples(self._pos_set, "pos")
neg_examples = self._create_examples(self._neg_set, "neg")
examples = []
for ex in pos_examples:
examples.append(InputExample(
guid=ex.guid,
text_a=ex.text_a,
label=1
))
for ex in neg_examples:
examples.append(InputExample(
guid=ex.guid,
text_a=ex.text_a,
label=0
))
return examples
def get_train_examples(self, cv=0):
"""See base class."""
examples = self._read_examples()
idx = list(range(len(examples)))
np.random.shuffle(idx)
test_index = cv
test_example = []
train_example = []
for i, id_ in enumerate(idx):
index = i % 10
if index == test_index:
test_example.append(examples[id_])
else:
train_example.append(examples[id_])
return train_example, test_example
def get_labels(self):
"""See base class."""
label_set = set()
for example in self.train_examples:
label_set.add(example.label)
return sorted(list(label_set))
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, data) in enumerate(dataset):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(
guid=guid,
text_a=data[0],
))
return examples
if __name__ == "__main__":
processor = TREC_Processor(cv=2)
print(processor.get_labels())
train = processor.train_examples
for x in train:
print(x.text_a, x.label)
break
# class OPT:
# def __init__(self):
# self.dataset="SUBJ"
# self.cv = "0"
# self.scale_rate=1
# self.MAX_SENT_LEN=-1
# opt = OPT()
# dset = getattr(sys.modules[__name__],'load_dataset')(opt)
# for x in dset[0]:
# print(x)
# break
# from torch.utils.data import DataLoader
# train_loader = DataLoader(dset[0], batch_size=50, shuffle=True)
|
[
"tqdm.tqdm",
"transformers.tokenization_bert.BertTokenizer",
"numpy.random.shuffle",
"csv.reader",
"torch.LongTensor",
"torch.utils.data.TensorDataset",
"re.sub",
"torch.tensor"
] |
[((663, 690), 're.sub', 're.sub', (['"""\'s"""', '""" \'s"""', 'string'], {}), '("\'s", " \'s", string)\n', (669, 690), False, 'import re\n'), ((706, 735), 're.sub', 're.sub', (['"""\'ve"""', '""" \'ve"""', 'string'], {}), '("\'ve", " \'ve", string)\n', (712, 735), False, 'import re\n'), ((751, 780), 're.sub', 're.sub', (['"""n\'t"""', '""" n\'t"""', 'string'], {}), '("n\'t", " n\'t", string)\n', (757, 780), False, 'import re\n'), ((796, 825), 're.sub', 're.sub', (['"""\'re"""', '""" \'re"""', 'string'], {}), '("\'re", " \'re", string)\n', (802, 825), False, 'import re\n'), ((841, 868), 're.sub', 're.sub', (['"""\'d"""', '""" \'d"""', 'string'], {}), '("\'d", " \'d", string)\n', (847, 868), False, 'import re\n'), ((884, 913), 're.sub', 're.sub', (['"""\'ll"""', '""" \'ll"""', 'string'], {}), '("\'ll", " \'ll", string)\n', (890, 913), False, 'import re\n'), ((929, 953), 're.sub', 're.sub', (['"""\\""""', '""" """', 'string'], {}), '(\'"\', \' \', string)\n', (935, 953), False, 'import re\n'), ((967, 991), 're.sub', 're.sub', (['"""\'"""', '""" """', 'string'], {}), '("\'", \' \', string)\n', (973, 991), False, 'import re\n'), ((1005, 1029), 're.sub', 're.sub', (['"""`"""', '""" """', 'string'], {}), "('`', ' ', string)\n", (1011, 1029), False, 'import re\n'), ((1043, 1070), 're.sub', 're.sub', (['"""\\\\\\\\"""', '""" """', 'string'], {}), "('\\\\\\\\', ' ', string)\n", (1049, 1070), False, 'import re\n'), ((1083, 1132), 're.sub', 're.sub', (['"""[\\\\[\\\\]<>/&#\\\\^$%{}‘\\\\.…*]"""', '""" """', 'string'], {}), "('[\\\\[\\\\]<>/&#\\\\^$%{}‘\\\\.…*]', ' ', string)\n", (1089, 1132), False, 'import re\n'), ((1568, 1590), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1585, 1590), True, 'import numpy as np\n'), ((6033, 6055), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (6050, 6055), True, 'import numpy as np\n'), ((7559, 7581), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (7576, 7581), True, 'import numpy as np\n'), ((9609, 9628), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (9625, 9628), False, 'import torch\n'), ((9637, 9656), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (9653, 9656), False, 'import torch\n'), ((12490, 12557), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in all_features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in all_features], dtype=torch.long)\n', (12502, 12557), False, 'import torch\n'), ((12588, 12656), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in all_features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in all_features], dtype=torch.long)\n', (12600, 12656), False, 'import torch\n'), ((12688, 12757), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in all_features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in all_features], dtype=torch.long)\n', (12700, 12757), False, 'import torch\n'), ((12787, 12853), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in all_features]'], {'dtype': 'torch.long'}), '([f.label_id for f in all_features], dtype=torch.long)\n', (12799, 12853), False, 'import torch\n'), ((12920, 13009), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_label_ids', 'all_ids'], {}), '(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,\n all_ids)\n', (12933, 13009), False, 'from torch.utils.data import TensorDataset\n'), ((16092, 16137), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'None'}), 
"(f, delimiter='\\t', quotechar=None)\n", (16102, 16137), False, 'import csv\n'), ((411, 444), 'tqdm.tqdm', 'tqdm.tqdm', (['infile'], {'total': 'num_file'}), '(infile, total=num_file)\n', (420, 444), False, 'import tqdm\n'), ((10854, 10885), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (10867, 10885), False, 'from torch.utils.data import TensorDataset\n'), ((11002, 11031), 'torch.utils.data.TensorDataset', 'TensorDataset', (['test_x', 'test_y'], {}), '(test_x, test_y)\n', (11015, 11031), False, 'from torch.utils.data import TensorDataset\n'), ((11292, 11378), 'transformers.tokenization_bert.BertTokenizer', 'BertTokenizer', ([], {'vocab_file': '"""./bert-base-uncased/vocab.txt"""', 'do_basic_tokenize': '(True)'}), "(vocab_file='./bert-base-uncased/vocab.txt', do_basic_tokenize\n =True)\n", (11305, 11378), False, 'from transformers.tokenization_bert import BertTokenizer\n'), ((23130, 23152), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (23147, 23152), True, 'import numpy as np\n'), ((25313, 25335), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (25330, 25335), True, 'import numpy as np\n'), ((1918, 1963), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'None'}), "(f, delimiter='\\t', quotechar=None)\n", (1928, 1963), False, 'import csv\n'), ((3057, 3102), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'None'}), "(f, delimiter='\\t', quotechar=None)\n", (3067, 3102), False, 'import csv\n'), ((4453, 4498), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""', 'quotechar': 'None'}), "(f, delimiter='\\t', quotechar=None)\n", (4463, 4498), False, 'import csv\n')]
|
import numpy as np
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from implem.utils import device
class SimpleDataset(torch.utils.data.Dataset):
def __init__(self, data, offset=1, start=None, end=None):
super(SimpleDataset, self).__init__()
assert len(data.shape) >= 2 #[T,*D], where D can be [C,W,H] etc.
self.T = len(data)
self.data = data
self.offset = offset
self.start = 0 if start is None else start
self.end = self.T-np.asarray(self.offset).max() if end is None else end
assert self.end > self.start
self.idx = torch.arange(self.start, self.end, requires_grad=False, device='cpu')
def __getitem__(self, index):
""" Generate one batch of data """
x = self.data[self.idx[index]].reshape(*self.data.shape[1:])
y = self.data[self.idx[index]+self.offset].reshape(len(self.offset), *self.data.shape[1:])
return x,y
def __len__(self):
return len(self.idx)
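# Added usage sketch (shapes are assumptions): SimpleDataset pairs each time
# step with the steps `offset` ahead; offset is expected to be array-like here
# (DataModule below turns an int into np.arange(offset)).
#     data = torch.randn(100, 3)                        # [T, D]
#     ds = SimpleDataset(data, offset=np.array([1, 2]))
#     x, y = ds[0]                                      # x: [3], y: [2, 3]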
class MultiTrialDataset(torch.utils.data.Dataset):
def __init__(self, data, offset=1, start=None, end=None):
super(MultiTrialDataset, self).__init__()
assert len(data.shape) >= 3 #[N,T,*D], where D can be [C,W,H] etc.
self.N, self.T = data.shape[:2]
self.data = data.reshape(-1, *data.shape[2:]) #[NT,*D]
self.offset = offset
self.start = 0 if start is None else start
self.end = self.T-np.asarray(self.offset).max() if end is None else end
assert self.end > self.start
idx = torch.arange(self.start, self.end, requires_grad=False, device='cpu')
idx = [idx for j in range(self.N)]
self.idx = torch.cat([j*self.T + idx[j] for j in range(len(idx))])
def __getitem__(self, index):
""" Generate one batch of data """
x = self.data[self.idx[index]].reshape(*self.data.shape[1:])
y = self.data[self.idx[index]+self.offset].reshape(*self.data.shape[1:])
return x,y
def __len__(self):
return len(self.idx)
class MultiStepMultiTrialDataset(MultiTrialDataset):
def __init__(self, data, offset=1, start=None, end=None):
super(MultiStepMultiTrialDataset, self).__init__(data=data, offset=offset, start=start, end=end)
        self.offset = torch.as_tensor(np.asarray(offset, dtype=int).reshape(1,-1), device='cpu')
def __getitem__(self, index):
""" Generate one batch of data """
io = (self.idx[index].reshape(-1,1) + self.offset.reshape(1,-1)).flatten()
x = self.data[self.idx[index]].reshape(*self.data.shape[1:])
y = self.data[io].reshape(np.prod(self.offset.shape), *self.data.shape[1:])
return x,y
class DataModule(pl.LightningDataModule):
    def __init__(self, data, train_valid_split: float = 0.9,
batch_size: int = 2, offset: int = 1, Dataset=SimpleDataset,
**kwargs):
super().__init__()
self.data = data
self.Dataset = Dataset
self.batch_size = batch_size
self.offset = offset if isinstance(offset, np.ndarray) else np.arange(offset)
self.num_workers = 0
assert 0. < train_valid_split and train_valid_split <= 1.
self.train_valid_split = train_valid_split
def setup(self, stage=None):
if stage == 'fit' or stage is None:
split_index = int(len(self.data) * self.train_valid_split)
self.train_data = self.Dataset(data = self.data[:split_index], offset = self.offset)
self.valid_data = self.Dataset(data = self.data[split_index:], offset = self.offset)
def train_dataloader(self):
return DataLoader(self.train_data, batch_size=self.batch_size, num_workers=self.num_workers,
shuffle=True, generator=torch.Generator(device=device))
def val_dataloader(self):
return DataLoader(self.valid_data, batch_size=self.batch_size, num_workers=self.num_workers,
shuffle=False, generator=torch.Generator(device=device))
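# Added usage sketch (assumes `device` imported from implem.utils is valid here
# and a [T, D]-shaped tensor as input):
#     data = torch.randn(1000, 4)
#     dm = DataModule(data, batch_size=8, offset=3)   # offset -> np.arange(3)
#     dm.setup('fit')
#     x, y = next(iter(dm.train_dataloader()))        # x: [8, 4], y: [8, 3, 4]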
|
[
"numpy.asarray",
"numpy.prod",
"numpy.arange",
"torch.arange",
"torch.Generator"
] |
[((653, 722), 'torch.arange', 'torch.arange', (['self.start', 'self.end'], {'requires_grad': '(False)', 'device': '"""cpu"""'}), "(self.start, self.end, requires_grad=False, device='cpu')\n", (665, 722), False, 'import torch\n'), ((1611, 1680), 'torch.arange', 'torch.arange', (['self.start', 'self.end'], {'requires_grad': '(False)', 'device': '"""cpu"""'}), "(self.start, self.end, requires_grad=False, device='cpu')\n", (1623, 1680), False, 'import torch\n'), ((2689, 2715), 'numpy.prod', 'np.prod', (['self.offset.shape'], {}), '(self.offset.shape)\n', (2696, 2715), True, 'import numpy as np\n'), ((3164, 3181), 'numpy.arange', 'np.arange', (['offset'], {}), '(offset)\n', (3173, 3181), True, 'import numpy as np\n'), ((3857, 3887), 'torch.Generator', 'torch.Generator', ([], {'device': 'device'}), '(device=device)\n', (3872, 3887), False, 'import torch\n'), ((4072, 4102), 'torch.Generator', 'torch.Generator', ([], {'device': 'device'}), '(device=device)\n', (4087, 4102), False, 'import torch\n'), ((2363, 2395), 'numpy.asarray', 'np.asarray', (['offset'], {'dtype': 'np.int'}), '(offset, dtype=np.int)\n', (2373, 2395), True, 'import numpy as np\n'), ((542, 565), 'numpy.asarray', 'np.asarray', (['self.offset'], {}), '(self.offset)\n', (552, 565), True, 'import numpy as np\n'), ((1505, 1528), 'numpy.asarray', 'np.asarray', (['self.offset'], {}), '(self.offset)\n', (1515, 1528), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 6 23:59:38 2021
@author: <NAME>
"""
# Adding Salt and Pepper Noise to image
import cv2 as cv
import numpy as np
import random
# Adding Gaussian noise
def gaussian_noise(image):
    row, col = image.shape
mean = 0
var = 0.1
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col))
gauss = gauss.reshape(row,col)
gauss_noisy = image + gauss
return gauss_noisy
def salt_and_pepper_noise(image):
# Getting the dimensions of the image
    row, col = image.shape
# Randomly pick some pixels in the
# image for coloring them white
# Pick a random number between 300 and 10000
number_of_pixels = random.randint(300, 10000)
for i in range(number_of_pixels):
# Pick a random y coordinate
y_coord=random.randint(0, row - 1)
# Pick a random x coordinate
x_coord=random.randint(0, col - 1)
# Color that pixel to white
        image[y_coord][x_coord] = 255
# Randomly pick some pixels in
# the image for coloring them black
# Pick a random number between 300 and 10000
number_of_pixels = random.randint(300 , 10000)
for i in range(number_of_pixels):
# Pick a random y coordinate
y_coord=random.randint(0, row - 1)
# Pick a random x coordinate
x_coord=random.randint(0, col - 1)
# Color that pixel to black
        image[y_coord][x_coord] = 0
    return image
img = cv.imread('Lenna.jpg', 0)
gn = gaussian_noise(img)
snp = salt_and_pepper_noise(img)
|
[
"cv2.imread",
"random.randint",
"numpy.random.normal"
] |
[((1586, 1611), 'cv2.imread', 'cv.imread', (['"""Lenna.jpg"""', '(0)'], {}), "('Lenna.jpg', 0)\n", (1595, 1611), True, 'import cv2 as cv\n'), ((338, 379), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', '(row, col)'], {}), '(mean, sigma, (row, col))\n', (354, 379), True, 'import numpy as np\n'), ((733, 759), 'random.randint', 'random.randint', (['(300)', '(10000)'], {}), '(300, 10000)\n', (747, 759), False, 'import random\n'), ((1219, 1245), 'random.randint', 'random.randint', (['(300)', '(10000)'], {}), '(300, 10000)\n', (1233, 1245), False, 'import random\n'), ((860, 886), 'random.randint', 'random.randint', (['(0)', '(row - 1)'], {}), '(0, row - 1)\n', (874, 886), False, 'import random\n'), ((951, 977), 'random.randint', 'random.randint', (['(0)', '(col - 1)'], {}), '(0, col - 1)\n', (965, 977), False, 'import random\n'), ((1347, 1373), 'random.randint', 'random.randint', (['(0)', '(row - 1)'], {}), '(0, row - 1)\n', (1361, 1373), False, 'import random\n'), ((1438, 1464), 'random.randint', 'random.randint', (['(0)', '(col - 1)'], {}), '(0, col - 1)\n', (1452, 1464), False, 'import random\n')]
|
import os.path
from unittest import TestCase
import numpy as np
from aspire.utils import (
Rotation,
crop_pad_2d,
get_aligned_rotations,
grid_2d,
grid_3d,
register_rotations,
uniform_random_angles,
)
DATA_DIR = os.path.join(os.path.dirname(__file__), "saved_test_data")
class UtilsTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testGrid2d(self):
# Note these reference files were created using Matlab compat grid indexing.
grid2d = grid_2d(8, indexing="xy")
self.assertTrue(
np.allclose(grid2d["x"], np.load(os.path.join(DATA_DIR, "grid2d_8_x.npy")))
)
self.assertTrue(
np.allclose(grid2d["y"], np.load(os.path.join(DATA_DIR, "grid2d_8_y.npy")))
)
self.assertTrue(
np.allclose(grid2d["r"], np.load(os.path.join(DATA_DIR, "grid2d_8_r.npy")))
)
self.assertTrue(
np.allclose(
grid2d["phi"], np.load(os.path.join(DATA_DIR, "grid2d_8_phi.npy"))
)
)
def testGrid3d(self):
# Note these reference files were created using Matlab compat grid indexing.
grid3d = grid_3d(8, indexing="xyz")
self.assertTrue(
np.allclose(grid3d["x"], np.load(os.path.join(DATA_DIR, "grid3d_8_x.npy")))
)
self.assertTrue(
np.allclose(grid3d["y"], np.load(os.path.join(DATA_DIR, "grid3d_8_y.npy")))
)
self.assertTrue(
np.allclose(grid3d["z"], np.load(os.path.join(DATA_DIR, "grid3d_8_z.npy")))
)
self.assertTrue(
np.allclose(grid3d["r"], np.load(os.path.join(DATA_DIR, "grid3d_8_r.npy")))
)
self.assertTrue(
np.allclose(
grid3d["phi"], np.load(os.path.join(DATA_DIR, "grid3d_8_phi.npy"))
)
)
self.assertTrue(
np.allclose(
grid3d["theta"], np.load(os.path.join(DATA_DIR, "grid3d_8_theta.npy"))
)
)
def testRegisterRots(self):
angles = uniform_random_angles(32, seed=0)
rots_ref = Rotation.from_euler(angles).matrices
q_ang = [[np.pi / 4, np.pi / 4, np.pi / 4]]
q_mat = Rotation.from_euler(q_ang).matrices[0]
flag = 0
regrots_ref = get_aligned_rotations(rots_ref, q_mat, flag)
q_mat_est, flag_est = register_rotations(rots_ref, regrots_ref)
self.assertTrue(np.allclose(flag_est, flag) and np.allclose(q_mat_est, q_mat))
def testSquareCrop2D(self):
# Test even/odd cases based on the convention that the center of a sequence of length n
# is (n+1)/2 if n is odd and n/2 + 1 if even.
# Cropping is done to keep the center of the sequence the same value before and after.
# Therefore the following apply:
# Cropping even to odd will result in the 0-index (beginning)
# of the sequence being chopped off (x marks the center, ~ marks deleted data):
# ---x-- => ~--x--
# Cropping odd to even will result in the -1-index (end)
# of the sequence being chopped off:
# ---x--- => ---x--~
# even to even
a = np.diag(np.arange(8))
test_a = np.diag(np.arange(1, 7))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 6)))
# even to odd
# the extra row/column cut off are the top and left
# due to the centering convention
a = np.diag(np.arange(8))
test_a = np.diag(np.arange(1, 8))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 7)))
# odd to odd
a = np.diag(np.arange(9))
test_a = np.diag(np.arange(1, 8))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 7)))
# odd to even
# the extra row/column cut off are the bottom and right
# due to the centering convention
a = np.diag(np.arange(9))
test_a = np.diag(np.arange(8))
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 8)))
def testSquarePad2D(self):
# Test even/odd cases based on the convention that the center of a sequence of length n
# is (n+1)/2 if n is odd and n/2 + 1 if even.
# Padding is done to keep the center of the sequence the same value before and after.
# Therefore the following apply:
# Padding from even to odd results in the spare padding being added to the -1-index (end)
# of the sequence (x represents the center, + represents padding):
# ---x-- => ---x--+
# Padding from odd to even results in the spare padding being added to the 0-index (beginning)
# of the sequence:
# --x-- => +--x--
# even to even
a = np.diag(np.arange(1, 9))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 0])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 10)))
# even to odd
# the extra padding is to the bottom and right
# due to the centering convention
a = np.diag(np.arange(1, 9))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 11)))
# odd to odd
a = np.diag(np.arange(1, 10))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 11)))
# odd to even
# the extra padding is to the top and left
# due to the centering convention
a = np.diag(np.arange(1, 10))
test_a = np.diag([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertTrue(np.array_equal(test_a, crop_pad_2d(a, 10)))
def testRectCrop2D(self):
# Additional sanity checks for rectangular cropping case
# 12x10 -> 10x10
a = np.diag(np.arange(1, 11))
# augment to 12 rows
aug = np.vstack([a, np.zeros(10)])
aug = np.vstack([np.zeros(10), aug])
# make sure the top and bottom rows are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 10)))
# 10x12 -> 10x10
a = np.diag(np.arange(1, 11))
# augment to 12 columns
aug = np.column_stack([a, np.zeros(10)])
aug = np.column_stack([np.zeros(10), aug])
# make sure the left and right columns are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 10)))
# 9x7 -> 7x7
a = np.diag(np.arange(1, 8))
# augment to 9 rows
aug = np.vstack([a, np.zeros(7)])
aug = np.vstack([np.zeros(7), aug])
# make sure the top and bottom rows are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 7)))
# 7x9 -> 7x7
a = np.diag(np.arange(1, 8))
# augment to 9 columns
aug = np.column_stack([a, np.zeros(7)])
aug = np.column_stack([np.zeros(7), aug])
# make sure the left and right columns are stripped
self.assertTrue(np.array_equal(a, crop_pad_2d(aug, 7)))
def testRectPad2D(self):
# Additional sanity checks for rectangular padding case
# 12x10 -> 12x12
a = np.diag(np.arange(1, 11))
# augment to 12 rows
aug = np.vstack([a, np.zeros(10)])
aug = np.vstack([np.zeros(10), aug])
# expected result
padded = np.column_stack([aug, np.zeros(12)])
padded = np.column_stack([np.zeros(12), padded])
# make sure columns of fill value (0) are added to the
# left and right
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 12)))
# 10x12 -> 12x12
a = np.diag(np.arange(1, 11))
# augment to 12 columns
aug = np.column_stack([a, np.zeros(10)])
aug = np.column_stack([np.zeros(10), aug])
# expected result
padded = np.vstack([aug, np.zeros(12)])
padded = np.vstack([np.zeros(12), padded])
# make sure rows of fill value (0) are added to the
# top and bottom
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 12)))
# 9x7 -> 9x9
a = np.diag(np.arange(1, 8))
# augment to 9 rows
aug = np.vstack([a, np.zeros(7)])
aug = np.vstack([np.zeros(7), aug])
# expected result
padded = np.column_stack([aug, np.zeros(9)])
padded = np.column_stack([np.zeros(9), padded])
# make sure columns of fill value (0) are added to the
# left and right
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 9)))
# 7x9 -> 9x9
a = np.diag(np.arange(1, 8))
# augment to 9 columns
aug = np.column_stack([a, np.zeros(7)])
aug = np.column_stack([np.zeros(7), aug])
# expected result
padded = np.vstack([aug, np.zeros(9)])
padded = np.vstack([np.zeros(9), padded])
# make sure rows of fill value (0) are added to the
# top and bottom
self.assertTrue(np.array_equal(padded, crop_pad_2d(aug, 9)))
def testCropPad2DError(self):
with self.assertRaises(ValueError) as e:
_ = crop_pad_2d(np.zeros((6, 10)), 8)
self.assertTrue(
"Cannot crop and pad an image at the same time.", str(e.exception)
)
def testCrop2DDtype(self):
# crop_pad_2d must return an array of the same dtype it was given
# in particular, because the method is used for Fourier downsampling
# methods involving cropping complex arrays
self.assertEqual(
crop_pad_2d(np.eye(10).astype("complex"), 5).dtype, np.dtype("complex128")
)
def testCrop2DFillValue(self):
# make sure the fill value is as expected
# we are padding from an odd to an even dimension
# so the padded column is added to the left
a = np.ones((4, 3))
b = crop_pad_2d(a, 4, fill_value=-1)
self.assertTrue(np.array_equal(b[:, 0], np.array([-1, -1, -1, -1])))
|
[
"aspire.utils.Rotation.from_euler",
"aspire.utils.grid_2d",
"numpy.eye",
"aspire.utils.get_aligned_rotations",
"numpy.allclose",
"aspire.utils.crop_pad_2d",
"numpy.dtype",
"numpy.ones",
"numpy.zeros",
"aspire.utils.uniform_random_angles",
"numpy.arange",
"numpy.array",
"aspire.utils.register_rotations",
"numpy.diag",
"aspire.utils.grid_3d"
] |
[((535, 560), 'aspire.utils.grid_2d', 'grid_2d', (['(8)'], {'indexing': '"""xy"""'}), "(8, indexing='xy')\n", (542, 560), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((1216, 1242), 'aspire.utils.grid_3d', 'grid_3d', (['(8)'], {'indexing': '"""xyz"""'}), "(8, indexing='xyz')\n", (1223, 1242), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((2103, 2136), 'aspire.utils.uniform_random_angles', 'uniform_random_angles', (['(32)'], {'seed': '(0)'}), '(32, seed=0)\n', (2124, 2136), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((2340, 2384), 'aspire.utils.get_aligned_rotations', 'get_aligned_rotations', (['rots_ref', 'q_mat', 'flag'], {}), '(rots_ref, q_mat, flag)\n', (2361, 2384), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((2415, 2456), 'aspire.utils.register_rotations', 'register_rotations', (['rots_ref', 'regrots_ref'], {}), '(rots_ref, regrots_ref)\n', (2433, 2456), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((4809, 4848), 'numpy.diag', 'np.diag', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 0]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 0])\n', (4816, 4848), True, 'import numpy as np\n'), ((5091, 5133), 'numpy.diag', 'np.diag', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0])\n', (5098, 5133), True, 'import numpy as np\n'), ((5279, 5321), 'numpy.diag', 'np.diag', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0])\n', (5286, 5321), True, 'import numpy as np\n'), ((5561, 5600), 'numpy.diag', 'np.diag', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (5568, 5600), True, 'import numpy as np\n'), ((9797, 9812), 'numpy.ones', 'np.ones', (['(4, 3)'], {}), '((4, 3))\n', (9804, 9812), True, 'import numpy as np\n'), ((9825, 9857), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['a', '(4)'], {'fill_value': '(-1)'}), '(a, 4, fill_value=-1)\n', (9836, 9857), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((2156, 2183), 'aspire.utils.Rotation.from_euler', 'Rotation.from_euler', (['angles'], {}), '(angles)\n', (2175, 2183), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((3232, 3244), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (3241, 3244), True, 'import numpy as np\n'), ((3271, 3286), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (3280, 3286), True, 'import numpy as np\n'), ((3500, 3512), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (3509, 3512), True, 'import numpy as np\n'), ((3539, 3554), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (3548, 3554), True, 'import numpy as np\n'), ((3665, 3677), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (3674, 3677), True, 'import numpy as np\n'), ((3704, 3719), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (3713, 3719), True, 'import numpy as np\n'), ((3937, 3949), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (3946, 3949), True, 
'import numpy as np\n'), ((3976, 3988), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (3985, 3988), True, 'import numpy as np\n'), ((4775, 4790), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (4784, 4790), True, 'import numpy as np\n'), ((5057, 5072), 'numpy.arange', 'np.arange', (['(1)', '(9)'], {}), '(1, 9)\n', (5066, 5072), True, 'import numpy as np\n'), ((5244, 5260), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (5253, 5260), True, 'import numpy as np\n'), ((5526, 5542), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (5535, 5542), True, 'import numpy as np\n'), ((5811, 5827), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (5820, 5827), True, 'import numpy as np\n'), ((6114, 6130), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (6123, 6130), True, 'import numpy as np\n'), ((6431, 6446), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (6440, 6446), True, 'import numpy as np\n'), ((6725, 6740), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (6734, 6740), True, 'import numpy as np\n'), ((7135, 7151), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (7144, 7151), True, 'import numpy as np\n'), ((7611, 7627), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (7620, 7627), True, 'import numpy as np\n'), ((8083, 8098), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (8092, 8098), True, 'import numpy as np\n'), ((8548, 8563), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (8557, 8563), True, 'import numpy as np\n'), ((9556, 9578), 'numpy.dtype', 'np.dtype', (['"""complex128"""'], {}), "('complex128')\n", (9564, 9578), True, 'import numpy as np\n'), ((2262, 2288), 'aspire.utils.Rotation.from_euler', 'Rotation.from_euler', (['q_ang'], {}), '(q_ang)\n', (2281, 2288), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((2482, 2509), 'numpy.allclose', 'np.allclose', (['flag_est', 'flag'], {}), '(flag_est, flag)\n', (2493, 2509), True, 'import numpy as np\n'), ((2514, 2543), 'numpy.allclose', 'np.allclose', (['q_mat_est', 'q_mat'], {}), '(q_mat_est, q_mat)\n', (2525, 2543), True, 'import numpy as np\n'), ((3335, 3352), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['a', '(6)'], {}), '(a, 6)\n', (3346, 3352), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((3603, 3620), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['a', '(7)'], {}), '(a, 7)\n', (3614, 3620), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((3768, 3785), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['a', '(7)'], {}), '(a, 7)\n', (3779, 3785), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((4037, 4054), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['a', '(8)'], {}), '(a, 8)\n', (4048, 4054), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((4896, 4914), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['a', '(10)'], {}), '(a, 10)\n', (4907, 4914), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), 
((5181, 5199), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['a', '(11)'], {}), '(a, 11)\n', (5192, 5199), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((5369, 5387), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['a', '(11)'], {}), '(a, 11)\n', (5380, 5387), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((5648, 5666), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['a', '(10)'], {}), '(a, 10)\n', (5659, 5666), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((5886, 5898), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (5894, 5898), True, 'import numpy as np\n'), ((5926, 5938), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (5934, 5938), True, 'import numpy as np\n'), ((6045, 6065), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['aug', '(10)'], {}), '(aug, 10)\n', (6056, 6065), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((6198, 6210), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (6206, 6210), True, 'import numpy as np\n'), ((6244, 6256), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (6252, 6256), True, 'import numpy as np\n'), ((6366, 6386), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['aug', '(10)'], {}), '(aug, 10)\n', (6377, 6386), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((6504, 6515), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (6512, 6515), True, 'import numpy as np\n'), ((6543, 6554), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (6551, 6554), True, 'import numpy as np\n'), ((6661, 6680), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['aug', '(7)'], {}), '(aug, 7)\n', (6672, 6680), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((6807, 6818), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (6815, 6818), True, 'import numpy as np\n'), ((6852, 6863), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (6860, 6863), True, 'import numpy as np\n'), ((6973, 6992), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['aug', '(7)'], {}), '(aug, 7)\n', (6984, 6992), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((7210, 7222), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (7218, 7222), True, 'import numpy as np\n'), ((7250, 7262), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (7258, 7262), True, 'import numpy as np\n'), ((7335, 7347), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (7343, 7347), True, 'import numpy as np\n'), ((7384, 7396), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (7392, 7396), True, 'import numpy as np\n'), ((7542, 7562), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['aug', '(12)'], {}), '(aug, 12)\n', (7553, 7562), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((7695, 7707), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (7703, 7707), True, 'import numpy as np\n'), ((7741, 7753), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (7749, 
7753), True, 'import numpy as np\n'), ((7820, 7832), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (7828, 7832), True, 'import numpy as np\n'), ((7863, 7875), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (7871, 7875), True, 'import numpy as np\n'), ((8018, 8038), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['aug', '(12)'], {}), '(aug, 12)\n', (8029, 8038), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((8156, 8167), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (8164, 8167), True, 'import numpy as np\n'), ((8195, 8206), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (8203, 8206), True, 'import numpy as np\n'), ((8279, 8290), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (8287, 8290), True, 'import numpy as np\n'), ((8327, 8338), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (8335, 8338), True, 'import numpy as np\n'), ((8484, 8503), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['aug', '(9)'], {}), '(aug, 9)\n', (8495, 8503), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((8630, 8641), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (8638, 8641), True, 'import numpy as np\n'), ((8675, 8686), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (8683, 8686), True, 'import numpy as np\n'), ((8753, 8764), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (8761, 8764), True, 'import numpy as np\n'), ((8795, 8806), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (8803, 8806), True, 'import numpy as np\n'), ((8949, 8968), 'aspire.utils.crop_pad_2d', 'crop_pad_2d', (['aug', '(9)'], {}), '(aug, 9)\n', (8960, 8968), False, 'from aspire.utils import Rotation, crop_pad_2d, get_aligned_rotations, grid_2d, grid_3d, register_rotations, uniform_random_angles\n'), ((9083, 9100), 'numpy.zeros', 'np.zeros', (['(6, 10)'], {}), '((6, 10))\n', (9091, 9100), True, 'import numpy as np\n'), ((9906, 9932), 'numpy.array', 'np.array', (['[-1, -1, -1, -1]'], {}), '([-1, -1, -1, -1])\n', (9914, 9932), True, 'import numpy as np\n'), ((9516, 9526), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (9522, 9526), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# created by <NAME>
# contact with <EMAIL>
import numpy as np
import datetime
import os
import pandas as pd
import random
import time
import threading
import multiprocessing
def check_file(tt,datelist,hour):
'''
    check that the files at time 'tt', its following 24 hours, and its preceding history hours all exist
    the forecast times cover 25 hourly steps starting from 'tt'
    the history times cover 'hour' steps before 'tt'
    return whether every required file is ready at the time of 'tt'
'''
ruitufile = '/data/output/ruitu_data/{}/{}.npy'.format(tt.strftime('%Y%m'),tt.strftime('%Y%m%d%H'))
sign = os.path.exists(ruitufile)
if sign:
pass
# shape0 = np.load(ruitufile).shape[0]
# sign = sign and shape0==25
# if not shape0==25:
# print(ruitufile)
# os.remove(ruitufile)
else:
return False
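    # required files: the next 25 hourly steps (including 'tt') and the preceding 'hour' history steps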
pretimelist = [ tt+datetime.timedelta(seconds=3600*i) for i in range(25)]
pretimelist = pretimelist+ [ tt-datetime.timedelta(seconds=3600*i) for i in range(hour)]
for pretime in pretimelist:
# gaughDir = '/data/output/guance_data/{}/{}.npy'.format(pretime)
timestring = pretime.strftime("%Y%m%d%H%M")
sign = (timestring in datelist ) and sign
if sign==False :
# print(timestring,os.path.exists(ruitufile),timestring in datelist)
break
return sign
def file_dataset(hour ):
'''write a data-ready file list'''
print('creating the dataset with history ', hour, ' hours')
file_dict = pd.read_csv('/data/output/all_guance_data_name_list/all_gc_filename_list.csv',index_col=0)
datelist = [str(line).split('_')[1] for line in file_dict.values]
file_dict.index = datelist
start_time, end_time = datetime.datetime(2016,10,1,0),datetime.datetime(2019,4,1,0)
pretimelist=[]
pretime= start_time
while pretime<=end_time:
if check_file(pretime,datelist,hour):
pretimelist.append(pretime)
pretime += datetime.timedelta(seconds=3600*3)
pretimelist = np.array(pretimelist)
np.save('/data/code/ml/pretimelist_{}.npy'.format(hour),pretimelist)
print('finishing creating dataset with history ', hour, ' hours')
return None
def my_test_dataset( batch, history_hour, season=None ):
'''return list shape [number , batch]'''
file_dict = pd.read_csv('/data/output/all_guance_data_name_list/2019_04_07_gc_filename_list.csv', index_col=0)
datelist = [str(line).split('_')[1] for line in file_dict.values]
file_dict.index = datelist
target = '/data/code/ml/pretimelist_test_{}.npy'.format(history_hour)
if not os.path.exists(target):
file_test_dataset( history_hour )
pretimelist = np.load(target, allow_pickle=True)
if season=='summer':
tmp = []
for pretime in pretimelist:
if pretime.month in [4,5,6,7,8,9]:
tmp.append(pretime)
pretimelist = np.array(tmp)
    print('dataset length', len(pretimelist))
pretimelist = pretimelist[:len(pretimelist)//batch*batch]
pretimelist = np.array(pretimelist).reshape(-1, batch)
return pretimelist, file_dict
def file_test_dataset(hour ):
'''write a data-ready file list'''
print('creating the dataset with history ', hour, ' hours')
file_dict = pd.read_csv('/data/output/all_guance_data_name_list/2019_04_07_gc_filename_list.csv',index_col=0)
datelist = [str(line).split('_')[1] for line in file_dict.values]
file_dict.index = datelist
start_time, end_time = datetime.datetime(2019,4,1,0),datetime.datetime(2019,7,31,21)
pretimelist=[]
pretime= start_time
while pretime<=end_time:
if check_file(pretime,datelist,hour):
pretimelist.append(pretime)
pretime += datetime.timedelta(seconds=3600*3)
pretimelist = np.array(pretimelist)
np.save('/data/code/ml/pretimelist_test_{}.npy'.format(hour),pretimelist)
print('finishing creating dataset with history ', hour, ' hours')
return None
def my_dataset( batch, history_hour, season=None ):
'''return list shape [number , batch]'''
file_dict = pd.read_csv('/data/output/all_guance_data_name_list/all_gc_filename_list.csv', index_col=0)
datelist = [str(line).split('_')[1] for line in file_dict.values]
file_dict.index = datelist
target = '/data/code/ml/pretimelist_{}.npy'.format(history_hour)
if not os.path.exists(target):
file_dataset( history_hour )
pretimelist = np.load(target, allow_pickle=True)
if season=='summer':
tmp = []
for pretime in pretimelist:
if pretime.month in [6,7,8,9]:
tmp.append(pretime)
pretimelist = np.array(tmp)
    print('dataset length', len(pretimelist))
pretimelist = pretimelist[:len(pretimelist)//batch*batch]
random.shuffle(pretimelist)
pretimelist = np.array(pretimelist).reshape(-1, batch)
return pretimelist, file_dict
def conbime_thread(batch_list, batch_time):
'''
parallization the thread to read the data
'''
print("Sub-process(es) begin.")
ruitulist, gaugelist, histgaugelist, jobresults = [], [], [], []
pool = multiprocessing.Pool(processes=8)
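    # submit one read_one job per (filelist, pretime) pair and collect the async results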
for filelist, pretime in zip(batch_list, batch_time):
jobresults.append(pool.apply_async(read_one, (filelist, pretime)))
for res in jobresults:
ruituFile, gaugeFile, histgaugeFile = res.get()
ruitulist.append(ruituFile)
gaugelist.append(gaugeFile)
histgaugelist.append(histgaugeFile)
    pool.close()  # close the pool: no new tasks can be submitted
    pool.join()   # wait for all workers to finish; must be called after close()
print("Sub-process(es) done.")
gaugelist, ruitulist, histgaugelist = np.array(gaugelist), np.array(ruitulist), np.array(histgaugelist)
# print(gaugelist.shape, ruitulist.shape, histgaugelist.shape)
return ruitulist, gaugelist, histgaugelist
def read_one(filelist, pretime):
    '''read a single training sample and apply preprocessing'''
# tt = time.time()
ruituFile = np.load(filelist[0])[:,:,:80,:84]
# print('processing',pretime)
gaugeFile = np.array([np.load(file) for file in filelist[1:25]])[:,4:5,:80,:84]
histgaugeFile = np.array([np.load(file) for file in filelist[25:]])[:,:,:80,:84]
ruituFile, gaugeFile, histgaugeFile = norm_preprocess(ruituFile, gaugeFile, histgaugeFile, pretime)
# print(time.time()-tt)
return ruituFile, gaugeFile, histgaugeFile
def norm_preprocess(ruituFile, gaugeFile, histgaugeFile, pretime):
'''
    handle abnormal values, add time and geography features, and normalize the values.
'''
# print(ruituFile.shape, gaugeFile.shape, histgaugeFile.shape)
    # remove abnormal values
    assert ruituFile.shape[0] == 25, '{} without full prediction'.format(pretime)
if (np.abs(ruituFile) > 10000).any():
meantmp = ruituFile.mean(axis=(0,2,3))
for i in range(ruituFile.shape[1]):
ruituFile[:,i,:,:][np.abs(ruituFile[:,i,:,:])>10000] = meantmp[i]
histgaugeFile[np.isnan(histgaugeFile)]=200000
if (np.abs(histgaugeFile) > 10000).any():
meantmp = histgaugeFile.mean(axis=(0,2,3))
for i in range(histgaugeFile.shape[1]):
histgaugeFile[:,i,:,:][np.abs(histgaugeFile[:,i,:,:])>10000] = meantmp[i]
    # normalize the values
ruituInfo = pd.read_csv('/data/output/ruitu_info.csv')
ruitu_mean, ruitu_std = np.ones_like(ruituFile),np.ones_like(ruituFile)
for i in range(len(ruituInfo)):
ruitu_mean[:,i,:,:] *= ruituInfo['mean'].iloc[i]
ruitu_std[:,i,:,:] *= ruituInfo['std'].iloc[i]
ruituFile = (ruituFile-ruitu_mean)/ruitu_std
gaugeInfo = pd.read_csv('/data/output/gauge_info.csv')
gauge_mean, gauge_std = np.ones_like(histgaugeFile),np.ones_like(histgaugeFile)
for i in range(len(gaugeInfo)):
gauge_mean[:,i,:,:] *= gaugeInfo['mean'].iloc[i]
gauge_std[:,i,:,:] *= gaugeInfo['std'].iloc[i]
histgaugeFile = (histgaugeFile-gauge_mean)/gauge_std
#add time and geo info
geoinfo = np.load('/data/output/height_norm.npy')
hist_hour = histgaugeFile.shape[0]
pretimelist = [pretime+datetime.timedelta(seconds=i*3600) for i in range(-hist_hour+1, 25)]
yearvariancelist = [ np.sin(2*np.pi*(tt.toordinal()-730180)/365.25) for tt in pretimelist]
dayvariancelist = [ np.sin(2*np.pi*(tt.hour-3)/24) for tt in pretimelist]
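    # replace channels 32-34 with step-to-step differences (presumably turning accumulated fields into hourly increments)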
ruituFile[1:25, 32:35, :, :] = ruituFile[1:25, 32:35, :, :] - ruituFile[0:24,32:35,:,:]
ruituFile_new = ruituFile[1:].copy()
histgaugeFile[:,7,:,:] = np.array([geoinfo]*histgaugeFile.shape[0])
histgaugeFile[:,10,:,:] = np.array([sli*yvar for sli, yvar in zip(np.ones([hist_hour,80,84]),yearvariancelist[:hist_hour])])
histgaugeFile[:,11,:,:] = np.array([sli*dvar for sli, dvar in zip(np.ones([hist_hour,80,84]),dayvariancelist[:hist_hour])])
tmpyear = np.expand_dims([sli*yvar for sli, yvar in zip(np.ones([24,80,84]),yearvariancelist[hist_hour:])], axis=1)
tmpday = np.expand_dims([sli*dvar for sli, dvar in zip(np.ones([24,80,84]),dayvariancelist[hist_hour:])], axis=1)
tmpgeo = np.expand_dims(np.array([geoinfo]*ruituFile_new.shape[0]),axis=1)
ruituFile_new = np.concatenate((ruituFile_new, tmpyear, tmpday, tmpgeo),axis=1)
# print(ruituFile_new.shape, gaugeFile.shape, histgaugeFile.shape)
return ruituFile_new, gaugeFile, histgaugeFile
def load_data2(pretimelist, file_dict, history_hour, binary=False):
'''
    load batch data in a parallelized way, which is much faster.
input args: load_data2(pretimelist, file_dict, history_hour, binary=False)
return args: ruitudata, gaugedata, histgaugedata
shape: [batch ,24, channels_1, height, width],[batch ,24 , 1, height, width],[batch , historyhour, channels_2, height, width]
if binary is True, the gaugedata will return in shape [batch ,time, 2, height, width]
'''
pretimelist = list(pretimelist)
batchfile = []
for batch_time in pretimelist:
ruituFile = ['/data/output/ruitu_data/{}/{}.npy'.format(batch_time.strftime('%Y%m'),batch_time.strftime('%Y%m%d%H'))]
time24h = [ batch_time+datetime.timedelta(seconds=3600*i) for i in range(1,25)]
gaugeFile = ['/data/output/guance_data/{}/{}'.format(tt.strftime('%Y%m'),file_dict.loc[tt.strftime('%Y%m%d%H%M')].values[0]) for tt in time24h]
timehist = [ batch_time-datetime.timedelta(seconds=3600*i) for i in range(history_hour)]
histgaugeFile = ['/data/output/guance_data/{}/{}'.format(tt.strftime('%Y%m'),file_dict.loc[tt.strftime('%Y%m%d%H%M')].values[0]) for tt in timehist]
singlefile = ruituFile+gaugeFile+histgaugeFile
batchfile.append(singlefile)
ruitudata, gaugedata, histgaugedata = conbime_thread(batchfile, pretimelist)
if binary:
# gaugedata = (gaugedata>=0.1).astype('int')
gaugebinary = np.concatenate((gaugedata>=0.1, gaugedata<0.1),axis=2).astype('int')
gaugedata[ gaugedata<0.1]=0
return np.array(ruitudata)[:,:,:,:80,:80], np.array(gaugebinary)[:,:,:,:80,:80], np.array(gaugedata[:,:,:,:80,:80])
# def load_data(pretimelist,file_dict):
# '''pretimelist is a batch timelist at once
# output shape = [batch, 24, channel, 80, 84],[batch, 24, channel, 80, 84]
# '''
# print('old')
# t1 = time.time()
# pretimelist = list(pretimelist)
# gaugedata = []
# ruitudata = []
# for batch_time in pretimelist:
# ruitutmp = np.load('/data/output/ruitu_data/{}/{}.npy'.format(batch_time.strftime('%Y%m'),batch_time.strftime('%Y%m%d%H')))[:24,:,:80,:84]
# time24h = [ batch_time+datetime.timedelta(seconds=3600) for i in range(24)]
# guagetmp = np.array([np.load('/data/output/guance_data/{}/{}'.format(tt.strftime('%Y%m'),file_dict.loc[tt.strftime('%Y%m%d%H%M')].values[0])) for tt in time24h])[:,4:5,:80,:84]
# gaugedata.append(guagetmp)
# ruitudata.append(ruitutmp)
# print('total:',time.time()-t1)
# return np.array(gaugedata), np.array(ruitudata)
if __name__=='__main__':
batch = 8
historyhour = 24
batch_filelist, file_dict = my_dataset( batch, historyhour,season='summer')
split_num=0.7
train_num = int(len(batch_filelist)*split_num)
mydataset = {'train':batch_filelist[:train_num], 'test': batch_filelist[train_num:]}
for filelist in mydataset['train']:
tt = time.time()
ruitudata, gaugedata, histgaugedata = load_data2(filelist,file_dict,historyhour, binary=True)
print(gaugedata.shape, ruitudata.shape, histgaugedata.shape, 'finished time cost:',time.time()-tt)
# print(gaugedata.mean(axis=(0,1,3,4)),gaugedata.std(axis=(0,1,3,4)))
# print(ruitudata.mean(axis=(0,1,3,4)),ruitudata.std(axis=(0,1,3,4)))
# print(histgaugedata.mean(axis=(0,1,3,4)),histgaugedata.std(axis=(0,1,3,4)))
|
[
"numpy.load",
"numpy.ones_like",
"numpy.abs",
"pandas.read_csv",
"random.shuffle",
"os.path.exists",
"numpy.ones",
"numpy.isnan",
"datetime.datetime",
"time.time",
"numpy.sin",
"numpy.array",
"datetime.timedelta",
"multiprocessing.Pool",
"numpy.concatenate"
] |
[((593, 618), 'os.path.exists', 'os.path.exists', (['ruitufile'], {}), '(ruitufile)\n', (607, 618), False, 'import os\n'), ((1522, 1617), 'pandas.read_csv', 'pd.read_csv', (['"""/data/output/all_guance_data_name_list/all_gc_filename_list.csv"""'], {'index_col': '(0)'}), "('/data/output/all_guance_data_name_list/all_gc_filename_list.csv',\n index_col=0)\n", (1533, 1617), True, 'import pandas as pd\n'), ((2033, 2054), 'numpy.array', 'np.array', (['pretimelist'], {}), '(pretimelist)\n', (2041, 2054), True, 'import numpy as np\n'), ((2333, 2440), 'pandas.read_csv', 'pd.read_csv', (['"""/data/output/all_guance_data_name_list/2019_04_07_gc_filename_list.csv"""'], {'index_col': '(0)'}), "(\n '/data/output/all_guance_data_name_list/2019_04_07_gc_filename_list.csv',\n index_col=0)\n", (2344, 2440), True, 'import pandas as pd\n'), ((2702, 2736), 'numpy.load', 'np.load', (['target'], {'allow_pickle': '(True)'}), '(target, allow_pickle=True)\n', (2709, 2736), True, 'import numpy as np\n'), ((3289, 3396), 'pandas.read_csv', 'pd.read_csv', (['"""/data/output/all_guance_data_name_list/2019_04_07_gc_filename_list.csv"""'], {'index_col': '(0)'}), "(\n '/data/output/all_guance_data_name_list/2019_04_07_gc_filename_list.csv',\n index_col=0)\n", (3300, 3396), True, 'import pandas as pd\n'), ((3808, 3829), 'numpy.array', 'np.array', (['pretimelist'], {}), '(pretimelist)\n', (3816, 3829), True, 'import numpy as np\n'), ((4109, 4204), 'pandas.read_csv', 'pd.read_csv', (['"""/data/output/all_guance_data_name_list/all_gc_filename_list.csv"""'], {'index_col': '(0)'}), "('/data/output/all_guance_data_name_list/all_gc_filename_list.csv',\n index_col=0)\n", (4120, 4204), True, 'import pandas as pd\n'), ((4461, 4495), 'numpy.load', 'np.load', (['target'], {'allow_pickle': '(True)'}), '(target, allow_pickle=True)\n', (4468, 4495), True, 'import numpy as np\n'), ((4805, 4832), 'random.shuffle', 'random.shuffle', (['pretimelist'], {}), '(pretimelist)\n', (4819, 4832), False, 'import random\n'), ((5151, 5184), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': '(8)'}), '(processes=8)\n', (5171, 5184), False, 'import multiprocessing\n'), ((7309, 7351), 'pandas.read_csv', 'pd.read_csv', (['"""/data/output/ruitu_info.csv"""'], {}), "('/data/output/ruitu_info.csv')\n", (7320, 7351), True, 'import pandas as pd\n'), ((7642, 7684), 'pandas.read_csv', 'pd.read_csv', (['"""/data/output/gauge_info.csv"""'], {}), "('/data/output/gauge_info.csv')\n", (7653, 7684), True, 'import pandas as pd\n'), ((8020, 8059), 'numpy.load', 'np.load', (['"""/data/output/height_norm.npy"""'], {}), "('/data/output/height_norm.npy')\n", (8027, 8059), True, 'import numpy as np\n'), ((8530, 8574), 'numpy.array', 'np.array', (['([geoinfo] * histgaugeFile.shape[0])'], {}), '([geoinfo] * histgaugeFile.shape[0])\n', (8538, 8574), True, 'import numpy as np\n'), ((9167, 9231), 'numpy.concatenate', 'np.concatenate', (['(ruituFile_new, tmpyear, tmpday, tmpgeo)'], {'axis': '(1)'}), '((ruituFile_new, tmpyear, tmpday, tmpgeo), axis=1)\n', (9181, 9231), True, 'import numpy as np\n'), ((1742, 1775), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(10)', '(1)', '(0)'], {}), '(2016, 10, 1, 0)\n', (1759, 1775), False, 'import datetime\n'), ((1773, 1805), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(4)', '(1)', '(0)'], {}), '(2019, 4, 1, 0)\n', (1790, 1805), False, 'import datetime\n'), ((1980, 2016), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600 * 3)'}), '(seconds=3600 * 3)\n', (1998, 2016), False, 'import 
datetime\n'), ((2618, 2640), 'os.path.exists', 'os.path.exists', (['target'], {}), '(target)\n', (2632, 2640), False, 'import os\n'), ((2925, 2938), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (2933, 2938), True, 'import numpy as np\n'), ((3516, 3548), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(4)', '(1)', '(0)'], {}), '(2019, 4, 1, 0)\n', (3533, 3548), False, 'import datetime\n'), ((3546, 3580), 'datetime.datetime', 'datetime.datetime', (['(2019)', '(7)', '(31)', '(21)'], {}), '(2019, 7, 31, 21)\n', (3563, 3580), False, 'import datetime\n'), ((3755, 3791), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600 * 3)'}), '(seconds=3600 * 3)\n', (3773, 3791), False, 'import datetime\n'), ((4382, 4404), 'os.path.exists', 'os.path.exists', (['target'], {}), '(target)\n', (4396, 4404), False, 'import os\n'), ((4680, 4693), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (4688, 4693), True, 'import numpy as np\n'), ((5684, 5703), 'numpy.array', 'np.array', (['gaugelist'], {}), '(gaugelist)\n', (5692, 5703), True, 'import numpy as np\n'), ((5705, 5724), 'numpy.array', 'np.array', (['ruitulist'], {}), '(ruitulist)\n', (5713, 5724), True, 'import numpy as np\n'), ((5726, 5749), 'numpy.array', 'np.array', (['histgaugelist'], {}), '(histgaugelist)\n', (5734, 5749), True, 'import numpy as np\n'), ((6003, 6023), 'numpy.load', 'np.load', (['filelist[0]'], {}), '(filelist[0])\n', (6010, 6023), True, 'import numpy as np\n'), ((7000, 7023), 'numpy.isnan', 'np.isnan', (['histgaugeFile'], {}), '(histgaugeFile)\n', (7008, 7023), True, 'import numpy as np\n'), ((7380, 7403), 'numpy.ones_like', 'np.ones_like', (['ruituFile'], {}), '(ruituFile)\n', (7392, 7403), True, 'import numpy as np\n'), ((7404, 7427), 'numpy.ones_like', 'np.ones_like', (['ruituFile'], {}), '(ruituFile)\n', (7416, 7427), True, 'import numpy as np\n'), ((7713, 7740), 'numpy.ones_like', 'np.ones_like', (['histgaugeFile'], {}), '(histgaugeFile)\n', (7725, 7740), True, 'import numpy as np\n'), ((7741, 7768), 'numpy.ones_like', 'np.ones_like', (['histgaugeFile'], {}), '(histgaugeFile)\n', (7753, 7768), True, 'import numpy as np\n'), ((8314, 8352), 'numpy.sin', 'np.sin', (['(2 * np.pi * (tt.hour - 3) / 24)'], {}), '(2 * np.pi * (tt.hour - 3) / 24)\n', (8320, 8352), True, 'import numpy as np\n'), ((9096, 9140), 'numpy.array', 'np.array', (['([geoinfo] * ruituFile_new.shape[0])'], {}), '([geoinfo] * ruituFile_new.shape[0])\n', (9104, 9140), True, 'import numpy as np\n'), ((11018, 11056), 'numpy.array', 'np.array', (['gaugedata[:, :, :, :80, :80]'], {}), '(gaugedata[:, :, :, :80, :80])\n', (11026, 11056), True, 'import numpy as np\n'), ((12345, 12356), 'time.time', 'time.time', ([], {}), '()\n', (12354, 12356), False, 'import time\n'), ((878, 914), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600 * i)'}), '(seconds=3600 * i)\n', (896, 914), False, 'import datetime\n'), ((3064, 3085), 'numpy.array', 'np.array', (['pretimelist'], {}), '(pretimelist)\n', (3072, 3085), True, 'import numpy as np\n'), ((4851, 4872), 'numpy.array', 'np.array', (['pretimelist'], {}), '(pretimelist)\n', (4859, 4872), True, 'import numpy as np\n'), ((8126, 8162), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(i * 3600)'}), '(seconds=i * 3600)\n', (8144, 8162), False, 'import datetime\n'), ((10944, 10963), 'numpy.array', 'np.array', (['ruitudata'], {}), '(ruitudata)\n', (10952, 10963), True, 'import numpy as np\n'), ((10980, 11001), 'numpy.array', 'np.array', (['gaugebinary'], {}), '(gaugebinary)\n', 
(10988, 11001), True, 'import numpy as np\n'), ((969, 1005), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600 * i)'}), '(seconds=3600 * i)\n', (987, 1005), False, 'import datetime\n'), ((6097, 6110), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (6104, 6110), True, 'import numpy as np\n'), ((6185, 6198), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (6192, 6198), True, 'import numpy as np\n'), ((6766, 6783), 'numpy.abs', 'np.abs', (['ruituFile'], {}), '(ruituFile)\n', (6772, 6783), True, 'import numpy as np\n'), ((7040, 7061), 'numpy.abs', 'np.abs', (['histgaugeFile'], {}), '(histgaugeFile)\n', (7046, 7061), True, 'import numpy as np\n'), ((10095, 10131), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600 * i)'}), '(seconds=3600 * i)\n', (10113, 10131), False, 'import datetime\n'), ((10336, 10372), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600 * i)'}), '(seconds=3600 * i)\n', (10354, 10372), False, 'import datetime\n'), ((10827, 10886), 'numpy.concatenate', 'np.concatenate', (['(gaugedata >= 0.1, gaugedata < 0.1)'], {'axis': '(2)'}), '((gaugedata >= 0.1, gaugedata < 0.1), axis=2)\n', (10841, 10886), True, 'import numpy as np\n'), ((12550, 12561), 'time.time', 'time.time', ([], {}), '()\n', (12559, 12561), False, 'import time\n'), ((6922, 6951), 'numpy.abs', 'np.abs', (['ruituFile[:, i, :, :]'], {}), '(ruituFile[:, i, :, :])\n', (6928, 6951), True, 'import numpy as np\n'), ((7212, 7245), 'numpy.abs', 'np.abs', (['histgaugeFile[:, i, :, :]'], {}), '(histgaugeFile[:, i, :, :])\n', (7218, 7245), True, 'import numpy as np\n'), ((8643, 8671), 'numpy.ones', 'np.ones', (['[hist_hour, 80, 84]'], {}), '([hist_hour, 80, 84])\n', (8650, 8671), True, 'import numpy as np\n'), ((8772, 8800), 'numpy.ones', 'np.ones', (['[hist_hour, 80, 84]'], {}), '([hist_hour, 80, 84])\n', (8779, 8800), True, 'import numpy as np\n'), ((8890, 8911), 'numpy.ones', 'np.ones', (['[24, 80, 84]'], {}), '([24, 80, 84])\n', (8897, 8911), True, 'import numpy as np\n'), ((9009, 9030), 'numpy.ones', 'np.ones', (['[24, 80, 84]'], {}), '([24, 80, 84])\n', (9016, 9030), True, 'import numpy as np\n')]
|
from src.ddpg.train import Trainer
from src.ddpg.buffer import MemoryBuffer
from statistics import mean
import gym
import numpy as np
import random
import scipy.stats
class EvolutionaryDDPG:
def __init__(self, n_networks, max_buffer, max_episodes, max_steps, episodes_ready, explore_prob, explore_factors):
        self.n = n_networks  # number of networks
self.max_buffer = max_buffer
self.max_episodes = max_episodes
self.max_steps = max_steps
self.episodes_ready = episodes_ready
if len(self.episodes_ready) < n_networks:
print("episodes_ready.len() != n_networks")
raise Exception
self.explore_prob = explore_prob - int(explore_prob)
self.explore_factors = explore_factors
self.rams = []
        # initialize the last 10 partial scores of every network to -100
self.last_ten_scores = [[-100 for _ in range(10)] for _ in range(self.n)]
self.envs = self.create_envs()
self.ddpgs = self.create_ddpg()
def create_envs(self):
envs = []
for i in range(self.n):
env = gym.make('BipedalWalker-v2')
envs.append(env)
return envs
def create_ddpg(self):
ddpgs = []
for i in range(self.n):
env = self.envs[i]
s_dim = env.observation_space.shape[0]
a_dim = env.action_space.shape[0]
a_max = env.action_space.high[0]
print(' State Dimensions :- ', s_dim)
print(' Action Dimensions :- ', a_dim)
print(' Action Max :- ', a_max)
ram = MemoryBuffer(self.max_buffer)
self.rams.append(ram)
trainer = Trainer(s_dim, a_dim, a_max, ram)
ddpgs.append(trainer)
return ddpgs
def exploit(self, idx):
"""
        Exploitation uniformly samples another (randomly chosen) agent from the population
        and then compares the last 10 partial rewards using Welch's t-test.
        If the sampled agent has a higher mean partial reward and passes the t-test,
        its weights and hyperparameters are copied into the current agent.
        :param idx: index of the network for which exploit() is called
"""
        # draw the index of a network different from the current one
random_idx = random.randrange(self.n)
while random_idx == idx:
random_idx = random.randrange(self.n)
        # pick the better network
best_net_idx = self.pick_net(idx, random_idx)
        # if the sampled network turned out to be better
if idx != best_net_idx:
            # copy over its weights
new_param = self.ddpgs[best_net_idx].actor.parameters()
for param in self.ddpgs[idx].actor.parameters():
param.data.copy_(next(new_param))
new_param = self.ddpgs[best_net_idx].critic.parameters()
for param in self.ddpgs[idx].critic.parameters():
param.data.copy_(next(new_param))
print("<exploit", idx, "> Wczytano nowe wagi z sieci nr ", best_net_idx)
else:
print("<exploit", idx, "> Wagi zostają, są lepsze od sieci nr ", random_idx)
def explore(self, idx):
if random.random() < 0.5:
for param in self.ddpgs[idx].actor.parameters():
param.data.mul_(self.explore_factors[0])
for param in self.ddpgs[idx].critic.parameters():
param.data.mul_(self.explore_factors[0])
print("<explore", idx, "> Przemnożono wagi przez ", self.explore_factors[0])
else:
for param in self.ddpgs[idx].actor.parameters():
param.data.mul_(self.explore_factors[1])
for param in self.ddpgs[idx].critic.parameters():
param.data.mul_(self.explore_factors[1])
print("<explore", idx, "> Przemnożono wagi przez ", self.explore_factors[1])
def pick_net(self, idx1, idx2):
"""
        Compare the partial rewards of two networks using Welch's t-test
        :param idx1: the current network
        :param idx2: the randomly chosen network
        :return: index of the better network
"""
statistic, pvalue = scipy.stats.ttest_ind(self.last_ten_scores[idx1], self.last_ten_scores[idx2],
equal_var=False)
if pvalue <= 0.05:
            # passed Welch's t-test, now compare the means of the last 10 scores
if mean(self.last_ten_scores[idx1]) > mean(self.last_ten_scores[idx2]):
                return idx1  # the current network is better
            else:
                return idx2  # the randomly chosen network is better
        else:
            return idx1  # did not pass Welch's t-test
def train(self):
        # Number of algorithm iterations
        for episode in range(self.max_episodes):
            # For each network
for ddpg_idx in range(self.n):
trainer = self.ddpgs[ddpg_idx]
ram = self.rams[ddpg_idx]
env = self.envs[ddpg_idx]
                # Reset the environment
observation = env.reset()
                # Accumulate the total score obtained
total_reward = 0
                # Run at most max_steps steps
for r in range(self.max_steps):
# env.render()
state = np.float32(observation)
action = trainer.get_exploration_action(state)
new_observation, reward, done, info = env.step(action)
total_reward = total_reward + reward
if not done:
new_state = np.float32(new_observation)
ram.add(state, action, reward, new_state)
observation = new_observation
trainer.optimize()
if done:
break
self.append_score(ddpg_idx, total_reward)
print('NETWORK ', ddpg_idx, ' EPISODE : ', episode, ' SCORE : ', total_reward)
                # each network has its own number of episodes after which exploit and explore are called
if episode % self.episodes_ready[ddpg_idx] == 0 and episode != 0:
self.exploit(ddpg_idx)
if random.random() < self.explore_prob:
self.explore(ddpg_idx)
if episode % 100 == 0:
self.save_ckpt(episode)
def append_score(self, idx, new_score):
"""
        Drops the oldest of the last 10 partial scores and appends the new one
        :param idx: network index
        :param new_score: new score
"""
self.last_ten_scores[idx] = self.last_ten_scores[idx][1:]
self.last_ten_scores[idx].append(new_score)
def save_ckpt(self, episode):
idx_ddpg = 0
for ddpg in self.ddpgs:
ddpg.save_models_path(idx_ddpg, episode)
idx_ddpg = idx_ddpg + 1
print('Models saved successfully')
def load_ckpt(self, episode):
idx_ddpg = 0
for ddpg in self.ddpgs:
ddpg.load_models_path('Models/' + str(idx_ddpg) + '_' + str(episode) + '_actor.pt',
'Models/' + str(idx_ddpg) + '_' + str(episode) + '_critic.pt')
idx_ddpg = idx_ddpg + 1
print('Models loaded successfully')
|
[
"gym.make",
"src.ddpg.buffer.MemoryBuffer",
"numpy.float32",
"random.random",
"random.randrange",
"statistics.mean",
"src.ddpg.train.Trainer"
] |
[((2347, 2371), 'random.randrange', 'random.randrange', (['self.n'], {}), '(self.n)\n', (2363, 2371), False, 'import random\n'), ((1135, 1163), 'gym.make', 'gym.make', (['"""BipedalWalker-v2"""'], {}), "('BipedalWalker-v2')\n", (1143, 1163), False, 'import gym\n'), ((1630, 1659), 'src.ddpg.buffer.MemoryBuffer', 'MemoryBuffer', (['self.max_buffer'], {}), '(self.max_buffer)\n', (1642, 1659), False, 'from src.ddpg.buffer import MemoryBuffer\n'), ((1716, 1749), 'src.ddpg.train.Trainer', 'Trainer', (['s_dim', 'a_dim', 'a_max', 'ram'], {}), '(s_dim, a_dim, a_max, ram)\n', (1723, 1749), False, 'from src.ddpg.train import Trainer\n'), ((2430, 2454), 'random.randrange', 'random.randrange', (['self.n'], {}), '(self.n)\n', (2446, 2454), False, 'import random\n'), ((3252, 3267), 'random.random', 'random.random', ([], {}), '()\n', (3265, 3267), False, 'import random\n'), ((4499, 4531), 'statistics.mean', 'mean', (['self.last_ten_scores[idx1]'], {}), '(self.last_ten_scores[idx1])\n', (4503, 4531), False, 'from statistics import mean\n'), ((4534, 4566), 'statistics.mean', 'mean', (['self.last_ten_scores[idx2]'], {}), '(self.last_ten_scores[idx2])\n', (4538, 4566), False, 'from statistics import mean\n'), ((5398, 5421), 'numpy.float32', 'np.float32', (['observation'], {}), '(observation)\n', (5408, 5421), True, 'import numpy as np\n'), ((5692, 5719), 'numpy.float32', 'np.float32', (['new_observation'], {}), '(new_observation)\n', (5702, 5719), True, 'import numpy as np\n'), ((6343, 6358), 'random.random', 'random.random', ([], {}), '()\n', (6356, 6358), False, 'import random\n')]
|
from collections import defaultdict
import numpy as np
import networkx as nx
import networkx.algorithms.approximation as approx
import networkx.algorithms.coloring as coloring
import pulp
def clique_random_sequential(graph : nx.Graph) -> list:
"""Perform minimum clique cover with random sequential greedy method
This method will create clique greedily. At least finish with O(|V|^2).
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
"""
graph = graph.copy()
clique_list = []
while len(graph.nodes())>0:
clique = []
node_list = list(graph.nodes())
        node_list = list(np.random.permutation(node_list))  # permutation returns a new order, so assign it back
for node in node_list:
flag = True
for exist_node in clique:
if node not in graph[exist_node]:
flag =False
break
if flag:
clique.append(node)
graph.remove_nodes_from(clique)
clique_list.append(clique)
return clique_list
def clique_approx_find_greedy_eliminate(graph: nx.Graph) -> list:
"""Perform minimum clique cover by approximatly find maximum clique and iteratively eliminate it.
Find the maximum clique with approximatino methods and iteratively eliminate it.
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
"""
_, clique_list = approx.clique_removal(graph)
clique_list = [list(item) for item in clique_list]
return clique_list
def clique_exact_find_greedy_eliminate(graph: nx.Graph) -> list:
"""Perform minimum clique cover by exactly find maximum clique and iteratively eliminate it.
Find the maximum clique by enumerating all the cliques and iteratively eliminate it.
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
"""
graph = graph.copy()
clique_list = []
while len(graph.nodes())>0:
max_size = 0
max_clique = []
for clique in nx.find_cliques(graph):
size = len(clique)
if size > max_size:
max_size = size
max_clique = clique
graph.remove_nodes_from(max_clique)
clique_list.append(max_clique)
return clique_list
def clique_exact_find_once_greedy_eliminate(graph: nx.Graph) -> list:
"""Perform minimum clique cover by exactly find maximum clique and iteratively eliminate it.
Find the maximum clique by enumerating all the cliques once and iteratively eliminate it.
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
"""
max_cliques = sorted(nx.find_cliques(graph), key=lambda x: len(x), reverse=True)
max_cliques = [set(i) for i in max_cliques]
clique_list = []
while np.sum([len(i) for i in max_cliques]) > 0:
max_clique = max_cliques[0]
max_cliques = [i - max_clique for i in max_cliques]
max_cliques = sorted(max_cliques, key=lambda x: len(x), reverse=True)
clique_list.append(max_clique)
return clique_list
def coloring_greedy(graph: nx.Graph, strategy: str) -> list:
"""Perform minimum clique cover by reducing problem into coloring problem and using approximation methods.
See https://networkx.github.io/documentation/stable/reference/algorithms/coloring.html
for detailed algorithms
Args:
graph (nx.Graph): graph to solve
strategy (str): name of strategy
Returns:
list: list of node names for each clique
"""
graph = nx.complement(graph)
result = coloring.greedy_color(graph, strategy=strategy)
clique_dict = defaultdict(list)
for node,color in result.items():
clique_dict[color].append(node)
return list(clique_dict.values())
class AbortedError(Exception):
pass
def integer_programming(graph: nx.Graph) -> list:
"""Perform minimum clique cover by reducing problem into integer programming.
    If the solver reports optimal, an optimal solution for minimum clique cover is obtained,
    but it may take a very long time for large problems.
TODO: Check installation of commercial IP solvers such as CPLEX, Gurobi, and
use them if they are installed.
Args:
graph (nx.Graph): graph to solve
Returns:
list: list of node names for each clique
Raises:
Exception: Solver cannot solve IP problem.
"""
problem = pulp.LpProblem("clique_cover", pulp.LpMinimize)
clique_max_count = len(graph.nodes())
clique_vars = []
for ind in range(clique_max_count):
var = pulp.LpVariable("clique{}".format(ind), cat="Binary")
clique_vars.append(var)
node_belong_vars = []
for ind in range(clique_max_count):
node_belong_vars.append({})
for node in graph.nodes():
nodename = str(node)
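            # encode coefficient-like prefixes (' ', ' i', ' -', '-i') as digits so the LpVariable name is valid and unique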
nodename = nodename.replace(" ","0").replace(" i","1").replace(" -","2").replace("-i","3")
var = pulp.LpVariable("{}_{}".format(nodename,ind), cat = "Binary")
node_belong_vars[ind][node] = var
# minimize used cliques
problem += sum(clique_vars)
# if node belongs, clique must be used
for ind in range(clique_max_count):
for node in graph.nodes():
problem += (node_belong_vars[ind][node] <= clique_vars[ind])
# clique must be exclusive
for node in graph.nodes():
items = []
for ind in range(clique_max_count):
items.append(node_belong_vars[ind][node])
problem += (sum(items)==1)
# not-neighboring nodes cannot belong the same clique
for ind in range(clique_max_count):
for i1, n1 in enumerate(graph.nodes()):
for i2, n2 in enumerate(graph.nodes()):
if i2<=i1: continue
if n2 not in graph[n1]:
problem += (node_belong_vars[ind][n1]+node_belong_vars[ind][n2]<=1)
#status = problem.solve()
import multiprocessing
cpu_count = multiprocessing.cpu_count()
status = problem.solve(pulp.PULP_CBC_CMD(threads=cpu_count, keepFiles=0, mip=1, maxSeconds=5))
#status = problem.solve(pulp.PULP_CBC_CMD(maxSeconds=5, msg=0, fracGap=0))
#print(problem)
#print(pulp.LpStatus[status])
#print(problem.objective.value())
# cannot solve
if status <= 0:
raise AbortedError("Solver cannot solve problem.")
clique_dict = defaultdict(list)
node_count = 0
for node in graph.nodes():
for index in range(clique_max_count):
var = node_belong_vars[index][node]
if(var.value()>=0.5):
clique_dict[index].append(node)
node_count += 1
break
return list(clique_dict.values())
strategy_func = {
"clique_random_sequential" : clique_random_sequential,
"clique_approx_find_greedy_eliminate" : clique_approx_find_greedy_eliminate,
"clique_exact_find_greedy_eliminate" : clique_exact_find_greedy_eliminate,
"clique_exact_find_once_greedy_eliminate" : clique_exact_find_once_greedy_eliminate,
"coloring_largest_first" : None,
"coloring_smallest_last" : None,
"coloring_random_sequential" : None,
"coloring_independent_set" : None,
"coloring_connected_sequential_bfs" : None,
"coloring_connected_sequential_dfs" : None,
"coloring_saturation_largest_first" : None,
"integer_programming" : integer_programming,
}
clique_cover_strategies = strategy_func.keys()
def clique_cover(graph: nx.Graph, strategy: str = "clique_random_sequential") -> list:
"""Perform minimum clique cover using several strategies
Args:
graph (nx.Graph): graph to solve
strategy (str): name of strategy
Returns:
list: list of node names for each clique
"""
if strategy not in strategy_func:
raise ValueError("Unknown strategy, choose from {}".format(strategy_func.keys()))
coloring_prefix = "coloring_"
if coloring_prefix in strategy:
return coloring_greedy(graph, strategy = strategy[len(coloring_prefix):])
return strategy_func[strategy](graph)
|
[
"networkx.algorithms.coloring.greedy_color",
"networkx.complement",
"collections.defaultdict",
"networkx.find_cliques",
"numpy.random.permutation",
"pulp.LpProblem",
"pulp.PULP_CBC_CMD",
"networkx.algorithms.approximation.clique_removal",
"multiprocessing.cpu_count"
] |
[((1497, 1525), 'networkx.algorithms.approximation.clique_removal', 'approx.clique_removal', (['graph'], {}), '(graph)\n', (1518, 1525), True, 'import networkx.algorithms.approximation as approx\n'), ((3741, 3761), 'networkx.complement', 'nx.complement', (['graph'], {}), '(graph)\n', (3754, 3761), True, 'import networkx as nx\n'), ((3776, 3823), 'networkx.algorithms.coloring.greedy_color', 'coloring.greedy_color', (['graph'], {'strategy': 'strategy'}), '(graph, strategy=strategy)\n', (3797, 3823), True, 'import networkx.algorithms.coloring as coloring\n'), ((3843, 3860), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3854, 3860), False, 'from collections import defaultdict\n'), ((4632, 4679), 'pulp.LpProblem', 'pulp.LpProblem', (['"""clique_cover"""', 'pulp.LpMinimize'], {}), "('clique_cover', pulp.LpMinimize)\n", (4646, 4679), False, 'import pulp\n'), ((6243, 6270), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (6268, 6270), False, 'import multiprocessing\n'), ((6670, 6687), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6681, 6687), False, 'from collections import defaultdict\n'), ((687, 719), 'numpy.random.permutation', 'np.random.permutation', (['node_list'], {}), '(node_list)\n', (708, 719), True, 'import numpy as np\n'), ((2143, 2165), 'networkx.find_cliques', 'nx.find_cliques', (['graph'], {}), '(graph)\n', (2158, 2165), True, 'import networkx as nx\n'), ((2833, 2855), 'networkx.find_cliques', 'nx.find_cliques', (['graph'], {}), '(graph)\n', (2848, 2855), True, 'import networkx as nx\n'), ((6299, 6369), 'pulp.PULP_CBC_CMD', 'pulp.PULP_CBC_CMD', ([], {'threads': 'cpu_count', 'keepFiles': '(0)', 'mip': '(1)', 'maxSeconds': '(5)'}), '(threads=cpu_count, keepFiles=0, mip=1, maxSeconds=5)\n', (6316, 6369), False, 'import pulp\n')]
|
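A minimal usage sketch for the clique_cover entry point in the row above (it assumes networkx is installed and that the functions above are importable; the four-node toy graph and the two strategy names are illustrative, not part of the original row):

import networkx as nx

toy = nx.Graph()
toy.add_edges_from([(0, 1), (1, 2), (0, 2), (2, 3)])  # a triangle plus one pendant edge

for name in ("clique_random_sequential", "coloring_largest_first"):
    # each call returns a list of node groups that together cover all four nodes
    print(name, clique_cover(toy, strategy=name))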
import webbrowser
import numpy as np
import pandas as pd
from pandas_datareader import data as web
from sklearn import linear_model
webbrowser.open("https://github.com/philliphsu/BottomSheetPickers")
class ScikitBacktest(object):
    def __init__(self, sys):
        self.data = None
        self.matrix = None
        self.lags = 5
        self.symbol = sys
        self.get_data()
        self.lm = linear_model.LogisticRegression(C=1e3)
    def get_data(self):
        # download adjusted closes, attach log returns, and keep the frame on the instance
        d = web.DataReader(self.symbol, data_source='yahoo')['Adj Close']
        d = pd.DataFrame(d)
        d.columns = [self.symbol]
        d['returns'] = np.log(d / d.shift())
        self.data = d
def select_data(self, start, end):
d = self.data[(self.data.index >= start) & (self.data.index <= end)].copy()
return d
    def get_matrix(self, start, end):
        # build a (lags+1) x N matrix of lagged returns; the last row holds the target period
        d = self.select_data(start, end)['returns']
        m = np.zeros((self.lags + 1, len(d) - self.lags))
        for i in range(self.lags + 1):
            if i == self.lags:
                m[i] = d[i:]
            else:
                m[i] = d[i:i - self.lags]
        self.matrix = m
def fit_model(self, start, end):
self.get_matrix(start, end)
self.lm.fit(self.matrix[:self.lags], np.sign(self.matrix[self.lags]))
def predict_moves(self, start, end):
self.get_matrix(start, end)
pred = self.lm.predict(self.matrix[:self.lags])
return pred
def run_strategy(self, start_tr, end_tr, start_te, end_te, lags):
self.lags = lags
self.fit_model(start_tr, end_tr)
pred = self.predict_moves(start_te, end_te)
d = self.select_data(start_te, end_te)
d['pred'] = 0.0
        d.loc[d.index[self.lags:], 'pred'] = pred
d['strategy'] = d.pred * d.returns
title = '%s to %s for %d lags' % (start_te, end_te, self.lags)
        d[['returns', 'strategy']].iloc[self.lags:].cumsum().apply(np.exp).plot(title=title)
|
[
"pandas.DataFrame",
"webbrowser.open",
"pandas_datareader.data.DataReader",
"sklearn.linear_model.LogisticRegression",
"numpy.sign"
] |
[((134, 201), 'webbrowser.open', 'webbrowser.open', (['"""https://github.com/philliphsu/BottomSheetPickers"""'], {}), "('https://github.com/philliphsu/BottomSheetPickers')\n", (149, 201), False, 'import webbrowser\n'), ((399, 440), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'C': '(1000.0)'}), '(C=1000.0)\n', (430, 440), False, 'from sklearn import linear_model\n'), ((546, 561), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (558, 561), True, 'import pandas as pd\n'), ((475, 520), 'pandas_datareader.data.DataReader', 'web.DataReader', (['self.sys'], {'data_source': '"""yahoo"""'}), "(self.sys, data_source='yahoo')\n", (489, 520), True, 'from pandas_datareader import data as web\n'), ((1198, 1229), 'numpy.sign', 'np.sign', (['self.matrix[self.lags]'], {}), '(self.matrix[self.lags])\n', (1205, 1229), True, 'import numpy as np\n')]
|
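The backtest class above builds a matrix of lagged returns and fits a logistic regression on the sign of the next return. The self-contained sketch below reproduces that idea on synthetic data; every name and number in it is illustrative and not taken from the row above.

import numpy as np
from sklearn import linear_model

rng = np.random.default_rng(0)
returns = rng.normal(scale=0.01, size=500)   # synthetic daily log returns
lags = 5

# rows of X hold the previous `lags` returns, y is the sign of the next return
X = np.column_stack([returns[i:len(returns) - lags + i] for i in range(lags)])
y = np.sign(returns[lags:])

clf = linear_model.LogisticRegression(C=1e3).fit(X, y)
print("in-sample hit rate:", clf.score(X, y))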
import sys
import cv2
from matplotlib import pyplot as plt
from skimage.filters import sobel
import numpy as np
import math
from Etch import Etch
PRINT_ETCH = True
class Image:
def __init__(self):
self.points = []
self.image = cv2.imread("C:/Users/wnetz/Documents/etch-a-sketch/python/tests/protocol1_0/tri.png", 0)
self.imageShape = 0
self.etch = Etch()
self.sourceFile = open('C:/Users/wnetz/Documents/etch-a-sketch/python/tests/protocol1_0/test.txt', 'w')
self.sourceFile2 = open('C:/Users/wnetz/Documents/etch-a-sketch/python/tests/protocol1_0/test2.txt', 'w')
np.set_printoptions(threshold=sys.maxsize)
def processImage(self):
self.imageShape = self.image.shape
sig = .3
median = np.median(self.image)
lower = int(max(0,(1.0-sig)*median))
upper = int(min(255,(1.0+sig)*median))
self.image = cv2.Canny(self.image,lower,upper)
plt.imshow(self.image, cmap='gray')
plt.show()
def sort(self):
#loop x
for x in range(self.imageShape[0]):
#loop y
for y in range(self.imageShape[1]):
#if there is an edge pixle
if self.image[x][y] == 255:
point = (((x -self.imageShape[1] + 1) * -1) * 18000/self.imageShape[1], y * 12000/self.imageShape[0])
self.points.append(point)
#print ("("+str(point[0]) + "," + str(point[1])+")")
print("X",end='',file = self.sourceFile)
else:
print(" ",end='',file = self.sourceFile)
print("",file = self.sourceFile)
print(len(self.points))
def drawImage(self):
avg = 0
numpoints = 0
minpoint = [0,0]
length = len(self.points)
while len(self.points) > 1:
oldmin = minpoint
min = math.pow(math.pow(18000,2) + math.pow(12000,2),.5)
minpoint = []
lessmin = []
for point in self.points:
dist = math.pow(math.pow(point[0]-oldmin[0],2) + math.pow(point[1]-oldmin[1],2),.5)
if min < dist and dist < 100:
lessmin.append(point)
if dist < min:
min = dist
minpoint = point
#if min < 3:
#break
if len(minpoint) > 0:
print(str(min) + " (" + str(minpoint[0]) + "," + str(minpoint[1]) + ")", file = self.sourceFile2)
if min > 1:
avg = avg + min
numpoints = numpoints + 1
for point in lessmin:
self.points.remove(point)
if len(minpoint) > 0:
self.points.remove(minpoint)
self.etch.goto(minpoint[0],minpoint[1],PRINT_ETCH)
if len(self.points) % 1000 == 0:
print(len(self.points))
print(str(min) + " (" + str(minpoint) + ") ",len(self.points))
print("total " + str(avg) + " " + str(numpoints))
print("total " + str(avg/numpoints))
def end(self):
self.sourceFile.close()
self.sourceFile2.close()
self.etch.goto(0,0,PRINT_ETCH)
image = Image()
#print("enter image path")
#print(input())
image.processImage()
image.sort()
image.drawImage()
image.end()
|
[
"Etch.Etch",
"cv2.Canny",
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"math.pow",
"numpy.median",
"matplotlib.pyplot.imshow",
"cv2.imread"
] |
[((249, 346), 'cv2.imread', 'cv2.imread', (['"""C:/Users/wnetz/Documents/etch-a-sketch/python/tests/protocol1_0/tri.png"""', '(0)'], {}), "(\n 'C:/Users/wnetz/Documents/etch-a-sketch/python/tests/protocol1_0/tri.png',\n 0)\n", (259, 346), False, 'import cv2\n'), ((386, 392), 'Etch.Etch', 'Etch', ([], {}), '()\n', (390, 392), False, 'from Etch import Etch\n'), ((627, 669), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (646, 669), True, 'import numpy as np\n'), ((775, 796), 'numpy.median', 'np.median', (['self.image'], {}), '(self.image)\n', (784, 796), True, 'import numpy as np\n'), ((910, 945), 'cv2.Canny', 'cv2.Canny', (['self.image', 'lower', 'upper'], {}), '(self.image, lower, upper)\n', (919, 945), False, 'import cv2\n'), ((952, 987), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.image'], {'cmap': '"""gray"""'}), "(self.image, cmap='gray')\n", (962, 987), True, 'from matplotlib import pyplot as plt\n'), ((996, 1006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1004, 1006), True, 'from matplotlib import pyplot as plt\n'), ((1929, 1947), 'math.pow', 'math.pow', (['(18000)', '(2)'], {}), '(18000, 2)\n', (1937, 1947), False, 'import math\n'), ((1949, 1967), 'math.pow', 'math.pow', (['(12000)', '(2)'], {}), '(12000, 2)\n', (1957, 1967), False, 'import math\n'), ((2092, 2125), 'math.pow', 'math.pow', (['(point[0] - oldmin[0])', '(2)'], {}), '(point[0] - oldmin[0], 2)\n', (2100, 2125), False, 'import math\n'), ((2125, 2158), 'math.pow', 'math.pow', (['(point[1] - oldmin[1])', '(2)'], {}), '(point[1] - oldmin[1], 2)\n', (2133, 2158), False, 'import math\n')]
|
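The processImage step above derives the Canny thresholds from the image median with sigma = 0.3. The NumPy-only sketch below isolates that calculation; the constant test image is illustrative.

import numpy as np

def auto_canny_thresholds(img, sigma=0.3):
    # same median-based lower/upper bounds as in processImage above
    m = np.median(img)
    lower = int(max(0, (1.0 - sigma) * m))
    upper = int(min(255, (1.0 + sigma) * m))
    return lower, upper

print(auto_canny_thresholds(np.full((4, 4), 128, dtype=np.uint8)))  # (89, 166)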
import os
import sys
path = os.environ.get('TRAVIS_BUILD_DIR')
sys.path.insert(0, path+'/protlearn')
import numpy as np
from preprocessing import txt_to_df
from feature_engineering import aaindex1
def test_aaindex1():
"Test AAIndex1"
# load data
df = txt_to_df(path+'/tests/docs/test_seq.txt', 0)
# get aaindex1
aaind1 = aaindex1(df)
# test shape
assert aaind1.shape == (4, 553)
# test some indices
ANDN920101 = np.array([4.3, 4.40555, 4.48714, 4.46])
QIAN880126 = np.array([.01166, -.17111, .05857, -.04333])
KARS160122 = np.array([2.014, 5.48522, 2.789, 1.751])
np.testing.assert_equal(np.round(aaind1['ANDN920101'], 3),\
np.round(ANDN920101, 3))
np.testing.assert_equal(np.round(aaind1['QIAN880126'], 3),\
np.round(QIAN880126, 3))
np.testing.assert_equal(np.round(aaind1['KARS160122'], 3),\
np.round(KARS160122, 3))
# test standardization (zscore)
aaind1_z = aaindex1(df, 'zscore')
# test mean = 0
    for i in range(aaind1_z.shape[1]):
        assert abs(round(aaind1_z.iloc[:,i].mean())) == 0
# test std --> 1
    for i in range(aaind1_z.shape[1]):
assert round(aaind1_z.iloc[:,i].std(), 1) ==\
round(aaind1_z.iloc[:,0].std(), 1)
# test standardization (minmax)
aaind1_mm = aaindex1(df, 'minmax')
# test minimum and maximum
    for i in range(aaind1_mm.shape[1]):
assert round(aaind1_mm.iloc[:,i].min()) == 0
assert round(aaind1_mm.iloc[:,i].max()) == 1
|
[
"preprocessing.txt_to_df",
"sys.path.insert",
"os.environ.get",
"numpy.array",
"numpy.round",
"feature_engineering.aaindex1"
] |
[((28, 62), 'os.environ.get', 'os.environ.get', (['"""TRAVIS_BUILD_DIR"""'], {}), "('TRAVIS_BUILD_DIR')\n", (42, 62), False, 'import os\n'), ((63, 102), 'sys.path.insert', 'sys.path.insert', (['(0)', "(path + '/protlearn')"], {}), "(0, path + '/protlearn')\n", (78, 102), False, 'import sys\n'), ((271, 318), 'preprocessing.txt_to_df', 'txt_to_df', (["(path + '/tests/docs/test_seq.txt')", '(0)'], {}), "(path + '/tests/docs/test_seq.txt', 0)\n", (280, 318), False, 'from preprocessing import txt_to_df\n'), ((354, 366), 'feature_engineering.aaindex1', 'aaindex1', (['df'], {}), '(df)\n', (362, 366), False, 'from feature_engineering import aaindex1\n'), ((471, 510), 'numpy.array', 'np.array', (['[4.3, 4.40555, 4.48714, 4.46]'], {}), '([4.3, 4.40555, 4.48714, 4.46])\n', (479, 510), True, 'import numpy as np\n'), ((528, 576), 'numpy.array', 'np.array', (['[0.01166, -0.17111, 0.05857, -0.04333]'], {}), '([0.01166, -0.17111, 0.05857, -0.04333])\n', (536, 576), True, 'import numpy as np\n'), ((590, 630), 'numpy.array', 'np.array', (['[2.014, 5.48522, 2.789, 1.751]'], {}), '([2.014, 5.48522, 2.789, 1.751])\n', (598, 630), True, 'import numpy as np\n'), ((1038, 1060), 'feature_engineering.aaindex1', 'aaindex1', (['df', '"""zscore"""'], {}), "(df, 'zscore')\n", (1046, 1060), False, 'from feature_engineering import aaindex1\n'), ((1403, 1425), 'feature_engineering.aaindex1', 'aaindex1', (['df', '"""minmax"""'], {}), "(df, 'minmax')\n", (1411, 1425), False, 'from feature_engineering import aaindex1\n'), ((659, 692), 'numpy.round', 'np.round', (["aaind1['ANDN920101']", '(3)'], {}), "(aaind1['ANDN920101'], 3)\n", (667, 692), True, 'import numpy as np\n'), ((723, 746), 'numpy.round', 'np.round', (['ANDN920101', '(3)'], {}), '(ANDN920101, 3)\n', (731, 746), True, 'import numpy as np\n'), ((776, 809), 'numpy.round', 'np.round', (["aaind1['QIAN880126']", '(3)'], {}), "(aaind1['QIAN880126'], 3)\n", (784, 809), True, 'import numpy as np\n'), ((840, 863), 'numpy.round', 'np.round', (['QIAN880126', '(3)'], {}), '(QIAN880126, 3)\n', (848, 863), True, 'import numpy as np\n'), ((893, 926), 'numpy.round', 'np.round', (["aaind1['KARS160122']", '(3)'], {}), "(aaind1['KARS160122'], 3)\n", (901, 926), True, 'import numpy as np\n'), ((957, 980), 'numpy.round', 'np.round', (['KARS160122', '(3)'], {}), '(KARS160122, 3)\n', (965, 980), True, 'import numpy as np\n')]
|
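The zscore assertions above check that every standardized column has mean near 0 and a common spread. A tiny NumPy illustration of that property on a made-up matrix:

import numpy as np

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0], [4.0, 40.0]])
Z = (X - X.mean(axis=0)) / X.std(axis=0)   # per-column zscore
print(np.allclose(Z.mean(axis=0), 0))  # True
print(np.allclose(Z.std(axis=0), 1))   # True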
from __future__ import print_function, absolute_import
# Script to predict (or test) the model using protein (kinase) sequence and SMILE pattern of a compound.
# Usage: python2 get_kinase_pki.py protein_sequence "SMILE_Pattern"
import numpy as np
from pydpi.pypro import PyPro
import pandas as pd
import json
import multiprocessing as mp
import os
import sys
import numpy as np
from sklearn.externals import joblib
from utility import FeatureGenerator
#from keras.models import load_model
import pickle
class pKiPred(object):
def __init__(self):
self.model = joblib.load(os.path.join(os.path.dirname(__file__), 'Random_forest_gridsearch_py27.mdl'))
def get_smi_features(self, smiles):
try:
feat_gen = FeatureGenerator(smiles)
features = feat_gen.toTPATF()
return features
except:
return None
def get_features(self, seq, smi):
p = PyPro()
try:
p.ReadProteinSequence(seq)
features = list(p.GetALL().values())
smi_features = self.get_smi_features(smi)
smi_features2 = list(np.array([f for f in smi_features], dtype=np.float32))
total_features = np.array(features+smi_features2)[np.newaxis, :]
# total_features = np.array(smi_features2+features)[np.newaxis, :] # does not work...!
return total_features
except Exception as e:
print(str(e))
return None
def predict(self, seq, smi):
protein_feature = self.get_features(seq, smi)
return self.model.predict(protein_feature)
def main():
seq = "MGCGCSSHPEDDWMENIDVCENCHYPIVPLDGKGTLLIRNGSEVRDPLVTYEGSNPPASPLQDNLVIALHSYEPSHDGDLGFEKGEQLRILEQSGEWWKAQSLTTGQEGFIPFNFVAKANSLEPEPWFFK<KEY>"
smile = "CC(C)Oc1ccc(cc1Cl)c2noc(n2)c3ccc(N[C@H]4CC[C@H](C4)C(=O)O)cc3"
pkipred = pKiPred()
if len(sys.argv) == 1:
print(pkipred.predict(seq, smile))
else:
print(pkipred.predict(sys.argv[1], sys.argv[2]))
if __name__=="__main__":
main()
|
[
"pydpi.pypro.PyPro",
"os.path.dirname",
"utility.FeatureGenerator",
"numpy.array"
] |
[((933, 940), 'pydpi.pypro.PyPro', 'PyPro', ([], {}), '()\n', (938, 940), False, 'from pydpi.pypro import PyPro\n'), ((747, 771), 'utility.FeatureGenerator', 'FeatureGenerator', (['smiles'], {}), '(smiles)\n', (763, 771), False, 'from utility import FeatureGenerator\n'), ((605, 630), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (620, 630), False, 'import os\n'), ((1128, 1181), 'numpy.array', 'np.array', (['[f for f in smi_features]'], {'dtype': 'np.float32'}), '([f for f in smi_features], dtype=np.float32)\n', (1136, 1181), True, 'import numpy as np\n'), ((1217, 1251), 'numpy.array', 'np.array', (['(features + smi_features2)'], {}), '(features + smi_features2)\n', (1225, 1251), True, 'import numpy as np\n')]
|
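get_features above concatenates the protein descriptors from PyPro with the compound fingerprint and adds a leading batch axis. The shape-only sketch below uses random vectors in place of the real descriptors; the lengths 100 and 50 are arbitrary.

import numpy as np

protein_feat = list(np.random.rand(100))   # stand-in for the PyPro descriptors
compound_feat = list(np.random.rand(50))   # stand-in for the TPATF fingerprint
total = np.array(protein_feat + compound_feat)[np.newaxis, :]
print(total.shape)  # (1, 150)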
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
import cv2
from keras.backend import tensorflow_backend as K
import os
import glob
import time
import keras
from matplotlib import pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K2
#IOU calc
iou_smooth=1.
#Input sizes required to locate the plate with the U-Net
img_width, img_height = 256, 256
char_list = ["0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","R","S","T","U","V","Y","Z","X","W"]
#Loss function needed for the U-Net; computes the loss from the overlapping area
def IOU_calc(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return 2*(intersection + iou_smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + iou_smooth)
def IOU_calc_loss(y_true, y_pred):
return 1-IOU_calc(y_true, y_pred)
#For ordering the characters on the plate: sorts the characters by their widths
def compareRectWidth(a,b):
return a < b
# Loads the U-Net model
model_unet = load_model('../src/gumruk_unetGU002.h5',custom_objects={'IOU_calc_loss': IOU_calc_loss, 'IOU_calc': IOU_calc})
#Loads the CNN model, used for character recognition
# Input sizes of the CNN model
img_rows, img_cols = 28, 28
batch_size = 128
num_classes = 35
epochs = 12
if K2.image_data_format() == 'channels_first':
input_shape = (1, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, 1)
model_cnn = Sequential()
model_cnn.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model_cnn.add(Conv2D(64, (3, 3), activation='relu'))
model_cnn.add(MaxPooling2D(pool_size=(2, 2)))
model_cnn.add(Dropout(0.25))
model_cnn.add(Flatten())
model_cnn.add(Dense(128, activation='relu'))
model_cnn.add(Dropout(0.5))
model_cnn.add(Dense(num_classes, activation='softmax'))
model_cnn.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model_cnn.load_weights('../src/mert_cnn.h5')
#Finds the plate with the U-Net and returns it; takes an image path as input
def getPlateImage(filepath):
image = cv2.imread(filepath)
plate = image
originalImage = image
    ##prepares the input size required for the model
image = cv2.resize(image, (256, 256)).astype("float32")
image = np.expand_dims(image, axis=0)
    #the prediction returns a binary image
pred = model_unet.predict(image)
pred = pred.reshape((256,256,1))
pred = pred.astype(np.float32)
pred = pred*255
pred = cv2.resize(pred, (originalImage.shape[1], originalImage.shape[0]))
pred=np.uint8(pred)
    #takes the largest white region in the image (the plate location) and crops it
contours, hierarchy = cv2.findContours(pred,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
largestArea = 0
for contour in contours:
tArea = cv2.contourArea(contour)
if tArea > largestArea:
largestArea = tArea
x,y,w,h = cv2.boundingRect(contour)
if largestArea > 0:
plate = originalImage[y:y+h,x:x+w]
else:
print("PLATE COULD NOT FOUND")
return plate
#takes the plate image
def getPlateString(plate):
grayPlate = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
roiList = []
wList = []
charList = []
retval, binary = cv2.threshold(grayPlate, 30.0, 255.0, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
contours,hierarchy = cv2.findContours(binary,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
idx =0
plateStr = []
for cnt in contours:
idx += 1
x,y,w,h = cv2.boundingRect(cnt)
roi=plate[y:y+h,x:x+w]
if w > 15 and h > 30 and w <100 and h< 100:
roiList.append(roi)
wList.append(x)
#cv2.imwrite("/home/utku/Desktop/rois/" + str(idx) +".jpg", roi)
#cv2.waitKey(100)
#predict roi, resize may needed
#roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
roi = np.asarray(roi)
roi = np.resize(roi, (28,28))
if K2.image_data_format() == 'channels_first':
roi = roi.reshape(roi.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
roi = roi.reshape(1, img_rows, img_cols, 1)
#roi = np.resize(roi, (28,28,1))
#roi = np.expand_dims(roi, axis=0)
roi = roi/255
pred = model_cnn.predict(roi)
#get index
print("pred: ", pred)
predd = pred[0]
char_idx = np.argmax(predd)
            #char_idx = np.where(predd == 1) ##index of the entry that equals 1
plate_char = char_list[char_idx];
#append result to plateStr, may map the predict to a char(BUT HOW)
plateStr.append(plate_char)
print("plate_char is: ", plate_char)
#break
#sorting from left to right
charList = [x for _,x in sorted(zip(wList,plateStr))]
return charList
#plate = getPlateImage("sampleplate.jpg")
#plateString = getPlateString(plate)
#if 'X' in plateString: plateString.remove('X')
#print("plateString: ", plateString)
|
[
"keras.models.load_model",
"keras.optimizers.Adadelta",
"numpy.resize",
"numpy.argmax",
"keras.backend.tensorflow_backend.flatten",
"cv2.contourArea",
"cv2.cvtColor",
"keras.layers.Flatten",
"cv2.boundingRect",
"keras.layers.MaxPooling2D",
"cv2.resize",
"numpy.uint8",
"keras.layers.Dropout",
"numpy.asarray",
"keras.layers.Conv2D",
"keras.backend.tensorflow_backend.sum",
"keras.backend.image_data_format",
"cv2.threshold",
"numpy.expand_dims",
"cv2.imread",
"keras.layers.Dense",
"keras.models.Sequential",
"cv2.findContours"
] |
[((1228, 1343), 'keras.models.load_model', 'load_model', (['"""../src/gumruk_unetGU002.h5"""'], {'custom_objects': "{'IOU_calc_loss': IOU_calc_loss, 'IOU_calc': IOU_calc}"}), "('../src/gumruk_unetGU002.h5', custom_objects={'IOU_calc_loss':\n IOU_calc_loss, 'IOU_calc': IOU_calc})\n", (1238, 1343), False, 'from keras.models import load_model\n'), ((1643, 1655), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1653, 1655), False, 'from keras.models import Sequential\n'), ((812, 829), 'keras.backend.tensorflow_backend.flatten', 'K.flatten', (['y_true'], {}), '(y_true)\n', (821, 829), True, 'from keras.backend import tensorflow_backend as K\n'), ((842, 859), 'keras.backend.tensorflow_backend.flatten', 'K.flatten', (['y_pred'], {}), '(y_pred)\n', (851, 859), True, 'from keras.backend import tensorflow_backend as K\n'), ((876, 902), 'keras.backend.tensorflow_backend.sum', 'K.sum', (['(y_true_f * y_pred_f)'], {}), '(y_true_f * y_pred_f)\n', (881, 902), True, 'from keras.backend import tensorflow_backend as K\n'), ((1496, 1518), 'keras.backend.image_data_format', 'K2.image_data_format', ([], {}), '()\n', (1516, 1518), True, 'from keras import backend as K2\n'), ((1670, 1744), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (1676, 1744), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1794, 1831), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (1800, 1831), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1847, 1877), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1859, 1877), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((1893, 1906), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1900, 1906), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1922, 1931), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1929, 1931), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1947, 1976), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1952, 1976), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((1992, 2004), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1999, 2004), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2020, 2060), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (2025, 2060), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2371, 2391), 'cv2.imread', 'cv2.imread', (['filepath'], {}), '(filepath)\n', (2381, 2391), False, 'import cv2\n'), ((2547, 2576), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2561, 2576), True, 'import numpy as np\n'), ((2740, 2806), 'cv2.resize', 'cv2.resize', (['pred', '(originalImage.shape[1], originalImage.shape[0])'], {}), '(pred, (originalImage.shape[1], originalImage.shape[0]))\n', (2750, 2806), False, 'import cv2\n'), ((2813, 2827), 'numpy.uint8', 'np.uint8', (['pred'], {}), '(pred)\n', (2821, 2827), True, 'import numpy as np\n'), ((2915, 2977), 'cv2.findContours', 'cv2.findContours', (['pred', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(pred, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (2931, 2977), False, 'import cv2\n'), ((3320, 3359), 
'cv2.cvtColor', 'cv2.cvtColor', (['plate', 'cv2.COLOR_BGR2GRAY'], {}), '(plate, cv2.COLOR_BGR2GRAY)\n', (3332, 3359), False, 'import cv2\n'), ((3419, 3493), 'cv2.threshold', 'cv2.threshold', (['grayPlate', '(30.0)', '(255.0)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(grayPlate, 30.0, 255.0, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3432, 3493), False, 'import cv2\n'), ((3515, 3579), 'cv2.findContours', 'cv2.findContours', (['binary', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (3531, 3579), False, 'import cv2\n'), ((2149, 2176), 'keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (2174, 2176), False, 'import keras\n'), ((3030, 3054), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (3045, 3054), False, 'import cv2\n'), ((3648, 3669), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (3664, 3669), False, 'import cv2\n'), ((2489, 2518), 'cv2.resize', 'cv2.resize', (['image', '(256, 256)'], {}), '(image, (256, 256))\n', (2499, 2518), False, 'import cv2\n'), ((3117, 3142), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (3133, 3142), False, 'import cv2\n'), ((3966, 3981), 'numpy.asarray', 'np.asarray', (['roi'], {}), '(roi)\n', (3976, 3981), True, 'import numpy as np\n'), ((3991, 4015), 'numpy.resize', 'np.resize', (['roi', '(28, 28)'], {}), '(roi, (28, 28))\n', (4000, 4015), True, 'import numpy as np\n'), ((4428, 4444), 'numpy.argmax', 'np.argmax', (['predd'], {}), '(predd)\n', (4437, 4444), True, 'import numpy as np\n'), ((946, 961), 'keras.backend.tensorflow_backend.sum', 'K.sum', (['y_true_f'], {}), '(y_true_f)\n', (951, 961), True, 'from keras.backend import tensorflow_backend as K\n'), ((964, 979), 'keras.backend.tensorflow_backend.sum', 'K.sum', (['y_pred_f'], {}), '(y_pred_f)\n', (969, 979), True, 'from keras.backend import tensorflow_backend as K\n'), ((4022, 4044), 'keras.backend.image_data_format', 'K2.image_data_format', ([], {}), '()\n', (4042, 4044), True, 'from keras import backend as K2\n')]
|
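IOU_calc above is a smoothed Dice-style overlap score on flattened masks. The NumPy version below mirrors the same formula so it can be checked on toy arrays; the two 2x2 masks are illustrative.

import numpy as np

def dice_score(y_true, y_pred, smooth=1.0):
    # same formula as IOU_calc above, with NumPy arrays instead of Keras tensors
    y_true_f = y_true.ravel().astype(float)
    y_pred_f = y_pred.ravel().astype(float)
    intersection = np.sum(y_true_f * y_pred_f)
    return 2 * (intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)

a = np.array([[1, 1], [0, 0]])
b = np.array([[0, 0], [1, 1]])
print(dice_score(a, b))  # 2*(0+1)/(2+2+1) = 0.4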
import json
import os
import SimpleITK as sitk
import numpy
import pydicom
import numpy as np
from ltr.admin.environment import env_settings
from ltr.data.processing_utils import str_analyse
from ltr.dataset.base_dataset import BaseDataset
from pydoctor.evaluation import Study
from pydoctor.evaluation.data import StudyList
def _read_file(path):
with open(path, 'r') as f:
json_file = json.loads(f.read())
return json_file
class Lumbar3d(BaseDataset):
"""
The Lumbar dataset from official TianChi competition.
organized as follows.
-lumbar
-lumbar_testA50
-study...
-lumbar_train150
-study...
-lumbar_train51
-study...
lumbar_train150_annotation.json
lumbar_train51_annotation.json
"""
def __init__(self, root=None, split='train'):
"""
args:
            :param root: path to the lumbar dataset.
            :param split: split name, one of 'train', 'val', 'testA', 'testB'
"""
root = env_settings().lumbar_dir if root is None else root
super().__init__('lumbar', root)
# dataset split for competition.
if split == 'train':
self.studies_path = os.path.join(root, 'DatasetA','lumbar_train150')
self.anno_path = os.path.join(root, 'DatasetA','lumbar_train150_annotation.json')
self.anno_meta = self._load_anno(self.anno_path)
elif split == 'val':
self.studies_path = os.path.join(root, 'DatasetA','lumbar_train51')
self.anno_path = os.path.join(root, 'DatasetA','lumbar_train51_annotation.json')
self.anno_meta = self._load_anno(self.anno_path)
elif split == 'testA':
self.studies_path = os.path.join(root,'datasetA','lumbar_testA50')
elif split == 'testB':
self.studies_path = os.path.join(root, 'datasetB', 'lumbar_testB50')
else:
            raise ValueError('Unknown split name.')
# All folders inside the root.
self.study_list = self._get_study_list()
self.body_id = {'L1':0,'L2':1,'L3':2,'L4':3,'L5':4}
self.body_class = {'V1':0,'V2':1}
self.disc_id = {'T12-L1':0,'L1-L2':1,'L2-L3':2,'L3-L4':3,'L4-L5':4,'L5-S1':5}
self.disc_class = {'V1':0,'V2':1,'V3':2,'V4':3,'V5':4}
def _get_study_list(self):
return os.listdir(self.studies_path)
def get_name(self):
return 'lumbar'
def _get_study_path(self, std_id):
return os.path.join(self.studies_path, self.study_list[std_id])
def _get_key_image_info(self, folder,frame_num=3):
global key_image_path
reader = sitk.ImageSeriesReader()
file_path = os.path.join(folder, os.listdir(folder)[0])
study_uid = pydicom.read_file(file_path).get(0x0020000d).value
study_meta = self.anno_meta[str(study_uid)]
dicom_path_list = reader.GetGDCMSeriesFileNames(folder, study_meta['seriesUid'])
dicom_slice = [[pydicom.read_file(file), file] for file in dicom_path_list]
dicom_slice.sort(key=lambda x: float(x[0].ImagePositionPatient[0]))
data_path = dicom_slice[len(dicom_path_list) // 2][1]
        middle_index = study_meta['point'][0]['zIndex']
        frame_list = []
        for dcm_path in range(middle_index - frame_num // 2, middle_index + frame_num // 2 + 1, 1):
frame_list.append(np.squeeze(sitk.GetArrayFromImage(sitk.ReadImage(dicom_slice[dcm_path][1]))))
key_image = numpy.stack(frame_list,axis=0)
key_image = np.uint8((key_image - key_image.min()) / (key_image.max() - key_image.min()) * 255.0)
return key_image, study_meta['point']
def _load_anno(self, anno_path):
anno_list = _read_file(anno_path)
anno_dict = {}
for anno in anno_list:
tmp_dict = {anno['studyUid']: {'seriesUid': anno['data'][0]['seriesUid'],
'instanceUid': anno['data'][0]['instanceUid'],
'point': anno['data'][0]['annotation'][0]['data']['point']}}
anno_dict.update(tmp_dict)
return anno_dict
def _deal_point_dict(self,point_list):
body_dict,disc_dict = {},{}
for ann in point_list:
coord = ann.get('coord',None)
identification = ann['tag'].get('identification',None)
if identification in self.body_id:
class_num = self.body_class[str_analyse(ann['tag'].get('vertebra','v1').upper())]
body_dict.update({identification:{'coord':coord,'class_num':class_num}})
elif identification in self.disc_id:
class_num = self.disc_class[str_analyse(ann['tag'].get('disc','v1').upper())]
disc_dict.update({identification:{'coord':coord,'class_num':class_num}})
return body_dict, disc_dict
def get_frames(self, std_id, frame_num=5,anno=None):
dicom_folder = self._get_study_path(std_id)
key_frame,point_list = self._get_key_image_info(dicom_folder)
body_dict, disc_dict = self._deal_point_dict(point_list)
return key_frame, body_dict, disc_dict
def get_study_list(self):
return StudyList([self._construct_study(s) for s in self.study_list])
def _construct_study(self,study_name):
study_folder_path = os.path.join(self.studies_path,study_name)
# series_ids = sitk.ImageSeriesReader.GetGDCMSeriesIDs(study_folder_path)
# for id in series_ids:
file_list = [os.path.join(study_folder_path,i) for i in os.listdir(study_folder_path)]
dicom_slice = [[pydicom.read_file(file),file]for file in file_list]
dicom_slice.sort(key=lambda x:float(x[0].ImagePositionPatient[0]))
data_path = dicom_slice[len(file_list)//2][1]
return Study(name=study_name,dataset='lumbar_test',frame_path=data_path,index=len(file_list)//2)
|
[
"numpy.stack",
"pydicom.read_file",
"SimpleITK.ReadImage",
"ltr.admin.environment.env_settings",
"os.path.join",
"os.listdir",
"SimpleITK.ImageSeriesReader"
] |
[((2384, 2413), 'os.listdir', 'os.listdir', (['self.studies_path'], {}), '(self.studies_path)\n', (2394, 2413), False, 'import os\n'), ((2518, 2574), 'os.path.join', 'os.path.join', (['self.studies_path', 'self.study_list[std_id]'], {}), '(self.studies_path, self.study_list[std_id])\n', (2530, 2574), False, 'import os\n'), ((2678, 2702), 'SimpleITK.ImageSeriesReader', 'sitk.ImageSeriesReader', ([], {}), '()\n', (2700, 2702), True, 'import SimpleITK as sitk\n'), ((3510, 3541), 'numpy.stack', 'numpy.stack', (['frame_list'], {'axis': '(0)'}), '(frame_list, axis=0)\n', (3521, 3541), False, 'import numpy\n'), ((5368, 5411), 'os.path.join', 'os.path.join', (['self.studies_path', 'study_name'], {}), '(self.studies_path, study_name)\n', (5380, 5411), False, 'import os\n'), ((1243, 1292), 'os.path.join', 'os.path.join', (['root', '"""DatasetA"""', '"""lumbar_train150"""'], {}), "(root, 'DatasetA', 'lumbar_train150')\n", (1255, 1292), False, 'import os\n'), ((1321, 1386), 'os.path.join', 'os.path.join', (['root', '"""DatasetA"""', '"""lumbar_train150_annotation.json"""'], {}), "(root, 'DatasetA', 'lumbar_train150_annotation.json')\n", (1333, 1386), False, 'import os\n'), ((5546, 5580), 'os.path.join', 'os.path.join', (['study_folder_path', 'i'], {}), '(study_folder_path, i)\n', (5558, 5580), False, 'import os\n'), ((1047, 1061), 'ltr.admin.environment.env_settings', 'env_settings', ([], {}), '()\n', (1059, 1061), False, 'from ltr.admin.environment import env_settings\n'), ((1508, 1556), 'os.path.join', 'os.path.join', (['root', '"""DatasetA"""', '"""lumbar_train51"""'], {}), "(root, 'DatasetA', 'lumbar_train51')\n", (1520, 1556), False, 'import os\n'), ((1585, 1649), 'os.path.join', 'os.path.join', (['root', '"""DatasetA"""', '"""lumbar_train51_annotation.json"""'], {}), "(root, 'DatasetA', 'lumbar_train51_annotation.json')\n", (1597, 1649), False, 'import os\n'), ((2744, 2762), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (2754, 2762), False, 'import os\n'), ((3003, 3026), 'pydicom.read_file', 'pydicom.read_file', (['file'], {}), '(file)\n', (3020, 3026), False, 'import pydicom\n'), ((5589, 5618), 'os.listdir', 'os.listdir', (['study_folder_path'], {}), '(study_folder_path)\n', (5599, 5618), False, 'import os\n'), ((5644, 5667), 'pydicom.read_file', 'pydicom.read_file', (['file'], {}), '(file)\n', (5661, 5667), False, 'import pydicom\n'), ((1773, 1821), 'os.path.join', 'os.path.join', (['root', '"""datasetA"""', '"""lumbar_testA50"""'], {}), "(root, 'datasetA', 'lumbar_testA50')\n", (1785, 1821), False, 'import os\n'), ((2787, 2815), 'pydicom.read_file', 'pydicom.read_file', (['file_path'], {}), '(file_path)\n', (2804, 2815), False, 'import pydicom\n'), ((1883, 1931), 'os.path.join', 'os.path.join', (['root', '"""datasetB"""', '"""lumbar_testB50"""'], {}), "(root, 'datasetB', 'lumbar_testB50')\n", (1895, 1931), False, 'import os\n'), ((3446, 3486), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['dicom_slice[dcm_path][1]'], {}), '(dicom_slice[dcm_path][1])\n', (3460, 3486), True, 'import SimpleITK as sitk\n')]
|
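_get_key_image_info above rescales the stacked DICOM frames to the 0-255 range before casting to uint8. The stand-alone sketch below applies the same min-max rescaling to a random volume; the shape is arbitrary.

import numpy as np

vol = np.random.default_rng(0).normal(size=(3, 64, 64))   # stand-in for the stacked frames
scaled = np.uint8((vol - vol.min()) / (vol.max() - vol.min()) * 255.0)
print(scaled.dtype, scaled.min(), scaled.max())  # uint8 0 255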
__doc__ = """
Various data utilities.
"""
####################################################################
# Packages
####################################################################
import os
import h5py
import numpy as np
import pandas as pd
####################################################################
# Globals/Constants
####################################################################
PROJECT_DIR = os.path.dirname(
os.path.dirname(
os.path.realpath(__file__)))
DATA_DIR = os.path.join(PROJECT_DIR, 'data')
TRAIN_DATA_FILE = os.path.join(DATA_DIR, 'train.h5')
####################################################################
# Functions
####################################################################
def get_data(path=None):
if path:
data_set = DataSet(path)
else:
data_set = DataSet(TRAIN_DATA_FILE)
return data_set
####################################################################
# Classes
####################################################################
class DataSet(object):
"""class for dataset processing"""
def __init__(self, path=TRAIN_DATA_FILE):
self.path = path
self.data_dict = self._get_data_dict()
self.df = self._get_df()
def _get_data_dict(self):
with h5py.File(self.path,'r') as hf:
train_hf = hf.get('train')
data_dict = { hf_key: np.array(train_hf.get(hf_key))
for hf_key in train_hf.keys()}
return data_dict
def _get_df(self):
with pd.HDFStore(self.path, "r") as train:
df = train.get("train")
return df
def __repr__(self):
sets = [ "{}: {}".format(key,data_set.shape)
for key, data_set in
self.data_dict.iteritems()]
return "; ".join(sets)
def keys(self):
return self.data_dict.keys()
def get(self, key):
return self.data_dict.get(key, None)
def to_df(self):
return self.df
def get_batch(self, slice_index, batch_size, columns=None, random=False):
if random:
samples = self.df.sample(n=batch_size)
else:
num_samples = self.df.shape[0]
if (slice_index+1)*batch_size >= num_samples:
print("Slice is out of range. Taking last batch_size slice")
sample_range = (num_samples - batch_size, num_samples)
else:
sample_range = (slice_index*batch_size, (slice_index+1)*batch_size)
samples = self.df[sample_range[0] : sample_range[1]]
samples_matrix = np.array(samples.as_matrix(columns=columns)) if columns else np.array(samples.as_matrix())
return samples_matrix
def get_numpy_data(self):
df = self.df
means = []
stds = []
# Assuming column order remains consistent throughout the class
for col in df.columns:
if col not in ['y', 'timestamp', 'index', 'id']:
data = df[col].dropna().as_matrix()
means.append(np.mean(data))
stds.append(np.std(data))
col_means = np.array(means)
col_stds = np.array(stds)
# Ensure values are sorted by time
df = df.sort_values(by=['id', 'timestamp'], ascending=True)
max_seq_len_raw = 1820
# Simply mean-fill missing values for now
df = df.fillna(df.mean())
ids = np.unique(df['id'].as_matrix())
examples = []
targets = []
weights = []
for id in ids:
slice = df[df.id == id]
num_timesteps = slice.shape[0]
#y = slice['y'].as_matrix()
# Pad df to max seq len
padded = slice.reset_index().reindex(range(max_seq_len_raw),
fill_value=0)
target = padded['y'].as_matrix()
padded.drop('y', axis=1, inplace=True)
padded.drop('timestamp', axis=1, inplace=True)
padded.drop('index', axis=1, inplace=True)
padded.drop('id', axis=1, inplace=True)
example = padded.as_matrix()
examples.append(example)
targets.append(target)
weight = [1]*num_timesteps + [0]*(max_seq_len_raw - num_timesteps)
weights.append(weight)
examples = np.array(examples)
targets = np.array(targets)
weights = np.array(weights)
# Normalize the data
examples = (examples - col_means)/col_stds
# TODO: Supply these outside the function later: col_means, col_stds
return examples, targets, weights
def split_valid(self, examples, targets, weights, valid_split_ratio=0.5):
"""
Args:
valid_split_ratio: float range 0-1.; percentage of data reserved
for validation. Note that two validation sets are reserved: unique
ids are reserved entirely for validation, and, latter timesteps for
sequences used in training are also used in validation.
"""
num_ids = examples.shape[0]
valid_num = int(round(num_ids*valid_split_ratio))
examples_train_pre = examples[:-valid_num]
targets_train_pre = targets[:-valid_num]
weights_train_pre = weights[:-valid_num]
examples_valid = examples[-valid_num:]
targets_valid = targets[-valid_num:]
weights_valid = weights[-valid_num:]
examples_train = []
targets_train = []
weights_train = []
examples_train_valid = []
targets_train_valid = []
weights_train_valid = []
valid_len = 300 # Hardcoded for now
for arr1, arr2, arr3 in zip(examples_train_pre, targets_train_pre,
weights_train_pre):
examples_train.append(arr1[:-valid_len])
targets_train.append(arr2[:-valid_len])
weights_train.append(arr3[:-valid_len])
examples_train_valid.append(arr1[-valid_len:])
targets_train_valid.append(arr2[-valid_len:])
weights_train_valid.append(arr3[-valid_len:])
trainset = (np.array(examples_train), np.array(targets_train),
np.array(weights_train))
train_validset = (np.array(examples_train_valid),
np.array(targets_train_valid),
np.array(weights_train_valid))
validset = (examples_valid, targets_valid, weights_valid)
return trainset, train_validset, validset
def get_numpy_batch(self, dataset, batch_size, seq_len):
examples = []
targets = []
weights = []
#for _ in range(batch_size):
while len(targets) < batch_size:
# Sample a random id
idx = np.random.choice(range(dataset[0].shape[0]))
# Take random slice
max_seq_len = dataset[0][idx].shape[0]
assert max_seq_len >= seq_len
slice = np.random.choice(range(max_seq_len - seq_len))
# Let's just go with full length for now
w = dataset[2][idx][slice:slice+seq_len]
if np.sum(w) != len(w):
continue
examples.append(dataset[0][idx][slice:slice+seq_len])
targets.append(dataset[1][idx][slice:slice+seq_len])
weights.append(w)
return np.array(examples), np.array(targets), np.array(weights)
|
[
"h5py.File",
"pandas.HDFStore",
"numpy.sum",
"numpy.std",
"os.path.realpath",
"numpy.mean",
"numpy.array",
"os.path.join"
] |
[((545, 578), 'os.path.join', 'os.path.join', (['PROJECT_DIR', '"""data"""'], {}), "(PROJECT_DIR, 'data')\n", (557, 578), False, 'import os\n'), ((597, 631), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""train.h5"""'], {}), "(DATA_DIR, 'train.h5')\n", (609, 631), False, 'import os\n'), ((505, 531), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (521, 531), False, 'import os\n'), ((3229, 3244), 'numpy.array', 'np.array', (['means'], {}), '(means)\n', (3237, 3244), True, 'import numpy as np\n'), ((3264, 3278), 'numpy.array', 'np.array', (['stds'], {}), '(stds)\n', (3272, 3278), True, 'import numpy as np\n'), ((4506, 4524), 'numpy.array', 'np.array', (['examples'], {}), '(examples)\n', (4514, 4524), True, 'import numpy as np\n'), ((4543, 4560), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (4551, 4560), True, 'import numpy as np\n'), ((4579, 4596), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (4587, 4596), True, 'import numpy as np\n'), ((1335, 1360), 'h5py.File', 'h5py.File', (['self.path', '"""r"""'], {}), "(self.path, 'r')\n", (1344, 1360), False, 'import h5py\n'), ((1596, 1623), 'pandas.HDFStore', 'pd.HDFStore', (['self.path', '"""r"""'], {}), "(self.path, 'r')\n", (1607, 1623), True, 'import pandas as pd\n'), ((6397, 6421), 'numpy.array', 'np.array', (['examples_train'], {}), '(examples_train)\n', (6405, 6421), True, 'import numpy as np\n'), ((6423, 6446), 'numpy.array', 'np.array', (['targets_train'], {}), '(targets_train)\n', (6431, 6446), True, 'import numpy as np\n'), ((6500, 6523), 'numpy.array', 'np.array', (['weights_train'], {}), '(weights_train)\n', (6508, 6523), True, 'import numpy as np\n'), ((6551, 6581), 'numpy.array', 'np.array', (['examples_train_valid'], {}), '(examples_train_valid)\n', (6559, 6581), True, 'import numpy as np\n'), ((6631, 6660), 'numpy.array', 'np.array', (['targets_train_valid'], {}), '(targets_train_valid)\n', (6639, 6660), True, 'import numpy as np\n'), ((6710, 6739), 'numpy.array', 'np.array', (['weights_train_valid'], {}), '(weights_train_valid)\n', (6718, 6739), True, 'import numpy as np\n'), ((7756, 7774), 'numpy.array', 'np.array', (['examples'], {}), '(examples)\n', (7764, 7774), True, 'import numpy as np\n'), ((7776, 7793), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (7784, 7793), True, 'import numpy as np\n'), ((7795, 7812), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (7803, 7812), True, 'import numpy as np\n'), ((7520, 7529), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (7526, 7529), True, 'import numpy as np\n'), ((3147, 3160), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (3154, 3160), True, 'import numpy as np\n'), ((3190, 3202), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (3196, 3202), True, 'import numpy as np\n')]
|
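get_numpy_batch above samples a random id and a random fixed-length window, discarding windows whose padding weights are not all 1. The toy version below shows that rejection test on synthetic arrays; all shapes are made up.

import numpy as np

rng = np.random.default_rng(0)
examples = rng.normal(size=(8, 100, 4))   # (ids, timesteps, features)
weights = np.ones((8, 100))
weights[:, 80:] = 0                       # pretend the last 20 steps are padding

seq_len = 32
idx = int(rng.integers(examples.shape[0]))
start = int(rng.integers(examples.shape[1] - seq_len))
window_ok = weights[idx, start:start + seq_len].sum() == seq_len   # reject windows that overlap padding
print(idx, start, window_ok)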
from click.testing import CliRunner
from unittest import mock
import numpy as np
import os
import pandas as pd
import shutil
import tempfile
import textwrap
from mlflow import experiments
from mlflow.runs import list_run
import mlflow
def test_list_run():
with mlflow.start_run(run_name="apple"):
pass
result = CliRunner().invoke(list_run, ["--experiment-id", "0"])
assert "apple" in result.output
def test_list_run_experiment_id_required():
result = CliRunner().invoke(list_run, [])
assert "Missing option '--experiment-id'" in result.output
def test_csv_generation():
with mock.patch("mlflow.experiments.fluent.search_runs") as mock_search_runs:
mock_search_runs.return_value = pd.DataFrame(
{
"run_id": np.array(["all_set", "with_none", "with_nan"]),
"experiment_id": np.array([1, 1, 1]),
"param_optimizer": np.array(["Adam", None, "Adam"]),
"avg_loss": np.array([42.0, None, np.nan], dtype=np.float32),
},
columns=["run_id", "experiment_id", "param_optimizer", "avg_loss"],
)
expected_csv = textwrap.dedent(
"""\
run_id,experiment_id,param_optimizer,avg_loss
all_set,1,Adam,42.0
with_none,1,,
with_nan,1,Adam,
"""
)
tempdir = tempfile.mkdtemp()
try:
result_filename = os.path.join(tempdir, "result.csv")
CliRunner().invoke(
experiments.generate_csv_with_runs,
["--experiment-id", "1", "--filename", result_filename],
)
with open(result_filename, "r") as fd:
assert expected_csv == fd.read()
finally:
shutil.rmtree(tempdir)
|
[
"textwrap.dedent",
"mlflow.start_run",
"unittest.mock.patch",
"tempfile.mkdtemp",
"numpy.array",
"shutil.rmtree",
"click.testing.CliRunner",
"os.path.join"
] |
[((270, 304), 'mlflow.start_run', 'mlflow.start_run', ([], {'run_name': '"""apple"""'}), "(run_name='apple')\n", (286, 304), False, 'import mlflow\n'), ((616, 667), 'unittest.mock.patch', 'mock.patch', (['"""mlflow.experiments.fluent.search_runs"""'], {}), "('mlflow.experiments.fluent.search_runs')\n", (626, 667), False, 'from unittest import mock\n'), ((1160, 1330), 'textwrap.dedent', 'textwrap.dedent', (['""" run_id,experiment_id,param_optimizer,avg_loss\n all_set,1,Adam,42.0\n with_none,1,,\n with_nan,1,Adam,\n """'], {}), '(\n """ run_id,experiment_id,param_optimizer,avg_loss\n all_set,1,Adam,42.0\n with_none,1,,\n with_nan,1,Adam,\n """\n )\n', (1175, 1330), False, 'import textwrap\n'), ((1363, 1381), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1379, 1381), False, 'import tempfile\n'), ((332, 343), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (341, 343), False, 'from click.testing import CliRunner\n'), ((482, 493), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (491, 493), False, 'from click.testing import CliRunner\n'), ((1425, 1460), 'os.path.join', 'os.path.join', (['tempdir', '"""result.csv"""'], {}), "(tempdir, 'result.csv')\n", (1437, 1460), False, 'import os\n'), ((1761, 1783), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (1774, 1783), False, 'import shutil\n'), ((783, 829), 'numpy.array', 'np.array', (["['all_set', 'with_none', 'with_nan']"], {}), "(['all_set', 'with_none', 'with_nan'])\n", (791, 829), True, 'import numpy as np\n'), ((864, 883), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (872, 883), True, 'import numpy as np\n'), ((920, 952), 'numpy.array', 'np.array', (["['Adam', None, 'Adam']"], {}), "(['Adam', None, 'Adam'])\n", (928, 952), True, 'import numpy as np\n'), ((982, 1030), 'numpy.array', 'np.array', (['[42.0, None, np.nan]'], {'dtype': 'np.float32'}), '([42.0, None, np.nan], dtype=np.float32)\n', (990, 1030), True, 'import numpy as np\n'), ((1473, 1484), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (1482, 1484), False, 'from click.testing import CliRunner\n')]
|
"""
This example implements the model from the paper
> [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843)<br>
> <NAME>, <NAME>, <NAME>
using the PROTEINS dataset.
The configuration at the top of the file is the best one identified in the
paper, and should work well for many different datasets without changes.
Note: the results reported in the paper are averaged over 3 random repetitions
with an 80/20 split.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.optimizers import Adam
from spektral.data import DisjointLoader
from spektral.datasets import TUDataset
from spektral.models import GeneralGNN
physical_devices = tf.config.list_physical_devices("GPU")
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
################################################################################
# Config
################################################################################
batch_size = 32
learning_rate = 0.01
epochs = 400
################################################################################
# Load data
################################################################################
data = TUDataset("PROTEINS")
# Train/test split
np.random.shuffle(data)
split = int(0.8 * len(data))
data_tr, data_te = data[:split], data[split:]
# Data loaders
loader_tr = DisjointLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(data_te, batch_size=batch_size)
################################################################################
# Build model
################################################################################
model = GeneralGNN(data.n_labels, activation="softmax")
optimizer = Adam(learning_rate)
loss_fn = CategoricalCrossentropy()
################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
loss = loss_fn(target, predictions) + sum(model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
acc = tf.reduce_mean(categorical_accuracy(target, predictions))
return loss, acc
def evaluate(loader):
output = []
step = 0
while step < loader.steps_per_epoch:
step += 1
inputs, target = loader.__next__()
pred = model(inputs, training=False)
outs = (
loss_fn(target, pred),
tf.reduce_mean(categorical_accuracy(target, pred)),
len(target), # Keep track of batch size
)
output.append(outs)
if step == loader.steps_per_epoch:
output = np.array(output)
return np.average(output[:, :-1], 0, weights=output[:, -1])
epoch = step = 0
results = []
for batch in loader_tr:
step += 1
loss, acc = train_step(*batch)
results.append((loss, acc))
if step == loader_tr.steps_per_epoch:
step = 0
epoch += 1
results_te = evaluate(loader_te)
print(
"Ep. {} - Loss: {:.3f} - Acc: {:.3f} - Test loss: {:.3f} - Test acc: {:.3f}".format(
epoch, *np.mean(results, 0), *results_te
)
)
results = []
################################################################################
# Evaluate model
################################################################################
results_te = evaluate(loader_te)
print("Final results - Loss: {:.3f} - Acc: {:.3f}".format(*results_te))
|
[
"numpy.average",
"tensorflow.config.list_physical_devices",
"spektral.data.DisjointLoader",
"spektral.datasets.TUDataset",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.losses.CategoricalCrossentropy",
"numpy.mean",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.categorical_accuracy",
"numpy.array",
"spektral.models.GeneralGNN",
"tensorflow.GradientTape",
"numpy.random.shuffle"
] |
[((798, 836), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (829, 836), True, 'import tensorflow as tf\n'), ((1343, 1364), 'spektral.datasets.TUDataset', 'TUDataset', (['"""PROTEINS"""'], {}), "('PROTEINS')\n", (1352, 1364), False, 'from spektral.datasets import TUDataset\n'), ((1385, 1408), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (1402, 1408), True, 'import numpy as np\n'), ((1512, 1573), 'spektral.data.DisjointLoader', 'DisjointLoader', (['data_tr'], {'batch_size': 'batch_size', 'epochs': 'epochs'}), '(data_tr, batch_size=batch_size, epochs=epochs)\n', (1526, 1573), False, 'from spektral.data import DisjointLoader\n'), ((1586, 1632), 'spektral.data.DisjointLoader', 'DisjointLoader', (['data_te'], {'batch_size': 'batch_size'}), '(data_te, batch_size=batch_size)\n', (1600, 1632), False, 'from spektral.data import DisjointLoader\n'), ((1818, 1865), 'spektral.models.GeneralGNN', 'GeneralGNN', (['data.n_labels'], {'activation': '"""softmax"""'}), "(data.n_labels, activation='softmax')\n", (1828, 1865), False, 'from spektral.models import GeneralGNN\n'), ((1878, 1897), 'tensorflow.keras.optimizers.Adam', 'Adam', (['learning_rate'], {}), '(learning_rate)\n', (1882, 1897), False, 'from tensorflow.keras.optimizers import Adam\n'), ((1908, 1933), 'tensorflow.keras.losses.CategoricalCrossentropy', 'CategoricalCrossentropy', ([], {}), '()\n', (1931, 1933), False, 'from tensorflow.keras.losses import CategoricalCrossentropy\n'), ((871, 938), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (911, 938), True, 'import tensorflow as tf\n'), ((2238, 2255), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2253, 2255), True, 'import tensorflow as tf\n'), ((2541, 2582), 'tensorflow.keras.metrics.categorical_accuracy', 'categorical_accuracy', (['target', 'predictions'], {}), '(target, predictions)\n', (2561, 2582), False, 'from tensorflow.keras.metrics import categorical_accuracy\n'), ((3076, 3092), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (3084, 3092), True, 'import numpy as np\n'), ((3112, 3164), 'numpy.average', 'np.average', (['output[:, :-1]', '(0)'], {'weights': 'output[:, -1]'}), '(output[:, :-1], 0, weights=output[:, -1])\n', (3122, 3164), True, 'import numpy as np\n'), ((2884, 2918), 'tensorflow.keras.metrics.categorical_accuracy', 'categorical_accuracy', (['target', 'pred'], {}), '(target, pred)\n', (2904, 2918), False, 'from tensorflow.keras.metrics import categorical_accuracy\n'), ((3557, 3576), 'numpy.mean', 'np.mean', (['results', '(0)'], {}), '(results, 0)\n', (3564, 3576), True, 'import numpy as np\n')]
|
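evaluate() above averages per-batch loss and accuracy weighted by batch size. The two-row example below checks the same np.average call with made-up numbers.

import numpy as np

output = np.array([
    [0.50, 0.80, 32.0],   # loss, accuracy, batch size
    [0.70, 0.60, 16.0],
])
print(np.average(output[:, :-1], 0, weights=output[:, -1]))  # [0.56666667 0.73333333]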
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 15:57:12 2018
@author: coelhorp
"""
import numpy as np
from sklearn.metrics import roc_auc_score
from rpa.helpers.transfer_learning.utils import transform_org2rct, transform_rct2str, transform_rct2rot
from rpa.helpers.transfer_learning.utils import transform_org2rct_p300, transform_rct2rot_p300
from rpa.helpers.transfer_learning.utils import get_sourcetarget_split_motorimagery, get_sourcetarget_split_p300
def RPA_recenter(source, target_train, target_test, paradigm='MI', weight_samples=False):
if paradigm == 'P300':
return transform_org2rct_p300(source, target_train, target_test, weight_samples)
else:
return transform_org2rct(source, target_train, target_test)
def RPA_stretch(source, target_train, target_test, paradigm='MI'):
return transform_rct2str(source, target_train, target_test)
def RPA_rotate(source, target_train, target_test, paradigm='MI', class_weights=None, distance='euc'):
if paradigm == 'P300':
return transform_rct2rot_p300(source, target_train, target_test, class_weights, distance)
else:
return transform_rct2rot(source, target_train, target_test, class_weights, distance)
def get_sourcetarget_split(source, target, ncovs_train, paradigm='MI'):
if (paradigm == 'P300'):
return get_sourcetarget_split_p300(source, target, ncovs_train)
else:
return get_sourcetarget_split_motorimagery(source, target, ncovs_train)
def get_score_notransfer(clf, target_train, target_test, paradigm='MI'):
covs_train = target_train['covs']
y_train = target_train['labels']
covs_test = target_test['covs']
y_test = target_test['labels']
clf.fit(covs_train, y_train)
y_pred = clf.predict(covs_test)
y_test = np.array([y_test == i for i in np.unique(y_test)]).T
y_pred = np.array([y_pred == i for i in np.unique(y_pred)]).T
return roc_auc_score(y_test, y_pred)
def get_score_transferlearning(clf, source, target_train, target_test, paradigm='MI'):
covs_source, y_source = source['covs'], source['labels']
covs_target_train, y_target_train = target_train['covs'], target_train['labels']
covs_target_test, y_target_test = target_test['covs'], target_test['labels']
covs_train = np.concatenate([covs_source, covs_target_train])
y_train = np.concatenate([y_source, y_target_train])
clf.fit(covs_train, y_train)
covs_test = covs_target_test
y_test = y_target_test
y_pred = clf.predict(covs_test)
y_test = np.array([y_test == i for i in np.unique(y_test)]).T
y_pred = np.array([y_pred == i for i in np.unique(y_pred)]).T
return roc_auc_score(y_test, y_pred)
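# Minimal usage sketch (a hedged example, not a definitive pipeline): it assumes
# pyriemann is installed and that source/target_train/target_test are dicts with
# 'covs' and 'labels' keys, exactly as the scoring helpers above index them.
# The random SPD matrices below are hypothetical stand-ins for real EEG data.
if __name__ == '__main__':
    from pyriemann.classification import MDM
    rng = np.random.RandomState(42)
    def _toy_domain(n_trials=40, n_channels=8):
        labels = np.array([1, 2] * (n_trials // 2))
        X = rng.randn(n_trials, n_channels, 5 * n_channels)
        # scale class-2 trials so the two classes are separable by MDM
        scale = np.where(labels == 2, 2.0, 1.0)[:, None, None]
        covs = np.array([x.dot(x.T) / x.shape[1] for x in X]) * scale
        return {'covs': covs, 'labels': labels}
    source = _toy_domain()
    target_train = _toy_domain(n_trials=10)
    target_test = _toy_domain(n_trials=20)
    print(get_score_transferlearning(MDM(), source, target_train, target_test))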
|
[
"rpa.helpers.transfer_learning.utils.get_sourcetarget_split_motorimagery",
"numpy.unique",
"rpa.helpers.transfer_learning.utils.transform_rct2rot",
"sklearn.metrics.roc_auc_score",
"rpa.helpers.transfer_learning.utils.get_sourcetarget_split_p300",
"rpa.helpers.transfer_learning.utils.transform_org2rct_p300",
"rpa.helpers.transfer_learning.utils.transform_rct2str",
"rpa.helpers.transfer_learning.utils.transform_rct2rot_p300",
"rpa.helpers.transfer_learning.utils.transform_org2rct",
"numpy.concatenate"
] |
[((849, 901), 'rpa.helpers.transfer_learning.utils.transform_rct2str', 'transform_rct2str', (['source', 'target_train', 'target_test'], {}), '(source, target_train, target_test)\n', (866, 901), False, 'from rpa.helpers.transfer_learning.utils import transform_org2rct, transform_rct2str, transform_rct2rot\n'), ((1934, 1963), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1947, 1963), False, 'from sklearn.metrics import roc_auc_score\n'), ((2298, 2346), 'numpy.concatenate', 'np.concatenate', (['[covs_source, covs_target_train]'], {}), '([covs_source, covs_target_train])\n', (2312, 2346), True, 'import numpy as np\n'), ((2361, 2403), 'numpy.concatenate', 'np.concatenate', (['[y_source, y_target_train]'], {}), '([y_source, y_target_train])\n', (2375, 2403), True, 'import numpy as np\n'), ((2680, 2709), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2693, 2709), False, 'from sklearn.metrics import roc_auc_score\n'), ((618, 691), 'rpa.helpers.transfer_learning.utils.transform_org2rct_p300', 'transform_org2rct_p300', (['source', 'target_train', 'target_test', 'weight_samples'], {}), '(source, target_train, target_test, weight_samples)\n', (640, 691), False, 'from rpa.helpers.transfer_learning.utils import transform_org2rct_p300, transform_rct2rot_p300\n'), ((717, 769), 'rpa.helpers.transfer_learning.utils.transform_org2rct', 'transform_org2rct', (['source', 'target_train', 'target_test'], {}), '(source, target_train, target_test)\n', (734, 769), False, 'from rpa.helpers.transfer_learning.utils import transform_org2rct, transform_rct2str, transform_rct2rot\n'), ((1047, 1133), 'rpa.helpers.transfer_learning.utils.transform_rct2rot_p300', 'transform_rct2rot_p300', (['source', 'target_train', 'target_test', 'class_weights', 'distance'], {}), '(source, target_train, target_test, class_weights,\n distance)\n', (1069, 1133), False, 'from rpa.helpers.transfer_learning.utils import transform_org2rct_p300, transform_rct2rot_p300\n'), ((1155, 1232), 'rpa.helpers.transfer_learning.utils.transform_rct2rot', 'transform_rct2rot', (['source', 'target_train', 'target_test', 'class_weights', 'distance'], {}), '(source, target_train, target_test, class_weights, distance)\n', (1172, 1232), False, 'from rpa.helpers.transfer_learning.utils import transform_org2rct, transform_rct2str, transform_rct2rot\n'), ((1350, 1406), 'rpa.helpers.transfer_learning.utils.get_sourcetarget_split_p300', 'get_sourcetarget_split_p300', (['source', 'target', 'ncovs_train'], {}), '(source, target, ncovs_train)\n', (1377, 1406), False, 'from rpa.helpers.transfer_learning.utils import get_sourcetarget_split_motorimagery, get_sourcetarget_split_p300\n'), ((1432, 1496), 'rpa.helpers.transfer_learning.utils.get_sourcetarget_split_motorimagery', 'get_sourcetarget_split_motorimagery', (['source', 'target', 'ncovs_train'], {}), '(source, target, ncovs_train)\n', (1467, 1496), False, 'from rpa.helpers.transfer_learning.utils import get_sourcetarget_split_motorimagery, get_sourcetarget_split_p300\n'), ((1834, 1851), 'numpy.unique', 'np.unique', (['y_test'], {}), '(y_test)\n', (1843, 1851), True, 'import numpy as np\n'), ((1900, 1917), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (1909, 1917), True, 'import numpy as np\n'), ((2580, 2597), 'numpy.unique', 'np.unique', (['y_test'], {}), '(y_test)\n', (2589, 2597), True, 'import numpy as np\n'), ((2646, 2663), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (2655, 
2663), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.stats import norm
from matplotlib import rc
__author__ = 'ernesto'
# if use latex or mathtext
rc('text', usetex=False)
rc('mathtext', fontset='cm')
# auxiliary function for plotting ticks of equal length on the x and y axes despite their different scales.
def convert_display_to_data_coordinates(transData, length=10):
# create a transform which will take from display to data coordinates
inv = transData.inverted()
# transform from display coordinates to data coordinates in x axis
data_coords = inv.transform([(0, 0), (length, 0)])
# get the length of the segment in data units
yticks_len = data_coords[1, 0] - data_coords[0, 0]
# transform from display coordinates to data coordinates in y axis
data_coords = inv.transform([(0, 0), (0, length)])
# get the length of the segment in data units
xticks_len = data_coords[1, 1] - data_coords[0, 1]
return xticks_len, yticks_len
#####################################
# PARAMETERS - May be modified      #
#####################################
# uniform distribution on (0, T)
T = 0.5
# range of x of interest
xmin = -0.1
xmax = 3.5 * T
ymin = 0
ymax = 1 / T
#####################
# END OF PARAMETERS #
#####################
# parameters of the densities of x_i: mean and variance
eta = T / 2
var = (T ** 2) / 12
# number of random variables x_i to sum
na = 2
nb = 3
# mean and variance of the sum
eta2 = na * eta
var2 = na * var
eta3 = nb * eta
var3 = nb * var
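# The sum of n i.i.d. uniform(0, T) variables has mean n*T/2 and variance
# n*T^2/12, so the sums of 2 and 3 variables are approximated below by
# N(T, T^2/6) and N(3T/2, T^2/4) respectively (central limit theorem).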
# theoretical pdf
x = np.linspace(xmin, xmax, 300)
f2 = norm.pdf(x, eta2, math.sqrt(var2))
f3 = norm.pdf(x, eta3, math.sqrt(var3))
# axis parameters
dx = 0.1
xmin_ax = xmin - dx
xmax_ax = xmax + 2 * dx
dy = 0.2
ymin_ax = ymin - dy
ymax_ax = ymax + 0.4
# figure parameters
# length of the ticks for all subplots (6 pixels)
display_length = 6 # in pixels
# x ticks labels margin
xtm = -0.23
ytm = -0.07
# font size
fontsize = 14
fig = plt.figure(0, figsize=(10, 3), frameon=False)
ax = plt.subplot2grid((1, 6), (0, 0), rowspan=1, colspan=2)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
# f(x)
plt.plot([0, T], [1/T, 1/T], 'k', linewidth=2)
plt.plot([T, T], [0, 1/T], 'k', linewidth=2)
plt.plot([0, 0], [0, 1/T], 'k', linewidth=2)
plt.plot([xmin, 0], [0, 0], 'k', linewidth=2)
plt.plot([T, xmax], [0, 0], 'k', linewidth=2)
# labels
# xlabels
plt.text(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='right', va='baseline')
plt.text(T, xtm, '$T$', fontsize=fontsize, ha='center', va='baseline')
plt.text(ytm, xtm, '$0$', fontsize=fontsize, ha='right', va='baseline')
# ylabels
plt.text(ytm, 1/T, '$\dfrac{1}{T}$', fontsize=fontsize, ha='right', va='center')
plt.text(-ytm, ymax_ax, '$f(x)$', fontsize=fontsize, ha='left', va='center')
plt.axis('off')
fig = plt.figure(0, figsize=(10, 3), frameon=False)
ax = plt.subplot2grid((1, 6), (0, 2), rowspan=1, colspan=2)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
# f2(x)
plt.plot([0, T], [0, 1/T], 'k', linewidth=2, label='$f(x)*f(x)$')
plt.plot([T, 2 * T], [1/T, 0], 'k', linewidth=2)
plt.plot([xmin, 0], [0, 0], 'k', linewidth=2)
plt.plot([2*T, xmax], [0, 0], 'k', linewidth=2)
# Gaussian approximation
plt.plot(x, f2, 'r', linewidth=2, zorder=0, label='$N\left(T,\,\dfrac{T^2}{6}\\right)$')
# ticks
plt.plot([T, T], [0, xtl], 'k')
plt.plot([2*T, 2*T], [0, xtl], 'k')
plt.plot([0, ytl], [1/T, 1/T], 'k')
# labels
# xlabels
plt.text(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='right', va='baseline')
plt.text(ytm, xtm, '$0$', fontsize=fontsize, ha='right', va='baseline')
plt.text(T, xtm, '$T$', fontsize=fontsize, ha='center', va='baseline')
plt.text(2*T, xtm, '$2T$', fontsize=fontsize, ha='center', va='baseline')
# ylabels
plt.text(ytm, 1/T, '$\dfrac{1}{T}$', fontsize=fontsize, ha='right', va='center')
#plt.text(-ytm, ymax_ax, '$f_2(x)$', fontsize=fontsize, ha='left', va='center')
leg = plt.legend(loc=(0.45, 0.7), frameon=False, fontsize=12)
plt.axis('off')
fig = plt.figure(0, figsize=(10, 3), frameon=False)
ax = plt.subplot2grid((1, 6), (0, 4), rowspan=1, colspan=2)
plt.xlim(xmin_ax, xmax_ax)
plt.ylim(ymin_ax, ymax_ax)
# horizontal and vertical ticks length
xtl, ytl = convert_display_to_data_coordinates(ax.transData, length=display_length)
# axis arrows
plt.annotate("", xytext=(xmin_ax, 0), xycoords='data', xy=(xmax_ax, 0), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
plt.annotate("", xytext=(0, ymin_ax), xycoords='data', xy=(0, ymax_ax), textcoords='data',
arrowprops=dict(width=0.1, headwidth=6, headlength=8, facecolor='black', shrink=0.002))
# f3(x)
c = 2 * (T ** 3)
xa = np.linspace(0, T, 100)
plt.plot(xa, np.polyval([1, 0, 0], xa) / c, 'k', linewidth=2, label='$f(x)*f(x)*f(x)$')
xa = np.linspace(T, 2 * T, 100)
plt.plot(xa, np.polyval([-2, 6 * T, -3 * (T ** 2)], xa) / c, 'k', linewidth=2)
xa = np.linspace(2 * T, 3 * T, 100)
plt.plot(xa, np.polyval([1, -6 * T, 9 * (T ** 2)], xa) / c, 'k', linewidth=2)
plt.plot([xmin, 0], [0, 0], 'k', linewidth=2)
plt.plot([3*T, xmax], [0, 0], 'k', linewidth=2)
# Gaussian approximation
plt.plot(x, f3, 'r', linewidth=2, zorder=0, label='$N\left(\dfrac{3T}{2},\,\dfrac{T^2}{4}\\right)$')
# ticks
plt.plot([T, T], [0, xtl], 'k')
plt.plot([2*T, 2*T], [0, xtl], 'k')
plt.plot([3*T, 3*T], [0, xtl], 'k')
plt.plot([0, ytl], [1/T, 1/T], 'k')
plt.plot([0, ytl], [1/(2*T), 1/(2*T)], 'k')
# labels
# xlabels
plt.text(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='right', va='baseline')
plt.text(ytm, xtm, '$0$', fontsize=fontsize, ha='right', va='baseline')
plt.text(T, xtm, '$T$', fontsize=fontsize, ha='center', va='baseline')
plt.text(2*T, xtm, '$2T$', fontsize=fontsize, ha='center', va='baseline')
plt.text(3*T, xtm, '$3T$', fontsize=fontsize, ha='center', va='baseline')
# ylabels
plt.text(ytm, 1/T, '$\dfrac{1}{T}$', fontsize=fontsize, ha='right', va='center')
plt.text(ytm, 1/(2*T), '$\dfrac{1}{2T}$', fontsize=fontsize, ha='right', va='center')
#plt.text(-ytm, ymax_ax, '$f_3(x)$', fontsize=fontsize, ha='left', va='center')
leg = plt.legend(loc=(0.28, 0.7), frameon=False, fontsize=12)
plt.axis('off')
# save as pdf image
plt.savefig('example_7_15.pdf', bbox_inches='tight')
plt.show()
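# Illustrative Monte Carlo check of the Gaussian overlay: the sum of 3
# uniform(0, T) samples should have mean close to 3*T/2 = 0.75 and variance
# close to T^2/4 = 0.0625 for T = 0.5.
samples = np.random.uniform(0, T, size=(100000, 3)).sum(axis=1)
print('mean: %.4f (expected %.4f)' % (samples.mean(), 3 * T / 2))
print('var : %.4f (expected %.4f)' % (samples.var(), T ** 2 / 4))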
|
[
"matplotlib.pyplot.xlim",
"matplotlib.rc",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"math.sqrt",
"numpy.polyval",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.savefig"
] |
[((171, 195), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (173, 195), False, 'from matplotlib import rc\n'), ((196, 224), 'matplotlib.rc', 'rc', (['"""mathtext"""'], {'fontset': '"""cm"""'}), "('mathtext', fontset='cm')\n", (198, 224), False, 'from matplotlib import rc\n'), ((1544, 1572), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(300)'], {}), '(xmin, xmax, 300)\n', (1555, 1572), True, 'import numpy as np\n'), ((1965, 2010), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {'figsize': '(10, 3)', 'frameon': '(False)'}), '(0, figsize=(10, 3), frameon=False)\n', (1975, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2016, 2070), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 6)', '(0, 0)'], {'rowspan': '(1)', 'colspan': '(2)'}), '((1, 6), (0, 0), rowspan=1, colspan=2)\n', (2032, 2070), True, 'import matplotlib.pyplot as plt\n'), ((2072, 2098), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin_ax', 'xmax_ax'], {}), '(xmin_ax, xmax_ax)\n', (2080, 2098), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2125), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin_ax', 'ymax_ax'], {}), '(ymin_ax, ymax_ax)\n', (2107, 2125), True, 'import matplotlib.pyplot as plt\n'), ((2533, 2583), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, T]', '[1 / T, 1 / T]', '"""k"""'], {'linewidth': '(2)'}), "([0, T], [1 / T, 1 / T], 'k', linewidth=2)\n", (2541, 2583), True, 'import matplotlib.pyplot as plt\n'), ((2580, 2626), 'matplotlib.pyplot.plot', 'plt.plot', (['[T, T]', '[0, 1 / T]', '"""k"""'], {'linewidth': '(2)'}), "([T, T], [0, 1 / T], 'k', linewidth=2)\n", (2588, 2626), True, 'import matplotlib.pyplot as plt\n'), ((2625, 2671), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[0, 1 / T]', '"""k"""'], {'linewidth': '(2)'}), "([0, 0], [0, 1 / T], 'k', linewidth=2)\n", (2633, 2671), True, 'import matplotlib.pyplot as plt\n'), ((2670, 2715), 'matplotlib.pyplot.plot', 'plt.plot', (['[xmin, 0]', '[0, 0]', '"""k"""'], {'linewidth': '(2)'}), "([xmin, 0], [0, 0], 'k', linewidth=2)\n", (2678, 2715), True, 'import matplotlib.pyplot as plt\n'), ((2716, 2761), 'matplotlib.pyplot.plot', 'plt.plot', (['[T, xmax]', '[0, 0]', '"""k"""'], {'linewidth': '(2)'}), "([T, xmax], [0, 0], 'k', linewidth=2)\n", (2724, 2761), True, 'import matplotlib.pyplot as plt\n'), ((2782, 2857), 'matplotlib.pyplot.text', 'plt.text', (['xmax_ax', 'xtm', '"""$x$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""baseline"""'}), "(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='right', va='baseline')\n", (2790, 2857), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2928), 'matplotlib.pyplot.text', 'plt.text', (['T', 'xtm', '"""$T$"""'], {'fontsize': 'fontsize', 'ha': '"""center"""', 'va': '"""baseline"""'}), "(T, xtm, '$T$', fontsize=fontsize, ha='center', va='baseline')\n", (2866, 2928), True, 'import matplotlib.pyplot as plt\n'), ((2929, 3000), 'matplotlib.pyplot.text', 'plt.text', (['ytm', 'xtm', '"""$0$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""baseline"""'}), "(ytm, xtm, '$0$', fontsize=fontsize, ha='right', va='baseline')\n", (2937, 3000), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3099), 'matplotlib.pyplot.text', 'plt.text', (['ytm', '(1 / T)', '"""$\\\\dfrac{1}{T}$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""center"""'}), "(ytm, 1 / T, '$\\\\dfrac{1}{T}$', fontsize=fontsize, ha='right', va=\n 'center')\n", (3019, 3099), True, 'import matplotlib.pyplot as plt\n'), ((3092, 3168), 'matplotlib.pyplot.text', 'plt.text', 
(['(-ytm)', 'ymax_ax', '"""$f(x)$"""'], {'fontsize': 'fontsize', 'ha': '"""left"""', 'va': '"""center"""'}), "(-ytm, ymax_ax, '$f(x)$', fontsize=fontsize, ha='left', va='center')\n", (3100, 3168), True, 'import matplotlib.pyplot as plt\n'), ((3170, 3185), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3178, 3185), True, 'import matplotlib.pyplot as plt\n'), ((3194, 3239), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {'figsize': '(10, 3)', 'frameon': '(False)'}), '(0, figsize=(10, 3), frameon=False)\n', (3204, 3239), True, 'import matplotlib.pyplot as plt\n'), ((3245, 3299), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 6)', '(0, 2)'], {'rowspan': '(1)', 'colspan': '(2)'}), '((1, 6), (0, 2), rowspan=1, colspan=2)\n', (3261, 3299), True, 'import matplotlib.pyplot as plt\n'), ((3301, 3327), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin_ax', 'xmax_ax'], {}), '(xmin_ax, xmax_ax)\n', (3309, 3327), True, 'import matplotlib.pyplot as plt\n'), ((3328, 3354), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin_ax', 'ymax_ax'], {}), '(ymin_ax, ymax_ax)\n', (3336, 3354), True, 'import matplotlib.pyplot as plt\n'), ((3887, 3954), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, T]', '[0, 1 / T]', '"""k"""'], {'linewidth': '(2)', 'label': '"""$f(x)*f(x)$"""'}), "([0, T], [0, 1 / T], 'k', linewidth=2, label='$f(x)*f(x)$')\n", (3895, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3953, 4003), 'matplotlib.pyplot.plot', 'plt.plot', (['[T, 2 * T]', '[1 / T, 0]', '"""k"""'], {'linewidth': '(2)'}), "([T, 2 * T], [1 / T, 0], 'k', linewidth=2)\n", (3961, 4003), True, 'import matplotlib.pyplot as plt\n'), ((4002, 4047), 'matplotlib.pyplot.plot', 'plt.plot', (['[xmin, 0]', '[0, 0]', '"""k"""'], {'linewidth': '(2)'}), "([xmin, 0], [0, 0], 'k', linewidth=2)\n", (4010, 4047), True, 'import matplotlib.pyplot as plt\n'), ((4048, 4097), 'matplotlib.pyplot.plot', 'plt.plot', (['[2 * T, xmax]', '[0, 0]', '"""k"""'], {'linewidth': '(2)'}), "([2 * T, xmax], [0, 0], 'k', linewidth=2)\n", (4056, 4097), True, 'import matplotlib.pyplot as plt\n'), ((4121, 4217), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'f2', '"""r"""'], {'linewidth': '(2)', 'zorder': '(0)', 'label': '"""$N\\\\left(T,\\\\,\\\\dfrac{T^2}{6}\\\\right)$"""'}), "(x, f2, 'r', linewidth=2, zorder=0, label=\n '$N\\\\left(T,\\\\,\\\\dfrac{T^2}{6}\\\\right)$')\n", (4129, 4217), True, 'import matplotlib.pyplot as plt\n'), ((4219, 4250), 'matplotlib.pyplot.plot', 'plt.plot', (['[T, T]', '[0, xtl]', '"""k"""'], {}), "([T, T], [0, xtl], 'k')\n", (4227, 4250), True, 'import matplotlib.pyplot as plt\n'), ((4251, 4290), 'matplotlib.pyplot.plot', 'plt.plot', (['[2 * T, 2 * T]', '[0, xtl]', '"""k"""'], {}), "([2 * T, 2 * T], [0, xtl], 'k')\n", (4259, 4290), True, 'import matplotlib.pyplot as plt\n'), ((4287, 4326), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, ytl]', '[1 / T, 1 / T]', '"""k"""'], {}), "([0, ytl], [1 / T, 1 / T], 'k')\n", (4295, 4326), True, 'import matplotlib.pyplot as plt\n'), ((4343, 4418), 'matplotlib.pyplot.text', 'plt.text', (['xmax_ax', 'xtm', '"""$x$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""baseline"""'}), "(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='right', va='baseline')\n", (4351, 4418), True, 'import matplotlib.pyplot as plt\n'), ((4419, 4490), 'matplotlib.pyplot.text', 'plt.text', (['ytm', 'xtm', '"""$0$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""baseline"""'}), "(ytm, xtm, '$0$', fontsize=fontsize, ha='right', va='baseline')\n", (4427, 4490), True, 'import 
matplotlib.pyplot as plt\n'), ((4491, 4561), 'matplotlib.pyplot.text', 'plt.text', (['T', 'xtm', '"""$T$"""'], {'fontsize': 'fontsize', 'ha': '"""center"""', 'va': '"""baseline"""'}), "(T, xtm, '$T$', fontsize=fontsize, ha='center', va='baseline')\n", (4499, 4561), True, 'import matplotlib.pyplot as plt\n'), ((4562, 4637), 'matplotlib.pyplot.text', 'plt.text', (['(2 * T)', 'xtm', '"""$2T$"""'], {'fontsize': 'fontsize', 'ha': '"""center"""', 'va': '"""baseline"""'}), "(2 * T, xtm, '$2T$', fontsize=fontsize, ha='center', va='baseline')\n", (4570, 4637), True, 'import matplotlib.pyplot as plt\n'), ((4646, 4734), 'matplotlib.pyplot.text', 'plt.text', (['ytm', '(1 / T)', '"""$\\\\dfrac{1}{T}$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""center"""'}), "(ytm, 1 / T, '$\\\\dfrac{1}{T}$', fontsize=fontsize, ha='right', va=\n 'center')\n", (4654, 4734), True, 'import matplotlib.pyplot as plt\n'), ((4820, 4875), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0.45, 0.7)', 'frameon': '(False)', 'fontsize': '(12)'}), '(loc=(0.45, 0.7), frameon=False, fontsize=12)\n', (4830, 4875), True, 'import matplotlib.pyplot as plt\n'), ((4877, 4892), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4885, 4892), True, 'import matplotlib.pyplot as plt\n'), ((4901, 4946), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {'figsize': '(10, 3)', 'frameon': '(False)'}), '(0, figsize=(10, 3), frameon=False)\n', (4911, 4946), True, 'import matplotlib.pyplot as plt\n'), ((4952, 5006), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 6)', '(0, 4)'], {'rowspan': '(1)', 'colspan': '(2)'}), '((1, 6), (0, 4), rowspan=1, colspan=2)\n', (4968, 5006), True, 'import matplotlib.pyplot as plt\n'), ((5008, 5034), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin_ax', 'xmax_ax'], {}), '(xmin_ax, xmax_ax)\n', (5016, 5034), True, 'import matplotlib.pyplot as plt\n'), ((5035, 5061), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ymin_ax', 'ymax_ax'], {}), '(ymin_ax, ymax_ax)\n', (5043, 5061), True, 'import matplotlib.pyplot as plt\n'), ((5616, 5638), 'numpy.linspace', 'np.linspace', (['(0)', 'T', '(100)'], {}), '(0, T, 100)\n', (5627, 5638), True, 'import numpy as np\n'), ((5732, 5758), 'numpy.linspace', 'np.linspace', (['T', '(2 * T)', '(100)'], {}), '(T, 2 * T, 100)\n', (5743, 5758), True, 'import numpy as np\n'), ((5843, 5873), 'numpy.linspace', 'np.linspace', (['(2 * T)', '(3 * T)', '(100)'], {}), '(2 * T, 3 * T, 100)\n', (5854, 5873), True, 'import numpy as np\n'), ((5953, 5998), 'matplotlib.pyplot.plot', 'plt.plot', (['[xmin, 0]', '[0, 0]', '"""k"""'], {'linewidth': '(2)'}), "([xmin, 0], [0, 0], 'k', linewidth=2)\n", (5961, 5998), True, 'import matplotlib.pyplot as plt\n'), ((5999, 6048), 'matplotlib.pyplot.plot', 'plt.plot', (['[3 * T, xmax]', '[0, 0]', '"""k"""'], {'linewidth': '(2)'}), "([3 * T, xmax], [0, 0], 'k', linewidth=2)\n", (6007, 6048), True, 'import matplotlib.pyplot as plt\n'), ((6072, 6181), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'f3', '"""r"""'], {'linewidth': '(2)', 'zorder': '(0)', 'label': '"""$N\\\\left(\\\\dfrac{3T}{2},\\\\,\\\\dfrac{T^2}{4}\\\\right)$"""'}), "(x, f3, 'r', linewidth=2, zorder=0, label=\n '$N\\\\left(\\\\dfrac{3T}{2},\\\\,\\\\dfrac{T^2}{4}\\\\right)$')\n", (6080, 6181), True, 'import matplotlib.pyplot as plt\n'), ((6182, 6213), 'matplotlib.pyplot.plot', 'plt.plot', (['[T, T]', '[0, xtl]', '"""k"""'], {}), "([T, T], [0, xtl], 'k')\n", (6190, 6213), True, 'import matplotlib.pyplot as plt\n'), ((6214, 6253), 'matplotlib.pyplot.plot', 
'plt.plot', (['[2 * T, 2 * T]', '[0, xtl]', '"""k"""'], {}), "([2 * T, 2 * T], [0, xtl], 'k')\n", (6222, 6253), True, 'import matplotlib.pyplot as plt\n'), ((6250, 6289), 'matplotlib.pyplot.plot', 'plt.plot', (['[3 * T, 3 * T]', '[0, xtl]', '"""k"""'], {}), "([3 * T, 3 * T], [0, xtl], 'k')\n", (6258, 6289), True, 'import matplotlib.pyplot as plt\n'), ((6286, 6325), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, ytl]', '[1 / T, 1 / T]', '"""k"""'], {}), "([0, ytl], [1 / T, 1 / T], 'k')\n", (6294, 6325), True, 'import matplotlib.pyplot as plt\n'), ((6322, 6373), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, ytl]', '[1 / (2 * T), 1 / (2 * T)]', '"""k"""'], {}), "([0, ytl], [1 / (2 * T), 1 / (2 * T)], 'k')\n", (6330, 6373), True, 'import matplotlib.pyplot as plt\n'), ((6386, 6461), 'matplotlib.pyplot.text', 'plt.text', (['xmax_ax', 'xtm', '"""$x$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""baseline"""'}), "(xmax_ax, xtm, '$x$', fontsize=fontsize, ha='right', va='baseline')\n", (6394, 6461), True, 'import matplotlib.pyplot as plt\n'), ((6462, 6533), 'matplotlib.pyplot.text', 'plt.text', (['ytm', 'xtm', '"""$0$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""baseline"""'}), "(ytm, xtm, '$0$', fontsize=fontsize, ha='right', va='baseline')\n", (6470, 6533), True, 'import matplotlib.pyplot as plt\n'), ((6534, 6604), 'matplotlib.pyplot.text', 'plt.text', (['T', 'xtm', '"""$T$"""'], {'fontsize': 'fontsize', 'ha': '"""center"""', 'va': '"""baseline"""'}), "(T, xtm, '$T$', fontsize=fontsize, ha='center', va='baseline')\n", (6542, 6604), True, 'import matplotlib.pyplot as plt\n'), ((6605, 6680), 'matplotlib.pyplot.text', 'plt.text', (['(2 * T)', 'xtm', '"""$2T$"""'], {'fontsize': 'fontsize', 'ha': '"""center"""', 'va': '"""baseline"""'}), "(2 * T, xtm, '$2T$', fontsize=fontsize, ha='center', va='baseline')\n", (6613, 6680), True, 'import matplotlib.pyplot as plt\n'), ((6679, 6754), 'matplotlib.pyplot.text', 'plt.text', (['(3 * T)', 'xtm', '"""$3T$"""'], {'fontsize': 'fontsize', 'ha': '"""center"""', 'va': '"""baseline"""'}), "(3 * T, xtm, '$3T$', fontsize=fontsize, ha='center', va='baseline')\n", (6687, 6754), True, 'import matplotlib.pyplot as plt\n'), ((6763, 6851), 'matplotlib.pyplot.text', 'plt.text', (['ytm', '(1 / T)', '"""$\\\\dfrac{1}{T}$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""center"""'}), "(ytm, 1 / T, '$\\\\dfrac{1}{T}$', fontsize=fontsize, ha='right', va=\n 'center')\n", (6771, 6851), True, 'import matplotlib.pyplot as plt\n'), ((6844, 6939), 'matplotlib.pyplot.text', 'plt.text', (['ytm', '(1 / (2 * T))', '"""$\\\\dfrac{1}{2T}$"""'], {'fontsize': 'fontsize', 'ha': '"""right"""', 'va': '"""center"""'}), "(ytm, 1 / (2 * T), '$\\\\dfrac{1}{2T}$', fontsize=fontsize, ha=\n 'right', va='center')\n", (6852, 6939), True, 'import matplotlib.pyplot as plt\n'), ((7023, 7078), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0.28, 0.7)', 'frameon': '(False)', 'fontsize': '(12)'}), '(loc=(0.28, 0.7), frameon=False, fontsize=12)\n', (7033, 7078), True, 'import matplotlib.pyplot as plt\n'), ((7080, 7095), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7088, 7095), True, 'import matplotlib.pyplot as plt\n'), ((7117, 7169), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""example_7_15.pdf"""'], {'bbox_inches': '"""tight"""'}), "('example_7_15.pdf', bbox_inches='tight')\n", (7128, 7169), True, 'import matplotlib.pyplot as plt\n'), ((7170, 7180), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7178, 7180), True, 
'import matplotlib.pyplot as plt\n'), ((1596, 1611), 'math.sqrt', 'math.sqrt', (['var2'], {}), '(var2)\n', (1605, 1611), False, 'import math\n'), ((1636, 1651), 'math.sqrt', 'math.sqrt', (['var3'], {}), '(var3)\n', (1645, 1651), False, 'import math\n'), ((5652, 5677), 'numpy.polyval', 'np.polyval', (['[1, 0, 0]', 'xa'], {}), '([1, 0, 0], xa)\n', (5662, 5677), True, 'import numpy as np\n'), ((5772, 5812), 'numpy.polyval', 'np.polyval', (['[-2, 6 * T, -3 * T ** 2]', 'xa'], {}), '([-2, 6 * T, -3 * T ** 2], xa)\n', (5782, 5812), True, 'import numpy as np\n'), ((5887, 5926), 'numpy.polyval', 'np.polyval', (['[1, -6 * T, 9 * T ** 2]', 'xa'], {}), '([1, -6 * T, 9 * T ** 2], xa)\n', (5897, 5926), True, 'import numpy as np\n')]
|
# Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2019 Preferred Infrastructure, Inc.
# Copyright (c) 2019 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author <NAME>, <NAME>, <NAME>
# \changed to roi_align by <NAME>
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
from __future__ import division
import numbers
import numpy as np
import six
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
from chainercv.functions.ps_roi_average_align_2d \
import _GET_BILINEAR_INTERP_KERNEL
from chainercv.functions.ps_roi_average_align_2d \
import _get_bilinear_interp_params
from chainercv.functions.ps_roi_average_align_2d import _get_bounds
from chainercv.functions.ps_roi_average_align_2d import _pair
from chainercv.functions.ps_roi_average_pooling_2d import _outsize
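# PSROIMaxAlign2D implements position-sensitive ROI max-align: each ROI is
# divided into a group_size x group_size grid of bins, every output bin draws
# bilinearly interpolated samples on a regular sub-grid inside it, and the
# maximum sample is kept (its index is stored in argmax_data for the backward
# pass).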
class PSROIMaxAlign2D(function.Function):
def __init__(
self, outsize, spatial_scale,
group_size, sampling_ratio=None
):
out_c, out_h, out_w = _outsize(outsize)
if out_c is not None and \
not (isinstance(out_c, numbers.Integral) and out_c > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(out_c), out_c))
if not (isinstance(out_h, numbers.Integral) and out_h > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(out_h), out_h))
if not (isinstance(out_w, numbers.Integral) and out_w > 0):
raise TypeError(
'outsize[2] must be positive integer: {}, {}'
.format(type(out_w), out_w))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real)
and spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
if not (isinstance(group_size, numbers.Integral)
and group_size > 0):
raise TypeError(
'group_size must be positive integer: {}, {}'
.format(type(group_size), group_size))
sampling_ratio = _pair(sampling_ratio)
if not all((isinstance(s, numbers.Integral) and s >= 1) or s is None
for s in sampling_ratio):
raise TypeError(
'sampling_ratio must be integer >= 1 or a pair of it: {}'
.format(sampling_ratio))
self.out_c, self.out_h, self.out_w = out_c, out_h, out_w
self.spatial_scale = spatial_scale
self.group_size = group_size
self.sampling_ratio = sampling_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == np.float32,
x_type.ndim == 4,
roi_type.dtype == np.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == np.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0]
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
                    'input channel must be divisible by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = np.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = np.empty(top_data.shape, dtype=np.int32)
group_size = self.group_size
pooled_width, pooled_height \
= self.out_w, self.out_h
spatial_scale = self.spatial_scale
for i in six.moves.range(top_data.size):
n, ctop, ph, pw = np.unravel_index(i, top_data.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
gh = int(np.floor(ph * group_size / pooled_height))
gw = int(np.floor(pw * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
maxval = - np.inf
maxidx = -1
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear interpolation {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
tmpval = 0.0
isvalid = False
bottom_index = iy * roi_bin_grid_w + ix
if w1 > 0 and y_low >= 0 and x_low >= 0:
v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
tmpval += w1 * v1
isvalid = True
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
tmpval += w2 * v2
isvalid = True
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
tmpval += w3 * v3
isvalid = True
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
tmpval += w4 * v4
isvalid = True
if isvalid and tmpval > maxval:
maxval = tmpval
maxidx = bottom_index
# }}
top_data[n, ctop, ph, pw] = maxval
self.argmax_data[n, ctop, ph, pw] = maxidx
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
                    'input channel must be divisible by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = cuda.cupy.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = cuda.cupy.empty(top_data.shape, np.int32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T bottom_data, raw T bottom_rois,
raw int32 bottom_roi_indices,
T spatial_scale, int32 channel,
int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
''',
'T top_data, int32 argmax_data',
'''
// pos in output filter
int ph = (i / pooled_width) % pooled_height;
int pw = i % pooled_width;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_data_offset =
(roi_batch_ind * channel + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
T maxval = - (T) (1.0 / 0.0);
int maxidx = -1;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
{
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
T tmpval = 0.;
bool isvalid = false;
int bottom_index = iy * roi_bin_grid_w + ix;
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T v1 = bottom_data[
bottom_data_offset + y_low * width + x_low];
tmpval += w1 * v1;
isvalid = true;
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T v2 = bottom_data[
bottom_data_offset + y_low * width + x_high];
tmpval += w2 * v2;
isvalid = true;
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T v3 = bottom_data[
bottom_data_offset + y_high * width + x_low];
tmpval += w3 * v3;
isvalid = true;
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T v4 = bottom_data[
bottom_data_offset + y_high * width + x_high];
tmpval += w4 * v4;
isvalid = true;
}
// }}
if (isvalid && tmpval > maxval) {
maxval = tmpval;
maxidx = bottom_index;
}
}
}
top_data = maxval;
argmax_data = maxidx;
''',
'ps_roi_max_align_2d_fwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(bottom_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, self.out_h, self.out_w,
self.group_size, sampling_ratio_h, sampling_ratio_w,
top_data, self.argmax_data)
return top_data,
def backward_cpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
height, width = self._bottom_data_shape[2:]
bottom_diff = np.zeros(self._bottom_data_shape, np.float32)
spatial_scale = self.spatial_scale
pooled_height = self.out_h
pooled_width = self.out_w
group_size = self.group_size
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
n, ctop, ph, pw = np.unravel_index(i, top_diff.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
gh = int(np.floor(float(ph) * group_size / pooled_height))
gw = int(np.floor(float(pw) * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
top_diff_this_bin = top_diff[n, ctop, ph, pw]
maxidx = self.argmax_data[n, ctop, ph, pw]
if maxidx != -1:
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
iy = int(maxidx / roi_bin_grid_w)
ix = maxidx % roi_bin_grid_w
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear_interpolation_gradient {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
g1 = top_diff_this_bin * w1
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
g2 = top_diff_this_bin * w2
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
g3 = top_diff_this_bin * w3
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
g4 = top_diff_this_bin * w4
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
channel, height, width = self._bottom_data_shape[1:]
out_c, out_h, out_w = gy[0].shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, np.float32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T top_diff, raw int32 argmax_data,
raw T bottom_rois, raw int32 bottom_roi_indices,
T spatial_scale, int32 channel, int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
''',
'raw T bottom_diff',
'''
// (n, c, h, w) coords in bottom data
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
// Do not using rounding; this implementation detail is critical
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_diff_offset =
(roi_batch_ind * channel + c) * height * width;
int top_offset =
(n * pooled_dim + ctop) * pooled_height * pooled_width;
T top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
int maxidx = argmax_data[top_offset + ph * pooled_width + pw];
if (maxidx != -1) {
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
int iy = maxidx / roi_bin_grid_w;
int ix = maxidx % roi_bin_grid_w;
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation_gradient {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T g1 = top_diff_this_bin * w1;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_low], g1);
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T g2 = top_diff_this_bin * w2;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_high], g2);
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T g3 = top_diff_this_bin * w3;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_low], g3);
}
if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {
T g4 = top_diff_this_bin * w4;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_high], g4);
}
// }}
}
''',
'ps_roi_max_align_2d_bwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(gy[0], self.argmax_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, out_h, out_w, self.group_size,
sampling_ratio_h, sampling_ratio_w, bottom_diff,
size=gy[0].size)
return bottom_diff, None, None
def ps_roi_max_align_2d(
x, rois, roi_indices, outsize,
spatial_scale, group_size, sampling_ratio=None
):
"""Position Sensitive Region of Interest (ROI) Max align function.
    This function computes position sensitive max values of the input spatial
    patch for the given regions of interest. Each ROI is split into
    :math:`(group\_size, group\_size)` regions, and a position sensitive value
    is computed in each region.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (array): Input roi. The shape is expected to
be :math:`(R, 4)`, and each datum is set as below:
(y_min, x_min, y_max, x_max). The dtype is :obj:`numpy.float32`.
roi_indices (array): Input roi indices. The shape is expected to
be :math:`(R, )`. The dtype is :obj:`numpy.int32`.
outsize ((int, int, int) or (int, int) or int): Expected output size
            after pooling: (channel, height, width) or (height, width)
or outsize. ``outsize=o`` and ``outsize=(o, o)`` are equivalent.
Channel parameter is used to assert the input shape.
        spatial_scale (float): Scale by which the roi is resized.
group_size (int): Position sensitive group size.
sampling_ratio ((int, int) or int): Sampling step for the alignment.
It must be an integer over :math:`1` or :obj:`None`, and the value
is automatically decided when :obj:`None` is passed. Use of
different ratio in height and width axis is also supported by
passing tuple of int as ``(sampling_ratio_h, sampling_ratio_w)``.
``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing PSROIPooling:
`R-FCN <https://arxiv.org/abs/1605.06409>`_.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return PSROIMaxAlign2D(
outsize, spatial_scale,
group_size, sampling_ratio)(x, rois, roi_indices)
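# Minimal usage sketch (a hedged example with hypothetical random inputs; the
# shapes follow the docstring above). With group_size=1 and outsize=(8, 2, 2)
# the input channel count must equal 8 * 1 * 1 = 8.
if __name__ == '__main__':
    x = np.random.randn(1, 8, 16, 16).astype(np.float32)
    rois = np.array([[0., 0., 8., 8.]], dtype=np.float32)  # (y_min, x_min, y_max, x_max)
    roi_indices = np.array([0], dtype=np.int32)
    y = ps_roi_max_align_2d(x, rois, roi_indices, outsize=(8, 2, 2),
                             spatial_scale=1.0, group_size=1)
    print(y.shape)  # expected: (1, 8, 2, 2)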
|
[
"chainer.utils.type_check.expect",
"numpy.ceil",
"six.moves.range",
"chainercv.functions.ps_roi_average_align_2d._get_bounds",
"numpy.empty",
"chainercv.functions.ps_roi_average_align_2d._pair",
"numpy.floor",
"numpy.zeros",
"numpy.unravel_index",
"chainer.backends.cuda.cupy.zeros",
"chainer.backends.cuda.cupy.empty",
"chainercv.functions.ps_roi_average_align_2d._get_bilinear_interp_params",
"chainer.backends.cuda.elementwise",
"chainercv.functions.ps_roi_average_pooling_2d._outsize"
] |
[((1420, 1437), 'chainercv.functions.ps_roi_average_pooling_2d._outsize', '_outsize', (['outsize'], {}), '(outsize)\n', (1428, 1437), False, 'from chainercv.functions.ps_roi_average_pooling_2d import _outsize\n'), ((2718, 2739), 'chainercv.functions.ps_roi_average_align_2d._pair', '_pair', (['sampling_ratio'], {}), '(sampling_ratio)\n', (2723, 2739), False, 'from chainercv.functions.ps_roi_average_align_2d import _pair\n'), ((3351, 3609), 'chainer.utils.type_check.expect', 'type_check.expect', (['(x_type.dtype == np.float32)', '(x_type.ndim == 4)', '(roi_type.dtype == np.float32)', '(roi_type.ndim == 2)', '(roi_type.shape[1] == 4)', '(roi_index_type.dtype == np.int32)', '(roi_index_type.ndim == 1)', '(roi_type.shape[0] == roi_index_type.shape[0])'], {}), '(x_type.dtype == np.float32, x_type.ndim == 4, roi_type.\n dtype == np.float32, roi_type.ndim == 2, roi_type.shape[1] == 4, \n roi_index_type.dtype == np.int32, roi_index_type.ndim == 1, roi_type.\n shape[0] == roi_index_type.shape[0])\n', (3368, 3609), False, 'from chainer.utils import type_check\n'), ((4773, 4839), 'numpy.empty', 'np.empty', (['(n_roi, out_c, self.out_h, self.out_w)'], {'dtype': 'np.float32'}), '((n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)\n', (4781, 4839), True, 'import numpy as np\n'), ((4880, 4920), 'numpy.empty', 'np.empty', (['top_data.shape'], {'dtype': 'np.int32'}), '(top_data.shape, dtype=np.int32)\n', (4888, 4920), True, 'import numpy as np\n'), ((5095, 5125), 'six.moves.range', 'six.moves.range', (['top_data.size'], {}), '(top_data.size)\n', (5110, 5125), False, 'import six\n'), ((9671, 9744), 'chainer.backends.cuda.cupy.empty', 'cuda.cupy.empty', (['(n_roi, out_c, self.out_h, self.out_w)'], {'dtype': 'np.float32'}), '((n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)\n', (9686, 9744), False, 'from chainer.backends import cuda\n'), ((9785, 9826), 'chainer.backends.cuda.cupy.empty', 'cuda.cupy.empty', (['top_data.shape', 'np.int32'], {}), '(top_data.shape, np.int32)\n', (9800, 9826), False, 'from chainer.backends import cuda\n'), ((15665, 15710), 'numpy.zeros', 'np.zeros', (['self._bottom_data_shape', 'np.float32'], {}), '(self._bottom_data_shape, np.float32)\n', (15673, 15710), True, 'import numpy as np\n'), ((15904, 15934), 'six.moves.range', 'six.moves.range', (['top_diff.size'], {}), '(top_diff.size)\n', (15919, 15934), False, 'import six\n'), ((19203, 19255), 'chainer.backends.cuda.cupy.zeros', 'cuda.cupy.zeros', (['self._bottom_data_shape', 'np.float32'], {}), '(self._bottom_data_shape, np.float32)\n', (19218, 19255), False, 'from chainer.backends import cuda\n'), ((5157, 5192), 'numpy.unravel_index', 'np.unravel_index', (['i', 'top_data.shape'], {}), '(i, top_data.shape)\n', (5173, 5192), True, 'import numpy as np\n'), ((6451, 6482), 'six.moves.range', 'six.moves.range', (['roi_bin_grid_h'], {}), '(roi_bin_grid_h)\n', (6466, 6482), False, 'import six\n'), ((10124, 15177), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['"""\n raw T bottom_data, raw T bottom_rois,\n raw int32 bottom_roi_indices,\n T spatial_scale, int32 channel,\n int32 height, int32 width,\n int32 pooled_dim, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w\n """', '"""T top_data, int32 argmax_data"""', '"""\n // pos in output filter\n int ph = (i / pooled_width) % pooled_height;\n int pw = i % pooled_width;\n int ctop = (i / pooled_width / pooled_height) % pooled_dim;\n int n = i / pooled_width / pooled_height / pooled_dim;\n\n int roi_batch_ind 
= bottom_roi_indices[n];\n T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;\n T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;\n T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;\n T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;\n\n // Force too small ROIs to be 1x1\n T roi_height = max(roi_end_h - roi_start_h, 0.1);\n T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n\n // Compute w and h at bottom\n T bin_size_h = roi_height / static_cast<T>(pooled_height);\n T bin_size_w = roi_width / static_cast<T>(pooled_width);\n\n // Compute c at bottom\n int gh = floor(\n static_cast<T>(ph) * group_size / pooled_height);\n int gw = floor(\n static_cast<T>(pw) * group_size / pooled_width);\n gh = min(max(gh, 0), group_size - 1);\n gw = min(max(gw, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_data_offset =\n (roi_batch_ind * channel + c) * height * width;\n\n // We use roi_bin_grid to sample the grid and mimic integral\n int roi_bin_grid_h = (sampling_ratio_h > 0)\n ? sampling_ratio_h\n : ceil(roi_height / pooled_height); // e.g. = 2\n int roi_bin_grid_w = (sampling_ratio_w > 0)\n ? sampling_ratio_w\n : ceil(roi_width / pooled_width);\n\n T maxval = - (T) (1.0 / 0.0);\n int maxidx = -1;\n for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1\n {\n T y = roi_start_h + ph * bin_size_h +\n static_cast<T>(iy + .5f) * bin_size_h /\n static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5\n int y_low, y_high;\n bool y_ret = get_bounds(y, height, y_low, y_high);\n if (!y_ret) continue;\n for (int ix = 0; ix < roi_bin_grid_w; ix++) {\n T x = roi_start_w + pw * bin_size_w +\n static_cast<T>(ix + .5f) * bin_size_w /\n static_cast<T>(roi_bin_grid_w);\n\n int x_low, x_high;\n bool x_ret = get_bounds(x, width, x_low, x_high);\n if (!x_ret) continue;\n // bilinear_interpolation {{\n T w1, w2, w3, w4;\n get_bilinear_interp_params(\n y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);\n\n T tmpval = 0.;\n bool isvalid = false;\n int bottom_index = iy * roi_bin_grid_w + ix;\n if (w1 > 0 && y_low >= 0 && x_low >= 0) {\n T v1 = bottom_data[\n bottom_data_offset + y_low * width + x_low];\n tmpval += w1 * v1;\n isvalid = true;\n }\n if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {\n T v2 = bottom_data[\n bottom_data_offset + y_low * width + x_high];\n tmpval += w2 * v2;\n isvalid = true;\n }\n if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {\n T v3 = bottom_data[\n bottom_data_offset + y_high * width + x_low];\n tmpval += w3 * v3;\n isvalid = true;\n }\n if (w4 > 0 && y_high <= height - 1 &&\n x_high <= width - 1) {\n T v4 = bottom_data[\n bottom_data_offset + y_high * width + x_high];\n tmpval += w4 * v4;\n isvalid = true;\n }\n\n // }}\n\n if (isvalid && tmpval > maxval) {\n maxval = tmpval;\n maxidx = bottom_index;\n }\n }\n }\n top_data = maxval;\n argmax_data = maxidx;\n """', '"""ps_roi_max_align_2d_fwd"""'], {'preamble': '_GET_BILINEAR_INTERP_KERNEL'}), '(\n """\n raw T bottom_data, raw T bottom_rois,\n raw int32 bottom_roi_indices,\n T spatial_scale, int32 channel,\n int32 height, int32 width,\n int32 pooled_dim, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w\n """\n , \'T top_data, int32 argmax_data\',\n """\n // pos in output filter\n int ph = (i / pooled_width) % pooled_height;\n int pw = i % pooled_width;\n int ctop = (i / pooled_width / pooled_height) % pooled_dim;\n int n = i / pooled_width / pooled_height / pooled_dim;\n\n int roi_batch_ind = 
bottom_roi_indices[n];\n T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;\n T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;\n T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;\n T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;\n\n // Force too small ROIs to be 1x1\n T roi_height = max(roi_end_h - roi_start_h, 0.1);\n T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n\n // Compute w and h at bottom\n T bin_size_h = roi_height / static_cast<T>(pooled_height);\n T bin_size_w = roi_width / static_cast<T>(pooled_width);\n\n // Compute c at bottom\n int gh = floor(\n static_cast<T>(ph) * group_size / pooled_height);\n int gw = floor(\n static_cast<T>(pw) * group_size / pooled_width);\n gh = min(max(gh, 0), group_size - 1);\n gw = min(max(gw, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_data_offset =\n (roi_batch_ind * channel + c) * height * width;\n\n // We use roi_bin_grid to sample the grid and mimic integral\n int roi_bin_grid_h = (sampling_ratio_h > 0)\n ? sampling_ratio_h\n : ceil(roi_height / pooled_height); // e.g. = 2\n int roi_bin_grid_w = (sampling_ratio_w > 0)\n ? sampling_ratio_w\n : ceil(roi_width / pooled_width);\n\n T maxval = - (T) (1.0 / 0.0);\n int maxidx = -1;\n for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1\n {\n T y = roi_start_h + ph * bin_size_h +\n static_cast<T>(iy + .5f) * bin_size_h /\n static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5\n int y_low, y_high;\n bool y_ret = get_bounds(y, height, y_low, y_high);\n if (!y_ret) continue;\n for (int ix = 0; ix < roi_bin_grid_w; ix++) {\n T x = roi_start_w + pw * bin_size_w +\n static_cast<T>(ix + .5f) * bin_size_w /\n static_cast<T>(roi_bin_grid_w);\n\n int x_low, x_high;\n bool x_ret = get_bounds(x, width, x_low, x_high);\n if (!x_ret) continue;\n // bilinear_interpolation {{\n T w1, w2, w3, w4;\n get_bilinear_interp_params(\n y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);\n\n T tmpval = 0.;\n bool isvalid = false;\n int bottom_index = iy * roi_bin_grid_w + ix;\n if (w1 > 0 && y_low >= 0 && x_low >= 0) {\n T v1 = bottom_data[\n bottom_data_offset + y_low * width + x_low];\n tmpval += w1 * v1;\n isvalid = true;\n }\n if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {\n T v2 = bottom_data[\n bottom_data_offset + y_low * width + x_high];\n tmpval += w2 * v2;\n isvalid = true;\n }\n if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {\n T v3 = bottom_data[\n bottom_data_offset + y_high * width + x_low];\n tmpval += w3 * v3;\n isvalid = true;\n }\n if (w4 > 0 && y_high <= height - 1 &&\n x_high <= width - 1) {\n T v4 = bottom_data[\n bottom_data_offset + y_high * width + x_high];\n tmpval += w4 * v4;\n isvalid = true;\n }\n\n // }}\n\n if (isvalid && tmpval > maxval) {\n maxval = tmpval;\n maxidx = bottom_index;\n }\n }\n }\n top_data = maxval;\n argmax_data = maxidx;\n """\n , \'ps_roi_max_align_2d_fwd\', preamble=_GET_BILINEAR_INTERP_KERNEL)\n', (10140, 15177), False, 'from chainer.backends import cuda\n'), ((15966, 16001), 'numpy.unravel_index', 'np.unravel_index', (['i', 'top_diff.shape'], {}), '(i, top_diff.shape)\n', (15982, 16001), True, 'import numpy as np\n'), ((19553, 24296), 'chainer.backends.cuda.elementwise', 'cuda.elementwise', (['"""\n raw T top_diff, raw int32 argmax_data,\n raw T bottom_rois, raw int32 bottom_roi_indices,\n T spatial_scale, int32 channel, int32 height, int32 width,\n int32 pooled_dim, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w\n """', 
'"""raw T bottom_diff"""', '"""\n // (n, c, h, w) coords in bottom data\n int pw = i % pooled_width;\n int ph = (i / pooled_width) % pooled_height;\n int ctop = (i / pooled_width / pooled_height) % pooled_dim;\n int n = i / pooled_width / pooled_height / pooled_dim;\n\n // Do not using rounding; this implementation detail is critical\n int roi_batch_ind = bottom_roi_indices[n];\n T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;\n T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;\n T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;\n T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;\n\n // Force too small ROIs to be 1x1\n T roi_height = max(roi_end_h - roi_start_h, 0.1);\n T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n\n // Compute w and h at bottom\n T bin_size_h = roi_height / static_cast<T>(pooled_height);\n T bin_size_w = roi_width / static_cast<T>(pooled_width);\n\n // Compute c at bottom\n int gh = floor(\n static_cast<T>(ph) * group_size / pooled_height);\n int gw = floor(\n static_cast<T>(pw) * group_size / pooled_width);\n gh = min(max(gh, 0), group_size - 1);\n gw = min(max(gw, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_diff_offset =\n (roi_batch_ind * channel + c) * height * width;\n\n int top_offset =\n (n * pooled_dim + ctop) * pooled_height * pooled_width;\n T top_diff_this_bin =\n top_diff[top_offset + ph * pooled_width + pw];\n int maxidx = argmax_data[top_offset + ph * pooled_width + pw];\n\n if (maxidx != -1) {\n // We use roi_bin_grid to sample the grid and mimic integral\n int roi_bin_grid_h = (sampling_ratio_h > 0)\n ? sampling_ratio_h\n : ceil(roi_height / pooled_height); // e.g. = 2\n int roi_bin_grid_w = (sampling_ratio_w > 0)\n ? sampling_ratio_w\n : ceil(roi_width / pooled_width);\n\n int iy = maxidx / roi_bin_grid_w;\n int ix = maxidx % roi_bin_grid_w;\n\n T y = roi_start_h + ph * bin_size_h +\n static_cast<T>(iy + .5f) * bin_size_h /\n static_cast<T>(roi_bin_grid_h); // e.g. 
0.5, 1.5\n T x = roi_start_w + pw * bin_size_w +\n static_cast<T>(ix + .5f) * bin_size_w /\n static_cast<T>(roi_bin_grid_w);\n\n int y_low, y_high;\n bool y_ret = get_bounds(y, height, y_low, y_high);\n if (!y_ret) continue;\n int x_low, x_high;\n bool x_ret = get_bounds(x, width, x_low, x_high);\n if (!x_ret) continue;\n\n // bilinear_interpolation_gradient {{\n T w1, w2, w3, w4;\n get_bilinear_interp_params(\n y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);\n\n if (w1 > 0 && y_low >= 0 && x_low >= 0) {\n T g1 = top_diff_this_bin * w1;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_low * width + x_low], g1);\n }\n if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {\n T g2 = top_diff_this_bin * w2;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_low * width + x_high], g2);\n }\n if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {\n T g3 = top_diff_this_bin * w3;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_high * width + x_low], g3);\n }\n if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {\n T g4 = top_diff_this_bin * w4;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_high * width + x_high], g4);\n }\n\n // }}\n }\n """', '"""ps_roi_max_align_2d_bwd"""'], {'preamble': '_GET_BILINEAR_INTERP_KERNEL'}), '(\n """\n raw T top_diff, raw int32 argmax_data,\n raw T bottom_rois, raw int32 bottom_roi_indices,\n T spatial_scale, int32 channel, int32 height, int32 width,\n int32 pooled_dim, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w\n """\n , \'raw T bottom_diff\',\n """\n // (n, c, h, w) coords in bottom data\n int pw = i % pooled_width;\n int ph = (i / pooled_width) % pooled_height;\n int ctop = (i / pooled_width / pooled_height) % pooled_dim;\n int n = i / pooled_width / pooled_height / pooled_dim;\n\n // Do not using rounding; this implementation detail is critical\n int roi_batch_ind = bottom_roi_indices[n];\n T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;\n T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;\n T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;\n T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;\n\n // Force too small ROIs to be 1x1\n T roi_height = max(roi_end_h - roi_start_h, 0.1);\n T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n\n // Compute w and h at bottom\n T bin_size_h = roi_height / static_cast<T>(pooled_height);\n T bin_size_w = roi_width / static_cast<T>(pooled_width);\n\n // Compute c at bottom\n int gh = floor(\n static_cast<T>(ph) * group_size / pooled_height);\n int gw = floor(\n static_cast<T>(pw) * group_size / pooled_width);\n gh = min(max(gh, 0), group_size - 1);\n gw = min(max(gw, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_diff_offset =\n (roi_batch_ind * channel + c) * height * width;\n\n int top_offset =\n (n * pooled_dim + ctop) * pooled_height * pooled_width;\n T top_diff_this_bin =\n top_diff[top_offset + ph * pooled_width + pw];\n int maxidx = argmax_data[top_offset + ph * pooled_width + pw];\n\n if (maxidx != -1) {\n // We use roi_bin_grid to sample the grid and mimic integral\n int roi_bin_grid_h = (sampling_ratio_h > 0)\n ? sampling_ratio_h\n : ceil(roi_height / pooled_height); // e.g. = 2\n int roi_bin_grid_w = (sampling_ratio_w > 0)\n ? 
sampling_ratio_w\n : ceil(roi_width / pooled_width);\n\n int iy = maxidx / roi_bin_grid_w;\n int ix = maxidx % roi_bin_grid_w;\n\n T y = roi_start_h + ph * bin_size_h +\n static_cast<T>(iy + .5f) * bin_size_h /\n static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5\n T x = roi_start_w + pw * bin_size_w +\n static_cast<T>(ix + .5f) * bin_size_w /\n static_cast<T>(roi_bin_grid_w);\n\n int y_low, y_high;\n bool y_ret = get_bounds(y, height, y_low, y_high);\n if (!y_ret) continue;\n int x_low, x_high;\n bool x_ret = get_bounds(x, width, x_low, x_high);\n if (!x_ret) continue;\n\n // bilinear_interpolation_gradient {{\n T w1, w2, w3, w4;\n get_bilinear_interp_params(\n y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);\n\n if (w1 > 0 && y_low >= 0 && x_low >= 0) {\n T g1 = top_diff_this_bin * w1;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_low * width + x_low], g1);\n }\n if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {\n T g2 = top_diff_this_bin * w2;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_low * width + x_high], g2);\n }\n if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {\n T g3 = top_diff_this_bin * w3;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_high * width + x_low], g3);\n }\n if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {\n T g4 = top_diff_this_bin * w4;\n atomicAdd(&bottom_diff[\n bottom_diff_offset + y_high * width + x_high], g4);\n }\n\n // }}\n }\n """\n , \'ps_roi_max_align_2d_bwd\', preamble=_GET_BILINEAR_INTERP_KERNEL)\n', (19569, 24296), False, 'from chainer.backends import cuda\n'), ((5722, 5763), 'numpy.floor', 'np.floor', (['(ph * group_size / pooled_height)'], {}), '(ph * group_size / pooled_height)\n', (5730, 5763), True, 'import numpy as np\n'), ((5786, 5826), 'numpy.floor', 'np.floor', (['(pw * group_size / pooled_width)'], {}), '(pw * group_size / pooled_width)\n', (5794, 5826), True, 'import numpy as np\n'), ((6633, 6655), 'chainercv.functions.ps_roi_average_align_2d._get_bounds', '_get_bounds', (['y', 'height'], {}), '(y, height)\n', (6644, 6655), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bounds\n'), ((6776, 6807), 'six.moves.range', 'six.moves.range', (['roi_bin_grid_w'], {}), '(roi_bin_grid_w)\n', (6791, 6807), False, 'import six\n'), ((17733, 17755), 'chainercv.functions.ps_roi_average_align_2d._get_bounds', '_get_bounds', (['y', 'height'], {}), '(y, height)\n', (17744, 17755), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bounds\n'), ((17885, 17906), 'chainercv.functions.ps_roi_average_align_2d._get_bounds', '_get_bounds', (['x', 'width'], {}), '(x, width)\n', (17896, 17906), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bounds\n'), ((18088, 18151), 'chainercv.functions.ps_roi_average_align_2d._get_bilinear_interp_params', '_get_bilinear_interp_params', (['y', 'x', 'y_low', 'x_low', 'y_high', 'x_high'], {}), '(y, x, y_low, x_low, y_high, x_high)\n', (18115, 18151), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bilinear_interp_params\n'), ((6070, 6105), 'numpy.ceil', 'np.ceil', (['(roi_height / pooled_height)'], {}), '(roi_height / pooled_height)\n', (6077, 6105), True, 'import numpy as np\n'), ((6265, 6298), 'numpy.ceil', 'np.ceil', (['(roi_width / pooled_width)'], {}), '(roi_width / pooled_width)\n', (6272, 6298), True, 'import numpy as np\n'), ((6971, 6992), 'chainercv.functions.ps_roi_average_align_2d._get_bounds', '_get_bounds', (['x', 'width'], {}), '(x, width)\n', (6982, 6992), False, 'from 
chainercv.functions.ps_roi_average_align_2d import _get_bounds\n'), ((7181, 7244), 'chainercv.functions.ps_roi_average_align_2d._get_bilinear_interp_params', '_get_bilinear_interp_params', (['y', 'x', 'y_low', 'x_low', 'y_high', 'x_high'], {}), '(y, x, y_low, x_low, y_high, x_high)\n', (7208, 7244), False, 'from chainercv.functions.ps_roi_average_align_2d import _get_bilinear_interp_params\n'), ((17044, 17079), 'numpy.ceil', 'np.ceil', (['(roi_height / pooled_height)'], {}), '(roi_height / pooled_height)\n', (17051, 17079), True, 'import numpy as np\n'), ((17255, 17288), 'numpy.ceil', 'np.ceil', (['(roi_width / pooled_width)'], {}), '(roi_width / pooled_width)\n', (17262, 17288), True, 'import numpy as np\n')]
|
import pyqtgraph as pg
from pyqtgraph.dockarea import *
import numpy as np
import os
import numbers
try:
from PyQt4.QtGui import QFileDialog
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QMainWindow
except ImportError:
from PyQt5.QtWidgets import QFileDialog
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QMainWindow
from neutronbraggedge.experiment_handler import *
from ImagingReso import _utilities
from __code.ui_resonance_imaging_experiment_vs_theory import Ui_MainWindow as UiMainWindow
class ImageWindow(QMainWindow):
pen_color = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
pen_symbol = ['o', 's', 't', 'd', '+']
stack = []
integrated_stack = []
working_folder = ''
x_axis = {'file_index': [],
'tof': [],
'ev': [],
'lambda': []}
x_axis_label = {'file_index': 'file index',
'tof': u'TOF (\u00B5s)',
'ev': 'eV',
'lambda': u'\u03BB (\u212B)',
}
y_axis = {'label': 'Mean Counts', 'data': []}
elements_to_plot = {} # ex U, U235...etc to plot
spectra_file = ''
b_enable_only_file_index_button = True
def __init__(self, parent=None, stack=[], working_folder='', o_reso=None):
QMainWindow.__init__(self, parent=parent)
self.ui = UiMainWindow()
self.ui.setupUi(self)
self.setWindowTitle("Select Rotation Angle for All Images")
self.stack = np.array(stack)
self.integrated_stack = self.stack.sum(axis=0)
self.working_folder = working_folder
self.o_reso = o_reso
self.initialize_pyqtgraph()
self.init_label()
self.init_list_of_things_to_plot()
self.update_radio_button_status()
self.display_image()
self.update_x_axis()
self.roi_changed()
def update_plot(self):
# self.update_x_axis()
self.plot()
def init_label(self):
_tof_label = u"TOF (\u00B5s)"
self.ui.tof_radio_button.setText(_tof_label)
_lambda_label = u"lambda (\u212B)"
self.ui.lambda_radio_button.setText(_lambda_label)
_offset_label = u"\u00B5s"
self.ui.detector_offset_units.setText(_offset_label)
def display_image(self):
self.ui.image_view.setImage(self.integrated_stack)
def plot(self):
x_axis_selected = self.get_x_axis_selected()
x_axis_data = self.x_axis[x_axis_selected]
y_axis_data = self.y_axis['data']
# print("for {}".format(x_axis_selected))
# pprint.pprint(y_axis_data[0:10])
# pprint.pprint(x_axis_data[0:10])
# print()
y_axis_label = self.y_axis['label']
if x_axis_selected == 'ev':
y_axis_data = y_axis_data[::-1]
x_axis_data = x_axis_data[::-1]
x_axis_data = x_axis_data[0: len(y_axis_data)]
self.counts_vs_index.clear()
try:
self.legend.scene().removeItem(self.legend)
except:
pass
self.legend = self.counts_vs_index.addLegend()
self.counts_vs_index.plot(
x_axis_data, y_axis_data, name='Experimental')
self.counts_vs_index.setLabel('bottom', x_axis_selected)
self.counts_vs_index.setLabel('left', y_axis_label)
# plot all elements
elements_to_plot = self.elements_to_plot
_index_pen_color = 0
_index_pen_symbol = 0
for _label in elements_to_plot.keys():
_x_axis_data = elements_to_plot[_label]['x_axis']
_y_axis_data = elements_to_plot[_label]['y_axis']
self.counts_vs_index.plot(
_x_axis_data,
_y_axis_data,
name=_label,
pen=self.pen_color[_index_pen_color],
penSymbol=self.pen_symbol[_index_pen_symbol])
_index_pen_color += 1
if _index_pen_color >= len(self.pen_color):
_index_pen_color = 0
_index_pen_symbol += 1
if _index_pen_symbol == len(self.pen_symbol):
_index_pen_color = 0
_index_pen_symbol = 0
def initialize_pyqtgraph(self):
area = DockArea()
area.setVisible(True)
d1 = Dock("Image Integrated Preview", size=(300, 800))
d2 = Dock("Counts vs Image Index of Selection", size=(300, 800))
area.addDock(d1, 'right')
area.addDock(d2, 'left')
preview_widget = pg.GraphicsLayoutWidget()
pg.setConfigOptions(antialias=True)
# image view
self.ui.image_view = pg.ImageView()
self.ui.image_view.ui.menuBtn.hide()
self.ui.image_view.ui.roiBtn.hide()
# default ROI
self.ui.roi = pg.ROI([0, 0], [20, 20],
pen=(62, 13, 244),
scaleSnap=True) #blue
self.ui.roi.addScaleHandle([1, 1], [0, 0])
self.ui.image_view.addItem(self.ui.roi)
self.ui.roi.sigRegionChanged.connect(self.roi_changed)
d1.addWidget(self.ui.image_view)
self.counts_vs_index = pg.PlotWidget(title='')
self.counts_vs_index.plot()
d2.addWidget(self.counts_vs_index)
vertical_layout = QtGui.QVBoxLayout()
vertical_layout.addWidget(area)
self.ui.widget.setLayout(vertical_layout)
def roi_changed(self):
region = self.ui.roi.getArraySlice(self.integrated_stack,
self.ui.image_view.imageItem)
x0 = region[0][0].start
x1 = region[0][0].stop - 1
y0 = region[0][1].start
y1 = region[0][1].stop - 1
mean_selection = [_data[x0:x1, y0:y1].mean() for _data in self.stack]
self.y_axis['data'] = mean_selection
self.plot()
# x_axis
def get_x_axis_selected(self):
if self.ui.file_index_ratio_button.isChecked():
return 'file_index'
elif self.ui.tof_radio_button.isChecked():
return 'tof'
elif self.ui.lambda_radio_button.isChecked():
return 'lambda'
else:
return 'ev'
def update_radio_button_status(self):
x_axis_selected = self.get_x_axis_selected()
# enable or not list of element to display
if x_axis_selected == 'file_index':
list_status = False
else:
list_status = True
self.ui.list_to_plot_widget.setEnabled(list_status)
b_enable_only_file_index_button = False
spectra_file = self.spectra_file
if not os.path.exists(spectra_file):
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
distance_source_detector = self.ui.distance_source_detector_value.text()
if not distance_source_detector:
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
elif not isinstance(float(distance_source_detector), numbers.Number):
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
detector_offset = str(self.ui.detector_offset_value.text())
if not detector_offset:
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
elif not isinstance(float(detector_offset), numbers.Number):
x_axis_selected = 'file_index'
b_enable_only_file_index_button = True
self.set_radio_buttons_status(
b_enable_only_file_index_button=b_enable_only_file_index_button)
self.b_enable_only_file_index_button = b_enable_only_file_index_button
self.update_x_axis()
def update_x_axis(self):
self.x_axis['file_index'] = np.arange(len(self.stack))
if not self.b_enable_only_file_index_button:
# tof
spectra_file = self.spectra_file
_tof_handler = TOF(filename=spectra_file)
self.x_axis['tof'] = _tof_handler.tof_array
# lambda
distance_source_detector = self.ui.distance_source_detector_value.text()
detector_offset = str(self.ui.detector_offset_value.text())
_exp = Experiment(
tof=_tof_handler.tof_array,
distance_source_detector_m=float(distance_source_detector),
detector_offset_micros=float(detector_offset))
self.x_axis['lambda'] = _exp.lambda_array * 1e10
# ev
_exp = Experiment(tof = _tof_handler.tof_array,
distance_source_detector_m = float(distance_source_detector),
detector_offset_micros= float(detector_offset))
_exp_ev = _utilities.convert_x_axis(array=_exp.lambda_array*1e10,
from_units='angstroms',
to_units='ev',
offset_us=float(detector_offset),
source_to_detector_m=float(distance_source_detector))
# _exp_ev = np.linspace(1, 3000, len(_tof_handler.tof_array))
# import scipy
# _exp_ev = scipy.random.ranf(len(_tof_handler.tof_array)) * 3000000
# _exp_ev.sort()
# _exp_ev = _exp_ev[::-1]
self.x_axis['ev'] = _exp_ev
# with open('/users/j35/Desktop/test_output.txt', 'w') as f:
# for _data in _exp_ev:
# f.write(str(_data) + '\n')
else:
self.x_axis['ev'] = []
self.x_axis['tof'] = []
self.x_axis['lambda'] = []
def set_radio_buttons_status(self, b_enable_only_file_index_button=False):
self.ui.tof_radio_button.setEnabled(
not b_enable_only_file_index_button)
self.ui.lambda_radio_button.setEnabled(
not b_enable_only_file_index_button)
self.ui.energy_radio_button.setEnabled(
not b_enable_only_file_index_button)
if b_enable_only_file_index_button:
self.ui.file_index_ratio_button.setChecked(True)
def radio_button_clicked(self):
self.update_radio_button_status()
self.plot()
def distance_source_detector_validated(self):
self.update_radio_button_status()
self.update_x_axis()
self.plot()
def detector_offset_validated(self):
self.update_radio_button_status()
self.update_x_axis()
self.plot()
def time_spectra_file_browse_button_clicked(self):
spectra_file = QFileDialog.getOpenFileName(
caption='Select Time Spectra',
directory=self.working_folder,
filter='txt (*_Spectra.txt);;All (*.*)')
if spectra_file:
self.ui.time_spectra_file.setText(os.path.basename(spectra_file))
self.spectra_file = spectra_file
self.update_radio_button_status()
self.update_x_axis()
self.plot()
def init_list_of_things_to_plot(self):
list_things_to_plot = []
stack = self.o_reso.stack
list_layers = stack.keys()
for _layer in list_layers:
list_things_to_plot.append(_layer)
list_elements = stack[_layer]['elements']
for _element in list_elements:
list_things_to_plot.append(_layer + ' -> ' + _element)
list_isotopes = stack[_layer][_element]['isotopes']['list']
for _isotope in list_isotopes:
list_things_to_plot.append(_layer + ' -> ' + _element +
' -> ' + _isotope)
self.ui.list_to_plot_widget.addItems(list_things_to_plot)
def done_button_clicked(self):
self.close()
def plot_selection_changed(self, item):
_elements_to_plot = {}
# init
x_axis_ev = []
x_axis_selected = self.get_x_axis_selected()
if x_axis_selected == 'file_index':
self.elements_to_plot = _elements_to_plot
return
# retrieve data to display
for _item in self.ui.list_to_plot_widget.selectedIndexes():
_row_selected = _item.row()
_text = self.ui.list_to_plot_widget.item(_row_selected).text()
_layer_element_isotope = self.__parse_layer_element_isotope(_text)
_layer = _layer_element_isotope['layer']
_element = _layer_element_isotope['element']
_isotope = _layer_element_isotope['isotope']
if _element == '':
transmission = self.o_reso.stack_signal[_layer]['transmission']
x_axis_ev = self.o_reso.stack_signal[_layer]['energy_eV']
elif _isotope == '':
transmission = self.o_reso.stack_signal[_layer][_element][
'transmission']
x_axis_ev = self.o_reso.stack_signal[_layer][_element][
'energy_eV']
else:
transmission = self.o_reso.stack_signal[_layer][_element][
_isotope]['transmission']
x_axis_ev = self.o_reso.stack_signal[_layer][_element][
_isotope]['energy_eV']
_elements_to_plot[_text] = {}
_elements_to_plot[_text]['y_axis'] = transmission
x_axis = []
if x_axis_selected == 'lambda':
x_axis = _utilities.convert_x_axis(
array=x_axis_ev, from_units='ev', to_units='angstroms')
elif x_axis_selected == 'tof':
detector_offset = float(self.ui.detector_offset_value.text())
distance_source_detector = float(
self.ui.distance_source_detector_value.text())
x_axis = _utilities.convert_x_axis(
array=x_axis_ev,
from_units='ev',
to_units='s',
offset_us=detector_offset,
source_to_detector_m=distance_source_detector)
else: # ev
x_axis = x_axis_ev
_elements_to_plot[_text]['x_axis'] = x_axis
self.elements_to_plot = _elements_to_plot
self.plot()
def __parse_layer_element_isotope(self, text):
        ''' Parse a "layer -> element -> isotope" selection string into a
        dictionary describing the data to plot.
        '''
_dict = {'layer': '', 'element': '', 'isotope': ''}
parse_text = text.split(' -> ')
_dict['layer'] = parse_text[0]
if len(parse_text) >= 2:
_dict['element'] = parse_text[1]
if len(parse_text) >= 3:
_dict['isotope'] = parse_text[2]
return _dict
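    # Hypothetical illustration (material names are made up, not taken from this file):
    # __parse_layer_element_isotope('gadolinium -> Gd -> 157-Gd') would return
    #     {'layer': 'gadolinium', 'element': 'Gd', 'isotope': '157-Gd'}
    # while a layer-only selection such as 'gadolinium' leaves 'element' and 'isotope' empty.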
def closeEvent(self, event=None):
pass
|
[
"PyQt5.QtWidgets.QMainWindow.__init__",
"os.path.basename",
"pyqtgraph.PlotWidget",
"os.path.exists",
"pyqtgraph.ImageView",
"PyQt5.QtGui.QVBoxLayout",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"numpy.array",
"pyqtgraph.ROI",
"pyqtgraph.setConfigOptions",
"ImagingReso._utilities.convert_x_axis",
"__code.ui_resonance_imaging_experiment_vs_theory.Ui_MainWindow",
"pyqtgraph.GraphicsLayoutWidget"
] |
[((1319, 1360), 'PyQt5.QtWidgets.QMainWindow.__init__', 'QMainWindow.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (1339, 1360), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow\n'), ((1379, 1393), '__code.ui_resonance_imaging_experiment_vs_theory.Ui_MainWindow', 'UiMainWindow', ([], {}), '()\n', (1391, 1393), True, 'from __code.ui_resonance_imaging_experiment_vs_theory import Ui_MainWindow as UiMainWindow\n'), ((1514, 1529), 'numpy.array', 'np.array', (['stack'], {}), '(stack)\n', (1522, 1529), True, 'import numpy as np\n'), ((4558, 4583), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (4581, 4583), True, 'import pyqtgraph as pg\n'), ((4592, 4627), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'antialias': '(True)'}), '(antialias=True)\n', (4611, 4627), True, 'import pyqtgraph as pg\n'), ((4679, 4693), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {}), '()\n', (4691, 4693), True, 'import pyqtgraph as pg\n'), ((4828, 4887), 'pyqtgraph.ROI', 'pg.ROI', (['[0, 0]', '[20, 20]'], {'pen': '(62, 13, 244)', 'scaleSnap': '(True)'}), '([0, 0], [20, 20], pen=(62, 13, 244), scaleSnap=True)\n', (4834, 4887), True, 'import pyqtgraph as pg\n'), ((5188, 5211), 'pyqtgraph.PlotWidget', 'pg.PlotWidget', ([], {'title': '""""""'}), "(title='')\n", (5201, 5211), True, 'import pyqtgraph as pg\n'), ((5318, 5337), 'PyQt5.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (5335, 5337), False, 'from PyQt5 import QtCore, QtGui\n'), ((10755, 10890), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', ([], {'caption': '"""Select Time Spectra"""', 'directory': 'self.working_folder', 'filter': '"""txt (*_Spectra.txt);;All (*.*)"""'}), "(caption='Select Time Spectra', directory=self.\n working_folder, filter='txt (*_Spectra.txt);;All (*.*)')\n", (10782, 10890), False, 'from PyQt5.QtWidgets import QFileDialog\n'), ((6642, 6670), 'os.path.exists', 'os.path.exists', (['spectra_file'], {}), '(spectra_file)\n', (6656, 6670), False, 'import os\n'), ((10994, 11024), 'os.path.basename', 'os.path.basename', (['spectra_file'], {}), '(spectra_file)\n', (11010, 11024), False, 'import os\n'), ((13599, 13685), 'ImagingReso._utilities.convert_x_axis', '_utilities.convert_x_axis', ([], {'array': 'x_axis_ev', 'from_units': '"""ev"""', 'to_units': '"""angstroms"""'}), "(array=x_axis_ev, from_units='ev', to_units=\n 'angstroms')\n", (13624, 13685), False, 'from ImagingReso import _utilities\n'), ((13965, 14116), 'ImagingReso._utilities.convert_x_axis', '_utilities.convert_x_axis', ([], {'array': 'x_axis_ev', 'from_units': '"""ev"""', 'to_units': '"""s"""', 'offset_us': 'detector_offset', 'source_to_detector_m': 'distance_source_detector'}), "(array=x_axis_ev, from_units='ev', to_units='s',\n offset_us=detector_offset, source_to_detector_m=distance_source_detector)\n", (13990, 14116), False, 'from ImagingReso import _utilities\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import math
import itertools_recipes as it
data=np.array([[1,1],[5,2],[3,3],[0,2],[9,4],[4,8]])
x=data[:,0]
y=data[:,1]
def choose():
q=[]
u=list(it.permutations([0,1,2,3,4,5],6))
m=np.zeros((6,2))
n=np.zeros((6,2))
for i in range(len(u)):
m[0]=data[u[i][0]]
m[1]=data[u[i][1]]
m[2]=data[u[i][2]]
m[3]=data[u[i][3]]
m[4]=data[u[i][4]]
m[5]=data[u[i][5]]
        q.append(distance(m))
k=min(q)
    print('Shortest route length:', k)
g=q.index(k)
n[0] = data[u[g][0]]
n[1] = data[u[g][1]]
n[2] = data[u[g][2]]
n[3] = data[u[g][3]]
n[4] = data[u[g][4]]
n[5] = data[u[g][5]]
print(n)
draw_a_line(n)
def draw_a_line(w):
i=0
for i in range(5):
a=np.linspace(w[i,0],w[i+1,0],100)
b=np.linspace(w[i,1],w[i+1,1],100)
plt.plot(a,b,'.')
c=np.linspace(w[0,0],w[5,0],100)
d=np.linspace(w[0,1],w[5,1],100)
plt.plot(c,d,'.')
def distance(w):
i=0
sum=0
e=[]
for i in range(5):
e.append(math.sqrt((w[i+1,0]-w[i,0])**2+(w[i+1,1]-w[i,1])**2))
sum=sum+e[i]
sum=sum+math.sqrt((w[5,0]-w[1,0])**2+(w[5,1]-w[1,1])**2)
return(sum)
choose()
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"math.sqrt",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"itertools_recipes.permutations"
] |
[((105, 163), 'numpy.array', 'np.array', (['[[1, 1], [5, 2], [3, 3], [0, 2], [9, 4], [4, 8]]'], {}), '([[1, 1], [5, 2], [3, 3], [0, 2], [9, 4], [4, 8]])\n', (113, 163), True, 'import numpy as np\n'), ((1310, 1320), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1318, 1320), True, 'import matplotlib.pyplot as plt\n'), ((259, 275), 'numpy.zeros', 'np.zeros', (['(6, 2)'], {}), '((6, 2))\n', (267, 275), True, 'import numpy as np\n'), ((282, 298), 'numpy.zeros', 'np.zeros', (['(6, 2)'], {}), '((6, 2))\n', (290, 298), True, 'import numpy as np\n'), ((958, 992), 'numpy.linspace', 'np.linspace', (['w[0, 0]', 'w[5, 0]', '(100)'], {}), '(w[0, 0], w[5, 0], 100)\n', (969, 992), True, 'import numpy as np\n'), ((996, 1030), 'numpy.linspace', 'np.linspace', (['w[0, 1]', 'w[5, 1]', '(100)'], {}), '(w[0, 1], w[5, 1], 100)\n', (1007, 1030), True, 'import numpy as np\n'), ((1032, 1051), 'matplotlib.pyplot.plot', 'plt.plot', (['c', 'd', '"""."""'], {}), "(c, d, '.')\n", (1040, 1051), True, 'import matplotlib.pyplot as plt\n'), ((218, 256), 'itertools_recipes.permutations', 'it.permutations', (['[0, 1, 2, 3, 4, 5]', '(6)'], {}), '([0, 1, 2, 3, 4, 5], 6)\n', (233, 256), True, 'import itertools_recipes as it\n'), ((849, 887), 'numpy.linspace', 'np.linspace', (['w[i, 0]', 'w[i + 1, 0]', '(100)'], {}), '(w[i, 0], w[i + 1, 0], 100)\n', (860, 887), True, 'import numpy as np\n'), ((892, 930), 'numpy.linspace', 'np.linspace', (['w[i, 1]', 'w[i + 1, 1]', '(100)'], {}), '(w[i, 1], w[i + 1, 1], 100)\n', (903, 930), True, 'import numpy as np\n'), ((933, 952), 'matplotlib.pyplot.plot', 'plt.plot', (['a', 'b', '"""."""'], {}), "(a, b, '.')\n", (941, 952), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1293), 'math.sqrt', 'math.sqrt', (['((w[5, 0] - w[1, 0]) ** 2 + (w[5, 1] - w[1, 1]) ** 2)'], {}), '((w[5, 0] - w[1, 0]) ** 2 + (w[5, 1] - w[1, 1]) ** 2)\n', (1240, 1293), False, 'import math\n'), ((1142, 1212), 'math.sqrt', 'math.sqrt', (['((w[i + 1, 0] - w[i, 0]) ** 2 + (w[i + 1, 1] - w[i, 1]) ** 2)'], {}), '((w[i + 1, 0] - w[i, 0]) ** 2 + (w[i + 1, 1] - w[i, 1]) ** 2)\n', (1151, 1212), False, 'import math\n')]
|
import os
import numpy as np
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
def minibatch_loader(minibatch, minibatch_size, drop_last=True):
return DataLoader(minibatch, batch_size=minibatch_size, drop_last=drop_last)
def get_next_available_dir(root, dir_name, absolute_path=True, create=True):
checkpoint_dir_base = os.path.join(root, dir_name)
dir_id = 1
checkpoint_dir = f"{checkpoint_dir_base}_{dir_id}"
while os.path.exists(checkpoint_dir):
dir_id += 1
checkpoint_dir = f"{checkpoint_dir_base}_{dir_id}"
if create:
os.mkdir(checkpoint_dir)
if absolute_path:
return checkpoint_dir
else:
return f"{dir_name}_{dir_id}"
def _plot(data_dict, x_label, y_label, title):
fig, ax = plt.subplots() # (figsize=(10,10))
if 'x_ticks' in data_dict:
x_values = data_dict.pop('x_ticks')
if len(x_values) > 20:
x_values = None # too crowded to read on the figure
else:
x_values = None
max_x_range_len = 0
for name, data in data_dict.items():
if x_values is not None:
ax.plot(list(range(len(x_values))), data, label=name)
else:
ax.plot(data, label=name)
if x_values is not None:
plt.xticks(list(range(len(x_values))), x_values)
ax.legend()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.show()
return fig
def load_and_plot_privacy_param_variation():
eps1 = {'name': 'eps_1', 'fp': './eps1.npy', }
noise_multiplier = {'name': 'noise_multiplier', 'fp': './noise_multiplier.npy'}
eps3 = {'name': 'eps_3', 'fp': './eps3.npy'}
files = [eps1, noise_multiplier, eps3]
curve_names = ['Test accuracy', 'MEA fidelity', 'MIA accuracy']
for data_file in files:
data = dict()
with open(data_file['fp'], 'rb') as f:
data['x_ticks'] = np.load(f)
for curve in curve_names:
data[curve] = np.load(f)
# data['x_ticks'] = np.array(data_file['rng'])
_plot(data, data_file['name'], 'Privacy and Utility', 'Small CNN on Cifar10')
def load_and_plot_learning_curves():
def fetch(fs, metric_name):
metric_data = dict()
for f in fs:
metric_data[f['name']] = f[metric_name]
return metric_data
metrics = ['val_acc']
msdp = {'name': 'MSDPFL', 'fp': "outFL/MNIST/low_eps/msdpfl/stats.npy"}
opacus = {'name': 'Opacus FL', 'fp': "outFL/MNIST/low_eps/opacusfl/stats.npy"}
non_p = {'name': 'Non-Private FL', 'fp': "outFL/MNIST/npfl/stats.npy"}
title = 'Highly private FL training on MNIST'
files = [msdp, opacus, non_p]
for data_file in files:
data = dict()
with open(data_file['fp'], 'rb') as f:
for metric in metrics:
data[metric] = np.load(f)
data_file.update(**data)
for metric in metrics:
metric_data = fetch(files, metric)
f = _plot(metric_data, 'Epochs', metric, title)
if metric == 'val_acc':
f.savefig(f"./val_acc.png", bbox_inches='tight')
def load_and_plot_dr():
def fetch(fs, metric_name):
metric_data = dict()
for f in fs:
metric_data[f['name']] = f[metric_name]
return metric_data
def dr_plot(data_dict, x_label, y_label, title):
fig, ax = plt.subplots() # (figsize=(10,10))
for name, data in data_dict.items():
ax.plot(data, label=name)
ax.legend()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.show()
metrics = {'centralised': ['train_loss', 'train_acc', 'val_acc'],
'fl': ['val_acc']
}
msdp = {'name': 'MSDP', 'fp': "out_centralMSDP/DR/msdp/MSDPTrainer_0_plot_stats.npy"}
msdpfl = {'name': 'MSDPFL', 'fp': "outFL/DR/msdpfl/stats.npy"}
opacus = {'name': 'Opacus', 'fp': "out_centralMSDP/DR/opacus/MSDPTrainer_0_plot_stats.npy"}
opacusfl = {'name': 'OpacusFL', 'fp': "outFL/DR/opacus_fl/stats.npy"}
non_p = {'name': 'Non-Private', 'fp': "out_centralMSDP/DR/np/MSDPTrainer_0_plot_stats.npy"}
non_pfl = {'name': 'Non-Private FL', 'fp': "outFL/DR/np_fl/stats.npy"}
title = 'FL training on DR'
central = [msdp, opacus, non_p]
fl = [msdpfl, opacusfl, non_pfl]
files = central + fl
for data_file in files:
data = dict()
if data_file in central:
metric_type = 'centralised'
else:
metric_type = 'fl'
with open(data_file['fp'], 'rb') as f:
for metric in metrics[metric_type]:
data[metric] = np.load(f)
data_file.update(**data)
for metric in ['val_acc']:
metric_data = fetch(files, metric)
dr_plot(metric_data, 'Epochs/ Rounds', metric, title)
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"numpy.load",
"matplotlib.pyplot.show",
"torch.utils.data.DataLoader",
"os.path.exists",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join"
] |
[((183, 252), 'torch.utils.data.DataLoader', 'DataLoader', (['minibatch'], {'batch_size': 'minibatch_size', 'drop_last': 'drop_last'}), '(minibatch, batch_size=minibatch_size, drop_last=drop_last)\n', (193, 252), False, 'from torch.utils.data import DataLoader\n'), ((356, 384), 'os.path.join', 'os.path.join', (['root', 'dir_name'], {}), '(root, dir_name)\n', (368, 384), False, 'import os\n'), ((459, 489), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (473, 489), False, 'import os\n'), ((753, 767), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (765, 767), True, 'from matplotlib import pyplot as plt\n'), ((1264, 1283), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (1274, 1283), True, 'from matplotlib import pyplot as plt\n'), ((1286, 1305), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (1296, 1305), True, 'from matplotlib import pyplot as plt\n'), ((1308, 1324), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1317, 1324), True, 'from matplotlib import pyplot as plt\n'), ((1327, 1337), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1335, 1337), True, 'from matplotlib import pyplot as plt\n'), ((579, 603), 'os.mkdir', 'os.mkdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (587, 603), False, 'import os\n'), ((3138, 3152), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3150, 3152), True, 'from matplotlib import pyplot as plt\n'), ((3267, 3286), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {}), '(x_label)\n', (3277, 3286), True, 'from matplotlib import pyplot as plt\n'), ((3291, 3310), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (3301, 3310), True, 'from matplotlib import pyplot as plt\n'), ((3315, 3331), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3324, 3331), True, 'from matplotlib import pyplot as plt\n'), ((3336, 3346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3344, 3346), True, 'from matplotlib import pyplot as plt\n'), ((1797, 1807), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1804, 1807), True, 'import numpy as np\n'), ((1862, 1872), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1869, 1872), True, 'import numpy as np\n'), ((2664, 2674), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (2671, 2674), True, 'import numpy as np\n'), ((4323, 4333), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (4330, 4333), True, 'import numpy as np\n')]
|
'''
PISA module to prep incoming data into formats that are
compatible with the mc_uncertainty likelihood formulation
This module takes in events containers from the pipeline, and
introduces an additional array giving the indices where each
event falls into.
module structure imported from bootcamp example
'''
from __future__ import absolute_import, print_function, division
__author__ = "<NAME> (<EMAIL>)"
import numpy as np
from pisa import FTYPE
from pisa.core.pi_stage import PiStage
#from pisa.utils.log import logging
# Load the modified index lookup function
from pisa.core.bin_indexing import lookup_indices
class add_indices(PiStage):
"""
PISA Pi stage to map out the index of the analysis
binning where each event falls into.
Parameters
----------
data
params
foo : Quantity
    bar : Quantity with time dimension
input_names
output_names
debug_mode
input_specs:
calc_specs : must be events
output_specs: must be a MultiDimBinnig
Notes:
------
- input and calc specs are predetermined in the module
(inputs from the config files will be disregarded)
- stage appends an array quantity called bin_indices
- stage also appends an array mask to access events by
bin index later in the pipeline
"""
# this is the constructor with default arguments
def __init__(self,
data=None,
params=None,
input_names=None,
output_names=None,
debug_mode=None,
input_specs=None,
calc_specs=None,
output_specs=None,
):
#
# No parameters are expected in this stage
# same goes for a bunch of other stage options
#
expected_params = ()
input_names = ()
output_names = ()
input_apply_keys = ()
# We add the bin_indices key
# (but not in the apply function so maybe useless...)
#
output_calc_keys = ('bin_indices',)
output_apply_keys = ()
# init base class
super(add_indices, self).__init__(data=data,
params=params,
expected_params=expected_params,
input_names=input_names,
output_names=output_names,
debug_mode=debug_mode,
input_specs=input_specs,
calc_specs=calc_specs,
output_specs=output_specs,
input_apply_keys=input_apply_keys,
output_apply_keys=output_apply_keys,
output_calc_keys=output_calc_keys,
)
# make sure the user specified some modes
assert self.input_mode is not None
assert self.calc_mode is not None
assert self.output_mode is not None
def setup_function(self):
'''
Calculate the bin index where each event falls into
Create one mask for each analysis bin.
'''
        assert self.calc_specs == 'events', 'ERROR: calc specs must be set to "events" for this module'
self.data.data_specs = 'events'
for container in self.data:
# Generate a new container called bin_indices
container['bin_indices'] = np.empty((container.size), dtype=np.int64)
variables_to_bin = []
for bin_name in self.output_specs.names:
variables_to_bin.append(container[bin_name])
new_array = lookup_indices(sample=variables_to_bin,
binning=self.output_specs)
new_array = new_array.get('host')
np.copyto(src=new_array, dst=container["bin_indices"].get('host'))
for bin_i in range(self.output_specs.tot_num_bins):
container.add_array_data(key='bin_{}_mask'.format(bin_i),
data=(new_array == bin_i))
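    # Sketch of how a later pipeline stage could consume what this stage appends
    # (assumption: 'weights' stands in for some other per-event array on the container):
    #     indices = container['bin_indices'].get('host')   # analysis-bin index per event
    #     mask_0  = container['bin_0_mask'].get('host')    # boolean mask selecting events in bin 0
    #     w_bin_0 = container['weights'].get('host')[mask_0]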
|
[
"numpy.empty",
"pisa.core.bin_indexing.lookup_indices"
] |
[((3599, 3639), 'numpy.empty', 'np.empty', (['container.size'], {'dtype': 'np.int64'}), '(container.size, dtype=np.int64)\n', (3607, 3639), True, 'import numpy as np\n'), ((3818, 3884), 'pisa.core.bin_indexing.lookup_indices', 'lookup_indices', ([], {'sample': 'variables_to_bin', 'binning': 'self.output_specs'}), '(sample=variables_to_bin, binning=self.output_specs)\n', (3832, 3884), False, 'from pisa.core.bin_indexing import lookup_indices\n')]
|
import torch
import numpy as np
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
batch_size = target.size(0)
pred = torch.argmax(output, dim=1)
pred = pred.squeeze()
correct = pred.eq(target.expand_as(pred))
acc = correct.view(-1).float().sum(0) * 100 / (batch_size)
return acc
def sliding_accuracy(logits, target, slider_length):
'''
    Compute the accuracy while averaging the logits over slider_length frames.
    The window accumulates at the beginning of the sequence, and the averaged
    prediction is assigned to the last frame in the slider.
'''
n_examples = target.size(0)
pred = torch.zeros_like(logits)
for i in range(logits.size(2)):
pred[:, :, i] = torch.mean(logits[:, :, np.max([0, i - slider_length]):i + 1], dim=2)
pred = torch.argmax(pred, dim=1)
pred = pred.squeeze().view(-1)
correct = pred.eq(target)
acc = correct.view(-1).float().sum(0) * 100 / n_examples
return acc, pred
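# Illustrative call (shapes assumed from how the function indexes its inputs:
# `logits` of size (batch, n_classes, n_frames) and a flat `target` holding one
# label per frame):
#     acc, smoothed_pred = sliding_accuracy(logits, target, slider_length=12)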
def accuracy_v2(output, target):
"""Computes the precision@k for the specified values of k"""
batch_size = target.size(0)
n_frames = target.size(1)
correct = output.eq(target.expand_as(output))
acc = correct.view(-1).float().sum(0) * 100 / (batch_size*n_frames)
return acc
def accuracy_topk(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def post_process_logits(per_frame_logits, average=False, num_frames_to_avg=12, threshold = 0.7):
if average:
last_frame_logits = torch.mean(per_frame_logits[:, :, -num_frames_to_avg - 1:-1], dim=2)
label_ind = torch.argmax(last_frame_logits, dim=1).item()
last_frame_logits = torch.nn.functional.softmax(last_frame_logits, dim=1).squeeze()
else:
per_frame_logits = torch.nn.functional.softmax(per_frame_logits, dim=1)
_, pred = per_frame_logits.topk(1, 1, True, True)
label_ind = pred.squeeze()[-1].item()
last_frame_logits = per_frame_logits[0, :, -1].squeeze()
if last_frame_logits[label_ind] < threshold:
label_ind = 0
return label_ind, last_frame_logits
def make_weights_for_balanced_classes(clip_set, label_count):
""" compute the weight per clip for the weighted random sampler"""
n_clips = len(clip_set)
nclasses = len(label_count)
N = label_count.sum()
weight_per_class = [0.] * nclasses
for i in range(nclasses):
weight_per_class[i] = N/float(label_count[i])
weight = [0] * n_clips
for idx, clip in enumerate(clip_set):
clip_label_sum = clip[1].sum(axis=1)
if clip_label_sum.sum() == 0:
print("zero!!!")
ratios = clip_label_sum / clip_label_sum.sum()
weight[idx] = np.dot(weight_per_class, ratios)
return weight
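# The returned per-clip weights are intended for a weighted random sampler, e.g.
# (usage sketch; `train_set` and its attributes are assumptions, not part of this file):
#     weights = make_weights_for_balanced_classes(train_set.clip_set, train_set.label_count)
#     sampler = torch.utils.data.WeightedRandomSampler(torch.DoubleTensor(weights), len(weights))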
|
[
"torch.mean",
"torch.zeros_like",
"torch.argmax",
"torch.nn.functional.softmax",
"numpy.max",
"numpy.dot"
] |
[((171, 198), 'torch.argmax', 'torch.argmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (183, 198), False, 'import torch\n'), ((655, 679), 'torch.zeros_like', 'torch.zeros_like', (['logits'], {}), '(logits)\n', (671, 679), False, 'import torch\n'), ((822, 847), 'torch.argmax', 'torch.argmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (834, 847), False, 'import torch\n'), ((1888, 1956), 'torch.mean', 'torch.mean', (['per_frame_logits[:, :, -num_frames_to_avg - 1:-1]'], {'dim': '(2)'}), '(per_frame_logits[:, :, -num_frames_to_avg - 1:-1], dim=2)\n', (1898, 1956), False, 'import torch\n'), ((2152, 2204), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['per_frame_logits'], {'dim': '(1)'}), '(per_frame_logits, dim=1)\n', (2179, 2204), False, 'import torch\n'), ((3090, 3122), 'numpy.dot', 'np.dot', (['weight_per_class', 'ratios'], {}), '(weight_per_class, ratios)\n', (3096, 3122), True, 'import numpy as np\n'), ((1977, 2015), 'torch.argmax', 'torch.argmax', (['last_frame_logits'], {'dim': '(1)'}), '(last_frame_logits, dim=1)\n', (1989, 2015), False, 'import torch\n'), ((2051, 2104), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['last_frame_logits'], {'dim': '(1)'}), '(last_frame_logits, dim=1)\n', (2078, 2104), False, 'import torch\n'), ((764, 794), 'numpy.max', 'np.max', (['[0, i - slider_length]'], {}), '([0, i - slider_length])\n', (770, 794), True, 'import numpy as np\n')]
|
import tensorflow as tf
import tflearn
import numpy as np
import re
from model import SelfAttentive
from sklearn.utils import shuffle
from reader import load_csv, VocabDict
'''
parse
'''
tf.app.flags.DEFINE_integer('num_epochs', 5, 'number of epochs to train')
tf.app.flags.DEFINE_integer('batch_size', 20, 'batch size to train in one step')
tf.app.flags.DEFINE_integer('labels', 5, 'number of label classes')
tf.app.flags.DEFINE_integer('word_pad_length', 60, 'word pad length for training')
tf.app.flags.DEFINE_integer('decay_step', 500, 'decay steps')
tf.app.flags.DEFINE_float('learn_rate', 1e-2, 'learn rate for training optimization')
tf.app.flags.DEFINE_boolean('shuffle', True, 'shuffle data FLAG')
tf.app.flags.DEFINE_boolean('train', True, 'train mode FLAG')
tf.app.flags.DEFINE_boolean('visualize', False, 'visualize FLAG')
tf.app.flags.DEFINE_boolean('penalization', True, 'penalization FLAG')
FLAGS = tf.app.flags.FLAGS
num_epochs = FLAGS.num_epochs
batch_size = FLAGS.batch_size
tag_size = FLAGS.labels
word_pad_length = FLAGS.word_pad_length
lr = FLAGS.learn_rate
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+", re.UNICODE)
def token_parse(iterator):
for value in iterator:
return TOKENIZER_RE.findall(value)
tokenizer = tflearn.data_utils.VocabularyProcessor(word_pad_length, tokenizer_fn=lambda tokens: [token_parse(x) for x in tokens])
label_dict = VocabDict()
def string_parser(arr, fit):
if fit == False:
return list(tokenizer.transform(arr))
else:
return list(tokenizer.fit_transform(arr))
model = SelfAttentive()
with tf.Session() as sess:
# build graph
model.build_graph(n=word_pad_length)
# Downstream Application
with tf.variable_scope('DownstreamApplication'):
global_step = tf.Variable(0, trainable=False, name='global_step')
learn_rate = tf.train.exponential_decay(lr, global_step, FLAGS.decay_step, 0.95, staircase=True)
labels = tf.placeholder('float32', shape=[None, tag_size])
net = tflearn.fully_connected(model.M, 2000, activation='relu')
logits = tflearn.fully_connected(net, tag_size, activation=None)
loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits), axis=1)
if FLAGS.penalization == True:
p_coef = 0.004
p_loss = p_coef * model.P
loss = loss + p_loss
p_loss = tf.reduce_mean(p_loss)
loss = tf.reduce_mean(loss)
params = tf.trainable_variables()
#clipped_gradients = [tf.clip_by_value(x, -0.5, 0.5) for x in gradients]
optimizer = tf.train.AdamOptimizer(learn_rate)
grad_and_vars = tf.gradients(loss, params)
clipped_gradients, _ = tf.clip_by_global_norm(grad_and_vars, 0.5)
opt = optimizer.apply_gradients(zip(clipped_gradients, params), global_step=global_step)
# Start Training
sess.run(tf.global_variables_initializer())
words, tags = load_csv('./data/ag_news_csv/train.csv', target_columns=[0], columns_to_ignore=[1], target_dict=label_dict)
words = string_parser(words, fit=True)
if FLAGS.shuffle == True:
words, tags = shuffle(words, tags)
word_input = tflearn.data_utils.pad_sequences(words, maxlen=word_pad_length)
total = len(word_input)
step_print = int((total/batch_size) / 13)
if FLAGS.train == True:
print('start training')
for epoch_num in range(num_epochs):
epoch_loss = 0
step_loss = 0
for i in range(int(total/batch_size)):
batch_input, batch_tags = (word_input[i*batch_size:(i+1)*batch_size], tags[i*batch_size:(i+1)*batch_size])
train_ops = [opt, loss, learn_rate, global_step]
if FLAGS.penalization == True:
train_ops += [p_loss]
result = sess.run(train_ops, feed_dict={model.input_pl: batch_input, labels: batch_tags})
step_loss += result[1]
epoch_loss += result[1]
        if i % step_print == 0:
if FLAGS.penalization == True:
print(f'step_log: (epoch: {epoch_num}, step: {i}, global_step: {result[3]}, learn_rate: {result[2]}), Loss: {step_loss/step_print}, Penalization: {result[4]})')
else:
print(f'step_log: (epoch: {epoch_num}, step: {i}, global_step: {result[3]}, learn_rate: {result[2]}), Loss: {step_loss/step_print})')
#print(f'{result[4]}')
step_loss = 0
print('***')
print(f'epoch {epoch_num}: (global_step: {result[3]}), Average Loss: {epoch_loss/(total/batch_size)})')
print('***\n')
saver = tf.train.Saver()
saver.save(sess, './model.ckpt')
else:
saver = tf.train.Saver()
saver.restore(sess, './model.ckpt')
words, tags = load_csv('./data/ag_news_csv/test.csv', target_columns=[0], columns_to_ignore=[1], target_dict=label_dict)
words_with_index = string_parser(words, fit=True)
word_input = tflearn.data_utils.pad_sequences(words_with_index, maxlen=word_pad_length)
total = len(word_input)
rs = 0.
if FLAGS.visualize == True:
f = open('visualize.html', 'w')
f.write('<html style="margin:0;padding:0;"><body style="margin:0;padding:0;">\n')
for i in range(int(total/batch_size)):
batch_input, batch_tags = (word_input[i*batch_size:(i+1)*batch_size], tags[i*batch_size:(i+1)*batch_size])
result = sess.run([logits, model.A], feed_dict={model.input_pl: batch_input, labels: batch_tags})
arr = result[0]
for j in range(len(batch_tags)):
rs+=np.sum(np.argmax(arr[j]) == np.argmax(batch_tags[j]))
if FLAGS.visualize == True:
f.write('<div style="margin:25px;">\n')
for k in range(len(result[1][0])):
f.write('<p style="margin:10px;">\n')
ww = TOKENIZER_RE.findall(words[i*batch_size][0])
for j in range(word_pad_length):
alpha = "{:.2f}".format(result[1][0][k][j])
if len(ww) <= j:
w = "___"
else:
w = ww[j]
f.write(f'\t<span style="margin-left:3px;background-color:rgba(255,0,0,{alpha})">{w}</span>\n')
f.write('</p>\n')
f.write('</div>\n')
if FLAGS.visualize == True:
f.write('</body></html>')
f.close()
print(f'Test accuracy: {rs/total}')
sess.close()
|
[
"tensorflow.app.flags.DEFINE_float",
"tensorflow.trainable_variables",
"numpy.argmax",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.Variable",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.clip_by_global_norm",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"reader.VocabDict",
"tensorflow.gradients",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"reader.load_csv",
"tensorflow.Session",
"tensorflow.reduce_mean",
"tensorflow.train.exponential_decay",
"re.compile",
"tflearn.fully_connected",
"tflearn.data_utils.pad_sequences",
"model.SelfAttentive",
"sklearn.utils.shuffle",
"tensorflow.train.AdamOptimizer"
] |
[((189, 262), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_epochs"""', '(5)', '"""number of epochs to train"""'], {}), "('num_epochs', 5, 'number of epochs to train')\n", (216, 262), True, 'import tensorflow as tf\n'), ((263, 348), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(20)', '"""batch size to train in one step"""'], {}), "('batch_size', 20, 'batch size to train in one step'\n )\n", (290, 348), True, 'import tensorflow as tf\n'), ((344, 411), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""labels"""', '(5)', '"""number of label classes"""'], {}), "('labels', 5, 'number of label classes')\n", (371, 411), True, 'import tensorflow as tf\n'), ((412, 498), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""word_pad_length"""', '(60)', '"""word pad length for training"""'], {}), "('word_pad_length', 60,\n 'word pad length for training')\n", (439, 498), True, 'import tensorflow as tf\n'), ((495, 556), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""decay_step"""', '(500)', '"""decay steps"""'], {}), "('decay_step', 500, 'decay steps')\n", (522, 556), True, 'import tensorflow as tf\n'), ((557, 646), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learn_rate"""', '(0.01)', '"""learn rate for training optimization"""'], {}), "('learn_rate', 0.01,\n 'learn rate for training optimization')\n", (582, 646), True, 'import tensorflow as tf\n'), ((643, 708), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""shuffle"""', '(True)', '"""shuffle data FLAG"""'], {}), "('shuffle', True, 'shuffle data FLAG')\n", (670, 708), True, 'import tensorflow as tf\n'), ((709, 770), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""train"""', '(True)', '"""train mode FLAG"""'], {}), "('train', True, 'train mode FLAG')\n", (736, 770), True, 'import tensorflow as tf\n'), ((771, 836), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""visualize"""', '(False)', '"""visualize FLAG"""'], {}), "('visualize', False, 'visualize FLAG')\n", (798, 836), True, 'import tensorflow as tf\n'), ((837, 907), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""penalization"""', '(True)', '"""penalization FLAG"""'], {}), "('penalization', True, 'penalization FLAG')\n", (864, 907), True, 'import tensorflow as tf\n'), ((1099, 1177), 're.compile', 're.compile', (['"""[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\\\\\'\\\\w\\\\-]+"""', 're.UNICODE'], {}), '("[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\\\\\'\\\\w\\\\-]+", re.UNICODE)\n', (1109, 1177), False, 'import re\n'), ((1411, 1422), 'reader.VocabDict', 'VocabDict', ([], {}), '()\n', (1420, 1422), False, 'from reader import load_csv, VocabDict\n'), ((1577, 1592), 'model.SelfAttentive', 'SelfAttentive', ([], {}), '()\n', (1590, 1592), False, 'from model import SelfAttentive\n'), ((1598, 1610), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1608, 1610), True, 'import tensorflow as tf\n'), ((2872, 2983), 'reader.load_csv', 'load_csv', (['"""./data/ag_news_csv/train.csv"""'], {'target_columns': '[0]', 'columns_to_ignore': '[1]', 'target_dict': 'label_dict'}), "('./data/ag_news_csv/train.csv', target_columns=[0],\n columns_to_ignore=[1], target_dict=label_dict)\n", (2880, 2983), False, 'from reader import load_csv, VocabDict\n'), ((3103, 3166), 'tflearn.data_utils.pad_sequences', 
'tflearn.data_utils.pad_sequences', (['words'], {'maxlen': 'word_pad_length'}), '(words, maxlen=word_pad_length)\n', (3135, 3166), False, 'import tflearn\n'), ((4621, 4731), 'reader.load_csv', 'load_csv', (['"""./data/ag_news_csv/test.csv"""'], {'target_columns': '[0]', 'columns_to_ignore': '[1]', 'target_dict': 'label_dict'}), "('./data/ag_news_csv/test.csv', target_columns=[0],\n columns_to_ignore=[1], target_dict=label_dict)\n", (4629, 4731), False, 'from reader import load_csv, VocabDict\n'), ((4795, 4869), 'tflearn.data_utils.pad_sequences', 'tflearn.data_utils.pad_sequences', (['words_with_index'], {'maxlen': 'word_pad_length'}), '(words_with_index, maxlen=word_pad_length)\n', (4827, 4869), False, 'import tflearn\n'), ((1709, 1751), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""DownstreamApplication"""'], {}), "('DownstreamApplication')\n", (1726, 1751), True, 'import tensorflow as tf\n'), ((1771, 1822), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (1782, 1822), True, 'import tensorflow as tf\n'), ((1840, 1927), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['lr', 'global_step', 'FLAGS.decay_step', '(0.95)'], {'staircase': '(True)'}), '(lr, global_step, FLAGS.decay_step, 0.95,\n staircase=True)\n', (1866, 1927), True, 'import tensorflow as tf\n'), ((1937, 1986), 'tensorflow.placeholder', 'tf.placeholder', (['"""float32"""'], {'shape': '[None, tag_size]'}), "('float32', shape=[None, tag_size])\n", (1951, 1986), True, 'import tensorflow as tf\n'), ((1997, 2054), 'tflearn.fully_connected', 'tflearn.fully_connected', (['model.M', '(2000)'], {'activation': '"""relu"""'}), "(model.M, 2000, activation='relu')\n", (2020, 2054), False, 'import tflearn\n'), ((2068, 2123), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', 'tag_size'], {'activation': 'None'}), '(net, tag_size, activation=None)\n', (2091, 2123), False, 'import tflearn\n'), ((2392, 2412), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (2406, 2412), True, 'import tensorflow as tf\n'), ((2426, 2450), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2448, 2450), True, 'import tensorflow as tf\n'), ((2544, 2578), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learn_rate'], {}), '(learn_rate)\n', (2566, 2578), True, 'import tensorflow as tf\n'), ((2599, 2625), 'tensorflow.gradients', 'tf.gradients', (['loss', 'params'], {}), '(loss, params)\n', (2611, 2625), True, 'import tensorflow as tf\n'), ((2653, 2695), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grad_and_vars', '(0.5)'], {}), '(grad_and_vars, 0.5)\n', (2675, 2695), True, 'import tensorflow as tf\n'), ((2820, 2853), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2851, 2853), True, 'import tensorflow as tf\n'), ((3067, 3087), 'sklearn.utils.shuffle', 'shuffle', (['words', 'tags'], {}), '(words, tags)\n', (3074, 3087), False, 'from sklearn.utils import shuffle\n'), ((4471, 4487), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4485, 4487), True, 'import tensorflow as tf\n'), ((4545, 4561), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4559, 4561), True, 'import tensorflow as tf\n'), ((2149, 2218), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, 
logits=logits)\n', (2188, 2218), True, 'import tensorflow as tf\n'), ((2358, 2380), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['p_loss'], {}), '(p_loss)\n', (2372, 2380), True, 'import tensorflow as tf\n'), ((5388, 5405), 'numpy.argmax', 'np.argmax', (['arr[j]'], {}), '(arr[j])\n', (5397, 5405), True, 'import numpy as np\n'), ((5409, 5433), 'numpy.argmax', 'np.argmax', (['batch_tags[j]'], {}), '(batch_tags[j])\n', (5418, 5433), True, 'import numpy as np\n')]
|
from itertools import product
import numpy as np
import argparse
from joblib import Parallel, delayed
from pathlib import Path
import openslide
from openslide.deepzoom import DeepZoomGenerator
class Patcher:
def __init__(self):
self._get_args()
self._make_output_dir()
self._read_img()
def _get_args(self):
parser = argparse.ArgumentParser(description="Make patches from WSI.")
parser.add_argument("img_path",
help="Path to the whole slide image.")
parser.add_argument("-s", "--output_size",
help="Output patch size of both x, y without the overlap area.",
default=254,
type=int)
parser.add_argument("-ov", "--overlap",
help="Overlap size.",
default=1,
type=int)
parser.add_argument("-ou", "--output_dir",
help="Where to save the patches.")
parser.add_argument("-t", "--thresh",
default=0,
type=int,
help="If set a int 1-255, saves only onshore patch.")
self.args = parser.parse_args()
def _make_output_dir(self):
if self.args.output_dir is None:
wsipath = Path(self.args.img_path)
self.args.output_dir = wsipath.parent/wsipath.stem
if not Path(self.args.output_dir).exists():
Path(self.args.output_dir).mkdir(parents=True)
self.output_dir = Path(self.args.output_dir)
def _read_img(self):
img = openslide.OpenSlide(self.args.img_path)
self.dzimg = DeepZoomGenerator(img,
int(self.args.output_size),
int(self.args.overlap))
self.tiles = self.dzimg.level_tiles[-1]
self.deepest_level = self.dzimg.level_count - 1
self.iterator = product(range(self.tiles[0]), range(self.tiles[1]))
def make_patch(self, x, y):
patch = self.dzimg.get_tile(self.deepest_level, (x, y))
if self.args.thresh:
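            # Keep the tile only when its mean pixel intensity falls below the
            # threshold; the assumption is that darker tiles contain tissue
            # ("onshore") while near-white tiles are background.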
checker = np.array(patch)
if np.mean(checker) < int(self.args.thresh):
patch.save(f"{self.output_dir}/{x:04}_{y:04}.png")
else:
patch.save(f"{self.output_dir}/{x:04}_{y:04}.png")
def make_patch_parallel(self):
parallel = Parallel(n_jobs=-1, verbose=1, backend="threading")
parallel([delayed(self.make_patch)(x, y) for x, y in self.iterator])
def make_patch_for(self):
for x, y in self.iterator:
self.make_patch(x, y)
if __name__ == '__main__':
patcher = Patcher()
patcher.make_patch_parallel()
# p.make_patch_for() # use if make_patch_parallel doesn't work.
|
[
"openslide.OpenSlide",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.mean",
"numpy.array",
"joblib.Parallel",
"joblib.delayed"
] |
[((360, 421), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Make patches from WSI."""'}), "(description='Make patches from WSI.')\n", (383, 421), False, 'import argparse\n'), ((1607, 1633), 'pathlib.Path', 'Path', (['self.args.output_dir'], {}), '(self.args.output_dir)\n', (1611, 1633), False, 'from pathlib import Path\n'), ((1674, 1713), 'openslide.OpenSlide', 'openslide.OpenSlide', (['self.args.img_path'], {}), '(self.args.img_path)\n', (1693, 1713), False, 'import openslide\n'), ((2488, 2539), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)', 'verbose': '(1)', 'backend': '"""threading"""'}), "(n_jobs=-1, verbose=1, backend='threading')\n", (2496, 2539), False, 'from joblib import Parallel, delayed\n'), ((1382, 1406), 'pathlib.Path', 'Path', (['self.args.img_path'], {}), '(self.args.img_path)\n', (1386, 1406), False, 'from pathlib import Path\n'), ((2216, 2231), 'numpy.array', 'np.array', (['patch'], {}), '(patch)\n', (2224, 2231), True, 'import numpy as np\n'), ((2247, 2263), 'numpy.mean', 'np.mean', (['checker'], {}), '(checker)\n', (2254, 2263), True, 'import numpy as np\n'), ((1485, 1511), 'pathlib.Path', 'Path', (['self.args.output_dir'], {}), '(self.args.output_dir)\n', (1489, 1511), False, 'from pathlib import Path\n'), ((1534, 1560), 'pathlib.Path', 'Path', (['self.args.output_dir'], {}), '(self.args.output_dir)\n', (1538, 1560), False, 'from pathlib import Path\n'), ((2558, 2582), 'joblib.delayed', 'delayed', (['self.make_patch'], {}), '(self.make_patch)\n', (2565, 2582), False, 'from joblib import Parallel, delayed\n')]
|