code | apis | extract_api
---|---|---
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from collections import OrderedDict
import numpy as np
from edgeml_pytorch.trainer.drocc_trainer import DROCCTrainer
class MLP(nn.Module):
"""
    Multi-layer perceptron with a single hidden layer.
"""
def __init__(self,
input_dim=2,
num_classes=1,
num_hidden_nodes=20):
super(MLP, self).__init__()
self.input_dim = input_dim
self.num_classes = num_classes
self.num_hidden_nodes = num_hidden_nodes
activ = nn.ReLU(True)
self.feature_extractor = nn.Sequential(OrderedDict([
('fc', nn.Linear(self.input_dim, self.num_hidden_nodes)),
('relu1', activ)]))
self.size_final = self.num_hidden_nodes
self.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(self.size_final, self.num_classes))]))
def forward(self, input):
features = self.feature_extractor(input)
logits = self.classifier(features.view(-1, self.size_final))
return logits
def adjust_learning_rate(epoch, total_epochs, only_ce_epochs, learning_rate, optimizer):
"""Adjust learning rate during training.
Parameters
----------
    epoch: Current training epoch.
    total_epochs: Total number of epochs for training.
    only_ce_epochs: Number of epochs for initial pretraining with only the CE loss.
    learning_rate: Initial learning rate for training.
    optimizer: Optimizer whose parameter groups receive the updated learning rate.
    """
    # We don't want to count the initial CE-only
    # epochs towards the LR schedule
epoch = epoch - only_ce_epochs
drocc_epochs = total_epochs - only_ce_epochs
# lr = learning_rate
if epoch <= drocc_epochs:
lr = learning_rate * 0.001
if epoch <= 0.90 * drocc_epochs:
lr = learning_rate * 0.01
if epoch <= 0.60 * drocc_epochs:
lr = learning_rate * 0.1
if epoch <= 0.30 * drocc_epochs:
lr = learning_rate
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
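# Illustration (not part of the original script, using the argparse defaults below: learning_rate=0.001,
# total_epochs=100, only_ce_epochs=50): drocc_epochs = 50 and the cascade of checks above resolves to
#   epochs  1-15 of the DROCC phase -> lr = 0.001     (epoch <= 30% of drocc_epochs)
#   epochs 16-30                    -> lr = 0.0001    (epoch <= 60%)
#   epochs 31-45                    -> lr = 0.00001   (epoch <= 90%)
#   epochs 46-50                    -> lr = 0.000001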
class CustomDataset(Dataset):
def __init__(self, data, labels):
self.data = data
self.labels = labels
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return torch.from_numpy(self.data[idx]), (self.labels[idx]), torch.tensor([0])
def load_data(path):
train_data = np.load(os.path.join(path, 'train_data.npy'), allow_pickle = True)
    train_lab = np.ones((train_data.shape[0]))  # all training points carry the positive label (one-class setting)
test_data = np.load(os.path.join(path, 'test_data.npy'), allow_pickle = True)
test_lab = np.load(os.path.join(path, 'test_labels.npy'), allow_pickle = True)
## preprocessing
mean=np.mean(train_data,0)
std=np.std(train_data,0)
train_data=(train_data-mean)/ (std + 1e-4)
num_features = train_data.shape[1]
test_data = (test_data - mean)/(std + 1e-4)
train_samples = train_data.shape[0]
test_samples = test_data.shape[0]
print("Train Samples: ", train_samples)
print("Test Samples: ", test_samples)
return CustomDataset(train_data, train_lab), CustomDataset(test_data, test_lab), num_features
def main():
train_dataset, test_dataset, num_features = load_data(args.data_path)
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, args.batch_size, shuffle=True)
model = MLP(input_dim=num_features, num_hidden_nodes=args.hd, num_classes=1).to(device)
if args.optim == 1:
optimizer = optim.SGD(model.parameters(),
lr=args.lr,
momentum=args.mom)
print("using SGD")
else:
optimizer = optim.Adam(model.parameters(),
lr=args.lr)
print("using Adam")
# Training the model
trainer = DROCCTrainer(model, optimizer, args.lamda, args.radius, args.gamma, device)
# Restore from checkpoint
if args.restore == 1:
if os.path.exists(os.path.join(args.model_dir, 'model.pt')):
trainer.load(args.model_dir)
print("Saved Model Loaded")
trainer.train(train_loader, test_loader, args.lr, adjust_learning_rate, args.epochs,
metric=args.metric, ascent_step_size=args.ascent_step_size, only_ce_epochs = args.only_ce_epochs)
trainer.save(args.model_dir)
if __name__ == '__main__':
torch.set_printoptions(precision=5)
parser = argparse.ArgumentParser(description='PyTorch Simple Training')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='batch size for training')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train')
    parser.add_argument('-oce', '--only_ce_epochs', type=int, default=50, metavar='N',
help='number of epochs to train with only CE loss')
parser.add_argument('--ascent_num_steps', type=int, default=50, metavar='N',
help='Number of gradient ascent steps')
parser.add_argument('--hd', type=int, default=128, metavar='N',
                        help='Number of hidden nodes for the MLP model')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate')
parser.add_argument('--ascent_step_size', type=float, default=0.001, metavar='LR',
help='step size of gradient ascent')
parser.add_argument('--mom', type=float, default=0.99, metavar='M',
help='momentum')
parser.add_argument('--model_dir', default='log',
help='path where to save checkpoint')
parser.add_argument('--one_class_adv', type=int, default=1, metavar='N',
help='adv loss to be used or not, 1:use 0:not use(only CE)')
parser.add_argument('--radius', type=float, default=0.2, metavar='N',
help='radius corresponding to the definition of set N_i(r)')
parser.add_argument('--lamda', type=float, default=1, metavar='N',
help='Weight to the adversarial loss')
parser.add_argument('--reg', type=float, default=0, metavar='N',
help='weight reg')
parser.add_argument('--restore', type=int, default=0, metavar='N',
help='whether to load a pretrained model, 1: load 0: train from scratch')
parser.add_argument('--optim', type=int, default=0, metavar='N',
help='0 : Adam 1: SGD')
parser.add_argument('--gamma', type=float, default=2.0, metavar='N',
help='r to gamma * r projection for the set N_i(r)')
parser.add_argument('-d', '--data_path', type=str, default='.')
parser.add_argument('--metric', type=str, default='F1')
args = parser.parse_args()
# settings
#Checkpoint store path
model_dir = args.model_dir
if not os.path.exists(model_dir):
os.makedirs(model_dir)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
main()
|
[
"numpy.mean",
"edgeml_pytorch.trainer.drocc_trainer.DROCCTrainer",
"torch.nn.ReLU",
"os.path.exists",
"numpy.ones",
"torch.set_printoptions",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.std",
"os.path.join",
"torch.from_numpy",
"torch.is_tensor",
"torch.tensor",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.device"
] |
[((2779, 2807), 'numpy.ones', 'np.ones', (['train_data.shape[0]'], {}), '(train_data.shape[0])\n', (2786, 2807), True, 'import numpy as np\n'), ((3052, 3074), 'numpy.mean', 'np.mean', (['train_data', '(0)'], {}), '(train_data, 0)\n', (3059, 3074), True, 'import numpy as np\n'), ((3082, 3103), 'numpy.std', 'np.std', (['train_data', '(0)'], {}), '(train_data, 0)\n', (3088, 3103), True, 'import numpy as np\n'), ((3611, 3667), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', 'args.batch_size'], {'shuffle': '(True)'}), '(train_dataset, args.batch_size, shuffle=True)\n', (3621, 3667), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((3686, 3741), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset', 'args.batch_size'], {'shuffle': '(True)'}), '(test_dataset, args.batch_size, shuffle=True)\n', (3696, 3741), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((4215, 4290), 'edgeml_pytorch.trainer.drocc_trainer.DROCCTrainer', 'DROCCTrainer', (['model', 'optimizer', 'args.lamda', 'args.radius', 'args.gamma', 'device'], {}), '(model, optimizer, args.lamda, args.radius, args.gamma, device)\n', (4227, 4290), False, 'from edgeml_pytorch.trainer.drocc_trainer import DROCCTrainer\n'), ((4765, 4800), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'precision': '(5)'}), '(precision=5)\n', (4787, 4800), False, 'import torch\n'), ((4819, 4881), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Simple Training"""'}), "(description='PyTorch Simple Training')\n", (4842, 4881), False, 'import argparse\n'), ((7455, 7480), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7478, 7480), False, 'import torch\n'), ((7494, 7537), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (7506, 7537), False, 'import torch\n'), ((718, 731), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (725, 731), True, 'import torch.nn as nn\n'), ((2517, 2537), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (2532, 2537), False, 'import torch\n'), ((2704, 2740), 'os.path.join', 'os.path.join', (['path', '"""train_data.npy"""'], {}), "(path, 'train_data.npy')\n", (2716, 2740), False, 'import os\n'), ((2879, 2914), 'os.path.join', 'os.path.join', (['path', '"""test_data.npy"""'], {}), "(path, 'test_data.npy')\n", (2891, 2914), False, 'import os\n'), ((2960, 2997), 'os.path.join', 'os.path.join', (['path', '"""test_labels.npy"""'], {}), "(path, 'test_labels.npy')\n", (2972, 2997), False, 'import os\n'), ((7382, 7407), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (7396, 7407), False, 'import os\n'), ((7417, 7439), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (7428, 7439), False, 'import os\n'), ((2585, 2617), 'torch.from_numpy', 'torch.from_numpy', (['self.data[idx]'], {}), '(self.data[idx])\n', (2601, 2617), False, 'import torch\n'), ((2639, 2656), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (2651, 2656), False, 'import torch\n'), ((4379, 4419), 'os.path.join', 'os.path.join', (['args.model_dir', '"""model.pt"""'], {}), "(args.model_dir, 'model.pt')\n", (4391, 4419), False, 'import os\n'), ((812, 860), 'torch.nn.Linear', 'nn.Linear', (['self.input_dim', 'self.num_hidden_nodes'], {}), '(self.input_dim, self.num_hidden_nodes)\n', (821, 860), True, 'import torch.nn as nn\n'), ((1018, 1062), 'torch.nn.Linear', 'nn.Linear', (['self.size_final', 'self.num_classes'], {}), 
'(self.size_final, self.num_classes)\n', (1027, 1062), True, 'import torch.nn as nn\n')]
|
"""
This Python code demonstrates an edge-based active contour model as an application of the
Distance Regularized Level Set Evolution (DRLSE) formulation in the following paper:
<NAME>, <NAME>, <NAME>, <NAME>, "Distance Regularized Level Set Evolution and Its Application to Image Segmentation",
IEEE Trans. Image Processing, vol. 19 (12), pp. 3243-3254, 2010.
Author: <NAME>
E-mail: <EMAIL>
Released Under MIT License
"""
import numpy as np
from skimage.io import imread
from lv_set.find_lsf import find_lsf
from lv_set.potential_func import *
from lv_set.show_fig import draw_all
def gourd_params():
img = imread('gourd.bmp', True)
img = np.interp(img, [np.min(img), np.max(img)], [0, 255])
# initialize LSF as binary step function
c0 = 2
initial_lsf = c0 * np.ones(img.shape)
# generate the initial region R0 as two rectangles
initial_lsf[24:35, 19:25] = -c0
initial_lsf[24:35, 39:50] = -c0
# parameters
return {
'img': img,
'initial_lsf': initial_lsf,
'timestep': 1, # time step
'iter_inner': 10,
'iter_outer': 30,
'lmda': 5, # coefficient of the weighted length term L(phi)
'alfa': -3, # coefficient of the weighted area term A(phi)
'epsilon': 1.5, # parameter that specifies the width of the DiracDelta function
'sigma': 0.8, # scale parameter in Gaussian kernel
'potential_function': DOUBLE_WELL,
}
def two_cells_params():
img = imread('twocells.bmp', True)
img = np.interp(img, [np.min(img), np.max(img)], [0, 255])
# initialize LSF as binary step function
c0 = 2
initial_lsf = c0 * np.ones(img.shape)
# generate the initial region R0 as two rectangles
initial_lsf[9:55, 9:75] = -c0
# parameters
return {
'img': img,
'initial_lsf': initial_lsf,
'timestep': 5, # time step
'iter_inner': 5,
'iter_outer': 40,
'lmda': 5, # coefficient of the weighted length term L(phi)
'alfa': 1.5, # coefficient of the weighted area term A(phi)
'epsilon': 1.5, # parameter that specifies the width of the DiracDelta function
'sigma': 1.5, # scale parameter in Gaussian kernel
'potential_function': DOUBLE_WELL,
}
params = gourd_params()
# params = two_cells_params()
phi = find_lsf(**params)
print('Show final output')
draw_all(phi, params['img'], 10)
|
[
"numpy.ones",
"numpy.max",
"skimage.io.imread",
"lv_set.find_lsf.find_lsf",
"numpy.min",
"lv_set.show_fig.draw_all"
] |
[((2340, 2358), 'lv_set.find_lsf.find_lsf', 'find_lsf', ([], {}), '(**params)\n', (2348, 2358), False, 'from lv_set.find_lsf import find_lsf\n'), ((2387, 2419), 'lv_set.show_fig.draw_all', 'draw_all', (['phi', "params['img']", '(10)'], {}), "(phi, params['img'], 10)\n", (2395, 2419), False, 'from lv_set.show_fig import draw_all\n'), ((627, 652), 'skimage.io.imread', 'imread', (['"""gourd.bmp"""', '(True)'], {}), "('gourd.bmp', True)\n", (633, 652), False, 'from skimage.io import imread\n'), ((1488, 1516), 'skimage.io.imread', 'imread', (['"""twocells.bmp"""', '(True)'], {}), "('twocells.bmp', True)\n", (1494, 1516), False, 'from skimage.io import imread\n'), ((796, 814), 'numpy.ones', 'np.ones', (['img.shape'], {}), '(img.shape)\n', (803, 814), True, 'import numpy as np\n'), ((1660, 1678), 'numpy.ones', 'np.ones', (['img.shape'], {}), '(img.shape)\n', (1667, 1678), True, 'import numpy as np\n'), ((679, 690), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (685, 690), True, 'import numpy as np\n'), ((692, 703), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (698, 703), True, 'import numpy as np\n'), ((1543, 1554), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (1549, 1554), True, 'import numpy as np\n'), ((1556, 1567), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1562, 1567), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Lab 02
#
# ## Solving a system of nonlinear equations
#
# ### <NAME>, Б01-818
#
# IV.12.7.д
# $$\begin{cases} x^7 - 5x^2y^4 + 1510 = 0 \\ y^3 - 3x^4y - 105 = 0 \end{cases}$$
# $$\begin{cases} x_{n+1} = \sqrt{\frac{x_n^7 + 1510}{5y_n^4}} \\ y_{n+1} = \sqrt[3]{3x_{n}^4y_{n}+105} \end{cases}$$
# $$J=\begin{pmatrix}7x^6-10xy^4 & -20x^2y^3\\-12x^3y & 3y^2-3x^4\end{pmatrix}$$
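# A hedged reading of the schemes implemented below (not stated explicitly above): the fixed-point
# iteration applies the two update formulas directly, while Newton's method uses the inverse of the
# Jacobian $J$:
# $$\begin{pmatrix}x_{n+1}\\y_{n+1}\end{pmatrix}=\begin{pmatrix}x_n\\y_n\end{pmatrix}-J^{-1}(x_n,y_n)\begin{pmatrix}x_n^7-5x_n^2y_n^4+1510\\y_n^3-3x_n^4y_n-105\end{pmatrix}$$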
# In[1]:
import unittest
import logging
import numpy as np
import pandas as pd
# In[2]:
#logging.basicConfig(level=logging.DEBUG)
# In[3]:
class FPI:
def __init__(self, f_vec):
self.__f_vec = f_vec
self.iter = 0
self.log = logging.getLogger("FPI")
def __is_stop(self, next_x, cur_x, q, delta):
if next_x == cur_x:
return False
if sum(np.abs((next_x[i] - cur_x[i])) for i in range(len(cur_x))) <= delta * (1 - q):
return True
return False
def solve(self, init_x, q, delta):
cur_x = init_x
next_x = init_x
while not self.__is_stop(next_x, cur_x, q, delta):
cur_x = next_x
next_x = cur_x[:]
for i in range(len(self.__f_vec)):
next_x[i] = self.__f_vec[i](cur_x)
self.log.debug(f"Iter[{self.iter}]: Init: {cur_x} Next: {next_x}")
self.iter = self.iter + 1
return next_x
# In[4]:
class Newton:
def __init__(self, f_vec, J):
self.__f_vec = f_vec
self.__J = J
self.iter = 0
self.log = logging.getLogger("Newton")
def __J_mul_f(self, x, i):
return sum(self.__f_vec[j](x) * self.__J[i][j](x) for j in range(len(self.__f_vec)))
def __is_stop(self, next_x, cur_x, M2, m1, delta):
if next_x == cur_x:
return False
if sum(np.abs(next_x[i] - cur_x[i]) for i in range(len(cur_x))) < np.sqrt(2*delta*m1/M2):
return True
return False
def solve(self, init_x, M2, m1, delta):
self.iter = 0
cur_x = init_x
next_x = init_x
while not self.__is_stop(next_x, cur_x, M2, m1, delta):
cur_x = next_x
next_x = cur_x[:]
for i in range(len(self.__f_vec)):
next_x[i] = cur_x[i] - self.__J_mul_f(cur_x, i)
self.log.debug(f"Iter[{self.iter}]: Init: {cur_x} Next: {next_x}")
self.iter = self.iter + 1
return next_x
# In[5]:
def fpi_f1(x):
return np.sqrt((x[0]**7 + 1510)/(5 * (x[1]**4)))
def fpi_f2(x):
return np.cbrt(3*(x[0]**4)*x[1] + 105)
fpi = FPI([fpi_f1, fpi_f2])
# In[6]:
def newton_f1(x):
return x[0]**7-5*(x[0]**2)*(x[1]**4)+1510
def newton_f2(x):
return x[1]**3-3*(x[0]**4)*x[1]-105
def J00(x):
return 7*(x[0]**6)-10*x[0]*(x[1]**4)
def J01(x):
return -20*(x[0]**2)*(x[1]**3)
def J10(x):
return -12*(x[0]**3)*x[1]
def J11(x):
return 3*(x[1]**2) - 3*(x[0]**4)
def J(x):
return [[J00(x), J01(x)], [J10(x), J11(x)]]
def J00_inv(x):
return J11(x)/(J00(x)*J11(x)-J10(x)*J01(x))
def J01_inv(x):
return - J01(x)/(J00(x)*J11(x)-J10(x)*J01(x))
def J10_inv(x):
return - J10(x)/(J00(x)*J11(x)-J10(x)*J01(x))
def J11_inv(x):
return J00(x)/(J00(x)*J11(x)-J10(x)*J01(x))
J_inv = [[J00_inv, J01_inv], [J10_inv, J11_inv]]
newton = Newton([newton_f1, newton_f2], J_inv)
# In[7]:
log = logging.getLogger()
x_init_vec_fpi = [[1,5], [3, -4], [-1, 5]]
x_init_vec_newton = [[1,5], [3, -4], [-1, 5], [-4, 0], [-2, -2]]
delta = 10**-5
q = 0.5
m1 = 1
M2 = 1
fpi_results = []
fpi_iterations = []
newton_results = []
newton_iterations = []
for x in x_init_vec_fpi:
fpi_results.append(fpi.solve(x, q, delta))
fpi_iterations.append(fpi.iter)
for x in x_init_vec_newton:
newton_results.append(newton.solve(x, M2, m1, delta))
newton_iterations.append(newton.iter)
# In[8]:
# Column labels and headings below are Russian: "Начальное приближение" = initial guess,
# "Результат" = result, "Итераций" = iterations
fpi_dt = pd.DataFrame({"Начальное приближение": x_init_vec_fpi, "Результат": fpi_results, "Итераций": fpi_iterations})
newton_dt = pd.DataFrame({"Начальное приближение": x_init_vec_newton, "Результат": newton_results, "Итераций": newton_iterations})
print("Метод простых итераций")  # "Fixed-point iteration method"
print(fpi_dt)
print("\nМетод Ньютона")  # "Newton's method"
print(newton_dt)
|
[
"logging.getLogger",
"numpy.abs",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.cbrt"
] |
[((3476, 3495), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3493, 3495), False, 'import logging\n'), ((3984, 4097), 'pandas.DataFrame', 'pd.DataFrame', (["{'Начальное приближение': x_init_vec_fpi, 'Результат': fpi_results,\n 'Итераций': fpi_iterations}"], {}), "({'Начальное приближение': x_init_vec_fpi, 'Результат':\n fpi_results, 'Итераций': fpi_iterations})\n", (3996, 4097), True, 'import pandas as pd\n'), ((4106, 4228), 'pandas.DataFrame', 'pd.DataFrame', (["{'Начальное приближение': x_init_vec_newton, 'Результат': newton_results,\n 'Итераций': newton_iterations}"], {}), "({'Начальное приближение': x_init_vec_newton, 'Результат':\n newton_results, 'Итераций': newton_iterations})\n", (4118, 4228), True, 'import pandas as pd\n'), ((2576, 2621), 'numpy.sqrt', 'np.sqrt', (['((x[0] ** 7 + 1510) / (5 * x[1] ** 4))'], {}), '((x[0] ** 7 + 1510) / (5 * x[1] ** 4))\n', (2583, 2621), True, 'import numpy as np\n'), ((2645, 2680), 'numpy.cbrt', 'np.cbrt', (['(3 * x[0] ** 4 * x[1] + 105)'], {}), '(3 * x[0] ** 4 * x[1] + 105)\n', (2652, 2680), True, 'import numpy as np\n'), ((684, 708), 'logging.getLogger', 'logging.getLogger', (['"""FPI"""'], {}), "('FPI')\n", (701, 708), False, 'import logging\n'), ((1600, 1627), 'logging.getLogger', 'logging.getLogger', (['"""Newton"""'], {}), "('Newton')\n", (1617, 1627), False, 'import logging\n'), ((1944, 1972), 'numpy.sqrt', 'np.sqrt', (['(2 * delta * m1 / M2)'], {}), '(2 * delta * m1 / M2)\n', (1951, 1972), True, 'import numpy as np\n'), ((841, 869), 'numpy.abs', 'np.abs', (['(next_x[i] - cur_x[i])'], {}), '(next_x[i] - cur_x[i])\n', (847, 869), True, 'import numpy as np\n'), ((1885, 1913), 'numpy.abs', 'np.abs', (['(next_x[i] - cur_x[i])'], {}), '(next_x[i] - cur_x[i])\n', (1891, 1913), True, 'import numpy as np\n')]
|
import numpy as np
import unittest
from itertools import product
from ml_techniques.svm import *
class PermutationDataTest(unittest.TestCase):
def testpropershape(self):
data = np.random.random((10, 4))
labels = np.random.randint(0, 2, 10)*2-1
data_per = permut_data(data)
self.assertEqual(data_per.shape, data.shape)
data_per, labels_per = permut_data(data, labels)
self.assertEqual(data_per.shape, data.shape)
self.assertEqual(labels_per.shape, labels.shape)
class BatchCreatorTest(unittest.TestCase):
def test_run_batch_iterator(self):
data_size = 100
batch_size = 9
for init, endit in batch_size_iter(data_size, batch_size):
self.assertTrue(init != endit)
self.assertTrue(init < endit)
self.assertEqual(endit, data_size)
data_size = 100
batch_size = 10
for init, endit in batch_size_iter(data_size, batch_size):
self.assertTrue(init != endit)
self.assertTrue(init < endit)
self.assertEqual(endit, data_size)
class RegularizationTest(unittest.TestCase):
def assert_regularization(self, reg):
reg.parameters
reg.regularize(np.random.randn(10), 1)
reg.gradient_regularization(np.random.randn(10), 1)
def test_abstractregularization(self):
reg = Regularization.create_regularization('l2', 1.)
self.assert_regularization(reg)
reg = Regularization.create_regularization(reg)
self.assert_regularization(reg)
reg = Regularization.create_regularization(Null_Regularization)
self.assert_regularization(reg)
    def test_l2_regularization(self):
        # note: this instantiates L1_Regularization, mirroring the L1 test below; an L2 class may have been intended
        reg = L1_Regularization(1.)
        self.assert_regularization(reg)
def test_l1_regularization(self):
reg = L1_Regularization(1.)
self.assert_regularization(reg)
class AccuracyFunctionTest(unittest.TestCase):
def test_order_independency(self):
n = 10
n_tests = 20
for i in range(n_tests):
y0 = np.random.randint(0, 2, n)
y1 = np.random.randint(0, 2, n)
reindices = np.random.permutation(n)
self.assertEqual(accuracy(y0, y1),
accuracy(y0[reindices], y1[reindices]))
def test_symetry(self):
n = 10
n_tests = 20
for i in range(n_tests):
y0 = np.random.randint(0, 2, n)
y1 = np.random.randint(0, 2, n)
self.assertEqual(accuracy(y0, y1), accuracy(y1, y0))
class LossFunctionTest(unittest.TestCase):
def _generator_labels(self, n):
return np.random.randint(0, 2, n)*2-1
def test_abstractloss(self):
lossf = LossFunction.create_lossfunction('Hinge')
lossf = LossFunction.create_lossfunction(lossf)
lossf = LossFunction.create_lossfunction(Hinge)
def test_loss(self):
n = 20
y0 = np.random.random(n)*2-1
y1 = self._generator_labels(n)
thresholds = [0, 1, 2]
for thr in thresholds:
lossf = Hinge(thr)
lossf.loss(y0, y1)
def test_gradient(self):
n, n_feats = 20, 10
y0 = np.random.random(n)*2-1
y1 = self._generator_labels(n)
x = np.random.random((n, n_feats))
thresholds = [0, 1, 2]
for thr in thresholds:
lossf = Hinge(thr)
grad_w, grad_w0 = lossf.gradient_loss(y0, y1, x)
self.assertEqual(len(grad_w), n_feats)
class Modeltest(unittest.TestCase):
def setUp(self):
n = 100
self.create_X = lambda n_feats: np.random.random((n, n_feats))
def assert_linearmodel(self, linearmodel):
w, w0 = linearmodel.parameters
if w is not None:
linearmodel.compute(self.create_X(len(w)))
linearmodel.reset_model()
def test_abstractmodel(self):
mod = Model.create_model('svm', np.random.randn(10), 0.)
Model.create_model(mod)
Model.create_model(LinearModel, np.random.randn(10), 0.)
def test_linearmodel(self):
lm = LinearModel(None)
self.assert_linearmodel(lm)
lm = LinearModel(np.random.randn(10), 0.)
self.assert_linearmodel(lm)
lm = LinearModel.weights_initialization(10, 'gauss')
self.assert_linearmodel(lm)
lm = LinearModel.weights_initialization(10, 'zeros')
self.assert_linearmodel(lm)
class SVMTest(unittest.TestCase):
def setUp(self):
loss = ['Hinge', Hinge()]
reg_pars = [0.01, 1., 10.]
batch_size = [10]
n_epochs = [0, 100]
learning_rate = [0.001, 1.]
stop_step = [.00001, 100]
history = [True, False]
verbose = [True, False]
self.var_names = ['loss', 'reg_pars', 'batch_size', 'n_epochs',
'learning_rate', 'stop_step', 'history', 'verbose']
self.possibilities = [loss, reg_pars, batch_size, n_epochs,
learning_rate, stop_step, history, verbose]
def test_initialization(self):
n, n_feats = 100, 20
data = np.random.random((n, n_feats))
labels = np.random.randint(0, 2, n)*2-1
for p in product(*self.possibilities):
solver = SVM(**dict(zip(self.var_names, p)))
## General asserts
self.assertEqual(solver.optimizer, 'SGD')
self.assertEqual(solver.batch_size, p[2])
self.assertEqual(solver.n_epochs, p[3])
self.assertEqual(solver.learning_rate, p[4])
self.assertEqual(solver.stop_step, p[5])
## Special cases
if not p[6]:
self.assertIsNone(solver.train_loss_history)
self.assertIsNone(solver.test_loss_history)
self.assertIsNone(solver.train_accuracy_history)
self.assertIsNone(solver.test_accuracy_history)
## Weights initialization
solver.model = solver.model.weights_initialization(n_feats)
solver._reset_history()
## Batch creation testing
for x_batch, y_batch in solver._batch_generator(data, labels):
self.assertTrue(len(x_batch) >= p[2])
## Computer functions
if p[7]:
# model._initialization_weights(n_feats, init_type='gauss')
solver.compute_epoch_measures(data, labels, None, None)
solver.compute_epoch_measures(data, labels, data, labels)
def test_fitmodel(self):
n, n_feats = 100, 5
data = np.random.random((n, n_feats))
labels = np.random.randint(0, 2, n)*2-1
for p in product(*self.possibilities):
solver = SVM(**dict(zip(self.var_names, p)))
solver.report_results()
solver.n_epochs = 100
solver.fit(data, labels)
solver.fit(data, labels, data, labels)
solver.predict(data)
solver.score(data, labels)
if p[6]:
self.assertEqual(solver.epoch_learned,
len(solver.train_loss_history))
self.assertEqual(solver.epoch_learned,
len(solver.train_accuracy_history))
self.assertEqual(solver.epoch_learned,
len(solver.test_loss_history))
self.assertEqual(solver.epoch_learned,
len(solver.test_accuracy_history))
solver.report_results()
|
[
"numpy.random.random",
"itertools.product",
"numpy.random.randint",
"numpy.random.randn",
"numpy.random.permutation"
] |
[((193, 218), 'numpy.random.random', 'np.random.random', (['(10, 4)'], {}), '((10, 4))\n', (209, 218), True, 'import numpy as np\n'), ((3285, 3315), 'numpy.random.random', 'np.random.random', (['(n, n_feats)'], {}), '((n, n_feats))\n', (3301, 3315), True, 'import numpy as np\n'), ((5136, 5166), 'numpy.random.random', 'np.random.random', (['(n, n_feats)'], {}), '((n, n_feats))\n', (5152, 5166), True, 'import numpy as np\n'), ((5233, 5261), 'itertools.product', 'product', (['*self.possibilities'], {}), '(*self.possibilities)\n', (5240, 5261), False, 'from itertools import product\n'), ((6591, 6621), 'numpy.random.random', 'np.random.random', (['(n, n_feats)'], {}), '((n, n_feats))\n', (6607, 6621), True, 'import numpy as np\n'), ((6688, 6716), 'itertools.product', 'product', (['*self.possibilities'], {}), '(*self.possibilities)\n', (6695, 6716), False, 'from itertools import product\n'), ((1234, 1253), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (1249, 1253), True, 'import numpy as np\n'), ((1294, 1313), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (1309, 1313), True, 'import numpy as np\n'), ((2077, 2103), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2094, 2103), True, 'import numpy as np\n'), ((2121, 2147), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2138, 2147), True, 'import numpy as np\n'), ((2172, 2196), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2193, 2196), True, 'import numpy as np\n'), ((2429, 2455), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2446, 2455), True, 'import numpy as np\n'), ((2473, 2499), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2490, 2499), True, 'import numpy as np\n'), ((3638, 3668), 'numpy.random.random', 'np.random.random', (['(n, n_feats)'], {}), '((n, n_feats))\n', (3654, 3668), True, 'import numpy as np\n'), ((3946, 3965), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (3961, 3965), True, 'import numpy as np\n'), ((4043, 4062), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (4058, 4062), True, 'import numpy as np\n'), ((4193, 4212), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (4208, 4212), True, 'import numpy as np\n'), ((236, 263), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(10)'], {}), '(0, 2, 10)\n', (253, 263), True, 'import numpy as np\n'), ((2662, 2688), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2679, 2688), True, 'import numpy as np\n'), ((2951, 2970), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (2967, 2970), True, 'import numpy as np\n'), ((3210, 3229), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (3226, 3229), True, 'import numpy as np\n'), ((5184, 5210), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (5201, 5210), True, 'import numpy as np\n'), ((6639, 6665), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (6656, 6665), True, 'import numpy as np\n')]
|
"""
The CPTPState class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from pygsti.modelmembers.states.densestate import DenseState as _DenseState
from pygsti.modelmembers.states.state import State as _State
from pygsti.evotypes import Evotype as _Evotype
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.basis import Basis as _Basis
IMAG_TOL = 1e-7 # tolerance for imaginary part being considered zero
class CPTPState(_DenseState):
"""
    TODO: update docstring
    A state vector constrained to correspond to a positive density matrix.
    This state vector is parameterized through the Cholesky decomposition of
    its standard-basis representation as a density matrix (not a Liouville
    vector). The resulting state vector thus represents a positive density
    matrix, and additional constraints on the parameters also guarantee that the
    trace == 1. This state vector is meant for use with CPTP processes, hence
    the name.
Parameters
----------
vec : array_like or State
a 1D numpy array representing the state operation. The
shape of this array sets the dimension of the state.
basis : {"std", "gm", "pp", "qt"} or Basis
The basis `vec` is in. Needed because this parameterization
requires we construct the density matrix corresponding to
        the Liouville vector `vec`.
    truncate : bool, optional
Whether or not a non-positive, trace=1 `vec` should
be truncated to force a successful construction.
evotype : Evotype or str, optional
The evolution type. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
state_space : StateSpace, optional
The state space for this operation. If `None` a default state space
with the appropriate number of qubits is used.
"""
def __init__(self, vec, basis, truncate=False, evotype="default", state_space=None):
vector = _State._to_vector(vec)
basis = _Basis.cast(basis, len(vector))
self.basis = basis
self.basis_mxs = basis.elements # shape (len(vec), dmDim, dmDim)
self.basis_mxs = _np.rollaxis(self.basis_mxs, 0, 3) # shape (dmDim, dmDim, len(vec))
assert(self.basis_mxs.shape[-1] == len(vector))
# set self.params and self.dmDim
self._set_params_from_vector(vector, truncate)
#parameter labels (parameter encode the Cholesky Lmx)
labels = []
for i, ilbl in enumerate(basis.labels[1:]):
for j, jlbl in enumerate(basis.labels[1:]):
if i == j: labels.append("%s diagonal element of density matrix Cholesky decomp" % ilbl)
elif j < i: labels.append("Re[(%s,%s) element of density matrix Cholesky decomp]" % (ilbl, jlbl))
else: labels.append("Im[(%s,%s) element of density matrix Cholesky decomp]" % (ilbl, jlbl))
#scratch space
self.Lmx = _np.zeros((self.dmDim, self.dmDim), 'complex')
state_space = _statespace.default_space_for_dim(len(vector)) if (state_space is None) \
else _statespace.StateSpace.cast(state_space)
evotype = _Evotype.cast(evotype)
_DenseState.__init__(self, vector, evotype, state_space)
self._paramlbls = _np.array(labels, dtype=object)
def _set_params_from_vector(self, vector, truncate):
density_mx = _np.dot(self.basis_mxs, vector)
density_mx = density_mx.squeeze()
dmDim = density_mx.shape[0]
assert(dmDim == density_mx.shape[1]), "Density matrix must be square!"
trc = _np.trace(density_mx)
assert(truncate or _np.isclose(trc, 1.0)), \
"`vec` must correspond to a trace-1 density matrix (truncate == False)!"
if not _np.isclose(trc, 1.0): # truncate to trace == 1
density_mx -= _np.identity(dmDim, 'd') / dmDim * (trc - 1.0)
#push any slightly negative evals of density_mx positive
# so that the Cholesky decomp will work.
evals, U = _np.linalg.eig(density_mx)
Ui = _np.linalg.inv(U)
assert(truncate or all([ev >= -1e-12 for ev in evals])), \
"`vec` must correspond to a positive density matrix (truncate == False)!"
pos_evals = evals.clip(1e-16, 1e100)
density_mx = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
try:
Lmx = _np.linalg.cholesky(density_mx)
        except _np.linalg.LinAlgError:  # Lmx not positive definite?
pos_evals = evals.clip(1e-12, 1e100) # try again with 1e-12
density_mx = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
Lmx = _np.linalg.cholesky(density_mx)
#check TP condition: that diagonal els of Lmx squared add to 1.0
Lmx_norm = _np.trace(_np.dot(Lmx.T.conjugate(), Lmx)) # sum of magnitude^2 of all els
assert(_np.isclose(Lmx_norm, 1.0)), \
"Cholesky decomp didn't preserve trace=1!"
self.dmDim = dmDim
self.params = _np.empty(dmDim**2, 'd')
for i in range(dmDim):
assert(_np.linalg.norm(_np.imag(Lmx[i, i])) < IMAG_TOL)
self.params[i * dmDim + i] = Lmx[i, i].real # / paramNorm == 1 as asserted above
for j in range(i):
self.params[i * dmDim + j] = Lmx[i, j].real
self.params[j * dmDim + i] = Lmx[i, j].imag
def _construct_vector(self):
dmDim = self.dmDim
# params is an array of length dmDim^2 that
# encodes a lower-triangular matrix "Lmx" via:
# Lmx[i,i] = params[i*dmDim + i] / param-norm # i = 0...dmDim-2
# *last diagonal el is given by sqrt(1.0 - sum(L[i,j]**2))
# Lmx[i,j] = params[i*dmDim + j] + 1j*params[j*dmDim+i] (i > j)
param2Sum = _np.vdot(self.params, self.params) # or "dot" would work, since params are real
paramNorm = _np.sqrt(param2Sum) # also the norm of *all* Lmx els
for i in range(dmDim):
self.Lmx[i, i] = self.params[i * dmDim + i] / paramNorm
for j in range(i):
self.Lmx[i, j] = (self.params[i * dmDim + j] + 1j * self.params[j * dmDim + i]) / paramNorm
Lmx_norm = _np.trace(_np.dot(self.Lmx.T.conjugate(), self.Lmx)) # sum of magnitude^2 of all els
assert(_np.isclose(Lmx_norm, 1.0)), "Violated trace=1 condition!"
        # The (complex, Hermitian) density matrix is built by
        # assuming Lmx is its Cholesky decomp, which makes
        # the density matrix pos-def.
density_mx = _np.dot(self.Lmx, self.Lmx.T.conjugate())
assert(_np.isclose(_np.trace(density_mx), 1.0)), "density matrix must be trace == 1"
# write density matrix in given basis: = sum_i alpha_i B_i
# ASSUME that basis is orthogonal, i.e. Tr(Bi^dag*Bj) = delta_ij
basis_mxs = _np.rollaxis(self.basis_mxs, 2) # shape (dmDim, dmDim, len(vec))
vec = _np.array([_np.trace(_np.dot(M.T.conjugate(), density_mx)) for M in basis_mxs])
#for now, assume Liouville vector should always be real (TODO: add 'real' flag later?)
assert(_np.linalg.norm(_np.imag(vec)) < IMAG_TOL)
vec = _np.real(vec)
self._ptr.flags.writeable = True
self._ptr[:] = vec[:] # so shape is (dim,1) - the convention for spam vectors
self._ptr.flags.writeable = False
def set_dense(self, vec):
"""
Set the dense-vector value of this state vector.
Attempts to modify this state vector's parameters so that the raw
state vector becomes `vec`. Will raise ValueError if this operation
is not possible.
Parameters
----------
vec : array_like or State
A numpy array representing a state vector, or a State object.
Returns
-------
None
"""
try:
self._set_params_from_vector(vec, truncate=False)
self.dirty = True
except AssertionError as e:
raise ValueError("Error initializing the parameters of this "
"CPTPState object: " + str(e))
@property
def num_params(self):
"""
Get the number of independent parameters which specify this state vector.
Returns
-------
int
the number of independent parameters.
"""
assert(self.dmDim**2 == self.dim) # should at least be true without composite bases...
return self.dmDim**2
def to_vector(self):
"""
Get the state vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
return self.params
def from_vector(self, v, close=False, dirty_value=True):
"""
Initialize the state vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of state vector parameters. Length
must == num_params()
close : bool, optional
Whether `v` is close to this state vector's current
set of parameters. Under some circumstances, when this
is true this call can be completed more quickly.
dirty_value : bool, optional
The value to set this object's "dirty flag" to before exiting this
call. This is passed as an argument so it can be updated *recursively*.
Leave this set to `True` unless you know what you're doing.
Returns
-------
None
"""
assert(len(v) == self.num_params)
self.params[:] = v[:]
self._construct_vector()
self.dirty = dirty_value
def deriv_wrt_params(self, wrt_filter=None):
"""
The element-wise derivative this state vector.
Construct a matrix whose columns are the derivatives of the state vector
with respect to a single param. Thus, each column is of length
dimension and there is one column per state vector parameter.
Parameters
----------
wrt_filter : list or numpy.ndarray
List of parameter indices to take derivative with respect to.
            (None means to use all of this operation's parameters.)
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
dmDim = self.dmDim
nP = len(self.params)
assert(nP == dmDim**2) # number of parameters
# v_i = trace( B_i^dag * Lmx * Lmx^dag )
# d(v_i) = trace( B_i^dag * (dLmx * Lmx^dag + Lmx * (dLmx)^dag) ) #trace = linear so commutes w/deriv
# /
# where dLmx/d[ab] = {
# \
L, Lbar = self.Lmx, self.Lmx.conjugate()
F1 = _np.tril(_np.ones((dmDim, dmDim), 'd'))
F2 = _np.triu(_np.ones((dmDim, dmDim), 'd'), 1) * 1j
conj_basis_mxs = self.basis_mxs.conjugate()
# Derivative of vector wrt params; shape == [vecLen,dmDim,dmDim] *not dealing with TP condition yet*
# (first get derivative assuming last diagonal el of Lmx *is* a parameter, then use chain rule)
dVdp = _np.einsum('aml,mb,ab->lab', conj_basis_mxs, Lbar, F1) # only a >= b nonzero (F1)
dVdp += _np.einsum('mal,mb,ab->lab', conj_basis_mxs, L, F1) # ditto
dVdp += _np.einsum('bml,ma,ab->lab', conj_basis_mxs, Lbar, F2) # only b > a nonzero (F2)
dVdp += _np.einsum('mbl,ma,ab->lab', conj_basis_mxs, L, F2.conjugate()) # ditto
dVdp.shape = [dVdp.shape[0], nP] # jacobian with respect to "p" params,
# which don't include normalization for TP-constraint
#Now get jacobian of actual params wrt the params used above. Denote the actual
# params "P" in variable names, so p_ij = P_ij / sqrt(sum(P_xy**2))
param2Sum = _np.vdot(self.params, self.params)
        paramNorm = _np.sqrt(param2Sum)  # norm of *all* Lmx els
dpdP = _np.identity(nP, 'd')
# all p_ij params == P_ij / paramNorm = P_ij / sqrt(sum(P_xy**2))
# and so have derivs wrt *all* Pxy elements.
for ij in range(nP):
for kl in range(nP):
if ij == kl:
# dp_ij / dP_ij = 1.0 / (sum(P_xy**2))^(1/2) - 0.5 * P_ij / (sum(P_xy**2))^(3/2) * 2*P_ij
# = 1.0 / (sum(P_xy**2))^(1/2) - P_ij^2 / (sum(P_xy**2))^(3/2)
dpdP[ij, ij] = 1.0 / paramNorm - self.params[ij]**2 / paramNorm**3
else:
# dp_ij / dP_kl = -0.5 * P_ij / (sum(P_xy**2))^(3/2) * 2*P_kl
# = - P_ij * P_kl / (sum(P_xy**2))^(3/2)
dpdP[ij, kl] = - self.params[ij] * self.params[kl] / paramNorm**3
#Apply the chain rule to get dVdP:
dVdP = _np.dot(dVdp, dpdP) # shape (vecLen, nP) - the jacobian!
dVdp = dpdP = None # free memory!
assert(_np.linalg.norm(_np.imag(dVdP)) < IMAG_TOL)
derivMx = _np.real(dVdP)
if wrt_filter is None:
return derivMx
else:
return _np.take(derivMx, wrt_filter, axis=1)
def has_nonzero_hessian(self):
"""
Whether this state vector has a non-zero Hessian with respect to its parameters.
Returns
-------
bool
"""
return True
def hessian_wrt_params(self, wrt_filter1=None, wrt_filter2=None):
"""
Construct the Hessian of this state vector with respect to its parameters.
This function returns a tensor whose first axis corresponds to the
flattened operation matrix and whose 2nd and 3rd axes correspond to the
parameters that are differentiated with respect to.
Parameters
----------
wrt_filter1 : list or numpy.ndarray
List of parameter indices to take 1st derivatives with respect to.
            (None means to use all of this operation's parameters.)
wrt_filter2 : list or numpy.ndarray
List of parameter indices to take 2nd derivatives with respect to.
            (None means to use all of this operation's parameters.)
Returns
-------
numpy array
Hessian with shape (dimension, num_params1, num_params2)
"""
raise NotImplementedError("TODO: add hessian computation for CPTPState")
|
[
"numpy.trace",
"numpy.sqrt",
"pygsti.modelmembers.states.densestate.DenseState.__init__",
"numpy.rollaxis",
"numpy.array",
"numpy.einsum",
"numpy.imag",
"numpy.take",
"numpy.real",
"numpy.dot",
"numpy.empty",
"pygsti.evotypes.Evotype.cast",
"pygsti.modelmembers.states.state.State._to_vector",
"numpy.identity",
"numpy.linalg.eig",
"numpy.ones",
"numpy.vdot",
"numpy.isclose",
"pygsti.baseobjs.statespace.StateSpace.cast",
"numpy.diag",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.linalg.cholesky"
] |
[((2705, 2727), 'pygsti.modelmembers.states.state.State._to_vector', '_State._to_vector', (['vec'], {}), '(vec)\n', (2722, 2727), True, 'from pygsti.modelmembers.states.state import State as _State\n'), ((2903, 2937), 'numpy.rollaxis', '_np.rollaxis', (['self.basis_mxs', '(0)', '(3)'], {}), '(self.basis_mxs, 0, 3)\n', (2915, 2937), True, 'import numpy as _np\n'), ((3686, 3732), 'numpy.zeros', '_np.zeros', (['(self.dmDim, self.dmDim)', '"""complex"""'], {}), "((self.dmDim, self.dmDim), 'complex')\n", (3695, 3732), True, 'import numpy as _np\n'), ((3907, 3929), 'pygsti.evotypes.Evotype.cast', '_Evotype.cast', (['evotype'], {}), '(evotype)\n', (3920, 3929), True, 'from pygsti.evotypes import Evotype as _Evotype\n'), ((3938, 3994), 'pygsti.modelmembers.states.densestate.DenseState.__init__', '_DenseState.__init__', (['self', 'vector', 'evotype', 'state_space'], {}), '(self, vector, evotype, state_space)\n', (3958, 3994), True, 'from pygsti.modelmembers.states.densestate import DenseState as _DenseState\n'), ((4021, 4052), 'numpy.array', '_np.array', (['labels'], {'dtype': 'object'}), '(labels, dtype=object)\n', (4030, 4052), True, 'import numpy as _np\n'), ((4132, 4163), 'numpy.dot', '_np.dot', (['self.basis_mxs', 'vector'], {}), '(self.basis_mxs, vector)\n', (4139, 4163), True, 'import numpy as _np\n'), ((4336, 4357), 'numpy.trace', '_np.trace', (['density_mx'], {}), '(density_mx)\n', (4345, 4357), True, 'import numpy as _np\n'), ((4768, 4794), 'numpy.linalg.eig', '_np.linalg.eig', (['density_mx'], {}), '(density_mx)\n', (4782, 4794), True, 'import numpy as _np\n'), ((4808, 4825), 'numpy.linalg.inv', '_np.linalg.inv', (['U'], {}), '(U)\n', (4822, 4825), True, 'import numpy as _np\n'), ((5602, 5628), 'numpy.isclose', '_np.isclose', (['Lmx_norm', '(1.0)'], {}), '(Lmx_norm, 1.0)\n', (5613, 5628), True, 'import numpy as _np\n'), ((5738, 5764), 'numpy.empty', '_np.empty', (['(dmDim ** 2)', '"""d"""'], {}), "(dmDim ** 2, 'd')\n", (5747, 5764), True, 'import numpy as _np\n'), ((6517, 6551), 'numpy.vdot', '_np.vdot', (['self.params', 'self.params'], {}), '(self.params, self.params)\n', (6525, 6551), True, 'import numpy as _np\n'), ((6618, 6637), 'numpy.sqrt', '_np.sqrt', (['param2Sum'], {}), '(param2Sum)\n', (6626, 6637), True, 'import numpy as _np\n'), ((7032, 7058), 'numpy.isclose', '_np.isclose', (['Lmx_norm', '(1.0)'], {}), '(Lmx_norm, 1.0)\n', (7043, 7058), True, 'import numpy as _np\n'), ((7570, 7601), 'numpy.rollaxis', '_np.rollaxis', (['self.basis_mxs', '(2)'], {}), '(self.basis_mxs, 2)\n', (7582, 7601), True, 'import numpy as _np\n'), ((7898, 7911), 'numpy.real', '_np.real', (['vec'], {}), '(vec)\n', (7906, 7911), True, 'import numpy as _np\n'), ((11960, 12014), 'numpy.einsum', '_np.einsum', (['"""aml,mb,ab->lab"""', 'conj_basis_mxs', 'Lbar', 'F1'], {}), "('aml,mb,ab->lab', conj_basis_mxs, Lbar, F1)\n", (11970, 12014), True, 'import numpy as _np\n'), ((12059, 12110), 'numpy.einsum', '_np.einsum', (['"""mal,mb,ab->lab"""', 'conj_basis_mxs', 'L', 'F1'], {}), "('mal,mb,ab->lab', conj_basis_mxs, L, F1)\n", (12069, 12110), True, 'import numpy as _np\n'), ((12138, 12192), 'numpy.einsum', '_np.einsum', (['"""bml,ma,ab->lab"""', 'conj_basis_mxs', 'Lbar', 'F2'], {}), "('bml,ma,ab->lab', conj_basis_mxs, Lbar, F2)\n", (12148, 12192), True, 'import numpy as _np\n'), ((12638, 12672), 'numpy.vdot', '_np.vdot', (['self.params', 'self.params'], {}), '(self.params, self.params)\n', (12646, 12672), True, 'import numpy as _np\n'), ((12693, 12712), 'numpy.sqrt', '_np.sqrt', (['param2Sum'], {}), '(param2Sum)\n', 
(12701, 12712), True, 'import numpy as _np\n'), ((12770, 12791), 'numpy.identity', '_np.identity', (['nP', '"""d"""'], {}), "(nP, 'd')\n", (12782, 12791), True, 'import numpy as _np\n'), ((13630, 13649), 'numpy.dot', '_np.dot', (['dVdp', 'dpdP'], {}), '(dVdp, dpdP)\n', (13637, 13649), True, 'import numpy as _np\n'), ((13809, 13823), 'numpy.real', '_np.real', (['dVdP'], {}), '(dVdP)\n', (13817, 13823), True, 'import numpy as _np\n'), ((3847, 3887), 'pygsti.baseobjs.statespace.StateSpace.cast', '_statespace.StateSpace.cast', (['state_space'], {}), '(state_space)\n', (3874, 3887), True, 'from pygsti.baseobjs import statespace as _statespace\n'), ((4385, 4406), 'numpy.isclose', '_np.isclose', (['trc', '(1.0)'], {}), '(trc, 1.0)\n', (4396, 4406), True, 'import numpy as _np\n'), ((4512, 4533), 'numpy.isclose', '_np.isclose', (['trc', '(1.0)'], {}), '(trc, 1.0)\n', (4523, 4533), True, 'import numpy as _np\n'), ((5123, 5154), 'numpy.linalg.cholesky', '_np.linalg.cholesky', (['density_mx'], {}), '(density_mx)\n', (5142, 5154), True, 'import numpy as _np\n'), ((7343, 7364), 'numpy.trace', '_np.trace', (['density_mx'], {}), '(density_mx)\n', (7352, 7364), True, 'import numpy as _np\n'), ((11587, 11616), 'numpy.ones', '_np.ones', (['(dmDim, dmDim)', '"""d"""'], {}), "((dmDim, dmDim), 'd')\n", (11595, 11616), True, 'import numpy as _np\n'), ((13916, 13953), 'numpy.take', '_np.take', (['derivMx', 'wrt_filter'], {'axis': '(1)'}), '(derivMx, wrt_filter, axis=1)\n', (13924, 13953), True, 'import numpy as _np\n'), ((5066, 5085), 'numpy.diag', '_np.diag', (['pos_evals'], {}), '(pos_evals)\n', (5074, 5085), True, 'import numpy as _np\n'), ((5386, 5417), 'numpy.linalg.cholesky', '_np.linalg.cholesky', (['density_mx'], {}), '(density_mx)\n', (5405, 5417), True, 'import numpy as _np\n'), ((7857, 7870), 'numpy.imag', '_np.imag', (['vec'], {}), '(vec)\n', (7865, 7870), True, 'import numpy as _np\n'), ((11640, 11669), 'numpy.ones', '_np.ones', (['(dmDim, dmDim)', '"""d"""'], {}), "((dmDim, dmDim), 'd')\n", (11648, 11669), True, 'import numpy as _np\n'), ((13763, 13777), 'numpy.imag', '_np.imag', (['dVdP'], {}), '(dVdP)\n', (13771, 13777), True, 'import numpy as _np\n'), ((4587, 4611), 'numpy.identity', '_np.identity', (['dmDim', '"""d"""'], {}), "(dmDim, 'd')\n", (4599, 4611), True, 'import numpy as _np\n'), ((5829, 5848), 'numpy.imag', '_np.imag', (['Lmx[i, i]'], {}), '(Lmx[i, i])\n', (5837, 5848), True, 'import numpy as _np\n'), ((5342, 5361), 'numpy.diag', '_np.diag', (['pos_evals'], {}), '(pos_evals)\n', (5350, 5361), True, 'import numpy as _np\n')]
|
import rospy
import numpy as np
import cv2
class ScalarStable(object):
"""Represents a stabilized scalar"""
def __init__(self,
x=.0,
vx=.0,
p_cov=.03, m_cov=.01,
time=None):
"""ScalarStabilized constructor"""
self.x = x
self.vx = vx
self.p_cov = p_cov
self.m_cov = m_cov
self.filter = cv2.KalmanFilter(2, 1)
self.filter.statePost = self.to_array()
self.filter.measurementMatrix = np.array([[1, 1]], np.float32)
self.__update_noise_cov(p_cov, m_cov)
if time is None:
self.last_update = rospy.Time().now()
else:
self.last_update = time
def from_array(self, array):
"""Updates the scalar stabilized state from array"""
assert array.shape == (2, 1)
self.x = array[0]
self.vx = array[1]
self.filter.statePre = self.filter.statePost
def to_array(self):
"""Returns the scalar stabilizer state array representation"""
return np.array([[self.x], [self.vx]], np.float32)
def position(self):
"""Returns the scalar's position"""
return self.x
def velocity(self):
"""Returns the scalar's velocity"""
return self.vx
def update(self, x, time=None, m_cov=None):
"""Updates/Filter the scalar"""
if m_cov is not None:
self.__update_noise_cov(self.p_cov, m_cov)
self.__update_time(time=time)
self.filter.predict()
measurement = np.array([[np.float32(x)]])
assert measurement.shape == (1, 1)
self.filter.correct(measurement)
self.from_array(self.filter.statePost)
def predict(self, time=None):
"""Predicts the scalar state"""
self.__update_time(time=time)
self.filter.predict()
self.from_array(self.filter.statePost)
def __update_noise_cov(self, p_cov, m_cov):
"""Updates the process and measurement covariances"""
self.filter.processNoiseCov = np.array([[1, 0],
[0, 1]], np.float32) * p_cov
self.filter.measurementNoiseCov = np.array([[1]], np.float32) * m_cov
def __update_transition(self, dt):
self.filter.transitionMatrix = np.array([[1, dt],
[0, 1]], np.float32)
def __update_time(self, time=None):
if time is None:
now = rospy.Time().now()
else:
now = time
elapsed_time = now - self.last_update
self.last_update = now
self.__update_transition(elapsed_time.to_sec())
def __len__(self):
return 1
def __add__(self, scalar):
return self.x + scalar.x
def __sub__(self, scalar):
return self.x - scalar.x
def __str__(self):
return("{}".format(self.to_array()))
|
[
"numpy.array",
"rospy.Time",
"numpy.float32",
"cv2.KalmanFilter"
] |
[((411, 433), 'cv2.KalmanFilter', 'cv2.KalmanFilter', (['(2)', '(1)'], {}), '(2, 1)\n', (427, 433), False, 'import cv2\n'), ((522, 552), 'numpy.array', 'np.array', (['[[1, 1]]', 'np.float32'], {}), '([[1, 1]], np.float32)\n', (530, 552), True, 'import numpy as np\n'), ((1073, 1116), 'numpy.array', 'np.array', (['[[self.x], [self.vx]]', 'np.float32'], {}), '([[self.x], [self.vx]], np.float32)\n', (1081, 1116), True, 'import numpy as np\n'), ((2315, 2354), 'numpy.array', 'np.array', (['[[1, dt], [0, 1]]', 'np.float32'], {}), '([[1, dt], [0, 1]], np.float32)\n', (2323, 2354), True, 'import numpy as np\n'), ((2062, 2100), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]', 'np.float32'], {}), '([[1, 0], [0, 1]], np.float32)\n', (2070, 2100), True, 'import numpy as np\n'), ((2200, 2227), 'numpy.array', 'np.array', (['[[1]]', 'np.float32'], {}), '([[1]], np.float32)\n', (2208, 2227), True, 'import numpy as np\n'), ((655, 667), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (665, 667), False, 'import rospy\n'), ((1575, 1588), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (1585, 1588), True, 'import numpy as np\n'), ((2488, 2500), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (2498, 2500), False, 'import rospy\n')]
|
#!/usr/bin/env python
import os
import sys
import sqlite3
import pandas as pd
import numpy as np
from scraper import create_data_folder, read_config
from collections import OrderedDict
def main():
"""
Mainly for debugging purposes.
"""
config_file = read_config()
# Pick a file
try:
csv_name = os.listdir(config_file["downloaded_data_path"])[0]
except:
print("Could not read csv file.. Please check you've downloaded data beforehand using scraper.py.")
exit(1)
# Read the data
df = read_data(csv_name, config_file)
# Extract information
sanitized_dataframe = extract_event_information(df)
# Save extracted information
create_data_folder(config_file["extracted_data_path"])
save_dataframe(sanitized_dataframe, "test", config_file)
def save_dataframe(df, df_root_name, config_file):
"""
Handles all the saving process into SQL and CSV formats.
@Param df: dataframe to save.
@Param df_root_name: name of the file to create without the extension.
@Param config_file: Configuration file.
"""
sqlite_read_path = os.path.join(config_file["extracted_data_path"] , f"{df_root_name}.db")
csv_save_path = os.path.join(config_file["extracted_data_path"] , f"{df_root_name}.csv")
save_dataframe_to_sqlite(df, sqlite_read_path)
save_dataframe_to_csv(sqlite_read_path, csv_save_path)
def save_dataframe_to_csv(db_path, save_path):
"""
Saves the data as csv in the given path by reading the sqlite3 database.
Makes sure to merge the values with those already existing at the same
location (event, latitude, location).
@Param db_path: path to the sqlite3 database.
@Param save_path: path to the csv file to create.
"""
# Read the SQL database
db = sqlite3.connect(db_path)
db_df = pd.read_sql_query("SELECT * FROM events", db)
# Transforming columns to make them compatible with storing multiple values
db_df["event_document"] = db_df["event_document"].apply(lambda x: [x])
db_df["event_date"] = db_df["event_date"].apply(lambda x: [x])
db_df["event_importance"] = db_df["event_importance"].apply(lambda x: [x])
db_df["event_source_name"] = db_df["event_source_name"].apply(lambda x: [x])
# merge lines with identical position and event.
db_df = db_df.groupby(["event", "event_latitude", "event_longitude"], as_index=False).aggregate({'event_document':np.sum, "event_importance": np.sum, "event_date": np.sum, "event_source_name": np.sum})
# Storing the information
db_df.to_csv(save_path, mode='w', index=False)
# Closing the database connexion
db.commit()
db.close()
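# Note on the aggregation above (an explanatory aside, not original code): the four columns are first
# wrapped in single-element lists so that, when pandas reduces each group with np.sum, the reduction
# falls back to repeated list addition, e.g. ["doc1"] + ["doc2"] == ["doc1", "doc2"], effectively
# concatenating the per-row values for rows sharing the same (event, latitude, longitude).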
def read_data(csv_name, config_file, add_root_dir=True):
"""
Reads the csv file given and returns the associated dataframe.
@Param csv_name: Name of the csv file to read.
@Param config_file: Configuration file.
@Return: Dataframe containing the csv information.
"""
print("Reading the csv file...")
csv = csv_name
if add_root_dir:
data_dir = config_file["downloaded_data_path"]
csv = os.path.join(data_dir, csv_name)
pd.set_option('display.float_format', lambda x: '%.3f' % x) # Avoid scientific notation
dataframe = pd.read_csv(csv,
delimiter = "\t",
names=["ID", "event_date", "source_identifier", "source_name", "document_id", "V1Counts_10", "V2_1Counts", "V1Themes", "V2EnhancedThemes", "V1Locations", "V2EnhancedLocations", "V1Persons",
"V2EnhancedPersons", "V1organizations", "V2EnhancedOrganizations", "V1_5tone", "V2_1EnhancedDates", "V2GCam", "V2_1SharingImage", "V2_1RelatedImages", "V2_1SocialImageEmbeds", "V2_1SocialVideoEmbeds",
"V2_1Quotations", "V2_1AllNames", "V2_1Amounts", "V2_1TranslationInfo", "V2ExtrasXML"],
encoding="ISO-8859-1")
return dataframe
def extract_event_information(dataframe):
"""
Extracts the information related to the events from the dataframe and returns a transformed dataframe.
The new dataframe contains information related to the event type, its importance and position (lat, long).
@Params dataframe: represents all the information contained in the initial csv.
@Return: dataframe containing the extracted information regarding the events.
"""
print("Extracting information from the csv file...")
events_columns = ["event", "event_importance", "event_latitude", "event_longitude"]
sanitized_dataframe = pd.DataFrame(columns=events_columns)
# Removing NaN events
main_dataframe = dataframe[["event_date", "V1Counts_10", "source_name", "document_id"]].copy()
main_series = main_dataframe.dropna(0)
for idx, row in main_series.iterrows():
event_date = row[0]
event_source_name = row[2]
event_document = row[3]
event_details = row[1].split("#")
event_dict = OrderedDict()
event_dict["event_date"] = event_date
event_dict["event_source_name"] = event_source_name
event_dict["event_document"] = event_document
event_dict["event"] = event_details[0]
event_dict["event_importance"] = event_details[1]
event_dict["event_latitude"] = event_details[7]
event_dict["event_longitude"] = event_details[8]
sanitized_dataframe = sanitized_dataframe.append(event_dict, ignore_index=True)
return sanitized_dataframe
def save_dataframe_to_sqlite(sanitized_dataframe, destination_file):
"""
Saves the dataframe information to a sqlite3 database.
@Param sanitized_dataframe: Dataframe containing the information to save.
@Param destination_file: Path to the database to save the information in.
If the database doesn't exist, creates it.
"""
conn = sqlite3.connect(destination_file)
c = conn.cursor()
# Create table
try:
c.execute('''CREATE TABLE events
(event text, event_importance text, event_latitude real, event_longitude real, event_date integer, event_document text, event_source_name text, unique(event_date, event, event_importance, event_latitude, event_longitude))''')
print("Created event table")
except Exception as e:
print(e)
# Populating the database
for idx, row in sanitized_dataframe.iterrows():
try:
# Before adding, we check if the element has been reported in the same day.
if row[2]=="":
row[2]=0
if row[3]=="":
row[3]=0
c.execute(f"SELECT event, event_importance, event_latitude, event_longitude FROM events WHERE event='{row[0]}' AND event_importance={int(row[1])} AND event_latitude={float(row[2])} AND event_longitude={float(row[3])}")
result = c.fetchall()
if len(result) == 0:
try:
c.execute(f"INSERT INTO events VALUES ('{row[0]}', '{row[1]}', '{row[2]}', '{row[3]}', '{row[4]}', '{row[5]}', '{row[6]}')")
except sqlite3.IntegrityError as e:
# Duplicated row
pass
except:
print("Unexpected error:", sys.exc_info()[0])
exit(1)
except Exception as e:
print("Unexpected error:", sys.exc_info()[0], e)
exit(1)
# Save (commit) the changes
conn.commit()
conn.close()
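# Illustrative usage (not part of the original script; file names are hypothetical):
#     df = extract_event_information(read_data("20200601.export.CSV", config))
#     save_dataframe_to_sqlite(df, "events.db")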
def save_dataframe_to_txt(sanitized_dataframe, destination_file):
"""
Saves the dataframe information to a txt file.
@Param sanitized_dataframe: Dataframe containing the information to save.
@Param destination_file: Path to the file to save the information in.
"""
# TODO: Change to a sqlite database ?
print("Storing the event information into a txt file...")
np.savetxt(destination_file, sanitized_dataframe.values, fmt='%s', delimiter="\t",
header="event\tevent_importance\tevent_latitude\tevent_longitude")
if __name__ == "__main__":
main()
|
[
"pandas.read_sql_query",
"scraper.read_config",
"collections.OrderedDict",
"os.listdir",
"sqlite3.connect",
"pandas.read_csv",
"os.path.join",
"pandas.set_option",
"sys.exc_info",
"scraper.create_data_folder",
"numpy.savetxt",
"pandas.DataFrame"
] |
[((269, 282), 'scraper.read_config', 'read_config', ([], {}), '()\n', (280, 282), False, 'from scraper import create_data_folder, read_config\n'), ((701, 755), 'scraper.create_data_folder', 'create_data_folder', (["config_file['extracted_data_path']"], {}), "(config_file['extracted_data_path'])\n", (719, 755), False, 'from scraper import create_data_folder, read_config\n'), ((1123, 1193), 'os.path.join', 'os.path.join', (["config_file['extracted_data_path']", 'f"""{df_root_name}.db"""'], {}), "(config_file['extracted_data_path'], f'{df_root_name}.db')\n", (1135, 1193), False, 'import os\n'), ((1215, 1286), 'os.path.join', 'os.path.join', (["config_file['extracted_data_path']", 'f"""{df_root_name}.csv"""'], {}), "(config_file['extracted_data_path'], f'{df_root_name}.csv')\n", (1227, 1286), False, 'import os\n'), ((1799, 1823), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (1814, 1823), False, 'import sqlite3\n'), ((1836, 1881), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT * FROM events"""', 'db'], {}), "('SELECT * FROM events', db)\n", (1853, 1881), True, 'import pandas as pd\n'), ((3154, 3213), 'pandas.set_option', 'pd.set_option', (['"""display.float_format"""', "(lambda x: '%.3f' % x)"], {}), "('display.float_format', lambda x: '%.3f' % x)\n", (3167, 3213), True, 'import pandas as pd\n'), ((3259, 3825), 'pandas.read_csv', 'pd.read_csv', (['csv'], {'delimiter': '"""\t"""', 'names': "['ID', 'event_date', 'source_identifier', 'source_name', 'document_id',\n 'V1Counts_10', 'V2_1Counts', 'V1Themes', 'V2EnhancedThemes',\n 'V1Locations', 'V2EnhancedLocations', 'V1Persons', 'V2EnhancedPersons',\n 'V1organizations', 'V2EnhancedOrganizations', 'V1_5tone',\n 'V2_1EnhancedDates', 'V2GCam', 'V2_1SharingImage', 'V2_1RelatedImages',\n 'V2_1SocialImageEmbeds', 'V2_1SocialVideoEmbeds', 'V2_1Quotations',\n 'V2_1AllNames', 'V2_1Amounts', 'V2_1TranslationInfo', 'V2ExtrasXML']", 'encoding': '"""ISO-8859-1"""'}), "(csv, delimiter='\\t', names=['ID', 'event_date',\n 'source_identifier', 'source_name', 'document_id', 'V1Counts_10',\n 'V2_1Counts', 'V1Themes', 'V2EnhancedThemes', 'V1Locations',\n 'V2EnhancedLocations', 'V1Persons', 'V2EnhancedPersons',\n 'V1organizations', 'V2EnhancedOrganizations', 'V1_5tone',\n 'V2_1EnhancedDates', 'V2GCam', 'V2_1SharingImage', 'V2_1RelatedImages',\n 'V2_1SocialImageEmbeds', 'V2_1SocialVideoEmbeds', 'V2_1Quotations',\n 'V2_1AllNames', 'V2_1Amounts', 'V2_1TranslationInfo', 'V2ExtrasXML'],\n encoding='ISO-8859-1')\n", (3270, 3825), True, 'import pandas as pd\n'), ((4587, 4623), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'events_columns'}), '(columns=events_columns)\n', (4599, 4623), True, 'import pandas as pd\n'), ((5895, 5928), 'sqlite3.connect', 'sqlite3.connect', (['destination_file'], {}), '(destination_file)\n', (5910, 5928), False, 'import sqlite3\n'), ((7915, 8073), 'numpy.savetxt', 'np.savetxt', (['destination_file', 'sanitized_dataframe.values'], {'fmt': '"""%s"""', 'delimiter': '"""\t"""', 'header': '"""event\tevent_importance\tevent_latitude\tevent_longitude"""'}), "(destination_file, sanitized_dataframe.values, fmt='%s',\n delimiter='\\t', header=\n 'event\\tevent_importance\\tevent_latitude\\tevent_longitude')\n", (7925, 8073), True, 'import numpy as np\n'), ((3116, 3148), 'os.path.join', 'os.path.join', (['data_dir', 'csv_name'], {}), '(data_dir, csv_name)\n', (3128, 3148), False, 'import os\n'), ((4997, 5010), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5008, 5010), False, 'from collections import 
OrderedDict\n'), ((330, 377), 'os.listdir', 'os.listdir', (["config_file['downloaded_data_path']"], {}), "(config_file['downloaded_data_path'])\n", (340, 377), False, 'import os\n'), ((7406, 7420), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7418, 7420), False, 'import sys\n'), ((7288, 7302), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7300, 7302), False, 'import sys\n')]
|
"""Defines procedures for training, and evaluation automatic camfi annotation models,
and for using them for making automatic annotations (inference). Depends on camfi.util,
camfi.datamodel.autoannotation, camfi.datamodel.geometry, camfi.datamode.via, as well
as ._torchutils and ._models."""
from datetime import datetime
import itertools
from math import pi
from pathlib import Path
from typing import Any, Callable, Optional, Union
from sys import stderr
import numpy as np
from pydantic import (
BaseModel,
DirectoryPath,
NonNegativeInt,
NonNegativeFloat,
PositiveFloat,
PositiveInt,
ValidationError,
validator,
)
from scipy import sparse
import torch
from torch.utils.data import DataLoader
from torchvision.models.detection.mask_rcnn import MaskRCNN
from tqdm import tqdm, trange
from camfi.datamodel.autoannotation import CamfiDataset, Prediction
from camfi.datamodel.geometry import (
BoundingBox,
CircleShapeAttributes,
PolylineShapeAttributes,
)
from camfi.datamodel.via import (
ViaFileAttributes,
ViaMetadata,
ViaProject,
ViaRegion,
ViaRegionAttributes,
)
from camfi.models import model_urls
from camfi.util import (
endpoint_truncate,
smallest_enclosing_circle,
weighted_intersection_over_minimum,
Field,
)
from ._torchutils import collate_fn, get_model_instance_segmentation, train_one_epoch
def load_annotation_model(model_path_or_url: Union[Path, str]) -> MaskRCNN:
"""Loads a camfi annotation model. Accepts any model key provided in
camfi.models, a Path object, or a URL str.
Parameters
----------
model_path_or_url : Union[Path, str]
Path to .pth file specifying model parameters, model name defined in
camfi.models.model_urls, or url to model to download from the internet.
Returns
-------
model : MaskRCNN
Instance segmentation model used for automatic annotation.
"""
print(f"Loading model: {model_path_or_url}", file=stderr)
model = get_model_instance_segmentation(2, pretrained=False)
if isinstance(model_path_or_url, Path):
state_dict = torch.load(model_path_or_url)
elif model_path_or_url in model_urls:
state_dict = torch.hub.load_state_dict_from_url(model_urls[model_path_or_url])
else:
state_dict = torch.hub.load_state_dict_from_url(model_path_or_url)
model.load_state_dict(state_dict)
return model
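# Illustrative usage (not part of the original module; the local path is hypothetical):
#     model = load_annotation_model("release")                 # named model from camfi.models.model_urls
#     model = load_annotation_model(Path("weights/model.pth"))  # local state-dict file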
def copy_annotation_model(model: MaskRCNN) -> MaskRCNN:
"""Copies a camfi annotation model.
Parameters
----------
model : MaskRCNN
Model to copy.
Returns
-------
model_copy : MaskRCNN
Copy of model.
"""
model_copy = get_model_instance_segmentation(2, pretrained=False)
model_copy.load_state_dict(model.state_dict())
return model_copy
def train_model(
dataset: CamfiDataset,
load_pretrained_model: Optional[Union[Path, str]] = None,
device: Union[str, torch.device] = "cpu",
batch_size: int = 5,
num_workers: int = 2,
num_epochs: int = 10,
outdir: DirectoryPath = Path(),
model_name: Optional[str] = None,
save_intermediate: bool = False,
) -> Path:
"""Trains a camfi instance segmentation annotation model on specified dataset,
saving to trained model to outdir.
Parameters
----------
dataset : CamfiDataset
Dataset on which to train the model.
load_pretrained_model : Optional[Union[Path, str]]
Path or url to model parameters file. If set, will load the pretrained
parameters. By default, will start with a model pre-trained on the Microsoft
COCO dataset.
device : Union[str, torch.device]
E.g. "cpu" or "cuda". Training is typically much faster on a GPU. Use "cuda" for
Nvidia GPUs.
batch_size : int
Number of images to load at once.
num_workers : int
Number of worker processes for data loader to spawn.
num_epochs : int
Number of epochs to train.
outdir : DirectoryPath
Path to directory where to save model(s).
model_name : Optional[str]
Identifier to include in model save file. By default the current date in
YYYYmmdd format.
save_intermediate : bool
        If True, the model is saved after each epoch, not just after all epochs are complete.
        This is recommended, especially if training on a service which could terminate
        unpredictably (e.g. Google Colab).
Returns
-------
model_path : Path
Path to saved model.
"""
# Parameter setting
device = torch.device(device)
if model_name is None:
model_name = datetime.now().strftime("%Y%m%d")
# Initialise data_loader
data_loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
collate_fn=collate_fn,
)
# Initialise model
if load_pretrained_model is not None:
model = load_annotation_model(load_pretrained_model)
else:
model = get_model_instance_segmentation(2)
model.to(device)
# Initialise optimiser and lr_scheduler
params = [p for p in model.parameters() if p.requires_grad]
optimiser = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimiser, step_size=3, gamma=0.1)
# Train the model
for epoch in range(num_epochs):
# train for one epoch, printing every 10 iterations
train_one_epoch(model, optimiser, data_loader, device, epoch, print_freq=10)
# update the learning rate
lr_scheduler.step()
if save_intermediate or epoch == num_epochs - 1:
save_path = outdir / f"{model_name}_{epoch}_model.pth"
torch.save(model.state_dict(), save_path)
print(f"Training complete. Model saved at {save_path}")
return save_path
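# Illustrative usage (not part of the original module; assumes an existing CamfiDataset `dataset`
# and an existing output directory):
#     model_path = train_model(dataset, device="cuda", num_epochs=10, outdir=Path("models"))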
class Annotator(BaseModel):
"""Provides methods for automatically annotating images of flying insects using a
pre-trained instance segmentation model.
Parameters
----------
dataset : CamfiDataset
Dataset to annotate.
model : Union[str, Path, MaskRCNN]
Either a path to state dict file which defines the segmentation model, or a url
pointing to a model to download, or one of the model names defined in
camfi.models.model_urls.
Alternatively, a MaskRCNN instance can be given directly.
device : Union[str, torch.device]
Specifies device to run inference on. E.g. set to "cuda" to use an Nvidia GPU.
backup_device : Optional[Union[str, torch.device]]
Specifies device to run inference on when a runtime error occurs while using
device. Probably only makes sense to set this to "cpu" if device="cuda". This
option enables the annotator to leverage a GPU with limited memory capacity
without crashing if a difficult image is encountered.
backup_model: Optional[MaskRCNN]
Defines the backup model. Will be automatically generated if backup_device is
set. Should not be set manually.
split_angle : PositiveFloat
Approximate maximum angle between polyline segments in degrees. Note that this
will immediately be converted to radians upon instantiation of Annotator.
poly_order : PositiveInt
Order of polynomial used for fitting motion blur paths.
endpoint_method : Callable[[np.ndarray, ...], tuple[NonNegativeInt, NonNegativeInt]]
Method to find endpoints of motion blurs. The first argument to this method
should be a cropped mask np.ndarray.
endpoint_extra_args : list[Any]
Extra arguments to pass to endpoint_method.
score_thresh : float
Score threshold between 0.0 and 1.0 for automatic annotations to be kept.
overlap_thresh : float
Minimum proportion of overlap (weighted intersection over minimum) between two
instance segmentation masks to infer that one of the masks should be discarded.
edge_thresh : NonNegativeInt
        Minimum distance (in pixels) an annotation must keep from the edge of the image to
        remain a polyline annotation; polylines that come closer to the edge than this are
        converted to circle annotations.
"""
dataset: CamfiDataset
model: MaskRCNN = "release"
device: Union[str, torch.device] = "cpu"
backup_device: Optional[Union[str, torch.device]] = None
backup_model: Optional[MaskRCNN] = None
split_angle: PositiveFloat = 15.0
poly_order: PositiveInt = 2
endpoint_method: Callable[
..., tuple[NonNegativeInt, NonNegativeInt]
] = endpoint_truncate
endpoint_extra_args: list[Any] = [10]
score_thresh: float = 0.4
overlap_thresh: float = 0.4
edge_thresh: NonNegativeInt = 20
backup_model_used: int = 0
class Config:
arbitrary_types_allowed = True
@validator("model", pre=True, always=True)
def get_model(cls, v):
if isinstance(v, MaskRCNN):
return v
else:
return load_annotation_model(v)
@validator("device", always=True)
def put_model_on_device_and_set_to_eval(cls, v, values):
print(f"Putting model on device: {v}", file=stderr)
v = torch.device(v)
values["model"].to(v)
values["model"].eval()
return v
@validator("backup_model", pre=True, always=True)
def copy_model_to_backup_device(cls, v, values):
assert v is None, "Should not set 'backup_model'. It will be set automatically"
if "backup_device" in values and values["backup_device"] is not None:
v = copy_annotation_model(values["model"])
v.to(values["backup_device"])
v.eval()
return v
@validator("split_angle", always=True)
def convert_split_angle_to_radians(cls, v):
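        # e.g. the default split_angle of 15.0 degrees becomes roughly 0.262 radians here.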
return v * pi / 180.0
def get_prediction(self, img_idx: NonNegativeInt) -> Prediction:
"""Run predicion on a single image. First tries to use the model on self.device,
and falls back to the model on self.backup_device if a RuntimeError is caught
(if set).
Parameters
----------
img_idx: int
Index of image in via project.
Returns
-------
prediction: Prediction
Output of model prediction.
"""
try:
img, _ = self.dataset[img_idx]
except (OSError, RuntimeError) as e:
print(
f"Error loading {self.dataset.metadata(img_idx).filename}. {e!r}. Skipping.",
file=stderr,
)
return Prediction.empty()
with torch.no_grad():
try:
prediction = self.model([img.to(self.device)])[0]
except RuntimeError:
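                # Typically raised on an out-of-memory failure on the primary device; retry
                # on the backup model/device if one was configured, otherwise re-raise.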
if self.backup_model:
prediction = self.backup_model([img.to(self.backup_device)])[0]
self.backup_model_used += 1
else:
raise
del img
return Prediction.from_tensor_dict(prediction)
def filter_annotations(self, prediction: Prediction) -> Prediction:
"""Applies self.score_thresh and self.overlap_thresh to filter out poor quality
annotations.
Parameters
----------
prediction : Prediction
Output of model prediction.
Returns
-------
filtered_prediction : Prediction
Filtered prediction.
"""
# Remove predictions with below-threshold score
prediction = prediction.filter_by_score(self.score_thresh)
n_predictions = len(prediction)
if n_predictions == 0:
return prediction
# Calculate mask overlaps for all pairs of predicted instances
mask_overlaps = np.zeros((n_predictions, n_predictions), dtype="f4")
for i, j in itertools.combinations(range(n_predictions), 2):
if prediction.boxes[i].overlaps(prediction.boxes[j]):
mask_overlaps[i, j] = weighted_intersection_over_minimum(
prediction.masks[i], prediction.masks[j]
)
mask_overlaps[j, i] = mask_overlaps[i, j]
# Remove worst overlapping instances until there are no above-threshold overlaps
keep = set(range(n_predictions))
overlap_mask = mask_overlaps.max(axis=1) >= self.overlap_thresh
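        # overlap_mask[i] is True while instance i still overlaps some other instance by at
        # least overlap_thresh (weighted intersection over minimum).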
while np.any(overlap_mask):
# Figure out which overlapping annotation has the worst score
overlap_annotations = np.where(overlap_mask)[0]
to_discard = overlap_annotations[
np.argmin(np.array(prediction.scores)[overlap_annotations])
]
# Remove the annotation
keep.remove(to_discard)
mask_overlaps[to_discard, :] = 0.0
mask_overlaps[:, to_discard] = 0.0
overlap_mask = mask_overlaps.max(axis=1) >= self.overlap_thresh
return prediction.get_subset_from_index(list(keep))
def fit_poly(
self,
box: BoundingBox,
mask: torch.Tensor,
) -> Union[PolylineShapeAttributes, CircleShapeAttributes, None]:
"""Uses polynomial regression to fit a polyline annotation to the provided
segmentation mask.
Parameters
----------
box : BoundingBox
Fully contains the object to be annotated.
mask : tensor or array
            Segmentation mask of the instance with shape (image_height, image_width).
Returns
-------
shape_attributes : Union[PolylineShapeAttributes, CircleShapeAttributes, None]
Geometry of automatic annotation.
"""
portrait = box.is_portrait()
crop_mask = box.crop_image(mask).cpu().numpy().reshape(box.shape)
y, x = np.where(crop_mask > 0.0)
weights = np.array(crop_mask[y, x]).flatten()
# Set longest axis as independent variable and fit polynomial
ind = (x, y)[portrait]
dep = (y, x)[portrait]
poly_fit = np.polynomial.Polynomial.fit(ind, dep, self.poly_order, w=weights)
# Find endpoints
ind_vals = np.arange(crop_mask.shape[not portrait])
dep_vals = poly_fit(ind_vals)
val_mask = np.logical_and(dep_vals < crop_mask.shape[portrait], dep_vals >= 0)
y_vals = (dep_vals, ind_vals)[portrait][val_mask]
x_vals = (ind_vals, dep_vals)[portrait][val_mask]
fit_mask_vals = crop_mask[y_vals.astype("i4"), x_vals.astype("i4")]
endpoints = ind_vals[
list(self.endpoint_method(fit_mask_vals, *self.endpoint_extra_args))
]
# Approximate polynomial segment with polyline
end_gradients = poly_fit.deriv()(endpoints)
end_angles = np.arctan(end_gradients)
angle_diff = abs(end_angles[1] - end_angles[0])
all_points_ind, all_points_dep = poly_fit.linspace(
n=int(np.ceil(angle_diff / self.split_angle) + 2), domain=endpoints
)
all_points_x = list((all_points_ind, all_points_dep)[portrait] + box.x0)
all_points_y = list((all_points_dep, all_points_ind)[portrait] + box.y0)
shape_attributes: Union[PolylineShapeAttributes, CircleShapeAttributes, None]
try:
shape_attributes = PolylineShapeAttributes(
all_points_x=all_points_x, all_points_y=all_points_y
)
except ValidationError:
try:
cx, cy, r = smallest_enclosing_circle(zip(all_points_x, all_points_y))
shape_attributes = CircleShapeAttributes(cx=cx, cy=cy, r=r)
except ValidationError:
shape_attributes = None
return shape_attributes
def convert_to_circle(
self,
polyline: PolylineShapeAttributes,
img_shape: tuple[PositiveInt, PositiveInt],
) -> Union[PolylineShapeAttributes, CircleShapeAttributes]:
"""Checks if a polyline annotation is close to the edge of an image, and if so,
converts it to a circle annotation by computing the smallest enclosing circle of
all points in the polyline.
Parameters
----------
polyline : PolylineShapeAttributes
Shape to convert if too close to edge.
img_shape: tuple[int, int]
Height and width of image.
Returns
-------
shape_attributes : Union[PolylineShapeAttributes, CircleShapeAttributes]
            Geometry of the annotation after (possible) conversion. If the polyline does not
            come too close to the edge of the image, it is returned unchanged.
Else, a circle annotation is returned.
"""
polyline_accepted_region = BoundingBox.from_shape(
img_shape, border=self.edge_thresh
)
if polyline.in_box(polyline_accepted_region):
return polyline
return polyline.as_circle()
def annotate_img(self, img_idx: int) -> list[ViaRegion]:
"""Calls self.get_prediction, self.filter_annotations, and self.fit_poly to
produce annotations for an image specified with img_idx.
Parameters
----------
img_idx: int
Index of image in via project.
Returns
-------
regions : list[ViaRegion]
list of annotations for image.
"""
prediction = self.get_prediction(img_idx)
prediction = self.filter_annotations(prediction)
regions = []
for i in range(len(prediction)):
box = prediction.boxes[i]
mask = prediction.masks[i]
score = prediction.scores[i]
shape_attributes = self.fit_poly(box, mask)
if shape_attributes is None:
continue
if shape_attributes.name == "polyline":
assert isinstance(shape_attributes, PolylineShapeAttributes)
shape_attributes = self.convert_to_circle(
shape_attributes, (mask.shape[-2], mask.shape[-1])
)
region_attributes = ViaRegionAttributes(score=score)
regions.append(
ViaRegion(
region_attributes=region_attributes,
shape_attributes=shape_attributes,
)
)
return regions
def annotate(self, disable_progress_bar: Optional[bool] = True) -> ViaProject:
"""Calls self.annotate_img on all images and returns a ViaProject instance.
Copies the `via_attributes` and `via_settings` fields from
`self.dataset.via_project`, and just replaces the `via_img_metadata` field.
Parameters
----------
disable_progress_bar : Optional[bool]
If True (default), progress bar is disabled.
If set to None, disable on non-TTY.
Returns
-------
project : ViaProject
With automatic annotations made.
"""
via_img_metadata: dict[str, ViaMetadata] = {}
postfix = {"tot_annotations": 0}
if self.backup_device:
postfix["backup_device_used"] = self.backup_model_used
pb = trange(
len(self.dataset),
disable=disable_progress_bar,
desc="Annotating images",
unit="img",
dynamic_ncols=True,
ascii=True,
postfix=postfix,
)
for img_idx in pb:
img_key = self.dataset.keys[img_idx]
regions = self.annotate_img(img_idx)
in_metadata = self.dataset.metadata(img_idx)
out_metadata = ViaMetadata.construct(
file_attributes=in_metadata.file_attributes.copy(),
filename=in_metadata.filename,
regions=regions,
size=in_metadata.size,
)
via_img_metadata[img_key] = out_metadata
postfix["tot_annotations"] += len(regions)
if self.backup_device:
postfix["backup_device_used"] = self.backup_model_used
pb.set_postfix(postfix, refresh=False)
print(f"Annotation complete.", file=stderr)
return ViaProject.construct(
via_attributes=self.dataset.via_project.via_attributes,
via_img_metadata=via_img_metadata,
via_settings=self.dataset.via_project.via_settings,
)
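# Illustrative usage (not part of the original module; assumes a configured CamfiDataset `dataset`):
#     annotator = Annotator(dataset=dataset, model="release", device="cuda", backup_device="cpu")
#     auto_project = annotator.annotate(disable_progress_bar=None)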
class AnnotationValidationResult(BaseModel):
"""Contains various metrics for assessing the quality of a set of automatically
obtained annotations of flying insects.
Parameters
----------
ious : list[tuple[NonNegativeFloat, NonNegativeFloat]]
list of (iou, score) pairs.
iou is the Intersection over Union of the bounding boxes of true positives
to their matched ground truth annotation. All matched annotations are
included.
polyline_hausdorff_distances : list[tuple[NonNegativeFloat, NonNegativeFloat]]
list of (h_dist, score) pairs.
h_dist is the hausdorff distance of a true positive polyline annotation,
where the annotation is matched to a polyline ground truth annotation. Only
polyline annotations which matched to a polyline ground truth annotation are
included.
length_differences : list[tuple[float, NonNegativeFloat]]
list of (l_diff, score) pairs.
l_diff is calculated as the length of a true positive polyline annotation
        minus the length of its matched ground truth annotation. Only polyline
annotations which matched to a polyline ground truth annotation are
included.
true_positives : list[NonNegativeFloat]
list of scores.
false_positives : list[NonNegativeFloat]
list of scores. Score is the prediction score of the automatic annotation.
false_negatives : int
Number of false negative annotations.
"""
ious: list[tuple[NonNegativeFloat, NonNegativeFloat]] = []
polyline_hausdorff_distances: list[tuple[NonNegativeFloat, NonNegativeFloat]] = []
length_differences: list[tuple[float, NonNegativeFloat]] = []
true_positives: list[NonNegativeFloat] = []
false_positives: list[NonNegativeFloat] = []
false_negatives: NonNegativeInt = 0
def validate_annotations(
auto_annotations: ViaProject,
ground_truth: ViaProject,
iou_thresh: float = 0.5,
subset_functions: Optional[dict[str, Callable[[ViaMetadata], bool]]] = None,
disable_progress_bar: Optional[bool] = True,
) -> list[AnnotationValidationResult]:
"""Compares automatic annotations against a ground-truth annotations for validation
puposes. Validation data is stored in an AnnotationValidationResult object.
Parameters
----------
auto_annotations : ViaProject
Automatically obtained annotations to assess.
ground_truth : ViaProject
Manually created ground-truth annotations.
iou_thresh : float
Threshold of intersection-over-union of bounding boxes to be considered a
match. Typically, this is 0.5.
subset_functions : Optional[dict[str, Callable[[ViaMetadata], bool]]]
Mapping from subset name to subset function. If set, validation will be repeated
multiple times with different subsets, once for each element.
disable_progress_bar : Optional[bool]
If True (default), progress bar is disabled.
If set to None, disable on non-TTY.
Returns
-------
validation_results : list[AnnotationValidationResult]
list containing instances of AnnotationValidationResult. If subset_functions is
set, then validation_results will have len(subset_functions) elements. By
default it will just contain one element.
"""
if subset_functions is None:
subset_functions = {"all": lambda x: True}
results: list[AnnotationValidationResult] = []
for name, subset_function in subset_functions.items():
gt_annotations = ground_truth.filtered_copy(subset_function)
result = AnnotationValidationResult()
for img_key in tqdm(
gt_annotations.via_img_metadata.keys()
& auto_annotations.via_img_metadata.keys(),
disable=disable_progress_bar,
desc=f"Validating {name} annotations",
unit="img",
dynamic_ncols=True,
ascii=True,
):
gt_metadata = gt_annotations.via_img_metadata[img_key]
metadata = auto_annotations.via_img_metadata[img_key]
ious = sparse.dok_matrix(
(len(metadata.regions), len(gt_metadata.regions)), dtype="f8"
)
for i, j in itertools.product(
range(len(metadata.regions)), range(len(gt_metadata.regions))
):
iou = metadata.regions[i].shape_attributes.intersection_over_union(
gt_metadata.regions[j].shape_attributes
)
if iou >= iou_thresh:
ious[i, j] = iou
ious = ious.tocsr()
matches = sparse.csgraph.maximum_bipartite_matching(ious, "column")
result.false_negatives += len(gt_metadata.regions) - np.count_nonzero(
matches >= 0
)
for i, match in enumerate(matches):
score = metadata.regions[i].region_attributes.score
if score is None:
raise ValueError(
"Invalid automatically obtained annotation. "
"Ensure that auto_annotations were obtained automatically "
f"(region {i} of {img_key} missing 'score' region_attribute)."
)
elif match >= 0:
result.true_positives.append(score)
result.ious.append((ious[i, match], score))
shape = metadata.regions[i].shape_attributes
gt_shape = gt_metadata.regions[match].shape_attributes
if shape.name == gt_shape.name == "polyline":
assert isinstance(shape, PolylineShapeAttributes)
h_dist = shape.hausdorff_distance(gt_shape)
result.polyline_hausdorff_distances.append((h_dist, score))
l_diff = shape.length() - gt_shape.length()
result.length_differences.append((l_diff, score))
else:
result.false_positives.append(score)
results.append(result)
return results
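# Illustrative usage (not part of the original module):
#     results = validate_annotations(auto_project, gt_project, iou_thresh=0.5)
#     result = results[0]  # a single "all" subset by default
#     n_tp, n_fp = len(result.true_positives), len(result.false_positives)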
|
[
"numpy.count_nonzero",
"numpy.array",
"camfi.datamodel.via.ViaRegionAttributes",
"numpy.arange",
"camfi.datamodel.geometry.CircleShapeAttributes",
"pathlib.Path",
"numpy.where",
"camfi.datamodel.autoannotation.Prediction.from_tensor_dict",
"torch.hub.load_state_dict_from_url",
"scipy.sparse.csgraph.maximum_bipartite_matching",
"camfi.datamodel.via.ViaProject.construct",
"camfi.datamodel.geometry.PolylineShapeAttributes",
"numpy.arctan",
"torch.optim.SGD",
"numpy.ceil",
"pydantic.validator",
"camfi.datamodel.autoannotation.Prediction.empty",
"numpy.any",
"camfi.util.weighted_intersection_over_minimum",
"torch.device",
"numpy.polynomial.Polynomial.fit",
"camfi.datamodel.geometry.BoundingBox.from_shape",
"numpy.logical_and",
"camfi.datamodel.via.ViaRegion",
"torch.load",
"torch.optim.lr_scheduler.StepLR",
"datetime.datetime.now",
"numpy.zeros",
"torch.utils.data.DataLoader",
"torch.no_grad"
] |
[((3085, 3091), 'pathlib.Path', 'Path', ([], {}), '()\n', (3089, 3091), False, 'from pathlib import Path\n'), ((4578, 4598), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (4590, 4598), False, 'import torch\n'), ((4729, 4838), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=True, num_workers=\n num_workers, collate_fn=collate_fn)\n', (4739, 4838), False, 'from torch.utils.data import DataLoader\n'), ((5215, 5283), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.005)', 'momentum': '(0.9)', 'weight_decay': '(0.0005)'}), '(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\n', (5230, 5283), False, 'import torch\n'), ((5303, 5369), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimiser'], {'step_size': '(3)', 'gamma': '(0.1)'}), '(optimiser, step_size=3, gamma=0.1)\n', (5334, 5369), False, 'import torch\n'), ((8842, 8883), 'pydantic.validator', 'validator', (['"""model"""'], {'pre': '(True)', 'always': '(True)'}), "('model', pre=True, always=True)\n", (8851, 8883), False, 'from pydantic import BaseModel, DirectoryPath, NonNegativeInt, NonNegativeFloat, PositiveFloat, PositiveInt, ValidationError, validator\n'), ((9032, 9064), 'pydantic.validator', 'validator', (['"""device"""'], {'always': '(True)'}), "('device', always=True)\n", (9041, 9064), False, 'from pydantic import BaseModel, DirectoryPath, NonNegativeInt, NonNegativeFloat, PositiveFloat, PositiveInt, ValidationError, validator\n'), ((9298, 9346), 'pydantic.validator', 'validator', (['"""backup_model"""'], {'pre': '(True)', 'always': '(True)'}), "('backup_model', pre=True, always=True)\n", (9307, 9346), False, 'from pydantic import BaseModel, DirectoryPath, NonNegativeInt, NonNegativeFloat, PositiveFloat, PositiveInt, ValidationError, validator\n'), ((9708, 9745), 'pydantic.validator', 'validator', (['"""split_angle"""'], {'always': '(True)'}), "('split_angle', always=True)\n", (9717, 9745), False, 'from pydantic import BaseModel, DirectoryPath, NonNegativeInt, NonNegativeFloat, PositiveFloat, PositiveInt, ValidationError, validator\n'), ((2129, 2158), 'torch.load', 'torch.load', (['model_path_or_url'], {}), '(model_path_or_url)\n', (2139, 2158), False, 'import torch\n'), ((9198, 9213), 'torch.device', 'torch.device', (['v'], {}), '(v)\n', (9210, 9213), False, 'import torch\n'), ((10999, 11038), 'camfi.datamodel.autoannotation.Prediction.from_tensor_dict', 'Prediction.from_tensor_dict', (['prediction'], {}), '(prediction)\n', (11026, 11038), False, 'from camfi.datamodel.autoannotation import CamfiDataset, Prediction\n'), ((11772, 11824), 'numpy.zeros', 'np.zeros', (['(n_predictions, n_predictions)'], {'dtype': '"""f4"""'}), "((n_predictions, n_predictions), dtype='f4')\n", (11780, 11824), True, 'import numpy as np\n'), ((12390, 12410), 'numpy.any', 'np.any', (['overlap_mask'], {}), '(overlap_mask)\n', (12396, 12410), True, 'import numpy as np\n'), ((13791, 13816), 'numpy.where', 'np.where', (['(crop_mask > 0.0)'], {}), '(crop_mask > 0.0)\n', (13799, 13816), True, 'import numpy as np\n'), ((14023, 14089), 'numpy.polynomial.Polynomial.fit', 'np.polynomial.Polynomial.fit', (['ind', 'dep', 'self.poly_order'], {'w': 'weights'}), '(ind, dep, self.poly_order, w=weights)\n', (14051, 14089), True, 'import numpy as np\n'), ((14135, 14175), 'numpy.arange', 'np.arange', (['crop_mask.shape[not portrait]'], {}), 
'(crop_mask.shape[not portrait])\n', (14144, 14175), True, 'import numpy as np\n'), ((14233, 14300), 'numpy.logical_and', 'np.logical_and', (['(dep_vals < crop_mask.shape[portrait])', '(dep_vals >= 0)'], {}), '(dep_vals < crop_mask.shape[portrait], dep_vals >= 0)\n', (14247, 14300), True, 'import numpy as np\n'), ((14744, 14768), 'numpy.arctan', 'np.arctan', (['end_gradients'], {}), '(end_gradients)\n', (14753, 14768), True, 'import numpy as np\n'), ((16702, 16760), 'camfi.datamodel.geometry.BoundingBox.from_shape', 'BoundingBox.from_shape', (['img_shape'], {'border': 'self.edge_thresh'}), '(img_shape, border=self.edge_thresh)\n', (16724, 16760), False, 'from camfi.datamodel.geometry import BoundingBox, CircleShapeAttributes, PolylineShapeAttributes\n'), ((20153, 20325), 'camfi.datamodel.via.ViaProject.construct', 'ViaProject.construct', ([], {'via_attributes': 'self.dataset.via_project.via_attributes', 'via_img_metadata': 'via_img_metadata', 'via_settings': 'self.dataset.via_project.via_settings'}), '(via_attributes=self.dataset.via_project.via_attributes,\n via_img_metadata=via_img_metadata, via_settings=self.dataset.\n via_project.via_settings)\n', (20173, 20325), False, 'from camfi.datamodel.via import ViaFileAttributes, ViaMetadata, ViaProject, ViaRegion, ViaRegionAttributes\n'), ((2222, 2287), 'torch.hub.load_state_dict_from_url', 'torch.hub.load_state_dict_from_url', (['model_urls[model_path_or_url]'], {}), '(model_urls[model_path_or_url])\n', (2256, 2287), False, 'import torch\n'), ((2319, 2372), 'torch.hub.load_state_dict_from_url', 'torch.hub.load_state_dict_from_url', (['model_path_or_url'], {}), '(model_path_or_url)\n', (2353, 2372), False, 'import torch\n'), ((10615, 10630), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10628, 10630), False, 'import torch\n'), ((15267, 15344), 'camfi.datamodel.geometry.PolylineShapeAttributes', 'PolylineShapeAttributes', ([], {'all_points_x': 'all_points_x', 'all_points_y': 'all_points_y'}), '(all_points_x=all_points_x, all_points_y=all_points_y)\n', (15290, 15344), False, 'from camfi.datamodel.geometry import BoundingBox, CircleShapeAttributes, PolylineShapeAttributes\n'), ((18058, 18090), 'camfi.datamodel.via.ViaRegionAttributes', 'ViaRegionAttributes', ([], {'score': 'score'}), '(score=score)\n', (18077, 18090), False, 'from camfi.datamodel.via import ViaFileAttributes, ViaMetadata, ViaProject, ViaRegion, ViaRegionAttributes\n'), ((25031, 25088), 'scipy.sparse.csgraph.maximum_bipartite_matching', 'sparse.csgraph.maximum_bipartite_matching', (['ious', '"""column"""'], {}), "(ious, 'column')\n", (25072, 25088), False, 'from scipy import sparse\n'), ((4647, 4661), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4659, 4661), False, 'from datetime import datetime\n'), ((10582, 10600), 'camfi.datamodel.autoannotation.Prediction.empty', 'Prediction.empty', ([], {}), '()\n', (10598, 10600), False, 'from camfi.datamodel.autoannotation import CamfiDataset, Prediction\n'), ((11999, 12075), 'camfi.util.weighted_intersection_over_minimum', 'weighted_intersection_over_minimum', (['prediction.masks[i]', 'prediction.masks[j]'], {}), '(prediction.masks[i], prediction.masks[j])\n', (12033, 12075), False, 'from camfi.util import endpoint_truncate, smallest_enclosing_circle, weighted_intersection_over_minimum, Field\n'), ((12520, 12542), 'numpy.where', 'np.where', (['overlap_mask'], {}), '(overlap_mask)\n', (12528, 12542), True, 'import numpy as np\n'), ((13835, 13860), 'numpy.array', 'np.array', (['crop_mask[y, x]'], {}), '(crop_mask[y, 
x])\n', (13843, 13860), True, 'import numpy as np\n'), ((18135, 18221), 'camfi.datamodel.via.ViaRegion', 'ViaRegion', ([], {'region_attributes': 'region_attributes', 'shape_attributes': 'shape_attributes'}), '(region_attributes=region_attributes, shape_attributes=\n shape_attributes)\n', (18144, 18221), False, 'from camfi.datamodel.via import ViaFileAttributes, ViaMetadata, ViaProject, ViaRegion, ViaRegionAttributes\n'), ((25154, 25184), 'numpy.count_nonzero', 'np.count_nonzero', (['(matches >= 0)'], {}), '(matches >= 0)\n', (25170, 25184), True, 'import numpy as np\n'), ((15546, 15586), 'camfi.datamodel.geometry.CircleShapeAttributes', 'CircleShapeAttributes', ([], {'cx': 'cx', 'cy': 'cy', 'r': 'r'}), '(cx=cx, cy=cy, r=r)\n', (15567, 15586), False, 'from camfi.datamodel.geometry import BoundingBox, CircleShapeAttributes, PolylineShapeAttributes\n'), ((12618, 12645), 'numpy.array', 'np.array', (['prediction.scores'], {}), '(prediction.scores)\n', (12626, 12645), True, 'import numpy as np\n'), ((14903, 14941), 'numpy.ceil', 'np.ceil', (['(angle_diff / self.split_angle)'], {}), '(angle_diff / self.split_angle)\n', (14910, 14941), True, 'import numpy as np\n')]
|
"""
Analyses skewness for continuous features
Options:
A. Log
B. Yeo-Johnson
C. QuantileTransformer
"""
import json
import pandas as pd
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import power_transform, quantile_transform
from pathlib import Path
p = Path(__file__).parents[1]
# To load project modules
import sys; sys.path.append(str(p))
from src.logger import LOGGER
from src.utils import skewTest
LOGGER.info('Load data')
X = pd.read_pickle(p.joinpath('data', 'interim', 'research.pkl')).filter(like='cont')
LOGGER.info('Process data - Logarithm')
A = (
pd.DataFrame(X.apply(skewTest, args=(np.log1p,)).to_list())
.assign(Transformation='Logarithm')
.set_index('Transformation')
)
LOGGER.info('Process data - Yeo-Johnson')
B = (
pd.DataFrame(
X.apply(lambda s: skewTest(np.reshape(s.values, (-1, 1)), power_transform))
.to_list()
)
.apply(lambda s: s.explode().astype(float))
.assign(Transformation='Yeo-Johnson')
.set_index('Transformation')
)
LOGGER.info('Process data - Quantile Transform')
C = (
pd.DataFrame(
X.apply(lambda s: skewTest(
np.reshape(s.values, (-1, 1)),
quantile_transform,
output_distribution='normal',
random_state=0
))
.to_list()
)
.apply(lambda s: s.explode().astype(float))
.assign(Transformation='Quantile Transform')
.set_index('Transformation')
)
LOGGER.info('Computing result')
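# Average the per-feature skew-test results within each transformation and rank them by a
# cost-effectiveness ratio (runtime divided by the achieved insignificance); the 'Time' and
# 'Insignificance' columns are assumed to come from src.utils.skewTest.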
(
pd.concat([A, B, C]).reset_index().groupby('Transformation').mean()
.assign(CostEffectivenessRatio=lambda df: df['Time'].div(df['Insignificance']))
.sort_values('CostEffectivenessRatio')
.to_html(
buf=p.joinpath('reports', 'tables', '02ContTransformations.html'),
float_format='{:.2f}'.format,
bold_rows=False
)
)
|
[
"numpy.reshape",
"pandas.concat",
"src.logger.LOGGER.info",
"pathlib.Path"
] |
[((464, 488), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Load data"""'], {}), "('Load data')\n", (475, 488), False, 'from src.logger import LOGGER\n'), ((576, 615), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Process data - Logarithm"""'], {}), "('Process data - Logarithm')\n", (587, 615), False, 'from src.logger import LOGGER\n'), ((762, 803), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Process data - Yeo-Johnson"""'], {}), "('Process data - Yeo-Johnson')\n", (773, 803), False, 'from src.logger import LOGGER\n'), ((1063, 1111), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Process data - Quantile Transform"""'], {}), "('Process data - Quantile Transform')\n", (1074, 1111), False, 'from src.logger import LOGGER\n'), ((1485, 1516), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Computing result"""'], {}), "('Computing result')\n", (1496, 1516), False, 'from src.logger import LOGGER\n'), ((312, 326), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (316, 326), False, 'from pathlib import Path\n'), ((863, 892), 'numpy.reshape', 'np.reshape', (['s.values', '(-1, 1)'], {}), '(s.values, (-1, 1))\n', (873, 892), True, 'import numpy as np\n'), ((1184, 1213), 'numpy.reshape', 'np.reshape', (['s.values', '(-1, 1)'], {}), '(s.values, (-1, 1))\n', (1194, 1213), True, 'import numpy as np\n'), ((1523, 1543), 'pandas.concat', 'pd.concat', (['[A, B, C]'], {}), '([A, B, C])\n', (1532, 1543), True, 'import pandas as pd\n')]
|
import deepchem as dc
import numpy as np
import os
def test_numpy_dataset_get_shape():
"""Test that get_shape works for numpy datasets."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_shape_single_shard():
"""Test that get_shape works for disk dataset."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_shape_multishard():
"""Test that get_shape works for multisharded disk dataset."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
# Should now have 10 shards
dataset.reshard(shard_size=10)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_legacy_shape_single_shard():
"""Test that get_shape works for legacy disk dataset."""
# This is the shape of legacy_data
num_datapoints = 100
num_features = 10
num_tasks = 10
current_dir = os.path.dirname(os.path.abspath(__file__))
# legacy_dataset is a dataset in the legacy format kept around for testing
# purposes.
data_dir = os.path.join(current_dir, "legacy_dataset")
dataset = dc.data.DiskDataset(data_dir)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == (num_datapoints, num_features)
assert y_shape == (num_datapoints, num_tasks)
assert w_shape == (num_datapoints, num_tasks)
assert ids_shape == (num_datapoints,)
def test_disk_dataset_get_legacy_shape_multishard():
"""Test that get_shape works for multisharded legacy disk dataset."""
# This is the shape of legacy_data_reshard
num_datapoints = 100
num_features = 10
num_tasks = 10
# legacy_dataset_reshard is a sharded dataset in the legacy format kept
# around for testing
current_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(current_dir, "legacy_dataset_reshard")
dataset = dc.data.DiskDataset(data_dir)
# Should now have 10 shards
assert dataset.get_number_shards() == 10
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == (num_datapoints, num_features)
assert y_shape == (num_datapoints, num_tasks)
assert w_shape == (num_datapoints, num_tasks)
assert ids_shape == (num_datapoints,)
def test_get_shard_size():
"""
Test that using ids for getting the shard size does not break the method.
The issue arises when attempting to load a dataset that does not have a labels
column. The create_dataset method of the DataLoader class sets the y to None
in this case, which causes the existing implementation of the get_shard_size()
  method to fail, as it relies on the dataset having a non-None y column. This
consequently breaks all methods depending on this, like the splitters for
example.
Note
----
DiskDatasets without labels cannot be resharded!
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(current_dir, "reaction_smiles.csv")
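  # The bundled reaction_smiles.csv is expected to contain four reaction rows, which is
  # what the get_shard_size() assertion below relies on.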
featurizer = dc.feat.DummyFeaturizer()
loader = dc.data.CSVLoader(
tasks=[], feature_field="reactions", featurizer=featurizer)
dataset = loader.create_dataset(file_path)
assert dataset.get_shard_size() == 4
|
[
"deepchem.data.DiskDataset.from_numpy",
"deepchem.data.CSVLoader",
"numpy.random.rand",
"os.path.join",
"numpy.array",
"numpy.random.randint",
"deepchem.feat.DummyFeaturizer",
"deepchem.data.NumpyDataset",
"os.path.abspath",
"deepchem.data.DiskDataset"
] |
[((227, 271), 'numpy.random.rand', 'np.random.rand', (['num_datapoints', 'num_features'], {}), '(num_datapoints, num_features)\n', (241, 271), True, 'import numpy as np\n'), ((278, 332), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (295, 332), True, 'import numpy as np\n'), ((339, 393), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (356, 393), True, 'import numpy as np\n'), ((402, 435), 'numpy.array', 'np.array', (["(['id'] * num_datapoints)"], {}), "(['id'] * num_datapoints)\n", (410, 435), True, 'import numpy as np\n'), ((449, 483), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (469, 483), True, 'import deepchem as dc\n'), ((848, 892), 'numpy.random.rand', 'np.random.rand', (['num_datapoints', 'num_features'], {}), '(num_datapoints, num_features)\n', (862, 892), True, 'import numpy as np\n'), ((899, 953), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (916, 953), True, 'import numpy as np\n'), ((960, 1014), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (977, 1014), True, 'import numpy as np\n'), ((1023, 1056), 'numpy.array', 'np.array', (["(['id'] * num_datapoints)"], {}), "(['id'] * num_datapoints)\n", (1031, 1056), True, 'import numpy as np\n'), ((1070, 1114), 'deepchem.data.DiskDataset.from_numpy', 'dc.data.DiskDataset.from_numpy', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (1100, 1114), True, 'import deepchem as dc\n'), ((1490, 1534), 'numpy.random.rand', 'np.random.rand', (['num_datapoints', 'num_features'], {}), '(num_datapoints, num_features)\n', (1504, 1534), True, 'import numpy as np\n'), ((1541, 1595), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (1558, 1595), True, 'import numpy as np\n'), ((1602, 1656), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (1619, 1656), True, 'import numpy as np\n'), ((1665, 1698), 'numpy.array', 'np.array', (["(['id'] * num_datapoints)"], {}), "(['id'] * num_datapoints)\n", (1673, 1698), True, 'import numpy as np\n'), ((1712, 1756), 'deepchem.data.DiskDataset.from_numpy', 'dc.data.DiskDataset.from_numpy', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (1742, 1756), True, 'import deepchem as dc\n'), ((2375, 2418), 'os.path.join', 'os.path.join', (['current_dir', '"""legacy_dataset"""'], {}), "(current_dir, 'legacy_dataset')\n", (2387, 2418), False, 'import os\n'), ((2431, 2460), 'deepchem.data.DiskDataset', 'dc.data.DiskDataset', (['data_dir'], {}), '(data_dir)\n', (2450, 2460), True, 'import deepchem as dc\n'), ((3112, 3163), 'os.path.join', 'os.path.join', (['current_dir', '"""legacy_dataset_reshard"""'], {}), "(current_dir, 'legacy_dataset_reshard')\n", (3124, 3163), False, 'import os\n'), ((3176, 3205), 'deepchem.data.DiskDataset', 'dc.data.DiskDataset', (['data_dir'], {}), '(data_dir)\n', (3195, 3205), True, 'import deepchem as dc\n'), ((4193, 4241), 'os.path.join', 'os.path.join', (['current_dir', '"""reaction_smiles.csv"""'], {}), "(current_dir, 'reaction_smiles.csv')\n", (4205, 4241), False, 'import os\n'), ((4258, 4283), 
'deepchem.feat.DummyFeaturizer', 'dc.feat.DummyFeaturizer', ([], {}), '()\n', (4281, 4283), True, 'import deepchem as dc\n'), ((4295, 4372), 'deepchem.data.CSVLoader', 'dc.data.CSVLoader', ([], {'tasks': '[]', 'feature_field': '"""reactions"""', 'featurizer': 'featurizer'}), "(tasks=[], feature_field='reactions', featurizer=featurizer)\n", (4312, 4372), True, 'import deepchem as dc\n'), ((2244, 2269), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2259, 2269), False, 'import os\n'), ((3072, 3097), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3087, 3097), False, 'import os\n'), ((4152, 4177), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4167, 4177), False, 'import os\n')]
|
import models
import os
import argparse  # needed for argparse.ArgumentTypeError raised in str_to_bool below
import copy
import torch
import torch.nn as nn
from lifelines import KaplanMeierFitter as KMFitter
import pycox
import numpy as np
# local
import catdist
import data_utils
import _concordance
import _nll
import _saver
def str_to_bool(arg):
"""Convert an argument string into its boolean value.
Args:
arg: String representing a bool.
Returns:
Boolean value for the string.
"""
if arg.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif arg.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def isnan(x):
return torch.any(torch.isnan(x))
def safe_log(x,eps):
return (x+eps).log()
def clip(prob,clip_min):
return prob.clamp(min=clip_min)
def round3(x):
return round(x,3)
class Meter:
def __init__(self):
self.N = 0
self.total = 0
def update(self,val,N):
self.total += val
self.N += N
def avg(self):
return round(self.total / self.N,4)
############################################
############ KM G IPCW F BS and BLL ########
############################################
def cdfvals_to_probs(cdfvals,args):
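    # Turn per-sample CDF values over the K time bins into a probability mass function:
    # bin 0 keeps its CDF value, middle bins take successive differences, and the last bin
    # absorbs the remaining mass so each row sums to 1.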
K=cdfvals.shape[1]
Gprobs = torch.zeros_like(cdfvals).to(args.device)
Gprobs[:,0] = cdfvals[:,0]
for k in range(1,K-1):
Gprobs[:,k] = cdfvals[:,k] - cdfvals[:,k-1]
Gprobs[:,K-1] = 1 - (Gprobs[:,:K-1]).sum(-1)
return Gprobs
def cdfvals_to_dist(cdfvals,bsz,args):
cdfvals = cdfvals.unsqueeze(0).repeat(bsz,1)
Gprobs = cdfvals_to_probs(cdfvals,args)
assert torch.all( (Gprobs.sum(-1) - 1.0).abs() < 1e-4)
Gdist = catdist.CatDist(logits=None, args=args, probs=Gprobs, k=None)
return Gdist
def get_KM_cdfvals(loader,args):
u=loader.dataset.U
delta=loader.dataset.Delta
durations = u.cpu().numpy()
is_censored = ~delta.cpu().numpy()
km = pycox.utils.kaplan_meier
surv_func = km(durations,is_censored).to_numpy()
cdf_func = 1. - surv_func
km_support = np.sort(np.unique(durations))
cdfvals = torch.zeros(args.K).to(args.device)
for i,val in enumerate(km_support):
cdfvals[val] = cdf_func[i]
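    # Forward-fill bins that the Kaplan-Meier support does not cover so the CDF stays
    # monotone non-decreasing across all K time points.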
for i,val in enumerate(cdfvals):
if i > 0:
if val==0.0:
cdfvals[i]=cdfvals[i-1]
return cdfvals
|
[
"numpy.unique",
"catdist.CatDist",
"torch.zeros_like",
"torch.isnan",
"torch.zeros"
] |
[((1717, 1778), 'catdist.CatDist', 'catdist.CatDist', ([], {'logits': 'None', 'args': 'args', 'probs': 'Gprobs', 'k': 'None'}), '(logits=None, args=args, probs=Gprobs, k=None)\n', (1732, 1778), False, 'import catdist\n'), ((699, 713), 'torch.isnan', 'torch.isnan', (['x'], {}), '(x)\n', (710, 713), False, 'import torch\n'), ((2097, 2117), 'numpy.unique', 'np.unique', (['durations'], {}), '(durations)\n', (2106, 2117), True, 'import numpy as np\n'), ((1293, 1318), 'torch.zeros_like', 'torch.zeros_like', (['cdfvals'], {}), '(cdfvals)\n', (1309, 1318), False, 'import torch\n'), ((2137, 2156), 'torch.zeros', 'torch.zeros', (['args.K'], {}), '(args.K)\n', (2148, 2156), False, 'import torch\n')]
|
#!/usr/bin/env python
# coding: utf-8
__author__ = '<NAME>'
__copyright__ = 'Copyright 2017-2020, <NAME>'
__license__ = 'MIT'
__version__ = '0.5'
__email__ = '<EMAIL>'
__status__ = 'Development'
__description__ = 'Tkinter based GUI, visualizing PASCAL VOC object detection annotation'
"""
Changelog:
- 2020-06-16 11:39 v0.5
Support specifying ignore and not ignore class names. Better logger. Fix MacOS font.
- 2020-06-13 00:48 v0.4
API change: add class name mapping dict, mapping xml class name to shown class name.
    Based on this, ImageNet2012 and self-defined VOC-format-style dataset labels can be shown.
    Supported image extensions: bmp, jpg, jpeg, png and their upper-case variants.
- 2020-06-09 23:14 v0.3
User select saving directory(optional) for picking up interested images.
By pressing left control button, selected image is saved.
- 2020-06-02 16:40 v0.2
User choose image and annotation folders separately. Better UI layout.
Colorful boxes and class name text.
- 2020-06-01 14:44 v0.1
Draw object class name. Add license. Polish meta info. Adjust UI.
- 2017.10.22 22:36 v0.0
Created project. Dependencies: Python, Tkinter(GUI), opencv(image processing),
lxml(annotation parsing).
You may need this: pip install --upgrade image pillow lxml numpy
"""
from PIL import Image, ImageTk, ImageFont, ImageDraw # pillow module
import os
import cv2
from lxml import etree
import numpy as np
import random
import colorsys
import shutil
import platform
import matplotlib.font_manager as fm # to create font
import six
import logging
from natsort import natsorted
import time
if six.PY3:
import tkinter as tk
from tkinter.filedialog import askdirectory
else:
import Tkinter as tk
from tkFileDialog import askdirectory
def draw_text(im, text, text_org, color=(0,0,255,0), font=None):
"""
Draw text on OpenCV's Image (ndarray)
Implemented by: ndarray -> pil's image -> draw text -> ndarray
    Note: OpenCV putText's drawbacks: font too large, no anti-aliasing, and it can't show Chinese characters
@param im: opencv loaded image
@param text: text(string) to be put. support Chinese
@param font: font, e.g. ImageFont.truetype('C:/Windows/Fonts/msyh.ttc', font_size)
Example Usage:
font_size = 20
font = ImageFont.truetype('C:/Windows/Fonts/msyh.ttc', font_size)
text_org = (256, 256)
im = draw_text(im, "object", text_org, font)
"""
im_pil = Image.fromarray(im)
draw = ImageDraw.Draw(im_pil)
draw.text(text_org, text, font=font, fill=color)
return np.array(im_pil)
class BndBox(object):
def __init__(self, x1=0, y1=0, x2=0, y2=0, cls_name=None):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.cls_name = cls_name # class name
class PascalVOC2007XML:
def __init__(self, xml_pth):
# TODO: validate xml_pth's content
self.tree = etree.parse(xml_pth)
self.boxes = []
def get_boxes(self):
if len(self.boxes) == 0:
for obj in self.tree.xpath('//object'):
box = BndBox()
for item in obj.getchildren():
if (item.tag=='name'):
box.cls_name = item.text
elif (item.tag=='bndbox'):
coords = [int(float(_.text)) for _ in item.getchildren()]
box.x1, box.y1, box.x2, box.y2 = coords
self.boxes.append(box)
return self.boxes
def get_color_table(num_cls=20):
hsv_tuples = [(x*1.0 / num_cls, 1., 1.) for x in range(num_cls)]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
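    # Shuffle with a fixed seed so the class-index-to-colour mapping is deterministic
    # across runs, then re-seed the RNG so later random usage is unaffected.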
random.seed(42)
random.shuffle(colors)
random.seed(None)
return colors
class VOCViewer(tk.Tk):
def __init__(self, im_dir=None, anno_dir=None, save_dir=None, max_width=None, max_height=None, box_thick=1,
name_mapping=None, ignore_names=None, not_ignore_names=None):
"""
@param im_dir: the directory which contains images, e.g. "JPEGImages"
@param max_width: max image width when image is displayed
@param max_height: max image height when image is displayed
@param box_thick: thickness of bounding box
@param name_mapping: dict of: class name in XML => class name to be viewed
@param ignore_names: list of class names that will be ignored on viewer
@param not_ignore_names: list of all class names to be viewed
        @note `ignore_names` and `not_ignore_names` shouldn't be set at the same time
        @note loading images: tk can't load common image formats directly; the Pillow module is required as an intermediate step.
"""
#super().__init__() # not working for Python2
tk.Tk.__init__(self)
self.init_logger()
self.init_layout(im_dir, anno_dir, save_dir, max_width, max_height, box_thick)
self.init_dataset(name_mapping, ignore_names, not_ignore_names)
def init_logger(self):
logger = logging.getLogger()
logger.setLevel(logging.WARN)
formatter = logging.Formatter(
'%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
time_line = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
logfile = os.getcwd() + '/view-' + time_line + '.log'
# print to file via FileHandler
fh = logging.FileHandler(logfile)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
# print to screen via StreamHandler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
# add two Handler
logger.addHandler(ch)
logger.addHandler(fh)
self.logger = logger
def should_ignore(self, cls_name):
if self.ignore_names is not None:
if cls_name in self.ignore_names:
return True
else:
return False
if self.not_ignore_names is not None:
if cls_name in self.not_ignore_names:
return False
return True
return False
def init_dataset(self, name_mapping, ignore_names, not_ignore_names):
if (ignore_names is not None and not_ignore_names is not None):
            self.logger.fatal("ignore_names and not_ignore_names can't be set at the same time")
self.name_mapping = dict()
if name_mapping is not None:
self.name_mapping = name_mapping
self.ignore_names = None
if ignore_names is not None:
self.ignore_names = ignore_names
self.not_ignore_names = None
if not_ignore_names is not None:
self.not_ignore_names = not_ignore_names
self.color_table = get_color_table()
self.class_to_ind = dict()
for cls_name in self.name_mapping.keys():
next_ind = len(self.class_to_ind)
self.class_to_ind[cls_name] = next_ind
self.supported_im_ext = ['bmp', 'BMP', 'png', 'PNG',
'jpg', 'JPG', 'jpeg', 'JPEG', 'jpe', 'jif', 'jfif', 'jfi']
def get_color_by_cls_name(self, cls_name):
ind = self.class_to_ind[cls_name]
return self.color_table[ind]
def init_layout(self, im_dir, anno_dir, save_dir, max_width, max_height, box_thick):
# custom settings
self.max_width = max_width
self.max_height = max_height
self.box_thick = box_thick
self.bg = '#34373c'
self.fg = '#f2f2f2'
        # macOS's tk is weird and I don't want to depend on tkmacosx
if platform.system()=='Darwin':
self.bg, self.fg = self.fg, self.bg
# set title, window size and background
self.title('ImageSet Viewer ' + __version__)
self.width = (int)(0.6 * self.winfo_screenwidth())
self.height = (int)(0.6 * self.winfo_screenheight())
self.geometry('%dx%d+200+100' % (self.width, self.height))
self.configure(bg=self.bg)
self.minsize(800, 600)
# Setting top level widget's row & column weight,
# children widgets won't stretch-and-fill-in until setting this weight
# ref: https://blog.csdn.net/acaic/article/details/80963688
self.rowconfigure(0,weight=1)
self.columnconfigure(0,weight=1)
# Top Level Layout: main_frame & side_frame
main_frame_width = (int)(0.8*self.width)
main_frame = tk.LabelFrame(self, bg=self.bg, width=main_frame_width)
main_frame.grid(row=0, column=0, padx=10, pady=10, sticky=tk.NSEW)
side_frame = tk.LabelFrame(self, bg=self.bg)
side_frame.grid(row=0, column=1, padx=10, pady=10, sticky=tk.NSEW)
# main_frame: directory_frame & image_frame
main_frame.rowconfigure(0, weight=20)
main_frame.rowconfigure(1, weight=80)
main_frame.columnconfigure(0, weight=1)
directory_frame = tk.LabelFrame(main_frame, bg=self.bg)
directory_frame.grid(row=0, column=0, sticky=tk.NSEW)
image_frame_height = (int)(0.7*self.height)
image_frame = tk.LabelFrame(main_frame, height=image_frame_height, bg=self.bg)
image_frame.grid(row=1, column=0, sticky=tk.NSEW)
        # keep widget size fixed instead of changing when switching to another image
# ref: https://zhidao.baidu.com/question/1643979034294549180.html
image_frame.grid_propagate(0)
# image_frame
image_frame.rowconfigure(0, weight=1)
image_frame.columnconfigure(0, weight=1)
self.surface = self.get_surface_image() # Surface image
# self.surface = self.cv_to_tk(cv2.imread('surface.jpg')) # Use image file
self.image_label = tk.Label(image_frame, image=self.surface,
bg=self.bg, fg=self.fg,compound='center')
self.image_label.grid(row=0, column=0, sticky=tk.NSEW)
#self.image_label.bind('<Configure>', self.changeSize) #TODO
# side_frame
side_frame.rowconfigure(0, weight=5)
side_frame.rowconfigure(1, weight=95)
image_names_label = tk.Label(side_frame, text="Image Files", bg=self.bg, fg=self.fg)
image_names_label.grid(row=0, column=0)
self.scrollbar = tk.Scrollbar(side_frame, orient=tk.VERTICAL)
self.listbox = tk.Listbox(side_frame, yscrollcommand=self.scrollbar.set)
self.listbox.grid(row=1, column=0, sticky=tk.NS)
# directory_frame
directory_frame.rowconfigure(0, weight=5)
directory_frame.rowconfigure(1, weight=5)
directory_frame.rowconfigure(2, weight=5)
directory_frame.columnconfigure(0, weight=1)
directory_frame.columnconfigure(1, weight=9)
# im_dir button
choose_im_dir_btn = tk.Button(directory_frame, text='Image Directory',
command=self.select_image_directory, bg=self.bg, fg=self.fg)
choose_im_dir_btn.grid(row=0, column=0, sticky=tk.NSEW)
self.im_dir = tk.StringVar()
im_dir_entry = tk.Entry(directory_frame, text=self.im_dir, state='readonly')
im_dir_entry.grid(row=0, column=1, sticky=tk.NSEW)
self.im_names = []
if im_dir is not None:
self.im_dir.set(im_dir)
self.im_names = [_ for _ in os.listdir(self.im_dir.get())]
self.im_names = natsorted(self.im_names)
for im_name in self.im_names:
self.listbox.insert(tk.END, im_name)
self.listbox.bind('<<ListboxSelect>>', self.callback)
# more key binds see https://www.cnblogs.com/muziyunxuan/p/8297536.html
self.listbox.bind('<Control_L>', self.save_image)
self.scrollbar.config(command=self.listbox.yview)
self.scrollbar.grid(row=1, column=1, sticky=tk.NS)
# anno_dir button
choose_anno_dir_bn = tk.Button(directory_frame, text='Annotation Directory',
command=self.select_annotation_directory, bg=self.bg, fg=self.fg)
choose_anno_dir_bn.grid(row=1, column=0, sticky=tk.NSEW)
self.anno_dir = tk.StringVar()
anno_dir_entry = tk.Entry(directory_frame, text=self.anno_dir, state='readonly')
anno_dir_entry.grid(row=1, column=1, sticky=tk.NSEW)
if anno_dir is not None:
self.anno_dir.set(anno_dir)
# copy (save) dir button
choose_save_dir_btn = tk.Button(directory_frame, text='Copy Save Directory',
command=self.select_save_directory, bg=self.bg, fg=self.fg)
choose_save_dir_btn.grid(row=2, column=0, sticky=tk.NSEW)
self.save_dir = tk.StringVar()
save_dir_entry = tk.Entry(directory_frame, text=self.save_dir, state='readonly')
save_dir_entry.grid(row=2, column=1, sticky=tk.NSEW)
if save_dir is not None:
self.save_dir.set(save_dir)
def callback(self, event=None):
im_id = self.listbox.curselection()
if im_id:
im_id = im_id[0]
self.logger.info('im_id is {:d}'.format(im_id))
im_name = self.listbox.get(im_id)
im_ext = im_name.split('.')[-1]
if im_ext in self.supported_im_ext:
im_pth = os.path.join(self.im_dir.get(), im_name).replace('\\', '/')
self.tkim = self.get_tkim(im_pth)
self.image_label.configure(image=self.tkim)
#self.logger.debug(im_pth)
def save_image(self, event):
"""Save (copy) current displayed (original, no box) image to specified saving directory.
This is binding to left-control key now. Useful for manually picking up images.
"""
im_id = self.listbox.curselection()
if im_id:
im_name = self.listbox.get(im_id)
im_ext = im_name.split('.')[-1]
if im_ext in self.supported_im_ext:
im_pth = os.path.join(self.im_dir.get(), im_name).replace('\\', '/')
save_pth = os.path.join(self.save_dir.get(), im_name).replace('\\', '/')
shutil.copyfile(im_pth, save_pth)
self.logger.info('Save(copy) to {:s}'.format(save_pth))
#self.logger.debug(im_pth)
def get_tkim(self, im_pth):
"""
        Load the image and its annotation, draw the boxes onto it, and convert the result to a Tk-compatible image.
        The image is resized when necessary.
"""
im = cv2.imread(im_pth)
self.logger.info('Image file is: {:s}'.format(im_pth))
im_ht, im_wt, im_dt = im.shape
if self.max_width is None or self.max_width >= im_wt:
show_width = im_wt
else:
show_width = self.max_width
if self.max_height is None or self.max_height >= im_ht:
show_height = im_ht
else:
show_height = self.max_height
scale_width = im_wt * 1.0 / show_width
scale_height = im_ht * 1.0 / show_height
if show_width!=im_wt or show_height!=im_ht:
im = cv2.resize(im, (show_width, show_height))
self.logger.info('doing resize, show_width={:d}, im_wt={:d}, show_height={:d}, im_ht={:d}'.format(show_width, im_wt, show_height, im_ht))
# xml_pth = im_pth.replace('JPEGImages', 'Annotations').replace('.jpg', '.xml').replace('.png', '.xml')
        # We don't assume a standard PASCAL VOC dataset directory layout.
        # The user should choose the image and annotation folders separately.
im_head = '.'.join(im_pth.split('/')[-1].split('.')[:-1])
xml_pth = self.anno_dir.get() + '/' + im_head + '.xml'
if os.path.exists(xml_pth):
self.logger.info('XML annotation file is {:s}'.format(xml_pth))
boxes = self.parse_xml(xml_pth)
for box in boxes:
if self.should_ignore(box.cls_name): continue
if box.cls_name not in self.name_mapping.keys():
self.name_mapping[box.cls_name] = box.cls_name
next_ind = len(self.class_to_ind)
self.class_to_ind[box.cls_name] = next_ind
xmin = int(box.x1/scale_width)
ymin = int(box.y1/scale_height)
xmax = int(box.x2/scale_width)
ymax = int(box.y2/scale_height)
color = self.get_color_by_cls_name(box.cls_name)
cv2.rectangle(im, pt1=(xmin, ymin), pt2=(xmax, ymax),
color = color, thickness=self.box_thick)
font_size = 16
font = self.get_font(font_size)
tx = xmin
ty = ymin-20
if(ty<0):
ty = ymin+10
tx = xmin+10
text_org = (tx, ty)
show_text = self.name_mapping[box.cls_name]
self.logger.debug('box.cls_name is:' + box.cls_name)
self.logger.debug('show_text:' + show_text)
im = draw_text(im, show_text, text_org, color, font)
else:
self.logger.warning("XML annotation file {:s} doesn't exist".format(xml_pth))
return self.cv_to_tk(im)
@staticmethod
def cv_to_tk(im):
"""Convert OpenCV's (numpy) image to Tkinter-compatible photo image"""
im = im[:, :, ::-1] # bgr => rgb
return ImageTk.PhotoImage(Image.fromarray(im))
@staticmethod
def get_font(font_size):
font_pth = None
if platform.system()=='Windows':
font_pth = 'C:/Windows/Fonts/msyh.ttc'
elif (platform.system()=='Linux'):
font_pth = fm.findfont(fm.FontProperties(family='DejaVu Mono'))
else:
font_pth = '/Library/Fonts//Songti.ttc'
return ImageFont.truetype(font_pth, font_size)
def get_surface_image(self):
"""Return surface image, which is ImageTK type"""
im = np.ndarray((256, 256, 3), dtype=np.uint8)
for y in range(256):
for x in range(256):
im[y, x, :] = (60, 55, 52) # #34373c(RGB)'s BGR split
im = cv2.resize(im, ((int)(self.width*0.6), (int)(self.height*0.6)))
font_size = 30
font = self.get_font(font_size)
text_org = (self.width*0.16, self.height*0.26)
text = 'ImageSet Viewer'
im = draw_text(im, text, text_org, color=(255, 255, 255, 255), font=font)
return self.cv_to_tk(im)
def parse_xml(self, xml_pth):
anno = PascalVOC2007XML(xml_pth)
return anno.get_boxes()
def select_image_directory(self):
im_dir = askdirectory()
self.listbox.delete(0, len(self.im_names)-1) # delete all elements
self.fill_im_names(im_dir)
def select_annotation_directory(self):
anno_dir = askdirectory()
self.anno_dir.set(anno_dir) # TODO: validate anno_dir
def select_save_directory(self):
save_dir = askdirectory()
self.save_dir.set(save_dir) # the directory to save(copy) select images
def fill_im_names(self, im_dir):
if im_dir is not None:
self.im_dir.set(im_dir)
# Get natural order of image file names
self.im_names = [_ for _ in os.listdir(im_dir)]
self.im_names = natsorted(self.im_names)
for im_name in self.im_names:
self.listbox.insert(tk.END, im_name)
def example1():
"""The simplest example: don't specify any parameters.
    Choose the image dir and xml dir in the GUI.
"""
app = VOCViewer()
app.mainloop()
def example2():
"""Specify directories & drawing related settings
"""
app = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages', # image directory
anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory
save_dir = '/Users/chris/data/VOC2007/save', # Picking images saving directory
max_width = 1000, # max allowed shown image width is 1000
max_height = 800, # max allowed shown image height is 800
box_thick = 2, # bounding box thickness
)
app.mainloop()
def example3():
"""Specify name mapping
"""
# category mapping dict: key for class name in XML,
# value for shown class name in displayed image
# note: you can make key=val if it is understandable
voc_mapping = {
'__background__': '背景',
'aeroplane': '飞机',
'bicycle': '自行车',
'bird': '鸟',
'boat': '船',
'bottle': '瓶子',
'bus': '公交车',
'car': '汽车',
'cat': '猫',
'chair': '椅子',
'cow': '牛',
'diningtable': '餐桌',
'dog': '狗',
'horse': '马',
'motorbike': '摩托车',
'person': '人',
'pottedplant': '盆栽',
'sheep': '绵羊',
'sofa': '沙发',
'train': '火车',
'tvmonitor': '显示器'
}
app = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages', # image directory
anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory
save_dir = '/Users/chris/data/VOC2007/save', # Picking images saving directory
max_width = 1000, # max allowed shown image width is 1000
max_height = 800, # max allowed shown image height is 800
box_thick = 2, # bounding box thickness
name_mapping = voc_mapping #!!
)
app.mainloop()
def example4():
"""Specify ignore_names / not_ignore_names
    You can specify either ignore_names or not_ignore_names, but not both at the same time.
"""
app = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages', # image directory
anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory
save_dir = '/Users/chris/data/VOC2007/save', # Picking images saving directory
max_width = 1000, # max allowed shown image width is 1000
max_height = 800, # max allowed shown image height is 800
box_thick = 2, # bounding box thickness
not_ignore_names = ['person']
)
app.mainloop()
def example5():
"""
    Take ImageNet 2012 as an example. You can imitate this to
    view your own PASCAL-VOC-style labeled image set.
"""
fin = open('imagenet_cls_cn.txt', encoding='UTF-8')
lines = [_.strip() for _ in fin.readlines()]
fin.close()
ilsvrc2012_cls_dict = dict()
for item in lines:
item = item.split(' ')
digit_cls_name = item[0]
literal_cls_name = ' '.join(item[1:])
ilsvrc2012_cls_dict[digit_cls_name] = literal_cls_name
app = VOCViewer(im_dir = 'D:/data/ILSVRC2012/ILSVRC2012_img_train/n01440764', # image directory
anno_dir = 'D:/data/ILSVRC2012/ILSVRC2012_bbox_train_v2/n01440764', # XML directory
                    save_dir = None, # saving directory not specified
max_width = 1000, # max allowed shown image width is 1000
max_height = 800, # max allowed shown image height is 800
box_thick = 2, # bounding box thickness
name_mapping = ilsvrc2012_cls_dict
)
app.mainloop()
if __name__ == '__main__':
example1()
#example2()
#example3()
#example4()
#example5()
|
[
"logging.getLogger",
"Tkinter.Entry",
"cv2.rectangle",
"logging.StreamHandler",
"colorsys.hsv_to_rgb",
"numpy.array",
"PIL.ImageDraw.Draw",
"Tkinter.Scrollbar",
"Tkinter.LabelFrame",
"Tkinter.Label",
"os.path.exists",
"os.listdir",
"Tkinter.Tk.__init__",
"Tkinter.Listbox",
"Tkinter.StringVar",
"PIL.ImageFont.truetype",
"platform.system",
"logging.FileHandler",
"tkFileDialog.askdirectory",
"random.shuffle",
"Tkinter.Button",
"shutil.copyfile",
"cv2.resize",
"cv2.imread",
"time.time",
"PIL.Image.fromarray",
"logging.Formatter",
"matplotlib.font_manager.FontProperties",
"lxml.etree.parse",
"random.seed",
"os.getcwd",
"numpy.ndarray",
"natsort.natsorted"
] |
[((2452, 2471), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (2467, 2471), False, 'from PIL import Image, ImageTk, ImageFont, ImageDraw\n'), ((2483, 2505), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im_pil'], {}), '(im_pil)\n', (2497, 2505), False, 'from PIL import Image, ImageTk, ImageFont, ImageDraw\n'), ((2570, 2586), 'numpy.array', 'np.array', (['im_pil'], {}), '(im_pil)\n', (2578, 2586), True, 'import numpy as np\n'), ((3782, 3797), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (3793, 3797), False, 'import random\n'), ((3802, 3824), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (3816, 3824), False, 'import random\n'), ((3829, 3846), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (3840, 3846), False, 'import random\n'), ((2926, 2946), 'lxml.etree.parse', 'etree.parse', (['xml_pth'], {}), '(xml_pth)\n', (2937, 2946), False, 'from lxml import etree\n'), ((4870, 4890), 'Tkinter.Tk.__init__', 'tk.Tk.__init__', (['self'], {}), '(self)\n', (4884, 4890), True, 'import Tkinter as tk\n'), ((5124, 5143), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5141, 5143), False, 'import logging\n'), ((5202, 5333), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"""'], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(\n '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n", (5219, 5333), False, 'import logging\n'), ((5544, 5572), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {}), '(logfile)\n', (5563, 5572), False, 'import logging\n'), ((5701, 5724), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (5722, 5724), False, 'import logging\n'), ((8579, 8634), 'Tkinter.LabelFrame', 'tk.LabelFrame', (['self'], {'bg': 'self.bg', 'width': 'main_frame_width'}), '(self, bg=self.bg, width=main_frame_width)\n', (8592, 8634), True, 'import Tkinter as tk\n'), ((8732, 8763), 'Tkinter.LabelFrame', 'tk.LabelFrame', (['self'], {'bg': 'self.bg'}), '(self, bg=self.bg)\n', (8745, 8763), True, 'import Tkinter as tk\n'), ((9059, 9096), 'Tkinter.LabelFrame', 'tk.LabelFrame', (['main_frame'], {'bg': 'self.bg'}), '(main_frame, bg=self.bg)\n', (9072, 9096), True, 'import Tkinter as tk\n'), ((9234, 9298), 'Tkinter.LabelFrame', 'tk.LabelFrame', (['main_frame'], {'height': 'image_frame_height', 'bg': 'self.bg'}), '(main_frame, height=image_frame_height, bg=self.bg)\n', (9247, 9298), True, 'import Tkinter as tk\n'), ((9845, 9934), 'Tkinter.Label', 'tk.Label', (['image_frame'], {'image': 'self.surface', 'bg': 'self.bg', 'fg': 'self.fg', 'compound': '"""center"""'}), "(image_frame, image=self.surface, bg=self.bg, fg=self.fg, compound=\n 'center')\n", (9853, 9934), True, 'import Tkinter as tk\n'), ((10229, 10293), 'Tkinter.Label', 'tk.Label', (['side_frame'], {'text': '"""Image Files"""', 'bg': 'self.bg', 'fg': 'self.fg'}), "(side_frame, text='Image Files', bg=self.bg, fg=self.fg)\n", (10237, 10293), True, 'import Tkinter as tk\n'), ((10368, 10412), 'Tkinter.Scrollbar', 'tk.Scrollbar', (['side_frame'], {'orient': 'tk.VERTICAL'}), '(side_frame, orient=tk.VERTICAL)\n', (10380, 10412), True, 'import Tkinter as tk\n'), ((10437, 10494), 'Tkinter.Listbox', 'tk.Listbox', (['side_frame'], {'yscrollcommand': 'self.scrollbar.set'}), '(side_frame, yscrollcommand=self.scrollbar.set)\n', (10447, 10494), True, 'import Tkinter as tk\n'), ((10888, 11004), 'Tkinter.Button', 'tk.Button', (['directory_frame'], {'text': '"""Image 
Directory"""', 'command': 'self.select_image_directory', 'bg': 'self.bg', 'fg': 'self.fg'}), "(directory_frame, text='Image Directory', command=self.\n select_image_directory, bg=self.bg, fg=self.fg)\n", (10897, 11004), True, 'import Tkinter as tk\n'), ((11099, 11113), 'Tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (11111, 11113), True, 'import Tkinter as tk\n'), ((11137, 11198), 'Tkinter.Entry', 'tk.Entry', (['directory_frame'], {'text': 'self.im_dir', 'state': '"""readonly"""'}), "(directory_frame, text=self.im_dir, state='readonly')\n", (11145, 11198), True, 'import Tkinter as tk\n'), ((11945, 12071), 'Tkinter.Button', 'tk.Button', (['directory_frame'], {'text': '"""Annotation Directory"""', 'command': 'self.select_annotation_directory', 'bg': 'self.bg', 'fg': 'self.fg'}), "(directory_frame, text='Annotation Directory', command=self.\n select_annotation_directory, bg=self.bg, fg=self.fg)\n", (11954, 12071), True, 'import Tkinter as tk\n'), ((12169, 12183), 'Tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (12181, 12183), True, 'import Tkinter as tk\n'), ((12209, 12272), 'Tkinter.Entry', 'tk.Entry', (['directory_frame'], {'text': 'self.anno_dir', 'state': '"""readonly"""'}), "(directory_frame, text=self.anno_dir, state='readonly')\n", (12217, 12272), True, 'import Tkinter as tk\n'), ((12472, 12591), 'Tkinter.Button', 'tk.Button', (['directory_frame'], {'text': '"""Copy Save Directory"""', 'command': 'self.select_save_directory', 'bg': 'self.bg', 'fg': 'self.fg'}), "(directory_frame, text='Copy Save Directory', command=self.\n select_save_directory, bg=self.bg, fg=self.fg)\n", (12481, 12591), True, 'import Tkinter as tk\n'), ((12690, 12704), 'Tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (12702, 12704), True, 'import Tkinter as tk\n'), ((12730, 12793), 'Tkinter.Entry', 'tk.Entry', (['directory_frame'], {'text': 'self.save_dir', 'state': '"""readonly"""'}), "(directory_frame, text=self.save_dir, state='readonly')\n", (12738, 12793), True, 'import Tkinter as tk\n'), ((14457, 14475), 'cv2.imread', 'cv2.imread', (['im_pth'], {}), '(im_pth)\n', (14467, 14475), False, 'import cv2\n'), ((15626, 15649), 'os.path.exists', 'os.path.exists', (['xml_pth'], {}), '(xml_pth)\n', (15640, 15649), False, 'import os\n'), ((17742, 17781), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_pth', 'font_size'], {}), '(font_pth, font_size)\n', (17760, 17781), False, 'from PIL import Image, ImageTk, ImageFont, ImageDraw\n'), ((17887, 17928), 'numpy.ndarray', 'np.ndarray', (['(256, 256, 3)'], {'dtype': 'np.uint8'}), '((256, 256, 3), dtype=np.uint8)\n', (17897, 17928), True, 'import numpy as np\n'), ((18571, 18585), 'tkFileDialog.askdirectory', 'askdirectory', ([], {}), '()\n', (18583, 18585), False, 'from tkFileDialog import askdirectory\n'), ((18759, 18773), 'tkFileDialog.askdirectory', 'askdirectory', ([], {}), '()\n', (18771, 18773), False, 'from tkFileDialog import askdirectory\n'), ((18893, 18907), 'tkFileDialog.askdirectory', 'askdirectory', ([], {}), '()\n', (18905, 18907), False, 'from tkFileDialog import askdirectory\n'), ((7739, 7756), 'platform.system', 'platform.system', ([], {}), '()\n', (7754, 7756), False, 'import platform\n'), ((11452, 11476), 'natsort.natsorted', 'natsorted', (['self.im_names'], {}), '(self.im_names)\n', (11461, 11476), False, 'from natsort import natsorted\n'), ((15045, 15086), 'cv2.resize', 'cv2.resize', (['im', '(show_width, show_height)'], {}), '(im, (show_width, show_height))\n', (15055, 15086), False, 'import cv2\n'), ((17357, 17376), 
'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (17372, 17376), False, 'from PIL import Image, ImageTk, ImageFont, ImageDraw\n'), ((17461, 17478), 'platform.system', 'platform.system', ([], {}), '()\n', (17476, 17478), False, 'import platform\n'), ((19233, 19257), 'natsort.natsorted', 'natsorted', (['self.im_names'], {}), '(self.im_names)\n', (19242, 19257), False, 'from natsort import natsorted\n'), ((3646, 3669), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (3665, 3669), False, 'import colorsys\n'), ((5414, 5425), 'time.time', 'time.time', ([], {}), '()\n', (5423, 5425), False, 'import time\n'), ((14114, 14147), 'shutil.copyfile', 'shutil.copyfile', (['im_pth', 'save_pth'], {}), '(im_pth, save_pth)\n', (14129, 14147), False, 'import shutil\n'), ((16383, 16479), 'cv2.rectangle', 'cv2.rectangle', (['im'], {'pt1': '(xmin, ymin)', 'pt2': '(xmax, ymax)', 'color': 'color', 'thickness': 'self.box_thick'}), '(im, pt1=(xmin, ymin), pt2=(xmax, ymax), color=color,\n thickness=self.box_thick)\n', (16396, 16479), False, 'import cv2\n'), ((17556, 17573), 'platform.system', 'platform.system', ([], {}), '()\n', (17571, 17573), False, 'import platform\n'), ((5446, 5457), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5455, 5457), False, 'import os\n'), ((17620, 17659), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'family': '"""DejaVu Mono"""'}), "(family='DejaVu Mono')\n", (17637, 17659), True, 'import matplotlib.font_manager as fm\n'), ((19185, 19203), 'os.listdir', 'os.listdir', (['im_dir'], {}), '(im_dir)\n', (19195, 19203), False, 'import os\n')]
|
__author__ = 'multiangle'
# This file implements the Huffman tree used by the hierarchical-softmax optimization of word2vec.
'''
Why hierarchical softmax? With a large vocabulary, computing a softmax over every word is expensive
and updates far too many parameters, which makes training impractical. Organizing the output layer as
a tree means only a few sigmoid evaluations (and very few parameter updates) are needed per word,
which greatly speeds up training.
What is a Huffman tree? Count word frequencies over the training text and build a binary tree that
minimizes the weighted path length, so frequent words sit near the root and rare words sit deeper in
the tree. Each Huffman code encodes one word's path through the tree; the code is unique and is never
a prefix of another word's code.
'''
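# A tiny worked example (illustrative only, not part of the original module): given the word
# frequencies {'a': 0.5, 'b': 0.3, 'c': 0.2}, build_tree first merges the two rarest words
# ('c' and 'b') into an inner node with probability 0.5, then merges that inner node with 'a'.
# generate_huffman_code then walks the tree, appending "1" for a left child and "0" for a right
# child, which here yields codes like a -> "0", b -> "11", c -> "10" (the exact codes depend on
# how ties are broken). Frequent words get shorter codes, and no code is a prefix of another.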
import numpy as np
class HuffmanTreeNode():
def __init__(self,value,possibility):
# common part of leaf node and tree node
        # word frequency / probability (occurrence count in the training text)
self.possibility = possibility
        # left and right child nodes
self.left = None
self.right = None
# value of leaf node will be the word, and be
# mid vector in tree node
        # leaf nodes hold the learned word vectors; non-leaf nodes hold intermediate variables (the w*x / theta parameters)
self.value = value # the value of word
        # stores the Huffman code
self.Huffman = "" # store the huffman code
def __str__(self):
return 'HuffmanTreeNode object, value: {v}, possibility: {p}, Huffman: {h}'\
.format(v=self.value,p=self.possibility,h=self.Huffman)
class HuffmanTree():
def __init__(self, word_dict, vec_len=15000):
self.vec_len = vec_len # the length of word vector
self.root = None
        # all vocabulary entries
word_dict_list = list(word_dict.values())
        # create a tree node for every word
node_list = [HuffmanTreeNode(x['word'],x['possibility']) for x in word_dict_list]
        # build the Huffman tree
self.build_tree(node_list)
# self.build_CBT(node_list)
        # generate the Huffman code of every word in the tree
self.generate_huffman_code(self.root, word_dict)
def build_tree(self,node_list):
# node_list.sort(key=lambda x:x.possibility,reverse=True)
# for i in range(node_list.__len__()-1)[::-1]:
# top_node = self.merge(node_list[i],node_list[i+1])
# node_list.insert(i,top_node)
# self.root = node_list[0]
while node_list.__len__()>1:
            i1 = 0 # i1: index of the node with the smallest probability
            i2 = 1 # i2: index of the node with the second smallest probability
if node_list[i2].possibility < node_list[i1].possibility :
[i1,i2] = [i2,i1]
            for i in range(2,node_list.__len__()): # find the two nodes with the smallest probabilities
if node_list[i].possibility<node_list[i2].possibility :
i2 = i
if node_list[i2].possibility < node_list[i1].possibility :
[i1,i2] = [i2,i1]
            # merge the two smallest nodes into a new non-leaf node (an intermediate variable that stores theta)
top_node = self.merge(node_list[i1],node_list[i2])
            # remove the two merged nodes and insert the new non-leaf node, then keep looping to finish the tree
if i1<i2:
node_list.pop(i2)
node_list.pop(i1)
elif i1>i2:
node_list.pop(i1)
node_list.pop(i2)
else:
raise RuntimeError('i1 should not be equal to i2')
node_list.insert(0,top_node)
self.root = node_list[0]
def build_CBT(self,node_list): # build a complete binary tree
node_list.sort(key=lambda x:x.possibility,reverse=True)
node_num = node_list.__len__()
before_start = 0
while node_num>1 :
for i in range(node_num>>1):
top_node = self.merge(node_list[before_start+i*2],node_list[before_start+i*2+1])
node_list.append(top_node)
if node_num%2==1:
top_node = self.merge(node_list[before_start+i*2+2],node_list[-1])
node_list[-1] = top_node
before_start = before_start + node_num
node_num = node_num>>1
self.root = node_list[-1]
def generate_huffman_code(self, node, word_dict):
# # use recursion in this edition
# if node.left==None and node.right==None :
# word = node.value
# code = node.Huffman
# print(word,code)
# word_dict[word]['Huffman'] = code
# return -1
#
# code = node.Huffman
# if code==None:
# code = ""
# node.left.Huffman = code + "1"
# node.right.Huffman = code + "0"
# self.generate_huffman_code(node.left, word_dict)
# self.generate_huffman_code(node.right, word_dict)
        # use a stack instead of recursion in this version
        # the left child gets code "1" and the right child gets code "0"; walk down the left branch first,
        # pushing right children onto the stack and extending the code string along the way
stack = [self.root]
while (stack.__len__()>0):
node = stack.pop()
# go along left tree
while node.left or node.right :
code = node.Huffman
node.left.Huffman = code + "1"
node.right.Huffman = code + "0"
stack.append(node.right)
node = node.left
word = node.value
code = node.Huffman
# print(word,'\t',code.__len__(),'\t',node.possibility)
word_dict[word]['Huffman'] = code
def merge(self,node1,node2):
        # the new non-leaf node's frequency is the sum of its two children's frequencies
top_pos = node1.possibility + node2.possibility
        # initialize the non-leaf node's (theta) vector with zeros
top_node = HuffmanTreeNode(np.zeros([1,self.vec_len]), top_pos)
if node1.possibility >= node2.possibility :
top_node.left = node1
top_node.right = node2
else:
top_node.left = node2
top_node.right = node1
return top_node
|
[
"numpy.zeros"
] |
[((4947, 4974), 'numpy.zeros', 'np.zeros', (['[1, self.vec_len]'], {}), '([1, self.vec_len])\n', (4955, 4974), True, 'import numpy as np\n')]
|
# -*- coding: UTF-8 -*-
"""
Source processing routines
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import warnings
from collections import OrderedDict
from astropy.cosmology import default_cosmology
import numpy as np
import os
import pysynphot as S
import astropy.table as at
from . import io
from . import passband
def load_source(sourcenames):
"""
Loads sources
Parameters
----------
sourcenames : array-like
The source names. Passed to :py:func:`source_synphot.io.read_source`
Returns
-------
sources : dict
The dictionary of source spectra
See Also
--------
:py:func:`source_synphot.io.read_source`
"""
sources = OrderedDict()
if np.isscalar(sourcenames):
sourcenames = np.array(sourcenames, ndmin=1)
else:
sourcenames = np.array(sourcenames).flatten()
nsource = len(sourcenames)
for source in sourcenames:
try:
thissource = io.read_source(source)
except Exception as e:
message = 'Source {} not loaded'.format(source)
warnings.warn(message, RuntimeWarning)
continue
sources[source] = thissource
return sources
def pre_process_source(source, sourcemag, sourcepb, sourcez, smooth=True):
"""
Pre-process a source at some redshift ``sourcez`` back to the rest-frame
and normalize it to have magnitude ``sourcemag`` in passband ``sourcepb``
Parameters
----------
    source : str
The source spectrum filename
sourcemag : float
The magnitude of the source spectrum in passband ``sourcepb``
sourcepb : :py:class:`pysynphot.spectrum.ArraySpectralElement`
The passband in which `source` has magnitude ``sourcemag``
sourcez : float
The redshift of `source`
smooth : bool, optional
Smooth the spectrum (default: True)
Returns
-------
source : :py:class:`pysynphot.ArraySpectrum`
The de-redshifted, normalized and optionally smoothed spectrum
See Also
--------
:py:func:`astropy.table.Table.read`
"""
inspec = None
inspecz = np.nan
inspecmag = np.nan
inspecpb = None
source_table_file = os.path.join('sources', 'sourcetable.txt')
source_table_file = io.get_pkgfile(source_table_file)
source_table = at.Table.read(source_table_file, format='ascii')
ind = (source_table['specname'] == source)
nmatch = len(source_table['specname'][ind])
if nmatch == 1:
# load the file and the info
inspec = source_table['specname'][ind][0]
inspecz = source_table['redshift'][ind][0]
inspecmag = source_table['g'][ind][0] # for now, just normalize the g-band mag
elif nmatch == 0:
message = 'Spectrum {} not listed in lookup table'.format(source)
pass
else:
message = 'Spectrum {} not uniquely listed in lookup table'.format(source)
pass
if inspec is None:
warnings.warn(message, RuntimeWarning)
inspec = source
inspecz = sourcez
inspecmag = sourcemag
inspecpb = sourcepb
if not os.path.exists(inspec):
message = 'Spectrum {} could not be found'.format(inspec)
raise ValueError(message)
try:
spec = at.Table.read(inspec, names=('wave','flux'), format='ascii')
except Exception as e:
message = 'Could not read file {}'.format(source)
raise ValueError(message)
if hasattr(inspecpb,'wave') and hasattr(inspecpb, 'throughput'):
pass
else:
pbs = passband.load_pbs([inspecpb], 0.)
try:
inspecpb = pbs[inspecpb][0]
except KeyError as e:
message = 'Could not load passband {}'.format(inspecpb)
raise RuntimeError(message)
try:
inspecmag = float(inspecmag)
except (TypeError, ValueError) as e:
message = 'Source magnitude {} could not be interpreted as a float'.format(inspecmag)
raise ValueError(message)
try:
inspecz = float(inspecz)
except (TypeError, ValueError) as e:
message = 'Source redshift {} could not be interpreted as a float'.format(inspecz)
raise ValueError(message)
if inspecz < 0 :
message = 'Source must have positive definite cosmological redshift'
raise ValueError(message)
inspec = S.ArraySpectrum(spec['wave'], spec['flux'], fluxunits='flam')
try:
inspec = inspec.renorm(sourcemag, 'ABmag', inspecpb)
inspec.convert('flam')
except Exception as e:
message = 'Could not renormalize spectrum {}'.format(inspec)
raise RuntimeError(message)
if inspecz > 0:
zblue = 1./(1+inspecz) - 1.
inspec_rest = inspec.redshift(zblue)
inspec_rest.convert('flam')
c = default_cosmology.get()
mu = c.distmod(inspecz)
out = inspec_rest*(10.**(0.4*mu.value))
else:
out = inspec
    # TODO: renorm is basic - it just computes dmag = RNval minus the original spectrum's magnitude
    # and rescales; there is some sanity checking for passband overlap.
    # We could do this ourselves, without renorm, by relying on the .passband routines instead.
return out
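# A minimal usage sketch (illustrative only - the module path, source name, magnitude and passband
# below are made-up placeholders and must exist in the package's lookup table / passband definitions):
#
#   from source_synphot import source
#   spectra = source.load_source(['sn1a_template.dat'])
#   rest_spec = source.pre_process_source('sn1a_template.dat', sourcemag=22.0,
#                                         sourcepb='sdss_g', sourcez=0.1)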
|
[
"os.path.exists",
"collections.OrderedDict",
"numpy.isscalar",
"os.path.join",
"pysynphot.ArraySpectrum",
"numpy.array",
"astropy.cosmology.default_cosmology.get",
"warnings.warn",
"astropy.table.Table.read"
] |
[((732, 745), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (743, 745), False, 'from collections import OrderedDict\n'), ((753, 777), 'numpy.isscalar', 'np.isscalar', (['sourcenames'], {}), '(sourcenames)\n', (764, 777), True, 'import numpy as np\n'), ((2274, 2316), 'os.path.join', 'os.path.join', (['"""sources"""', '"""sourcetable.txt"""'], {}), "('sources', 'sourcetable.txt')\n", (2286, 2316), False, 'import os\n'), ((2394, 2442), 'astropy.table.Table.read', 'at.Table.read', (['source_table_file'], {'format': '"""ascii"""'}), "(source_table_file, format='ascii')\n", (2407, 2442), True, 'import astropy.table as at\n'), ((4434, 4495), 'pysynphot.ArraySpectrum', 'S.ArraySpectrum', (["spec['wave']", "spec['flux']"], {'fluxunits': '"""flam"""'}), "(spec['wave'], spec['flux'], fluxunits='flam')\n", (4449, 4495), True, 'import pysynphot as S\n'), ((801, 831), 'numpy.array', 'np.array', (['sourcenames'], {'ndmin': '(1)'}), '(sourcenames, ndmin=1)\n', (809, 831), True, 'import numpy as np\n'), ((3035, 3073), 'warnings.warn', 'warnings.warn', (['message', 'RuntimeWarning'], {}), '(message, RuntimeWarning)\n', (3048, 3073), False, 'import warnings\n'), ((3200, 3222), 'os.path.exists', 'os.path.exists', (['inspec'], {}), '(inspec)\n', (3214, 3222), False, 'import os\n'), ((3349, 3410), 'astropy.table.Table.read', 'at.Table.read', (['inspec'], {'names': "('wave', 'flux')", 'format': '"""ascii"""'}), "(inspec, names=('wave', 'flux'), format='ascii')\n", (3362, 3410), True, 'import astropy.table as at\n'), ((4879, 4902), 'astropy.cosmology.default_cosmology.get', 'default_cosmology.get', ([], {}), '()\n', (4900, 4902), False, 'from astropy.cosmology import default_cosmology\n'), ((864, 885), 'numpy.array', 'np.array', (['sourcenames'], {}), '(sourcenames)\n', (872, 885), True, 'import numpy as np\n'), ((1124, 1162), 'warnings.warn', 'warnings.warn', (['message', 'RuntimeWarning'], {}), '(message, RuntimeWarning)\n', (1137, 1162), False, 'import warnings\n')]
|
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics.pairwise import linear_kernel
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
class Jaden:
_model = None
_vector = None
_vocabulary = None
def __init__(self):
self._model = pickle.load(open('_model.sav', 'rb'))
self._vector = pickle.load(open('_vectorized.sav', 'rb'))
with open('dataset/tarih.csv', newline='', encoding='utf8') as f:
reader = csv.reader(f)
_vocabulary = list(reader)
self._vocabulary = _vocabulary
def find_answer(self, question):
        # note: _model and _vector are instance attributes, so they must be accessed via self
        _cos_sim = linear_kernel(self._model.transform([question]), self._vector).flatten()
_cos_sim = np.ndarray.argsort(-_cos_sim)[:5]
_result = []
for i in _cos_sim:
_result.append(self._vocabulary[i+1][1])
return _result
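# Example use of the retrieval class above (illustrative only; it assumes the pickled
# '_model.sav' / '_vectorized.sav' files and 'dataset/tarih.csv' exist on disk):
#
#   jaden = Jaden()
#   answers = jaden.find_answer("your question here")
#   print(answers[0])   # closest of the five returned entries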
class LoginScreen(GridLayout):
def __init__(self, **kwargs):
super(LoginScreen, self).__init__(**kwargs)
self.cols = 2
self.add_widget(Label(text='User Name'))
self.username = TextInput(multiline=False)
self.add_widget(self.username)
self.add_widget(Label(text='password'))
self.password = TextInput(password=True, multiline=False)
self.add_widget(self.password)
class MyApp(App):
def build(self):
return LoginScreen()
MyApp().run()
|
[
"kivy.uix.label.Label",
"numpy.ndarray.argsort",
"kivy.uix.textinput.TextInput",
"csv.reader"
] |
[((1260, 1286), 'kivy.uix.textinput.TextInput', 'TextInput', ([], {'multiline': '(False)'}), '(multiline=False)\n', (1269, 1286), False, 'from kivy.uix.textinput import TextInput\n'), ((1398, 1439), 'kivy.uix.textinput.TextInput', 'TextInput', ([], {'password': '(True)', 'multiline': '(False)'}), '(password=True, multiline=False)\n', (1407, 1439), False, 'from kivy.uix.textinput import TextInput\n'), ((644, 657), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (654, 657), False, 'import csv\n'), ((879, 908), 'numpy.ndarray.argsort', 'np.ndarray.argsort', (['(-_cos_sim)'], {}), '(-_cos_sim)\n', (897, 908), True, 'import numpy as np\n'), ((1211, 1234), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""User Name"""'}), "(text='User Name')\n", (1216, 1234), False, 'from kivy.uix.label import Label\n'), ((1350, 1372), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""password"""'}), "(text='password')\n", (1355, 1372), False, 'from kivy.uix.label import Label\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 14:14:45 2020
@author: Nikki
"""
import numpy as np
import cv2
import transform as tform
import sys
import math
import scipy.spatial
import markers
###---------------------------------------------------------------------------
# Allows video to be initialized using a string
#
# returns - video_path - path to video to be used
# returns - GPS_pix - matrix to convert from GPS to pixel
# - pix_GPS - matrix to convert from pixel to GPS
# - origin - approximate camera location in GPS
###
def sample_select(name):
if name == 'aot3':
video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample3.mp4'
elif name == 'mrb3':
video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/20190422_153844_DA4A.mkv'
elif name == 'aot1':
video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample1_1.mp4'
elif name == 'aot2':
video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample2_1.mp4'
GPS_pix, pix_GPS, origin = get_transform(name)
return video_path, GPS_pix, pix_GPS, origin
###---------------------------------------------------------------------------
# Used to find transformation matrices between GPS and pixel space and vice versa.
#
# returns - GPS_pix - matrix to convert from GPS to pixel
# - pix_GPS - matrix to convert from pixel to GPS
###
def get_transform(name):
if name == 'mrb3':
x, y, origin = markers.mrb3_markers()
elif name == 'aot1':
x, y, origin = markers.aot_1_markers()
elif name == 'aot2':
x, y, origin = markers.aot_2_markers()
elif name == 'aot3':
x, y, origin = markers.aot_3_markers()
else:
print("Camera name invalid")
GPS_pix = tform.get_best_transform(x, y)
pix_GPS = tform.get_best_transform(y, x)
return(GPS_pix, pix_GPS, origin)
###---------------------------------------------------------------------------
# Given image points at people's feet, draws a '6 foot' ellipse around each of them.
# The most useful of these functions for integration with YOLO bounding box points.
#
# returns - img - input frame with ellipses drawn at specified points
###
def draw_radius(frame, pts, GPS_pix, pix_GPS, origin):
bounds = four_pts(pts, pix_GPS, GPS_pix, origin)
mytree = load_tree(pts, pix_GPS)
img, count = draw_ellipse(frame, bounds, pts, mytree, pix_GPS)
return img, count
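# A minimal usage sketch for draw_radius (illustrative only - the pixel points below are made up;
# in practice they would be the bottom-center points of detected bounding boxes):
#
#   video_path, GPS_pix, pix_GPS, origin = sample_select('aot3')
#   vid = cv2.VideoCapture(video_path)
#   _, frame = vid.read()
#   feet_pts = np.array([[640., 360.], [700., 380.]])   # pixel coords of people's feet
#   img, close_count = draw_radius(frame, feet_pts, GPS_pix, pix_GPS, origin)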
###---------------------------------------------------------------------------
# Given an array of photo pts and conversion matrices, converts to GPS, finds
# defining points of 6 ft circle at camera angle, and converts back to pixel coords.
#
# returns - final - array of arrays of 4 pixel coordinates to be used to define each ellipse's axes
###
def four_pts(pts, pix_GPS, GPS_pix, origin):
#convert to gps coords
gps = tform.transform_pt_array(pts, pix_GPS)
final = []
#calculate locations six feet away at given bearings and add to array
for pt in gps:
degrees = calc_bearing(pt, origin)
for angle in degrees:
a = six_ft(pt, angle)
final.append(a)
#convert list of pts to numpy array
final = np.array([final])
final = np.squeeze(np.asarray(final))
#check if final has any elements?
#convert to pixel coords
final = tform.transform_pt_array(final, GPS_pix)
return final
###---------------------------------------------------------------------------
# Given a point, calculates its bearing relative to the approximate camera location.
# This enables GPS circle points to be found such that they define an ellipse in the pixel
# plane that appears properly scaled. Uses the initial-bearing (forward azimuth) formula.
# Formula from: https://www.movable-type.co.uk/scripts/latlong.html
#
# returns - array of 4 bearings in degrees, clockwise from north. First is bearing
# between camera and given pt)
###
def calc_bearing(pt, origin):
#convert GPS coords to radians
la1 = math.radians(origin[0])
la2 = math.radians(pt[0])
lo1 = math.radians(origin[1])
lo2 = math.radians(pt[1])
#perform calculation
y = math.sin(lo2-lo1) * math.cos(la2)
x = math.cos(la1) * math.sin(la2) - math.sin(la1) * math.cos(la2) * math.cos(lo2-lo1)
b = math.atan2(y,x)
#convert to degrees
b = math.degrees(b)
    #fill array with 90 degree increments
bearing = 4 * [None]
i = 0
while i < 4:
bearing[i] = (b + i * 90) % 360
i = i + 1
return bearing
###---------------------------------------------------------------------------
# Loads an array of pts into a cKDTree to enable easy nearest-point lookup
#
# returns - ckd tree
###
def load_tree(pts, pix_GPS):
gps = tform.transform_pt_array(pts, pix_GPS)
mytree = scipy.spatial.cKDTree(gps)
return mytree
###---------------------------------------------------------------------------
# Given array of defining points of several ellipses (endpoints of axes) and
# corresponding center points, draws ellipses on given image
#
# returns - all_img - given image with ellipses drawn onto it
###
def draw_ellipse(frame, pts, centers, mytree, pix_GPS):
#define qualities of the ellipse
thickness = -1
line_type = 8
#set transparency
alpha = 0.25
#create separate image for ellipses to be drawn into
ellipses = frame.copy()
#iterate through list of ellipse points and centers, drawing each into ellipse image
i = 0
count = 0
gps_centers = tform.transform_pt_array(centers, pix_GPS)
while i < pts.shape[0]:
a = pts[i]
b = pts[i + 1]
c = pts[i + 2]
d = pts[i + 3]
minor = int((math.sqrt(math.pow((c[0]-a[0]), 2) + math.pow((c[1]-a[1]), 2)))/2)
major = int((math.sqrt(math.pow((d[0]-b[0]), 2) + math.pow((d[1]-b[1]), 2)))/2)
if centers.size <= 2:
centers = np.array([centers])
center = centers[i//4]
x = int(center[0])
y = int(center[1])
if centers.size > 2:
gps_center = gps_centers[i//4]
dist, ind = mytree.query(gps_center, k=2)
closest = mytree.data[ind[1]]
dist = GPS_to_ft(gps_center, closest)
if dist < 6:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (255, 0, 0), thickness, line_type)
count = count + 1
elif dist < 8:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (255, 140, 0), thickness, line_type)
elif dist < 10:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (255, 255, 0), thickness, line_type)
else:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (0,255,0), thickness, line_type)
else:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (0,255,0), thickness, line_type)
i = i + 4
#combine original image and ellipse image into one
all_img = cv2.addWeighted(ellipses, alpha, frame, 1-alpha, 0)
return all_img, count
###---------------------------------------------------------------------------
# Given a GPS point and a bearing, finds the point six feet away in that direction,
# using the destination-point formula.
# Formula from: https://www.movable-type.co.uk/scripts/latlong.html
#
# returns - GPS coord 6 ft away
###
def six_ft(pt1, b):
#convert to rad
la1 = math.radians(pt1[0])
lo1 = math.radians(pt1[1])
b = math.radians(b)
#calc latitude and longitude
radius = 20902231
d =(6.0/radius)
la2 = math.asin(math.sin(la1) * math.cos(d) + math.cos(la1) * math.sin(d) * math.cos(b))
lo2 = lo1 + math.atan2((math.sin(b) * math.sin(d) * math.cos(la1)), (math.cos(d) - math.sin(la1) * math.sin(la2)))
#reconvert to GPS standard, degrees
pt2 = (math.degrees(la2), math.degrees(lo2))
return(pt2)
###---------------------------------------------------------------------------
# Given two GPS points, finds the distance in ft between them, calculated using the
# haversine formula.
#
# returns - distance in ft between given points
###
def GPS_to_ft(pt1, pt2):
#earths rad in ft
radius = 20902231
la1 = math.radians(pt1[0])
la2 = math.radians(pt2[0])
lo1 = math.radians(pt1[1])
lo2 = math.radians(pt2[1])
#la2, lo2 = six_ft(pt1, 90)
a = math.pow(((la2 - la1) / 2), 2)
b = math.cos(la1) * math.cos(la2)
c = math.pow(((lo2 - lo1) / 2), 2)
d = math.sin(a) + b * math.sin(c)
dist = 2 * radius * math.asin(math.sqrt(d))
#print(dist)
return dist
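# Quick sanity check of the two helpers above (illustrative only): walking six feet due east from a
# point and then measuring the distance back should give roughly six feet.
#
#   start = (36.144187, -86.799707)
#   east_pt = six_ft(start, 90)          # bearing 90 degrees = due east
#   print(GPS_to_ft(start, east_pt))     # ~6.0 ft, up to small numerical error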
###---------------------------------------------------------------------------
# Following functions are not utilized in video processing code, but were helpful
# during development
###---------------------------------------------------------------------------
###---------------------------------------------------------------------------
# Returns pixel coordinate value of location left-clicked on screen
# Based on:
# https://stackoverflow.com/questions/60066334/get-pixel-coordinates-using-mouse-in-cv2-video-frame-with-python
def get_pixel_coord(video_path):
try:
video_capture = cv2.VideoCapture(video_path)
def mouseHandler(event, x, y, flags, params):
if event == cv2.EVENT_LBUTTONDOWN:
print(x, y)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("result", mouseHandler)
while(True):
# Capture frame-by-frame
_, frame = video_capture.read()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.imshow("result", frame)
video_capture.release()
cv2.destroyAllWindows()
except:
video_capture.release()
cv2.destroyAllWindows()
###---------------------------------------------------------------------------
# Given points, draws circles around them
###
def make_circles(frame, centers, size):
size = size[0] // 128
thickness = -1
line_type = 8
for center in centers:
pt = (int(center[0]), int(center[1]))
cv2.circle(frame, pt, size, (0,0,255), thickness, line_type)
###---------------------------------------------------------------------------
# Draws 4 ellipses on video, utilizing most functions in this doc.
###
def test():
# define where video comes from
# video_path = './data/AOTsample3.mp4'
video_path = './data/20190422_153844_DA4A.mkv'
    # get transfer function from known GPS and pixel locations
    # get_transform() needs a camera name and also returns the camera origin; 'mrb3' matches the video above
    GPS_pix, pix_GPS, origin = get_transform('mrb3')
# load in sample pts
# a = np.array([36.148342, -86.799332]) #closest lamp
# b = np.array([36.148139, -86.799375]) #lamp across street, right
# c = np.array([36.148349, -86.799135]) #closest left corner of furthest crosswalk dash to right
# d = np.array([36.147740, -86.799218]) #sixth tree down the street
a = np.array([36.144187, -86.799707]) #far left street pole
b = np.array([36.143990, -86.799594]) #pole by bike sign
c = np.array([36.143997, -86.800180]) #corner of sidewalk
d = np.array([36.144203, -86.800149]) #right of sidewalk stripe closest to camera
x = np.array([a,b,c,d])
pts = tform.transform_pt_array(x, GPS_pix)
print(pts)
# start video
print("Video from: ", video_path )
vid = cv2.VideoCapture(video_path)
try:
while True:
# skip desired number of frames to speed up processing
for i in range (10):
vid.grab()
# read frame
return_value, frame = vid.read()
# if frame doesn't exist, exit
if not return_value:
cv2.destroyWindow('result')
print('Video has ended')
break
# draw ellipse
            img, count = draw_radius(frame, pts, GPS_pix, pix_GPS, origin)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", img)
if cv2.waitKey(1) & 0xFF == ord('q'): break
# end video, close viewer, stop writing to file
vid.release()
cv2.destroyAllWindows()
# if interrupted, end video, close viewer, stop writing to file
except:
print("Unexpected error:", sys.exc_info()[0])
vid.release()
cv2.destroyAllWindows()
#test()
|
[
"math.sqrt",
"cv2.imshow",
"math.cos",
"numpy.array",
"cv2.ellipse",
"sys.exc_info",
"cv2.destroyAllWindows",
"cv2.setMouseCallback",
"markers.aot_1_markers",
"numpy.asarray",
"markers.mrb3_markers",
"cv2.addWeighted",
"markers.aot_3_markers",
"cv2.waitKey",
"transform.get_best_transform",
"math.degrees",
"math.radians",
"cv2.circle",
"markers.aot_2_markers",
"math.atan2",
"cv2.namedWindow",
"math.pow",
"cv2.destroyWindow",
"cv2.VideoCapture",
"math.sin",
"transform.transform_pt_array"
] |
[((1885, 1915), 'transform.get_best_transform', 'tform.get_best_transform', (['x', 'y'], {}), '(x, y)\n', (1909, 1915), True, 'import transform as tform\n'), ((1930, 1960), 'transform.get_best_transform', 'tform.get_best_transform', (['y', 'x'], {}), '(y, x)\n', (1954, 1960), True, 'import transform as tform\n'), ((3007, 3045), 'transform.transform_pt_array', 'tform.transform_pt_array', (['pts', 'pix_GPS'], {}), '(pts, pix_GPS)\n', (3031, 3045), True, 'import transform as tform\n'), ((3346, 3363), 'numpy.array', 'np.array', (['[final]'], {}), '([final])\n', (3354, 3363), True, 'import numpy as np\n'), ((3490, 3530), 'transform.transform_pt_array', 'tform.transform_pt_array', (['final', 'GPS_pix'], {}), '(final, GPS_pix)\n', (3514, 3530), True, 'import transform as tform\n'), ((4176, 4199), 'math.radians', 'math.radians', (['origin[0]'], {}), '(origin[0])\n', (4188, 4199), False, 'import math\n'), ((4210, 4229), 'math.radians', 'math.radians', (['pt[0]'], {}), '(pt[0])\n', (4222, 4229), False, 'import math\n'), ((4240, 4263), 'math.radians', 'math.radians', (['origin[1]'], {}), '(origin[1])\n', (4252, 4263), False, 'import math\n'), ((4274, 4293), 'math.radians', 'math.radians', (['pt[1]'], {}), '(pt[1])\n', (4286, 4293), False, 'import math\n'), ((4464, 4480), 'math.atan2', 'math.atan2', (['y', 'x'], {}), '(y, x)\n', (4474, 4480), False, 'import math\n'), ((4517, 4532), 'math.degrees', 'math.degrees', (['b'], {}), '(b)\n', (4529, 4532), False, 'import math\n'), ((4944, 4982), 'transform.transform_pt_array', 'tform.transform_pt_array', (['pts', 'pix_GPS'], {}), '(pts, pix_GPS)\n', (4968, 4982), True, 'import transform as tform\n'), ((5748, 5790), 'transform.transform_pt_array', 'tform.transform_pt_array', (['centers', 'pix_GPS'], {}), '(centers, pix_GPS)\n', (5772, 5790), True, 'import transform as tform\n'), ((7270, 7323), 'cv2.addWeighted', 'cv2.addWeighted', (['ellipses', 'alpha', 'frame', '(1 - alpha)', '(0)'], {}), '(ellipses, alpha, frame, 1 - alpha, 0)\n', (7285, 7323), False, 'import cv2\n'), ((7708, 7728), 'math.radians', 'math.radians', (['pt1[0]'], {}), '(pt1[0])\n', (7720, 7728), False, 'import math\n'), ((7739, 7759), 'math.radians', 'math.radians', (['pt1[1]'], {}), '(pt1[1])\n', (7751, 7759), False, 'import math\n'), ((7768, 7783), 'math.radians', 'math.radians', (['b'], {}), '(b)\n', (7780, 7783), False, 'import math\n'), ((8510, 8530), 'math.radians', 'math.radians', (['pt1[0]'], {}), '(pt1[0])\n', (8522, 8530), False, 'import math\n'), ((8541, 8561), 'math.radians', 'math.radians', (['pt2[0]'], {}), '(pt2[0])\n', (8553, 8561), False, 'import math\n'), ((8572, 8592), 'math.radians', 'math.radians', (['pt1[1]'], {}), '(pt1[1])\n', (8584, 8592), False, 'import math\n'), ((8603, 8623), 'math.radians', 'math.radians', (['pt2[1]'], {}), '(pt2[1])\n', (8615, 8623), False, 'import math\n'), ((8669, 8697), 'math.pow', 'math.pow', (['((la2 - la1) / 2)', '(2)'], {}), '((la2 - la1) / 2, 2)\n', (8677, 8697), False, 'import math\n'), ((8746, 8774), 'math.pow', 'math.pow', (['((lo2 - lo1) / 2)', '(2)'], {}), '((lo2 - lo1) / 2, 2)\n', (8754, 8774), False, 'import math\n'), ((11397, 11430), 'numpy.array', 'np.array', (['[36.144187, -86.799707]'], {}), '([36.144187, -86.799707])\n', (11405, 11430), True, 'import numpy as np\n'), ((11463, 11495), 'numpy.array', 'np.array', (['[36.14399, -86.799594]'], {}), '([36.14399, -86.799594])\n', (11471, 11495), True, 'import numpy as np\n'), ((11526, 11558), 'numpy.array', 'np.array', (['[36.143997, -86.80018]'], {}), '([36.143997, -86.80018])\n', 
(11534, 11558), True, 'import numpy as np\n'), ((11590, 11623), 'numpy.array', 'np.array', (['[36.144203, -86.800149]'], {}), '([36.144203, -86.800149])\n', (11598, 11623), True, 'import numpy as np\n'), ((11678, 11700), 'numpy.array', 'np.array', (['[a, b, c, d]'], {}), '([a, b, c, d])\n', (11686, 11700), True, 'import numpy as np\n'), ((11713, 11749), 'transform.transform_pt_array', 'tform.transform_pt_array', (['x', 'GPS_pix'], {}), '(x, GPS_pix)\n', (11737, 11749), True, 'import transform as tform\n'), ((11837, 11865), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (11853, 11865), False, 'import cv2\n'), ((1553, 1575), 'markers.mrb3_markers', 'markers.mrb3_markers', ([], {}), '()\n', (1573, 1575), False, 'import markers\n'), ((3387, 3404), 'numpy.asarray', 'np.asarray', (['final'], {}), '(final)\n', (3397, 3404), True, 'import numpy as np\n'), ((4332, 4351), 'math.sin', 'math.sin', (['(lo2 - lo1)'], {}), '(lo2 - lo1)\n', (4340, 4351), False, 'import math\n'), ((4352, 4365), 'math.cos', 'math.cos', (['la2'], {}), '(la2)\n', (4360, 4365), False, 'import math\n'), ((8132, 8149), 'math.degrees', 'math.degrees', (['la2'], {}), '(la2)\n', (8144, 8149), False, 'import math\n'), ((8151, 8168), 'math.degrees', 'math.degrees', (['lo2'], {}), '(lo2)\n', (8163, 8168), False, 'import math\n'), ((8708, 8721), 'math.cos', 'math.cos', (['la1'], {}), '(la1)\n', (8716, 8721), False, 'import math\n'), ((8724, 8737), 'math.cos', 'math.cos', (['la2'], {}), '(la2)\n', (8732, 8737), False, 'import math\n'), ((8785, 8796), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (8793, 8796), False, 'import math\n'), ((9518, 9546), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (9534, 9546), False, 'import cv2\n'), ((9702, 9746), 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""', 'cv2.WINDOW_NORMAL'], {}), "('result', cv2.WINDOW_NORMAL)\n", (9717, 9746), False, 'import cv2\n'), ((9755, 9799), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""result"""', 'mouseHandler'], {}), "('result', mouseHandler)\n", (9775, 9799), False, 'import cv2\n'), ((10099, 10122), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10120, 10122), False, 'import cv2\n'), ((10548, 10610), 'cv2.circle', 'cv2.circle', (['frame', 'pt', 'size', '(0, 0, 255)', 'thickness', 'line_type'], {}), '(frame, pt, size, (0, 0, 255), thickness, line_type)\n', (10558, 10610), False, 'import cv2\n'), ((12637, 12660), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (12658, 12660), False, 'import cv2\n'), ((1633, 1656), 'markers.aot_1_markers', 'markers.aot_1_markers', ([], {}), '()\n', (1654, 1656), False, 'import markers\n'), ((4374, 4387), 'math.cos', 'math.cos', (['la1'], {}), '(la1)\n', (4382, 4387), False, 'import math\n'), ((4390, 4403), 'math.sin', 'math.sin', (['la2'], {}), '(la2)\n', (4398, 4403), False, 'import math\n'), ((4438, 4457), 'math.cos', 'math.cos', (['(lo2 - lo1)'], {}), '(lo2 - lo1)\n', (4446, 4457), False, 'import math\n'), ((6153, 6172), 'numpy.array', 'np.array', (['[centers]'], {}), '([centers])\n', (6161, 6172), True, 'import numpy as np\n'), ((7094, 7189), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(0, 255, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (0, 255, 0),\n thickness, line_type)\n', (7105, 7189), False, 'import cv2\n'), ((8803, 8814), 'math.sin', 'math.sin', (['c'], {}), '(c)\n', (8811, 8814), False, 'import math\n'), ((8854, 
8866), 'math.sqrt', 'math.sqrt', (['d'], {}), '(d)\n', (8863, 8866), False, 'import math\n'), ((10031, 10058), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'frame'], {}), "('result', frame)\n", (10041, 10058), False, 'import cv2\n'), ((10185, 10208), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10206, 10208), False, 'import cv2\n'), ((12407, 12451), 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""', 'cv2.WINDOW_NORMAL'], {}), "('result', cv2.WINDOW_NORMAL)\n", (12422, 12451), False, 'import cv2\n'), ((12464, 12489), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'img'], {}), "('result', img)\n", (12474, 12489), False, 'import cv2\n'), ((12830, 12853), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (12851, 12853), False, 'import cv2\n'), ((1710, 1733), 'markers.aot_2_markers', 'markers.aot_2_markers', ([], {}), '()\n', (1731, 1733), False, 'import markers\n'), ((4406, 4419), 'math.sin', 'math.sin', (['la1'], {}), '(la1)\n', (4414, 4419), False, 'import math\n'), ((4422, 4435), 'math.cos', 'math.cos', (['la2'], {}), '(la2)\n', (4430, 4435), False, 'import math\n'), ((6535, 6630), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(255, 0, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (255, 0, 0),\n thickness, line_type)\n', (6546, 6630), False, 'import cv2\n'), ((7884, 7897), 'math.sin', 'math.sin', (['la1'], {}), '(la1)\n', (7892, 7897), False, 'import math\n'), ((7900, 7911), 'math.cos', 'math.cos', (['d'], {}), '(d)\n', (7908, 7911), False, 'import math\n'), ((7944, 7955), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (7952, 7955), False, 'import math\n'), ((8013, 8026), 'math.cos', 'math.cos', (['la1'], {}), '(la1)\n', (8021, 8026), False, 'import math\n'), ((8030, 8041), 'math.cos', 'math.cos', (['d'], {}), '(d)\n', (8038, 8041), False, 'import math\n'), ((12197, 12224), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""result"""'], {}), "('result')\n", (12214, 12224), False, 'import cv2\n'), ((1791, 1814), 'markers.aot_3_markers', 'markers.aot_3_markers', ([], {}), '()\n', (1812, 1814), False, 'import markers\n'), ((6703, 6800), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(255, 140, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (255, 140, 0),\n thickness, line_type)\n', (6714, 6800), False, 'import cv2\n'), ((7914, 7927), 'math.cos', 'math.cos', (['la1'], {}), '(la1)\n', (7922, 7927), False, 'import math\n'), ((7930, 7941), 'math.sin', 'math.sin', (['d'], {}), '(d)\n', (7938, 7941), False, 'import math\n'), ((7985, 7996), 'math.sin', 'math.sin', (['b'], {}), '(b)\n', (7993, 7996), False, 'import math\n'), ((7999, 8010), 'math.sin', 'math.sin', (['d'], {}), '(d)\n', (8007, 8010), False, 'import math\n'), ((8044, 8057), 'math.sin', 'math.sin', (['la1'], {}), '(la1)\n', (8052, 8057), False, 'import math\n'), ((8060, 8073), 'math.sin', 'math.sin', (['la2'], {}), '(la2)\n', (8068, 8073), False, 'import math\n'), ((9953, 9967), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9964, 9967), False, 'import cv2\n'), ((12505, 12519), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (12516, 12519), False, 'import cv2\n'), ((12781, 12795), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12793, 12795), False, 'import sys\n'), ((5947, 5971), 'math.pow', 'math.pow', (['(c[0] - a[0])', '(2)'], {}), '(c[0] - a[0], 2)\n', (5955, 5971), False, 'import math\n'), ((5974, 
5998), 'math.pow', 'math.pow', (['(c[1] - a[1])', '(2)'], {}), '(c[1] - a[1], 2)\n', (5982, 5998), False, 'import math\n'), ((6035, 6059), 'math.pow', 'math.pow', (['(d[0] - b[0])', '(2)'], {}), '(d[0] - b[0], 2)\n', (6043, 6059), False, 'import math\n'), ((6062, 6086), 'math.pow', 'math.pow', (['(d[1] - b[1])', '(2)'], {}), '(d[1] - b[1], 2)\n', (6070, 6086), False, 'import math\n'), ((6840, 6937), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(255, 255, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (255, 255, 0),\n thickness, line_type)\n', (6851, 6937), False, 'import cv2\n'), ((6979, 7074), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(0, 255, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (0, 255, 0),\n thickness, line_type)\n', (6990, 7074), False, 'import cv2\n')]
|
import numpy as np
from src import const
#TODO: should be imported from aguirregabiria_simple.py
def period_profit(p: np.ndarray, lambdas: np.ndarray, betas_transition=const.betas_transition):
"""
Correct expected period return profit. See ReadMe for derivation
"""
constant_part = (p-const.c) * np.e ** const.α * np.e ** ((const.σ_ɛ ** 2) / 2)
summation = np.dot(np.e**(betas_transition*np.log(p[:, np.newaxis])), lambdas)
return constant_part*summation
def test_period_profit():
p = np.array([1.4, 1.2])
lambdas = np.array([0.5, 0.4, 0.1])
beta_p_part = np.array([[np.e ** (-3. * 0.33647224), np.e ** (-2.5 * 0.33647224), np.e ** (-2 * 0.33647224)],
[np.e ** (-3. * 0.18232156), np.e ** (-2.5 * 0.18232156), np.e ** (-2 * 0.18232156)]])
summation_part = np.array([0.36443148 * lambdas[0] + 0.43120115 * lambdas[1] + 0.51020408 * lambdas[2],
0.5787037 * lambdas[0] + 0.63393814 * lambdas[1] + 0.69444444 * lambdas[2]])
expected = (p - const.c) * np.e ** const.α * np.e ** ((const.σ_ɛ ** 2) / 2) * summation_part
computed = period_profit(p, lambdas)
assert np.allclose(expected, computed, rtol=0.05)
|
[
"numpy.log",
"numpy.array",
"numpy.allclose"
] |
[((518, 538), 'numpy.array', 'np.array', (['[1.4, 1.2]'], {}), '([1.4, 1.2])\n', (526, 538), True, 'import numpy as np\n'), ((553, 578), 'numpy.array', 'np.array', (['[0.5, 0.4, 0.1]'], {}), '([0.5, 0.4, 0.1])\n', (561, 578), True, 'import numpy as np\n'), ((598, 791), 'numpy.array', 'np.array', (['[[np.e ** (-3.0 * 0.33647224), np.e ** (-2.5 * 0.33647224), np.e ** (-2 * \n 0.33647224)], [np.e ** (-3.0 * 0.18232156), np.e ** (-2.5 * 0.18232156),\n np.e ** (-2 * 0.18232156)]]'], {}), '([[np.e ** (-3.0 * 0.33647224), np.e ** (-2.5 * 0.33647224), np.e **\n (-2 * 0.33647224)], [np.e ** (-3.0 * 0.18232156), np.e ** (-2.5 * \n 0.18232156), np.e ** (-2 * 0.18232156)]])\n', (606, 791), True, 'import numpy as np\n'), ((830, 1002), 'numpy.array', 'np.array', (['[0.36443148 * lambdas[0] + 0.43120115 * lambdas[1] + 0.51020408 * lambdas[2\n ], 0.5787037 * lambdas[0] + 0.63393814 * lambdas[1] + 0.69444444 *\n lambdas[2]]'], {}), '([0.36443148 * lambdas[0] + 0.43120115 * lambdas[1] + 0.51020408 *\n lambdas[2], 0.5787037 * lambdas[0] + 0.63393814 * lambdas[1] + \n 0.69444444 * lambdas[2]])\n', (838, 1002), True, 'import numpy as np\n'), ((1177, 1219), 'numpy.allclose', 'np.allclose', (['expected', 'computed'], {'rtol': '(0.05)'}), '(expected, computed, rtol=0.05)\n', (1188, 1219), True, 'import numpy as np\n'), ((410, 434), 'numpy.log', 'np.log', (['p[:, np.newaxis]'], {}), '(p[:, np.newaxis])\n', (416, 434), True, 'import numpy as np\n')]
|
import os.path
from scipy.optimize import fsolve
import math
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import utils_Florian as utils
def equations(p, t_peak, t_half):
x, y = p
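    # Two residuals: (1) the response at t_half equals half the peak response;
    # (2) the derivative of exp(-x*t) - exp(-y*t) vanishes at t_peak.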
return (0.5 * (math.exp(-x * t_peak) - math.exp(-y * t_peak)) - (math.exp(-x * t_half) - math.exp(-y * t_half)), -x * math.exp(-x * t_peak) + y * math.exp(-y * t_peak))
results = pd.DataFrame()
t_peaks = []
t_halfs = []
xs = []
ys = []
initial_conditions = ((12, 5),
(14, 4),
(14, 4),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1),
(30, 1))
for alpha in range(1, 16):
t_peak = 0.1415
t_half = t_peak + 0.2 + alpha * 0.05
print("Target: ", t_half)
x, y = fsolve(equations, initial_conditions[alpha], args=(t_peak, t_half))
t_peaks.append(t_peak)
t_halfs.append(t_half - t_peak)
xs.append(x)
ys.append(y)
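    # Sanity check: reconstruct the CRF on a fine grid and report its numerical peak and half-decay times.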
t = np.linspace(0, 2.0, 10000)
crf = -np.exp(-x * t) + np.exp(-y * t)
crf = crf / sum(crf)
print("t peak", t[np.argmax(crf)])
diff = crf - 0.5 * max(crf)
diff[:np.argmax(crf)] = np.inf
diff = np.abs(diff)
half_idx = np.argmin(diff)
print("t half", t[half_idx] - t[np.argmax(crf)])
plt.plot(t, crf, label=str(t_half - t_peak))
results = results.append(pd.DataFrame({"t_peak": [t_peak], "t_half": [t_half - t_peak], "a": [x], "b": [y]}))
results.to_csv(os.path.join(utils.output_dir, "crf_parameters.csv"))
|
[
"scipy.optimize.fsolve",
"numpy.abs",
"numpy.argmax",
"numpy.exp",
"numpy.linspace",
"numpy.argmin",
"pandas.DataFrame",
"math.exp"
] |
[((404, 418), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (416, 418), True, 'import pandas as pd\n'), ((1088, 1155), 'scipy.optimize.fsolve', 'fsolve', (['equations', 'initial_conditions[alpha]'], {'args': '(t_peak, t_half)'}), '(equations, initial_conditions[alpha], args=(t_peak, t_half))\n', (1094, 1155), False, 'from scipy.optimize import fsolve\n'), ((1267, 1293), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0)', '(10000)'], {}), '(0, 2.0, 10000)\n', (1278, 1293), True, 'import numpy as np\n'), ((1479, 1491), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (1485, 1491), True, 'import numpy as np\n'), ((1507, 1522), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (1516, 1522), True, 'import numpy as np\n'), ((1322, 1336), 'numpy.exp', 'np.exp', (['(-y * t)'], {}), '(-y * t)\n', (1328, 1336), True, 'import numpy as np\n'), ((1654, 1741), 'pandas.DataFrame', 'pd.DataFrame', (["{'t_peak': [t_peak], 't_half': [t_half - t_peak], 'a': [x], 'b': [y]}"], {}), "({'t_peak': [t_peak], 't_half': [t_half - t_peak], 'a': [x],\n 'b': [y]})\n", (1666, 1741), True, 'import pandas as pd\n'), ((1305, 1319), 'numpy.exp', 'np.exp', (['(-x * t)'], {}), '(-x * t)\n', (1311, 1319), True, 'import numpy as np\n'), ((1384, 1398), 'numpy.argmax', 'np.argmax', (['crf'], {}), '(crf)\n', (1393, 1398), True, 'import numpy as np\n'), ((1443, 1457), 'numpy.argmax', 'np.argmax', (['crf'], {}), '(crf)\n', (1452, 1457), True, 'import numpy as np\n'), ((288, 309), 'math.exp', 'math.exp', (['(-x * t_half)'], {}), '(-x * t_half)\n', (296, 309), False, 'import math\n'), ((312, 333), 'math.exp', 'math.exp', (['(-y * t_half)'], {}), '(-y * t_half)\n', (320, 333), False, 'import math\n'), ((341, 362), 'math.exp', 'math.exp', (['(-x * t_peak)'], {}), '(-x * t_peak)\n', (349, 362), False, 'import math\n'), ((369, 390), 'math.exp', 'math.exp', (['(-y * t_peak)'], {}), '(-y * t_peak)\n', (377, 390), False, 'import math\n'), ((1559, 1573), 'numpy.argmax', 'np.argmax', (['crf'], {}), '(crf)\n', (1568, 1573), True, 'import numpy as np\n'), ((238, 259), 'math.exp', 'math.exp', (['(-x * t_peak)'], {}), '(-x * t_peak)\n', (246, 259), False, 'import math\n'), ((262, 283), 'math.exp', 'math.exp', (['(-y * t_peak)'], {}), '(-y * t_peak)\n', (270, 283), False, 'import math\n')]
|
import re
import string
import numpy as np
from tqdm import tqdm
from typing import List
from docqa.triviaqa.read_data import TriviaQaQuestion
from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score
from docqa.utils import flatten_iterable, split
"""
Tools for turning the aliases and answer strings from TriviaQA into labelled spans
"""
class ExactMatchDetector(object):
def __init__(self):
self.answer_tokens = None
def set_question(self, normalized_aliases):
self.answer_tokens = normalized_aliases
def any_found(self, para):
words = [x.lower() for x in flatten_iterable(para)]
occurances = []
for answer_ix, answer in enumerate(self.answer_tokens):
word_starts = [i for i, w in enumerate(words) if answer[0] == w]
n_tokens = len(answer)
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token] == next:
ans_token += 1
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
class NormalizedAnswerDetector(object):
""" Try to labels tokens sequences, such that the extracted sequence would be evaluated as 100% correct
by the official trivia-qa evaluation script """
def __init__(self):
self.answer_tokens = None
def set_question(self, normalized_aliases):
self.answer_tokens = normalized_aliases
def any_found(self, para):
words = [normalize_answer(w) for w in flatten_iterable(para)]
occurances = []
for answer_ix, answer in enumerate(self.answer_tokens):
word_starts = [i for i, w in enumerate(words) if answer[0] == w]
n_tokens = len(answer)
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token] == next:
ans_token += 1
end += 1
elif next == "":
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
class FastNormalizedAnswerDetector(object):
""" almost twice as fast and very,very close to NormalizedAnswerDetector's output """
def __init__(self):
        # These come from the official TriviaQA evaluation script
self.skip = {"a", "an", "the", ""}
self.strip = string.punctuation + "".join([u"‘", u"’", u"´", u"`", "_"])
self.answer_tokens = None
def set_question(self, normalized_aliases):
self.answer_tokens = normalized_aliases
def any_found(self, para):
# Normalize the paragraph
words = [w.lower().strip(self.strip) for w in flatten_iterable(para)]
occurances = []
for answer_ix, answer in enumerate(self.answer_tokens):
# Locations where the first word occurs
word_starts = [i for i, w in enumerate(words) if answer[0] == w]
n_tokens = len(answer)
# Advance forward until we find all the words, skipping over articles
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token] == next:
ans_token += 1
end += 1
elif next in self.skip:
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
class CarefulAnswerDetector(object):
"""
    There are some common false negatives in the above answer detection; in particular, plurals of answers
    are often not found (nor are they counted as correct by the official script). This detector makes a
    stronger effort to find them, although it's unclear whether training with these additional answers would
    hurt or help the overall score, since I never got around to trying it.
"""
def __init__(self):
self.skip = {"a", "an", "the", "&", "and", "-", "\u2019", "\u2018", "\"", ";", "'",
"(", ")", "'s'", "s", ":", ",", "."}
self.answer_regex = None
self.aliases = None
def set_question(self, normalized_aliases):
answer_regex = []
self.aliases = normalized_aliases
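        # Build one regex per alias token; tokens longer than one character get an optional trailing "s" so simple plurals also match.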
for answer in normalized_aliases:
tokens = []
for token in answer:
if len(token) > 1:
tokens.append(token + "s?")
else:
tokens.append(token)
if tokens[-1] == "s":
tokens[-1] = "s?"
answer_regex.append([re.compile(x, re.IGNORECASE) for x in tokens])
self.answer_regex = answer_regex
def any_found(self, para):
words = flatten_iterable(para)
occurances = []
for answer_ix, answer in enumerate(self.answer_regex):
word_starts = [i for i, w in enumerate(words) if answer[0].fullmatch(w)]
n_tokens = len(answer)
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token].match(next):
ans_token += 1
end += 1
elif next in self.skip:
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
def evaluate_question_detector(questions, corpus, word_tokenize, detector, reference_detector=None, compute_f1s=False):
""" Just for debugging """
n_no_docs = 0
answer_per_doc = []
answer_f1s = []
for question_ix, q in enumerate(tqdm(questions)):
tokenized_aliases = [word_tokenize(x) for x in q.answer.normalized_aliases]
detector.set_question(tokenized_aliases)
for doc in q.all_docs:
doc = corpus.get_document(doc.doc_id)
if doc is None:
n_no_docs += 1
continue
output = []
for i, para in enumerate(doc):
for s,e in detector.any_found(para):
output.append((i, s, e))
if len(output) == 0 and reference_detector is not None:
if reference_detector is not None:
reference_detector.set_question(tokenized_aliases)
detected = []
for i, para in enumerate(doc):
for s, e in reference_detector.any_found(para):
detected.append((i, s, e))
if len(detected) > 0:
print("Found a difference")
print(q.answer.normalized_aliases)
print(tokenized_aliases)
for p, s, e in detected:
token = flatten_iterable(doc[p])[s:e]
print(token)
answer_per_doc.append(output)
if compute_f1s:
f1s = []
for p, s, e in output:
token = flatten_iterable(doc[p])[s:e]
answer = normalize_answer(" ".join(token))
f1 = 0
for gt in q.answer.normalized_aliases:
f1 = max(f1, f1_score(answer, gt))
f1s.append(f1)
answer_f1s.append(f1s)
n_answers = sum(len(x) for x in answer_per_doc)
print("Found %d answers (av %.4f)" % (n_answers, n_answers/len(answer_per_doc)))
print("%.4f docs have answers" % np.mean([len(x) > 0 for x in answer_per_doc]))
if len(answer_f1s) > 0:
print("Average f1 is %.4f" % np.mean(flatten_iterable(answer_f1s)))
def compute_answer_spans(questions: List[TriviaQaQuestion], corpus, word_tokenize,
detector):
for i, q in enumerate(questions):
if i % 500 == 0:
print("Completed question %d of %d (%.3f)" % (i, len(questions), i/len(questions)))
q.question = word_tokenize(q.question)
if q.answer is None:
continue
tokenized_aliases = [word_tokenize(x) for x in q.answer.all_answers]
if len(tokenized_aliases) == 0:
raise ValueError()
detector.set_question(tokenized_aliases)
for doc in q.all_docs:
text = corpus.get_document(doc.doc_id)
if text is None:
raise ValueError()
spans = []
offset = 0
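            # Offsets accumulate the token counts of previous paragraphs so spans index into the flattened document.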
for para_ix, para in enumerate(text):
for s, e in detector.any_found(para):
spans.append((s+offset, e+offset-1)) # turn into inclusive span
offset += sum(len(s) for s in para)
if len(spans) == 0:
spans = np.zeros((0, 2), dtype=np.int32)
else:
spans = np.array(spans, dtype=np.int32)
doc.answer_spans = spans
def _compute_answer_spans_chunk(questions, corpus, tokenizer, detector):
# We use tokenize_paragraph since some questions can have multiple sentences,
# but we still store the results as a flat list of tokens
word_tokenize = tokenizer.tokenize_paragraph_flat
compute_answer_spans(questions, corpus, word_tokenize, detector)
return questions
def compute_answer_spans_par(questions: List[TriviaQaQuestion], corpus,
tokenizer, detector, n_processes: int):
if n_processes == 1:
word_tokenize = tokenizer.tokenize_paragraph_flat
compute_answer_spans(questions, corpus, word_tokenize, detector)
return questions
from multiprocessing import Pool
with Pool(n_processes) as p:
chunks = split(questions, n_processes)
questions = flatten_iterable(p.starmap(_compute_answer_spans_chunk,
[[c, corpus, tokenizer, detector] for c in chunks]))
return questions
def main():
from trivia_qa.build_span_corpus import TriviaQaWebDataset
from data_processing.text_utils import NltkAndPunctTokenizer
dataset = TriviaQaWebDataset()
qs = dataset.get_train()
qs = np.random.RandomState(0).choice(qs, 1000, replace=False)
evaluate_question_detector(qs, dataset.evidence, NltkAndPunctTokenizer().tokenize_paragraph_flat,
FastNormalizedAnswerDetector())
if __name__ == "__main__":
main()
|
[
"docqa.triviaqa.trivia_qa_eval.normalize_answer",
"docqa.utils.flatten_iterable",
"re.compile",
"tqdm.tqdm",
"trivia_qa.build_span_corpus.TriviaQaWebDataset",
"data_processing.text_utils.NltkAndPunctTokenizer",
"numpy.array",
"numpy.zeros",
"multiprocessing.Pool",
"docqa.triviaqa.trivia_qa_eval.f1_score",
"numpy.random.RandomState",
"docqa.utils.split"
] |
[((10899, 10919), 'trivia_qa.build_span_corpus.TriviaQaWebDataset', 'TriviaQaWebDataset', ([], {}), '()\n', (10917, 10919), False, 'from trivia_qa.build_span_corpus import TriviaQaWebDataset\n'), ((5407, 5429), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['para'], {}), '(para)\n', (5423, 5429), False, 'from docqa.utils import flatten_iterable, split\n'), ((6482, 6497), 'tqdm.tqdm', 'tqdm', (['questions'], {}), '(questions)\n', (6486, 6497), False, 'from tqdm import tqdm\n'), ((10470, 10487), 'multiprocessing.Pool', 'Pool', (['n_processes'], {}), '(n_processes)\n', (10474, 10487), False, 'from multiprocessing import Pool\n'), ((10511, 10540), 'docqa.utils.split', 'split', (['questions', 'n_processes'], {}), '(questions, n_processes)\n', (10516, 10540), False, 'from docqa.utils import flatten_iterable, split\n'), ((1755, 1774), 'docqa.triviaqa.trivia_qa_eval.normalize_answer', 'normalize_answer', (['w'], {}), '(w)\n', (1771, 1774), False, 'from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score\n'), ((10958, 10982), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (10979, 10982), True, 'import numpy as np\n'), ((11068, 11091), 'data_processing.text_utils.NltkAndPunctTokenizer', 'NltkAndPunctTokenizer', ([], {}), '()\n', (11089, 11091), False, 'from data_processing.text_utils import NltkAndPunctTokenizer\n'), ((613, 635), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['para'], {}), '(para)\n', (629, 635), False, 'from docqa.utils import flatten_iterable, split\n'), ((1784, 1806), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['para'], {}), '(para)\n', (1800, 1806), False, 'from docqa.utils import flatten_iterable, split\n'), ((3192, 3214), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['para'], {}), '(para)\n', (3208, 3214), False, 'from docqa.utils import flatten_iterable, split\n'), ((9593, 9625), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {'dtype': 'np.int32'}), '((0, 2), dtype=np.int32)\n', (9601, 9625), True, 'import numpy as np\n'), ((9668, 9699), 'numpy.array', 'np.array', (['spans'], {'dtype': 'np.int32'}), '(spans, dtype=np.int32)\n', (9676, 9699), True, 'import numpy as np\n'), ((5270, 5298), 're.compile', 're.compile', (['x', 're.IGNORECASE'], {}), '(x, re.IGNORECASE)\n', (5280, 5298), False, 'import re\n'), ((8498, 8526), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['answer_f1s'], {}), '(answer_f1s)\n', (8514, 8526), False, 'from docqa.utils import flatten_iterable, split\n'), ((7891, 7915), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['doc[p]'], {}), '(doc[p])\n', (7907, 7915), False, 'from docqa.utils import flatten_iterable, split\n'), ((8107, 8127), 'docqa.triviaqa.trivia_qa_eval.f1_score', 'f1_score', (['answer', 'gt'], {}), '(answer, gt)\n', (8115, 8127), False, 'from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score\n'), ((7656, 7680), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['doc[p]'], {}), '(doc[p])\n', (7672, 7680), False, 'from docqa.utils import flatten_iterable, split\n')]
|
import nifty.tools as nt
import numpy as np
import z5py
from elf.label_multiset import deserialize_multiset
from tqdm import trange
def check_serialization(mset1, mset2):
if len(mset1) != len(mset2):
print("Serialization sizes disagree:", len(mset1), len(mset2))
return False
if not np.array_equal(mset1, mset2):
disagree = (mset1 != mset2)
print("Serializations disagree for entries", disagree.sum(), "/", disagree.size)
return False
print("Check serialization passed")
return True
def check_multiset_members(mset1, mset2):
assert mset1.shape == mset2.shape
if mset1.n_elements != mset2.n_elements:
print("N-elements disagree:", mset1.n_elements, mset2.n_elements)
return False
amax1, amax2 = mset1.argmax, mset2.argmax
if not np.array_equal(amax1, amax2):
disagree = (amax1 != amax2)
print("Argmax disagree for entries", disagree.sum(), "/", disagree.size)
return False
off1, off2 = mset1.offsets, mset2.offsets
if not np.array_equal(off1, off2):
disagree = (off1 != off2)
print("Offsets disagree for entries", disagree.sum(), "/", disagree.size)
return False
id1, id2 = mset1.ids, mset2.ids
if not np.array_equal(id1, id2):
disagree = (id1 != id2)
print("Ids disagree for entries", disagree.sum(), "/", disagree.size)
return False
count1, count2 = mset1.counts, mset2.counts
if not np.array_equal(count1, count2):
disagree = (count1 != count2)
print("Counts disagree for entries", disagree.sum(), "/", disagree.size)
return False
print("Check members passed")
return True
def check_pixels(mset1, mset2, seg, scale, offset):
roi_end = mset1.shape
blocking = nt.blocking([0, 0, 0], roi_end, [1, 1, 1])
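    # Compare the two multisets voxel by voxel (block shape 1x1x1) and dump diagnostics on the first mismatch.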
for block_id in trange(blocking.numberOfBlocks):
block = blocking.getBlock(block_id)
bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
i1, c1 = mset1[bb]
i2, c2 = mset2[bb]
if not np.array_equal(i1, i2) or not np.array_equal(c1, c2):
print("Entries disagree for block", block_id, ":", bb)
print("Ids")
print("Res:", i1)
print("Exp:", i2)
print("Counts")
print("Res:", c1)
print("Exp:", c2)
print("From segmentation")
effective_bb = tuple(slice(b.start * sc + off, b.stop * sc + off) for b, sc, off in zip(bb, scale, offset))
print(effective_bb)
sub_seg = seg[effective_bb]
print(sub_seg)
sids, scounts = np.unique(sub_seg, return_counts=True)
print("Ids")
print(sids)
print("Counts")
print(scounts)
return False
print("Check pixels passed")
return True
def check_chunk(blocking, chunk_id, ds_mset1, ds_mset2, ds_seg, scale):
if isinstance(chunk_id, tuple):
bpd = blocking.blocksPerAxis
strides = [bpd[2] * bpd[1], bpd[2], 1]
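        # Flatten a (z, y, x) chunk index into a linear chunk id using C-order strides.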
chunk_id = sum([stride * cid for stride, cid in zip(strides, chunk_id)])
print(chunk_id)
block = blocking.getBlock(chunk_id)
chunk = tuple(beg // ch for beg, ch in zip(block.begin, blocking.blockShape))
mset1 = ds_mset1.read_chunk(chunk)
mset2 = ds_mset2.read_chunk(chunk)
    if check_serialization(mset1, mset2):
print("Multisets agree")
return
mset1 = deserialize_multiset(mset1, block.shape)
mset2 = deserialize_multiset(mset2, block.shape)
    if check_multiset_members(mset1, mset2):
print("Multisets agree")
return
ds_seg.n_threads = 8
seg = ds_seg[:]
offset = tuple(beg * sc for beg, sc in zip(block.begin, scale))
    if check_pixels(mset1, mset2, seg, scale, offset):
print("Multisets agree")
else:
print("Multisets disagree")
def check_multiset(level, chunk_id=0):
path = '/home/pape/Work/data/cremi/example/sampleA.n5'
seg_key = 'volumes/segmentation/multicut'
mset_key = 'paintera/data/s%i' % level
f = z5py.File(path)
ds_seg = f[seg_key]
ds_mset = f[mset_key]
path1 = '/home/pape/Work/data/cremi/example/sampleA_paintera.n5'
mset_key1 = 'volumes/segmentation/multicut/data/s%i' % level
f1 = z5py.File(path1)
ds_mset1 = f1[mset_key1]
assert ds_mset.shape == ds_mset1.shape
assert ds_mset.chunks == ds_mset1.chunks, "%s, %s" % (str(ds_mset.chunks),
str(ds_mset1.chunks))
shape, chunks = ds_mset.shape, ds_mset.chunks
ds_factor = ds_mset.attrs.get('downsamplingFactors', None)
ds_factor_exp = ds_mset1.attrs.get('downsamplingFactors', None)
assert ds_factor == ds_factor_exp
scale = [int(df) for df in ds_factor[::-1]]
print("Have scale", scale)
blocking = nt.blocking([0, 0, 0], shape, chunks)
check_chunk(blocking, chunk_id, ds_mset, ds_mset1, ds_seg, scale)
if __name__ == '__main__':
level = 1
# chunk_id = 0
chunk_id = (0, 2, 0)
check_multiset(level, chunk_id)
# print("Checking mult-sets for chunk 0 of scales:")
# for scale in range(5):
# print("Check scale", scale)
# check_multiset(scale)
|
[
"numpy.unique",
"elf.label_multiset.deserialize_multiset",
"nifty.tools.blocking",
"z5py.File",
"numpy.array_equal",
"tqdm.trange"
] |
[((1798, 1840), 'nifty.tools.blocking', 'nt.blocking', (['[0, 0, 0]', 'roi_end', '[1, 1, 1]'], {}), '([0, 0, 0], roi_end, [1, 1, 1])\n', (1809, 1840), True, 'import nifty.tools as nt\n'), ((1861, 1892), 'tqdm.trange', 'trange', (['blocking.numberOfBlocks'], {}), '(blocking.numberOfBlocks)\n', (1867, 1892), False, 'from tqdm import trange\n'), ((3493, 3533), 'elf.label_multiset.deserialize_multiset', 'deserialize_multiset', (['mset1', 'block.shape'], {}), '(mset1, block.shape)\n', (3513, 3533), False, 'from elf.label_multiset import deserialize_multiset\n'), ((3546, 3586), 'elf.label_multiset.deserialize_multiset', 'deserialize_multiset', (['mset2', 'block.shape'], {}), '(mset2, block.shape)\n', (3566, 3586), False, 'from elf.label_multiset import deserialize_multiset\n'), ((4129, 4144), 'z5py.File', 'z5py.File', (['path'], {}), '(path)\n', (4138, 4144), False, 'import z5py\n'), ((4339, 4355), 'z5py.File', 'z5py.File', (['path1'], {}), '(path1)\n', (4348, 4355), False, 'import z5py\n'), ((4902, 4939), 'nifty.tools.blocking', 'nt.blocking', (['[0, 0, 0]', 'shape', 'chunks'], {}), '([0, 0, 0], shape, chunks)\n', (4913, 4939), True, 'import nifty.tools as nt\n'), ((311, 339), 'numpy.array_equal', 'np.array_equal', (['mset1', 'mset2'], {}), '(mset1, mset2)\n', (325, 339), True, 'import numpy as np\n'), ((824, 852), 'numpy.array_equal', 'np.array_equal', (['amax1', 'amax2'], {}), '(amax1, amax2)\n', (838, 852), True, 'import numpy as np\n'), ((1050, 1076), 'numpy.array_equal', 'np.array_equal', (['off1', 'off2'], {}), '(off1, off2)\n', (1064, 1076), True, 'import numpy as np\n'), ((1263, 1287), 'numpy.array_equal', 'np.array_equal', (['id1', 'id2'], {}), '(id1, id2)\n', (1277, 1287), True, 'import numpy as np\n'), ((1480, 1510), 'numpy.array_equal', 'np.array_equal', (['count1', 'count2'], {}), '(count1, count2)\n', (1494, 1510), True, 'import numpy as np\n'), ((2669, 2707), 'numpy.unique', 'np.unique', (['sub_seg'], {'return_counts': '(True)'}), '(sub_seg, return_counts=True)\n', (2678, 2707), True, 'import numpy as np\n'), ((2088, 2110), 'numpy.array_equal', 'np.array_equal', (['i1', 'i2'], {}), '(i1, i2)\n', (2102, 2110), True, 'import numpy as np\n'), ((2118, 2140), 'numpy.array_equal', 'np.array_equal', (['c1', 'c2'], {}), '(c1, c2)\n', (2132, 2140), True, 'import numpy as np\n')]
|
import numpy as np
from pandas import read_csv
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from config import *
from lib.preprocess.read_data import DataReader
from lib.scaler.preprocessing_data.data_normalizer import DataNormalizer
class DataPreprocessor:
def __init__(self, metrics):
self.train_size = Config.TRAIN_SIZE
self.valid_size = Config.VALID_SIZE
self.train_data_type = metrics['train_data_type']
self.predict_data = metrics['predict_data']
self.google_trace_config = Config.GOOGLE_TRACE_DATA_CONFIG
self.read_data()
def read_data(self):
self.data = None
data_reader = DataReader()
official_data = data_reader.read()
self.x_data, self.y_data = self.create_x_y_data(official_data)
def create_x_y_data(self, official_data):
if Config.DATA_EXPERIMENT == 'google_trace':
# DEFINE X DATA
if self.train_data_type == 'cpu_mem':
x_data = [official_data['cpu'], official_data['mem']]
elif self.train_data_type == 'cpu':
x_data = [official_data['cpu']]
elif self.train_data_type == 'mem':
x_data = [official_data['mem']]
# DEFINE Y DATA
if self.predict_data == 'cpu':
y_data = official_data['cpu']
elif self.predict_data == 'mem':
y_data = official_data['mem']
else:
            print('|-> ERROR: Data type not supported')
return x_data, y_data
def create_timeseries(self, X):
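        # Combine the per-metric series column-wise into a single multivariate time series array.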
if len(X) > 1:
data = np.concatenate((X[0], X[1]), axis=1)
if(len(X) > 2):
for i in range(2, len(X), 1):
data = np.column_stack((data, X[i]))
else:
data = []
for i in range(len(X[0])):
data.append(X[0][i])
data = np.array(data)
return data
def create_x(self, timeseries, sliding):
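        # Build overlapping sliding windows: sample i contains timesteps i .. i + sliding - 1.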
dataX = []
for i in range(len(timeseries) - sliding):
datai = []
for j in range(sliding):
datai.append(timeseries[i + j])
dataX.append(datai)
return dataX
def init_data_lstm(self, sliding, scaler_method):
#print('>>> start init data for training LSTM model <<<')
data_normalizer = DataNormalizer(scaler_method)
x_timeseries, y_time_series, self.y_scaler = data_normalizer.normalize(
self.x_data, self.y_data)
num_points = x_timeseries.shape[0]
train_point = int(self.train_size * num_points)
x_sample = self.create_x(x_timeseries, sliding)
x_train = x_sample[0:train_point - sliding]
x_train = np.array(x_train)
x_test = x_sample[train_point - sliding:]
x_test = np.array(x_test)
y_train = y_time_series[sliding: train_point]
y_train = np.array(y_train)
y_test = self.y_data[train_point:]
y_test = np.array(y_test)
# print(x_train.shape, x_test.shape)
# print(y_train.shape, y_test.shape)
# print('>>> Init data for training model complete <<<')
return x_train, y_train, x_test, y_test, data_normalizer
def init_data_ann(self, sliding, scaler_method):
print('>>> start init data for training ANN model <<<')
data_normalizer = DataNormalizer(scaler_method)
x_timeseries, y_time_series, self.y_scaler = data_normalizer.normalize(
self.x_data, self.y_data)
num_points = x_timeseries.shape[0]
train_point = int(self.train_size * num_points)
x_sample = self.create_x(x_timeseries, sliding)
x_train = x_sample[0:train_point - sliding]
x_train = np.array(x_train)
x_train = np.reshape(
x_train, (x_train.shape[0], sliding * int(x_train.shape[2])))
x_test = x_sample[train_point - sliding:]
x_test = np.array(x_test)
x_test = np.reshape(
x_test, (x_test.shape[0], sliding * int(x_test.shape[2])))
y_train = y_time_series[sliding: train_point]
y_train = np.array(y_train)
y_test = self.y_data[train_point:]
y_test = np.array(y_test)
return x_train, y_train, x_test, y_test, data_normalizer
|
[
"lib.scaler.preprocessing_data.data_normalizer.DataNormalizer",
"lib.preprocess.read_data.DataReader",
"numpy.column_stack",
"numpy.array",
"numpy.concatenate"
] |
[((717, 729), 'lib.preprocess.read_data.DataReader', 'DataReader', ([], {}), '()\n', (727, 729), False, 'from lib.preprocess.read_data import DataReader\n'), ((2432, 2461), 'lib.scaler.preprocessing_data.data_normalizer.DataNormalizer', 'DataNormalizer', (['scaler_method'], {}), '(scaler_method)\n', (2446, 2461), False, 'from lib.scaler.preprocessing_data.data_normalizer import DataNormalizer\n'), ((2808, 2825), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (2816, 2825), True, 'import numpy as np\n'), ((2894, 2910), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (2902, 2910), True, 'import numpy as np\n'), ((2984, 3001), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2992, 3001), True, 'import numpy as np\n'), ((3063, 3079), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (3071, 3079), True, 'import numpy as np\n'), ((3447, 3476), 'lib.scaler.preprocessing_data.data_normalizer.DataNormalizer', 'DataNormalizer', (['scaler_method'], {}), '(scaler_method)\n', (3461, 3476), False, 'from lib.scaler.preprocessing_data.data_normalizer import DataNormalizer\n'), ((3823, 3840), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (3831, 3840), True, 'import numpy as np\n'), ((4014, 4030), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (4022, 4030), True, 'import numpy as np\n'), ((4204, 4221), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (4212, 4221), True, 'import numpy as np\n'), ((4283, 4299), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4291, 4299), True, 'import numpy as np\n'), ((1674, 1710), 'numpy.concatenate', 'np.concatenate', (['(X[0], X[1])'], {'axis': '(1)'}), '((X[0], X[1]), axis=1)\n', (1688, 1710), True, 'import numpy as np\n'), ((1973, 1987), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1981, 1987), True, 'import numpy as np\n'), ((1812, 1841), 'numpy.column_stack', 'np.column_stack', (['(data, X[i])'], {}), '((data, X[i]))\n', (1827, 1841), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Compare an image file and its associated uncertainty image.
#
# <NAME>
# Created: 2021-06-03
# Last modified: 2021-06-03
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Logging setup:
import logging
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
## Current version:
__version__ = "0.0.1"
## Python version-agnostic module reloading:
try:
reload # Python 2.7
except NameError:
try:
from importlib import reload # Python 3.4+
except ImportError:
from imp import reload # Python 3.0 - 3.3
## Modules:
#import argparse
#import shutil
import resource
import signal
#import glob
import gc
import os
import sys
import time
#import vaex
#import calendar
#import ephem
import numpy as np
#from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#import scipy.linalg as sla
#import scipy.signal as ssig
#import scipy.ndimage as ndi
#import scipy.optimize as opti
#import scipy.interpolate as stp
#import scipy.spatial.distance as ssd
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
#import matplotlib.ticker as mt
#import matplotlib._pylab_helpers as hlp
#from matplotlib.colors import LogNorm
#import matplotlib.colors as mplcolors
#import matplotlib.collections as mcoll
#import matplotlib.gridspec as gridspec
#from functools import partial
#from collections import OrderedDict
#from collections.abc import Iterable
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
#import pandas as pd
#import statsmodels.api as sm
#import statsmodels.formula.api as smf
#from statsmodels.regression.quantile_regression import QuantReg
#import PIL.Image as pli
#import seaborn as sns
#import cmocean
import theil_sen as ts
#import window_filter as wf
#import itertools as itt
_have_np_vers = float('.'.join(np.__version__.split('.')[:2]))
##--------------------------------------------------------------------------##
## Disable buffering on stdout/stderr:
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
## Home-brew robust statistics:
try:
import robust_stats
reload(robust_stats)
rs = robust_stats
except ImportError:
logger.error("module robust_stats not found! Install and retry.")
sys.stderr.write("\nError! robust_stats module not found!\n"
"Please install and try again ...\n\n")
sys.exit(1)
## Home-brew KDE:
#try:
# import my_kde
# reload(my_kde)
# mk = my_kde
#except ImportError:
# logger.error("module my_kde not found! Install and retry.")
# sys.stderr.write("\nError! my_kde module not found!\n"
# "Please install and try again ...\n\n")
# sys.exit(1)
## Fast FITS I/O:
#try:
# import fitsio
#except ImportError:
# logger.error("fitsio module not found! Install and retry.")
# sys.stderr.write("\nError: fitsio module not found!\n")
# sys.exit(1)
## Various from astropy:
try:
# import astropy.io.ascii as aia
import astropy.io.fits as pf
# import astropy.io.votable as av
# import astropy.table as apt
# import astropy.time as astt
# import astropy.wcs as awcs
# from astropy import constants as aconst
# from astropy import coordinates as coord
# from astropy import units as uu
except ImportError:
# logger.error("astropy module not found! Install and retry.")
sys.stderr.write("\nError: astropy module not found!\n")
sys.exit(1)
## Star extraction:
#try:
# import easy_sep
# reload(easy_sep)
#except ImportError:
# logger.error("easy_sep module not found! Install and retry.")
# sys.stderr.write("Error: easy_sep module not found!\n\n")
# sys.exit(1)
#pse = easy_sep.EasySEP()
##--------------------------------------------------------------------------##
## Colors for fancy terminal output:
NRED = '\033[0;31m' ; BRED = '\033[1;31m'
NGREEN = '\033[0;32m' ; BGREEN = '\033[1;32m'
NYELLOW = '\033[0;33m' ; BYELLOW = '\033[1;33m'
NBLUE = '\033[0;34m' ; BBLUE = '\033[1;34m'
NMAG = '\033[0;35m' ; BMAG = '\033[1;35m'
NCYAN = '\033[0;36m' ; BCYAN = '\033[1;36m'
NWHITE = '\033[0;37m' ; BWHITE = '\033[1;37m'
ENDC = '\033[0m'
## Suppress colors in cron jobs:
if (os.getenv('FUNCDEF') == '--nocolors'):
NRED = '' ; BRED = ''
NGREEN = '' ; BGREEN = ''
NYELLOW = '' ; BYELLOW = ''
NBLUE = '' ; BBLUE = ''
NMAG = '' ; BMAG = ''
NCYAN = '' ; BCYAN = ''
NWHITE = '' ; BWHITE = ''
ENDC = ''
## Fancy text:
degree_sign = u'\N{DEGREE SIGN}'
## Dividers:
halfdiv = '-' * 40
fulldiv = '-' * 80
##--------------------------------------------------------------------------##
## Save FITS image with clobber (astropy / pyfits):
#def qsave(iname, idata, header=None, padkeys=1000, **kwargs):
# this_func = sys._getframe().f_code.co_name
# parent_func = sys._getframe(1).f_code.co_name
# sys.stderr.write("Writing to '%s' ... " % iname)
# if header:
# while (len(header) < padkeys):
# header.append() # pad header
# if os.path.isfile(iname):
# os.remove(iname)
# pf.writeto(iname, idata, header=header, **kwargs)
# sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
## Save FITS image with clobber (fitsio):
#def qsave(iname, idata, header=None, **kwargs):
# this_func = sys._getframe().f_code.co_name
# parent_func = sys._getframe(1).f_code.co_name
# sys.stderr.write("Writing to '%s' ... " % iname)
# #if os.path.isfile(iname):
# # os.remove(iname)
# fitsio.write(iname, idata, clobber=True, header=header, **kwargs)
# sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
def ldmap(things):
return dict(zip(things, range(len(things))))
def argnear(vec, val):
return (np.abs(vec - val)).argmin()
##--------------------------------------------------------------------------##
## New-style string formatting (more at https://pyformat.info/):
#oldway = '%s %s' % ('one', 'two')
#newway = '{} {}'.format('one', 'two')
#oldway = '%d %d' % (1, 2)
#newway = '{} {}'.format(1, 2)
# With padding:
#oldway = '%10s' % ('test',) # right-justified
#newway = '{:>10}'.format('test') # right-justified
#oldway = '%-10s' % ('test',) # left-justified
#newway = '{:10}'.format('test') # left-justified
# Ordinally:
#newway = '{1} {0}'.format('one', 'two') # prints "two one"
# Dictionarily:
#newway = '{lastname}, {firstname}'.format(firstname='Rob', lastname='Siverd')
# Centered (new-only):
#newctr = '{:^10}'.format('test') # prints " test "
# Numbers:
#oldway = '%06.2f' % (3.141592653589793,)
#newway = '{:06.2f}'.format(3.141592653589793)
##--------------------------------------------------------------------------##
## Quick ASCII I/O:
#data_file = 'data.txt'
#gftkw = {'encoding':None} if (_have_np_vers >= 1.14) else {}
#gftkw.update({'names':True, 'autostrip':True})
#gftkw.update({'delimiter':'|', 'comments':'%0%0%0%0'})
#gftkw.update({'loose':True, 'invalid_raise':False})
#all_data = np.genfromtxt(data_file, dtype=None, **gftkw)
#all_data = aia.read(data_file)
#all_data = pd.read_csv(data_file)
#all_data = pd.read_table(data_file, delim_whitespace=True)
#all_data = pd.read_table(data_file, skipinitialspace=True)
#all_data = pd.read_table(data_file, sep='|')
#fields = all_data.dtype.names
#if not fields:
# x = all_data[:, 0]
# y = all_data[:, 1]
#else:
# x = all_data[fields[0]]
# y = all_data[fields[1]]
#vot_file = 'neato.xml'
#vot_data = av.parse_single_table(vot_file)
#vot_data = av.parse_single_table(vot_file).to_table()
##--------------------------------------------------------------------------##
## Quick FITS I/O:
ifile = 'SPITZER_I2_44772864_0004_0000_2_cbcd.fits'
ufile = 'SPITZER_I2_44772864_0004_0000_2_cbunc.fits'
idata, ihdrs = pf.getdata(ifile, header=True)
udata, uhdrs = pf.getdata(ufile, header=True)
gain = ihdrs['GAIN']
exptime = ihdrs['EXPTIME']
fluxconv = ihdrs['FLUXCONV']
ignore = np.isnan(idata) | np.isnan(udata)
isafe = idata[~ignore]
usafe = udata[~ignore]
ignore = (isafe <= 0.0)
iclean = isafe[~ignore]
uclean = usafe[~ignore]
ui_ratio = uclean / iclean
## Try to reproduce the idata:udata relationship ...
icounts = iclean / fluxconv * exptime * gain # in electrons
ucounts = uclean / fluxconv * exptime * gain # in electrons
#icounts -= np.median(icounts)
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
## Estimate icounts:ucounts relationship from bright pixels:
cutoff = 1e3
bright = (icounts >= cutoff)
ic_fit = icounts[bright]
uc_fit = ucounts[bright]
vc_fit = uc_fit**2
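## Theil-Sen straight-line fit: variance ~= intercept + slope * counts (both already in electrons).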
sys.stderr.write("Fitting variance(counts) for bright pixels ... ")
model = ts.linefit(ic_fit, vc_fit)
sys.stderr.write("done.\n")
#model = np.array([375., 1.05])
## A line for plotting:
pcounts = np.linspace(0.1, 3e4, 1000)
pcounts = np.logspace(-1.0, 4.5, 1000)
pvarian = model[0] + model[1] * pcounts
##--------------------------------------------------------------------------##
## Theil-Sen line-fitting (linear):
#model = ts.linefit(xvals, yvals)
#icept, slope = ts.linefit(xvals, yvals)
## Theil-Sen line-fitting (loglog):
#xvals, yvals = np.log10(original_xvals), np.log10(original_yvals)
#xvals, yvals = np.log10(df['x'].values), np.log10(df['y'].values)
#llmodel = ts.linefit(np.log10(xvals), np.log10(yvals))
#icept, slope = ts.linefit(xvals, yvals)
#fit_exponent = slope
#fit_multiplier = 10**icept
#bestfit_x = np.arange(5000)
#bestfit_y = fit_multiplier * bestfit_x**fit_exponent
## Log-log evaluator:
#def loglog_eval(xvals, model):
# icept, slope = model
# return 10**icept * xvals**slope
#def loglog_eval(xvals, icept, slope):
# return 10**icept * xvals**slope
##--------------------------------------------------------------------------##
## Plot config:
# gridspec examples:
# https://matplotlib.org/users/gridspec.html
#gs1 = gridspec.GridSpec(4, 4)
#gs1.update(wspace=0.025, hspace=0.05) # set axis spacing
#ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=3) # top-left + center + right
#ax2 = plt.subplot2grid((3, 3), (1, 0), colspan=2) # mid-left + mid-center
#ax3 = plt.subplot2grid((3, 3), (1, 2), rowspan=2) # mid-right + bot-right
#ax4 = plt.subplot2grid((3, 3), (2, 0)) # bot-left
#ax5 = plt.subplot2grid((3, 3), (2, 1)) # bot-center
##--------------------------------------------------------------------------##
#plt.style.use('bmh') # Bayesian Methods for Hackers style
fig_dims = (12, 10)
fig = plt.figure(1, figsize=fig_dims)
plt.gcf().clf()
#fig, axs = plt.subplots(2, 2, sharex=True, figsize=fig_dims, num=1)
# sharex='col' | sharex='row'
#fig.frameon = False # disable figure frame drawing
#fig.subplots_adjust(left=0.07, right=0.95)
#ax1 = plt.subplot(gs[0, 0])
ax1 = fig.add_subplot(111)
#ax1 = fig.add_axes([0, 0, 1, 1])
#ax1.patch.set_facecolor((0.8, 0.8, 0.8))
#ax1.grid(True)
#ax1.axis('off')
ax1.grid(True)
#ax1.scatter(iclean, uclean, lw=0, s=5)
ax1.scatter(icounts, ucounts**2, lw=0, s=5)
ax1.plot(pcounts, pvarian, c='r')
ax1.set_yscale('log')
ax1.set_xscale('log')
plot_name = 'gain_log.png'
fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()
fig.savefig(plot_name, bbox_inches='tight')
ax1.set_xscale('linear')
ax1.set_yscale('linear')
plot_name = 'gain_lin.png'
fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()
fig.savefig(plot_name, bbox_inches='tight')
## Disable axis offsets:
#ax1.xaxis.get_major_formatter().set_useOffset(False)
#ax1.yaxis.get_major_formatter().set_useOffset(False)
#ax1.plot(kde_pnts, kde_vals)
#blurb = "some text"
#ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes)
#ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes,
# va='top', ha='left', bbox=dict(facecolor='white', pad=10.0))
# fontdict={'family':'monospace'}) # fixed-width
#colors = cm.rainbow(np.linspace(0, 1, len(plot_list)))
#for camid, c in zip(plot_list, colors):
# cam_data = subsets[camid]
# xvalue = cam_data['CCDATEMP']
# yvalue = cam_data['PIX_MED']
# yvalue = cam_data['IMEAN']
# ax1.scatter(xvalue, yvalue, color=c, lw=0, label=camid)
#mtickpos = [2,5,7]
#ndecades = 1.0 # for symlog, set width of linear portion in units of dex
#nonposx='mask' | nonposx='clip' | nonposy='mask' | nonposy='clip'
#ax1.set_xscale('log', basex=10, nonposx='mask', subsx=mtickpos)
#ax1.set_xscale('log', nonposx='clip', subsx=[3])
#ax1.set_yscale('symlog', basey=10, linthreshy=0.1, linscaley=ndecades)
#ax1.xaxis.set_major_formatter(formatter) # re-format x ticks
#ax1.set_ylim(ax1.get_ylim()[::-1])
#ax1.set_xlabel('whatever', labelpad=30) # push X label down
#ax1.set_xticks([1.0, 3.0, 10.0, 30.0, 100.0])
#ax1.set_xticks([1, 2, 3], ['Jan', 'Feb', 'Mar'])
#for label in ax1.get_xticklabels():
# label.set_rotation(30)
# label.set_fontsize(14)
#ax1.xaxis.label.set_fontsize(18)
#ax1.yaxis.label.set_fontsize(18)
#ax1.set_xlim(nice_limits(xvec, pctiles=[1,99], pad=1.2))
#ax1.set_ylim(nice_limits(yvec, pctiles=[1,99], pad=1.2))
#spts = ax1.scatter(x, y, lw=0, s=5)
##cbar = fig.colorbar(spts, orientation='vertical') # old way
#cbnorm = mplcolors.Normalize(*spts.get_clim())
#scm = plt.cm.ScalarMappable(norm=cbnorm, cmap=spts.cmap)
#scm.set_array([])
#cbar = fig.colorbar(scm, orientation='vertical')
#cbar = fig.colorbar(scm, ticks=cs.levels, orientation='vertical') # contours
#cbar.formatter.set_useOffset(False)
#cbar.update_ticks()
fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()
#fig.savefig(plot_name, bbox_inches='tight')
######################################################################
# CHANGELOG (compare_images.py):
#---------------------------------------------------------------------
#
# 2021-06-03:
# -- Increased __version__ to 0.0.1.
# -- First created compare_images.py.
#
|
[
"logging.basicConfig",
"logging.getLogger",
"numpy.__version__.split",
"numpy.abs",
"os.getenv",
"matplotlib.pyplot.gcf",
"theil_sen.linefit",
"imp.reload",
"sys.stderr.write",
"astropy.io.fits.getdata",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.isnan",
"sys.exit",
"matplotlib.pyplot.draw",
"numpy.logspace"
] |
[((514, 553), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (533, 553), False, 'import logging\n'), ((563, 590), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (580, 590), False, 'import logging\n'), ((8752, 8782), 'astropy.io.fits.getdata', 'pf.getdata', (['ifile'], {'header': '(True)'}), '(ifile, header=True)\n', (8762, 8782), True, 'import astropy.io.fits as pf\n'), ((8798, 8828), 'astropy.io.fits.getdata', 'pf.getdata', (['ufile'], {'header': '(True)'}), '(ufile, header=True)\n', (8808, 8828), True, 'import astropy.io.fits as pf\n'), ((9644, 9711), 'sys.stderr.write', 'sys.stderr.write', (['"""Fitting variance(counts) for bright pixels ... """'], {}), "('Fitting variance(counts) for bright pixels ... ')\n", (9660, 9711), False, 'import sys\n'), ((9720, 9746), 'theil_sen.linefit', 'ts.linefit', (['ic_fit', 'vc_fit'], {}), '(ic_fit, vc_fit)\n', (9730, 9746), True, 'import theil_sen as ts\n'), ((9747, 9774), 'sys.stderr.write', 'sys.stderr.write', (['"""done.\n"""'], {}), "('done.\\n')\n", (9763, 9774), False, 'import sys\n'), ((9842, 9873), 'numpy.linspace', 'np.linspace', (['(0.1)', '(30000.0)', '(1000)'], {}), '(0.1, 30000.0, 1000)\n', (9853, 9873), True, 'import numpy as np\n'), ((9880, 9908), 'numpy.logspace', 'np.logspace', (['(-1.0)', '(4.5)', '(1000)'], {}), '(-1.0, 4.5, 1000)\n', (9891, 9908), True, 'import numpy as np\n'), ((11513, 11544), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'fig_dims'}), '(1, figsize=fig_dims)\n', (11523, 11544), True, 'import matplotlib.pyplot as plt\n'), ((12193, 12203), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (12201, 12203), True, 'import matplotlib.pyplot as plt\n'), ((12392, 12402), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (12400, 12402), True, 'import matplotlib.pyplot as plt\n'), ((14521, 14531), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (14529, 14531), True, 'import matplotlib.pyplot as plt\n'), ((2951, 2971), 'imp.reload', 'reload', (['robust_stats'], {}), '(robust_stats)\n', (2957, 2971), False, 'from imp import reload\n'), ((5049, 5069), 'os.getenv', 'os.getenv', (['"""FUNCDEF"""'], {}), "('FUNCDEF')\n", (5058, 5069), False, 'import os\n'), ((8917, 8932), 'numpy.isnan', 'np.isnan', (['idata'], {}), '(idata)\n', (8925, 8932), True, 'import numpy as np\n'), ((8935, 8950), 'numpy.isnan', 'np.isnan', (['udata'], {}), '(udata)\n', (8943, 8950), True, 'import numpy as np\n'), ((3089, 3197), 'sys.stderr.write', 'sys.stderr.write', (['"""\nError! robust_stats module not found!\nPlease install and try again ...\n\n"""'], {}), '(\n """\nError! 
robust_stats module not found!\nPlease install and try again ...\n\n"""\n )\n', (3105, 3197), False, 'import sys\n'), ((3206, 3217), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3214, 3217), False, 'import sys\n'), ((4179, 4237), 'sys.stderr.write', 'sys.stderr.write', (['"""\nError: astropy module not found!\n"""'], {}), '("""\nError: astropy module not found!\n""")\n', (4195, 4237), False, 'import sys\n'), ((4240, 4251), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4248, 4251), False, 'import sys\n'), ((11545, 11554), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11552, 11554), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2284), 'numpy.__version__.split', 'np.__version__.split', (['"""."""'], {}), "('.')\n", (2279, 2284), True, 'import numpy as np\n'), ((6713, 6730), 'numpy.abs', 'np.abs', (['(vec - val)'], {}), '(vec - val)\n', (6719, 6730), True, 'import numpy as np\n')]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import numpy as np
import unittest
import paddle
import paddle.nn as nn
class SimpleReturnLayer(nn.Layer):
def forward(self, x):
return x
class AddAttrLayer(nn.Layer):
def __init__(self):
super(AddAttrLayer, self).__init__()
self.attr = None
def forward(self, x):
out = x + self.attr
return out
class IsInstanceLayer(nn.Layer):
def __init__(self, layer):
super(IsInstanceLayer, self).__init__()
self.layer = layer
@paddle.jit.to_static
def forward(self, x):
if isinstance(self.layer, (AddAttrLayer, )):
self.layer.attr = x
res = self.layer(x)
return res
class SequentialLayer(nn.Layer):
def __init__(self, layers):
super(SequentialLayer, self).__init__()
self.layers = nn.LayerList(layers)
@paddle.jit.to_static
def forward(self, x):
res = x
for layer in self.layers:
if isinstance(layer, AddAttrLayer):
layer.attr = x
res = layer(res)
return res
def train(model, to_static):
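    # Toggle dynamic-to-static translation, run a single forward pass on a constant input and return the result as numpy.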
prog_trans = paddle.jit.ProgramTranslator.get_instance()
prog_trans.enable(to_static)
x = paddle.ones(shape=[2, 3], dtype='int32')
out = model(x)
return out.numpy()
class TestIsinstance(unittest.TestCase):
def test_isinstance_simple_return_layer(self):
model = IsInstanceLayer(SimpleReturnLayer())
self._test_model(model)
def test_isinstance_add_attr_layer(self):
model = IsInstanceLayer(AddAttrLayer())
self._test_model(model)
def test_sequential_layer(self):
layers = []
for i in range(5):
layers.append(SimpleReturnLayer())
layers.append(AddAttrLayer())
model = SequentialLayer(layers)
self._test_model(model)
def _test_model(self, model):
st_out = train(model, to_static=True)
dy_out = train(model, to_static=False)
self.assertTrue(
np.allclose(dy_out, st_out),
msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out))
if __name__ == "__main__":
unittest.main()
|
[
"numpy.allclose",
"paddle.nn.LayerList",
"paddle.jit.ProgramTranslator.get_instance",
"paddle.ones",
"unittest.main"
] |
[((2204, 2247), 'paddle.jit.ProgramTranslator.get_instance', 'paddle.jit.ProgramTranslator.get_instance', ([], {}), '()\n', (2245, 2247), False, 'import paddle\n'), ((2290, 2330), 'paddle.ones', 'paddle.ones', ([], {'shape': '[2, 3]', 'dtype': '"""int32"""'}), "(shape=[2, 3], dtype='int32')\n", (2301, 2330), False, 'import paddle\n'), ((3222, 3237), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3235, 3237), False, 'import unittest\n'), ((1905, 1925), 'paddle.nn.LayerList', 'nn.LayerList', (['layers'], {}), '(layers)\n', (1917, 1925), True, 'import paddle.nn as nn\n'), ((3091, 3118), 'numpy.allclose', 'np.allclose', (['dy_out', 'st_out'], {}), '(dy_out, st_out)\n', (3102, 3118), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""ROOT constants used in deserialization."""
import numpy
# used in unmarshaling
kByteCountMask = numpy.int64(0x40000000)
kByteCountVMask = numpy.int64(0x4000)
kClassMask = numpy.int64(0x80000000)
kNewClassTag = numpy.int64(0xFFFFFFFF)
kIsOnHeap = numpy.uint32(0x01000000)
kIsReferenced = numpy.uint32(1 << 4)
kMapOffset = 2
# not used?
kNullTag = 0
kNotDeleted = 0x02000000
kZombie = 0x04000000
kBitMask = 0x00ffffff
kDisplacementMask = 0xFF000000
################################################################ core/zip/inc/Compression.h
kZLIB = 1
kLZMA = 2
kOldCompressionAlgo = 3
kLZ4 = 4
kUndefinedCompressionAlgorithm = 5
################################################################ constants for streamers
kBase = 0
kChar = 1
kShort = 2
kInt = 3
kLong = 4
kFloat = 5
kCounter = 6
kCharStar = 7
kDouble = 8
kDouble32 = 9
kLegacyChar = 10
kUChar = 11
kUShort = 12
kUInt = 13
kULong = 14
kBits = 15
kLong64 = 16
kULong64 = 17
kBool = 18
kFloat16 = 19
kOffsetL = 20
kOffsetP = 40
kObject = 61
kAny = 62
kObjectp = 63
kObjectP = 64
kTString = 65
kTObject = 66
kTNamed = 67
kAnyp = 68
kAnyP = 69
kAnyPnoVT = 70
kSTLp = 71
kSkip = 100
kSkipL = 120
kSkipP = 140
kConv = 200
kConvL = 220
kConvP = 240
kSTL = 300
kSTLstring = 365
kStreamer = 500
kStreamLoop = 501
################################################################ constants from core/foundation/inc/ESTLType.h
kNotSTL = 0
kSTLvector = 1
kSTLlist = 2
kSTLdeque = 3
kSTLmap = 4
kSTLmultimap = 5
kSTLset = 6
kSTLmultiset = 7
kSTLbitset = 8
kSTLforwardlist = 9
kSTLunorderedset = 10
kSTLunorderedmultiset = 11
kSTLunorderedmap = 12
kSTLunorderedmultimap = 13
kSTLend = 14
kSTLany = 300
################################################################ IOFeatures
kGenerateOffsetMap = 1
|
[
"numpy.int64",
"numpy.uint32"
] |
[((1673, 1696), 'numpy.int64', 'numpy.int64', (['(1073741824)'], {}), '(1073741824)\n', (1684, 1696), False, 'import numpy\n'), ((1721, 1739), 'numpy.int64', 'numpy.int64', (['(16384)'], {}), '(16384)\n', (1732, 1739), False, 'import numpy\n'), ((1765, 1788), 'numpy.int64', 'numpy.int64', (['(2147483648)'], {}), '(2147483648)\n', (1776, 1788), False, 'import numpy\n'), ((1813, 1836), 'numpy.int64', 'numpy.int64', (['(4294967295)'], {}), '(4294967295)\n', (1824, 1836), False, 'import numpy\n'), ((1862, 1884), 'numpy.uint32', 'numpy.uint32', (['(16777216)'], {}), '(16777216)\n', (1874, 1884), False, 'import numpy\n'), ((1911, 1931), 'numpy.uint32', 'numpy.uint32', (['(1 << 4)'], {}), '(1 << 4)\n', (1923, 1931), False, 'import numpy\n')]
|
import itertools
import os
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
def _get_steps():
hdf_subdir = "augmentation/"
steps = {"step_name": ["prototypical", "single_sources", "mixtures"]}
steps_df = pd.DataFrame(steps)
steps_df["hdf_path"] = hdf_subdir + steps_df["step_name"]
# Impose order on the augmentation steps:
steps_df["step_name"] = pd.Categorical(
steps_df["step_name"], ["prototypical", "single_sources", "mixtures"]
)
steps_df.sort_values("step_name", inplace=True, ignore_index=True)
return steps_df
def prototypical_spectrum(dataset, source_df):
"""Weighted average of calibration spectra with randomly assigned weights
between 0 and 1.
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
source_df (pandas.DataFrame): Calibration information for a single source.
Returns:
pandas.DataFrame: A prototypical Excitation Emission Matrix for a single source.
"""
aug_steps_df = _get_steps()
source_name = source_df.index.get_level_values("source").unique().item()
source_units = source_df.index.get_level_values("source_units").unique().item()
intensity_units = (
source_df.index.get_level_values("intensity_units").unique().item()
)
proto_eems = []
for index, row in source_df[source_df["prototypical_sample"]].iterrows():
eem_path = row["hdf_path"]
eem = pd.read_hdf(dataset.hdf, key=eem_path)
proto_eems.append(eem)
# TODO - IMPORTANT: This can't just be the mean of the prototypical samples...
# Need to use the same weighted average as the intensity values!
proto_concentration = source_df[source_df["prototypical_sample"]][
"concentration"
].mean()
"""
weights = []
for i in range(len(proto_eems)):
weights.append(random.uniform(0, 1))
proto_eem = np.average([eem.values for eem in proto_eems], axis=0, weights=weights)
"""
proto_eem = np.average([eem.values for eem in proto_eems], axis=0)
proto_eem = pd.DataFrame(
data=proto_eem, index=proto_eems[0].index, columns=proto_eems[0].columns
)
proto_eem.index.name = "emission_wavelength"
hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][
"hdf_path"
].item()
hdf_path = os.path.join(hdf_path, source_name)
new_indices = np.array(
["source", "proto_conc", "source_units", "intensity_units", "hdf_path"]
)
proto_eem = proto_eem.assign(
**{
"source": source_name,
"proto_conc": proto_concentration,
"source_units": source_units,
"intensity_units": intensity_units,
"hdf_path": hdf_path,
}
)
proto_eem.set_index(new_indices.tolist(), append=True, inplace=True)
new_indices = np.append(new_indices, ("emission_wavelength"))
proto_eem = proto_eem.reorder_levels(new_indices)
proto_eem.to_hdf(dataset.hdf, key=hdf_path)
return proto_eem
def create_prototypical_spectra(dataset, cal_df):
"""Creates a protoypical spectrum for each calibration source in the PyEEM
dataset.
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
cal_df (pandas.DataFrame): Calibration information for your dataset
returned from :meth:`pyeem.preprocessing.calibration()`
Returns:
pandas.DataFrame: A table describing the prototypical spectra and their
paths within the HDF5 store.
"""
results_rows = []
for source_name, group in cal_df.groupby(level="source", as_index=False):
proto_eem_df = prototypical_spectrum(dataset, group)
new_indices = proto_eem_df.index.droplevel("emission_wavelength").unique()
result = dict(zip(list(new_indices.names), list(new_indices.item())))
results_rows.append(result)
results_df = pd.DataFrame(results_rows)
results_index = "source"
results_df.set_index(results_index, inplace=True)
return results_df
def single_source(dataset, source_df, conc_range, num_spectra):
"""Creates augmented single source spectra for a single calibration source.
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
source_df (pandas.DataFrame): Calibration information for a single source.
conc_range (tuple of (int, float)): The concentration range which the
augmented single source spectra will occupy.
num_spectra (int): The number of augmented single source spectra to create.
Returns:
pandas.DataFrame: A table describing the source's augmented spectra and their
paths within the HDF5 store.
"""
aug_steps_df = _get_steps()
# Get the source's name
source_name = source_df.index.get_level_values("source").unique().item()
# Get the HDF5 path to the source's prototypical EEM
proto_hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][
"hdf_path"
].item()
proto_hdf_path = os.path.join(proto_hdf_path, source_name)
# Read in the prototypical EEM
proto_eem = pd.read_hdf(dataset.hdf, key=proto_hdf_path)
# Get the source's prototypical concentration
proto_concentration = proto_eem.index.get_level_values("proto_conc").unique().item()
# Remove the concentration index from the dataframe
proto_eem.reset_index(level=["proto_conc"], drop=True, inplace=True)
# Get the slope and intercept of the source's calibration function
slope = source_df.index.get_level_values("slope").unique().item()
y_intercept = source_df.index.get_level_values("intercept").unique().item()
"""
slope = (
cal_df.xs(source_name, level="source")
.index.get_level_values("slope")
.unique()
.item()
)
y_intercept = (
cal_df.xs(source_name, level="source")
.index.get_level_values("intercept")
.unique()
.item()
)
"""
# Generate the 1D polynomial
cal_func = np.poly1d([slope, y_intercept])
    # Generate the concentration range from the conc_range and num_spectra arguments
concentration_range = np.linspace(conc_range[0], conc_range[1], num=num_spectra)
# Create a new HDF5 path for the single source spectra
hdf_path = aug_steps_df[aug_steps_df["step_name"] == "single_sources"][
"hdf_path"
].item()
hdf_path = os.path.join(hdf_path, source_name)
    # aug_ss_dfs: a list to which we iteratively append one single source spectrum
    # per concentration in the concentration range; the list of DataFrames is then
    # combined into a single DataFrame with concat()
aug_ss_dfs = []
sources = list(dataset.calibration_sources)
for new_concentration in concentration_range:
scalar = cal_func(new_concentration) / cal_func(proto_concentration)
ss_eem = proto_eem * scalar
# Make sure there are no negative values
ss_eem.clip(lower=0, inplace=True)
label = np.zeros(len(sources))
source_index = sources.index(source_name)
label[source_index] = new_concentration
ss_eem.index.name = "emission_wavelength"
ss_eem = ss_eem.assign(**dict(zip(sources, label)))
new_indices = sources
ss_eem.set_index(new_indices, append=True, inplace=True)
new_indices = [
"source",
"source_units",
"intensity_units",
"hdf_path",
] + new_indices
new_indices.append("emission_wavelength")
ss_eem = ss_eem.reorder_levels(new_indices)
ss_eem.rename(index={proto_hdf_path: hdf_path}, inplace=True)
aug_ss_dfs.append(ss_eem)
aug_ss_df = pd.concat(aug_ss_dfs)
aug_ss_df.to_hdf(dataset.hdf, key=hdf_path)
return aug_ss_df
def create_single_source_spectra(dataset, cal_df, conc_range, num_spectra):
"""Creates augmented single source spectra for each calibration source in the
PyEEM dataset.
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
cal_df (pandas.DataFrame): Calibration information for your dataset
returned from :meth:`pyeem.preprocessing.calibration()`
conc_range (tuple of (int, float)): The concentration range which the
augmented single source spectra will occupy.
num_spectra (int): The number of augmented single source spectra for each
calibration source.
Returns:
pandas.DataFrame: A table describing the augmented single source spectra
and their paths within the HDF5 store.
"""
aug_ss_dfs = []
for source_name, group in tqdm(cal_df.groupby(level="source", as_index=False)):
ss_df = single_source(
dataset, group, conc_range=conc_range, num_spectra=num_spectra
)
ss_df = (
ss_df.index.droplevel(["emission_wavelength"])
.unique()
.to_frame()
.reset_index(drop=True)
)
ss_df.set_index(
["source", "source_units", "intensity_units", "hdf_path"], inplace=True
)
aug_ss_dfs.append(ss_df)
aug_ss_df = pd.concat(aug_ss_dfs)
return aug_ss_df
"""
def mixture():
return
"""
def create_mixture_spectra(dataset, cal_df, conc_range, num_steps, scale="logarithmic"):
"""Creates augmented mixture spectra by summing together augmented single source spectra.
    The number of augmented mixtures created equals the size of the Cartesian
    product of the concentration grid taken over all calibration sources
    (num_steps ** number of sources).
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
cal_df (pandas.DataFrame): Calibration information for your dataset
returned from :meth:`pyeem.preprocessing.calibration()`
conc_range (tuple of (int, float)): The concentration range which the
augmented spectra mixtures will occupy.
num_steps (int): The number of intervals within the concentration range.
scale (str, optional): Determines how the concentrations will be spaced along
the given concentration range. Options are "linear" and "logarithmic". Defaults to "logarithmic".
Raises:
Exception: Raised if calibration sources are reported in different units.
        ValueError: Raised if the scale argument is a value other than "linear" or "logarithmic".
Returns:
pandas.DataFrame: A table describing the augmented mixture spectra
and their paths within the HDF5 store.
"""
if cal_df.index.get_level_values("source_units").nunique() != 1:
raise Exception(
"Sources must be reported in the same units in order create augmented mixtures."
)
sources = cal_df.index.get_level_values(level="source").unique().to_list()
source_units = cal_df.index.get_level_values("source_units").unique().item()
intensity_units = (
cal_df.index.get_level_values(level="intensity_units").unique().item()
)
aug_steps_df = _get_steps()
hdf_path = aug_steps_df[aug_steps_df["step_name"] == "mixtures"]["hdf_path"].item()
proto_spectra = []
for source_name, group in cal_df.groupby(level="source", as_index=False):
# Get the HDF5 path to the source's prototypical EEM
proto_hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][
"hdf_path"
].item()
proto_hdf_path = os.path.join(proto_hdf_path, source_name)
# Read in the prototypical EEM
proto_eem = pd.read_hdf(dataset.hdf, key=proto_hdf_path)
proto_spectra.append(proto_eem)
proto_eem_df = pd.concat(proto_spectra)
if scale == "logarithmic":
number_range = np.geomspace(conc_range[0], conc_range[1], num=num_steps)
elif scale == "linear":
number_range = np.linspace(conc_range[0], conc_range[1], num=num_steps)
else:
raise ValueError("scale must be 'logarithmic' or 'linear'")
cartesian_product = [
p for p in itertools.product(number_range.tolist(), repeat=len(sources))
]
aug = []
for conc_set in tqdm(cartesian_product, desc="Creating Augmented Mixtures"):
mix = []
# TODO - it'd be a good idea to break this out into another function.
# Call it mixture() -- returns a single mixture EEM
for index, label in enumerate(zip(sources, conc_set)):
source_name = label[0]
new_concentration = label[1]
slope = (
cal_df.xs(source_name, level="source")
.index.get_level_values("slope")
.unique()
.item()
)
y_intercept = (
cal_df.xs(source_name, level="source")
.index.get_level_values("intercept")
.unique()
.item()
)
cal_func = np.poly1d([slope, y_intercept])
proto_eem = proto_eem_df.xs(source_name, level="source", drop_level=False)
proto_concentration = (
proto_eem.index.get_level_values("proto_conc").unique().item()
)
proto_eem.reset_index(level=["proto_conc"], drop=True, inplace=True)
scalar = cal_func(new_concentration) / cal_func(proto_concentration)
new_eem = proto_eem * scalar
# Make sure there are no negative values
new_eem.clip(lower=0, inplace=True)
mix.append(new_eem)
mix_eem = pd.concat(mix).sum(level="emission_wavelength")
mix_eem = mix_eem.assign(**dict(zip(sources, conc_set)))
mix_eem["hdf_path"] = hdf_path
mix_eem["source"] = "mixture"
mix_eem["source_units"] = source_units
mix_eem["intensity_units"] = intensity_units
new_indices = [
"source",
"source_units",
"intensity_units",
"hdf_path",
] + sources
mix_eem.set_index(new_indices, append=True, inplace=True)
new_indices = np.append(new_indices, ("emission_wavelength"))
mix_eem = mix_eem.reorder_levels(new_indices)
aug.append(mix_eem)
aug_mix_df = pd.concat(aug)
aug_mix_df.to_hdf(dataset.hdf, key=hdf_path)
aug_mix_df = (
aug_mix_df.index.droplevel(["emission_wavelength"])
.unique()
.to_frame()
.reset_index(drop=True)
)
aug_mix_df.set_index(
["source", "source_units", "intensity_units", "hdf_path"], inplace=True
)
return aug_mix_df
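# Illustrative sketch (hypothetical source names, not called anywhere in this
# module): the Cartesian product above means the number of augmented mixtures
# grows as num_steps ** number_of_sources.
def _example_mixture_grid(sources=("sourceA", "sourceB"), conc_range=(0.01, 1.0), num_steps=3):
    grid = np.geomspace(conc_range[0], conc_range[1], num=num_steps)
    combos = list(itertools.product(grid.tolist(), repeat=len(sources)))
    assert len(combos) == num_steps ** len(sources)
    return combos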
|
[
"numpy.average",
"tqdm.tqdm",
"os.path.join",
"pandas.Categorical",
"numpy.append",
"numpy.array",
"numpy.linspace",
"numpy.geomspace",
"pandas.DataFrame",
"pandas.concat",
"numpy.poly1d",
"pandas.read_hdf"
] |
[((245, 264), 'pandas.DataFrame', 'pd.DataFrame', (['steps'], {}), '(steps)\n', (257, 264), True, 'import pandas as pd\n'), ((401, 490), 'pandas.Categorical', 'pd.Categorical', (["steps_df['step_name']", "['prototypical', 'single_sources', 'mixtures']"], {}), "(steps_df['step_name'], ['prototypical', 'single_sources',\n 'mixtures'])\n", (415, 490), True, 'import pandas as pd\n'), ((2006, 2060), 'numpy.average', 'np.average', (['[eem.values for eem in proto_eems]'], {'axis': '(0)'}), '([eem.values for eem in proto_eems], axis=0)\n', (2016, 2060), True, 'import numpy as np\n'), ((2078, 2169), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'proto_eem', 'index': 'proto_eems[0].index', 'columns': 'proto_eems[0].columns'}), '(data=proto_eem, index=proto_eems[0].index, columns=proto_eems[\n 0].columns)\n', (2090, 2169), True, 'import pandas as pd\n'), ((2350, 2385), 'os.path.join', 'os.path.join', (['hdf_path', 'source_name'], {}), '(hdf_path, source_name)\n', (2362, 2385), False, 'import os\n'), ((2405, 2490), 'numpy.array', 'np.array', (["['source', 'proto_conc', 'source_units', 'intensity_units', 'hdf_path']"], {}), "(['source', 'proto_conc', 'source_units', 'intensity_units',\n 'hdf_path'])\n", (2413, 2490), True, 'import numpy as np\n'), ((2860, 2905), 'numpy.append', 'np.append', (['new_indices', '"""emission_wavelength"""'], {}), "(new_indices, 'emission_wavelength')\n", (2869, 2905), True, 'import numpy as np\n'), ((3908, 3934), 'pandas.DataFrame', 'pd.DataFrame', (['results_rows'], {}), '(results_rows)\n', (3920, 3934), True, 'import pandas as pd\n'), ((5034, 5075), 'os.path.join', 'os.path.join', (['proto_hdf_path', 'source_name'], {}), '(proto_hdf_path, source_name)\n', (5046, 5075), False, 'import os\n'), ((5128, 5172), 'pandas.read_hdf', 'pd.read_hdf', (['dataset.hdf'], {'key': 'proto_hdf_path'}), '(dataset.hdf, key=proto_hdf_path)\n', (5139, 5172), True, 'import pandas as pd\n'), ((6023, 6054), 'numpy.poly1d', 'np.poly1d', (['[slope, y_intercept]'], {}), '([slope, y_intercept])\n', (6032, 6054), True, 'import numpy as np\n'), ((6145, 6203), 'numpy.linspace', 'np.linspace', (['conc_range[0]', 'conc_range[1]'], {'num': 'num_spectra'}), '(conc_range[0], conc_range[1], num=num_spectra)\n', (6156, 6203), True, 'import numpy as np\n'), ((6387, 6422), 'os.path.join', 'os.path.join', (['hdf_path', 'source_name'], {}), '(hdf_path, source_name)\n', (6399, 6422), False, 'import os\n'), ((7683, 7704), 'pandas.concat', 'pd.concat', (['aug_ss_dfs'], {}), '(aug_ss_dfs)\n', (7692, 7704), True, 'import pandas as pd\n'), ((9128, 9149), 'pandas.concat', 'pd.concat', (['aug_ss_dfs'], {}), '(aug_ss_dfs)\n', (9137, 9149), True, 'import pandas as pd\n'), ((11553, 11577), 'pandas.concat', 'pd.concat', (['proto_spectra'], {}), '(proto_spectra)\n', (11562, 11577), True, 'import pandas as pd\n'), ((12024, 12083), 'tqdm.tqdm', 'tqdm', (['cartesian_product'], {'desc': '"""Creating Augmented Mixtures"""'}), "(cartesian_product, desc='Creating Augmented Mixtures')\n", (12028, 12083), False, 'from tqdm import tqdm\n'), ((14073, 14087), 'pandas.concat', 'pd.concat', (['aug'], {}), '(aug)\n', (14082, 14087), True, 'import pandas as pd\n'), ((1455, 1493), 'pandas.read_hdf', 'pd.read_hdf', (['dataset.hdf'], {'key': 'eem_path'}), '(dataset.hdf, key=eem_path)\n', (1466, 1493), True, 'import pandas as pd\n'), ((11347, 11388), 'os.path.join', 'os.path.join', (['proto_hdf_path', 'source_name'], {}), '(proto_hdf_path, source_name)\n', (11359, 11388), False, 'import os\n'), ((11448, 11492), 'pandas.read_hdf', 
'pd.read_hdf', (['dataset.hdf'], {'key': 'proto_hdf_path'}), '(dataset.hdf, key=proto_hdf_path)\n', (11459, 11492), True, 'import pandas as pd\n'), ((11633, 11690), 'numpy.geomspace', 'np.geomspace', (['conc_range[0]', 'conc_range[1]'], {'num': 'num_steps'}), '(conc_range[0], conc_range[1], num=num_steps)\n', (11645, 11690), True, 'import numpy as np\n'), ((13925, 13970), 'numpy.append', 'np.append', (['new_indices', '"""emission_wavelength"""'], {}), "(new_indices, 'emission_wavelength')\n", (13934, 13970), True, 'import numpy as np\n'), ((11742, 11798), 'numpy.linspace', 'np.linspace', (['conc_range[0]', 'conc_range[1]'], {'num': 'num_steps'}), '(conc_range[0], conc_range[1], num=num_steps)\n', (11753, 11798), True, 'import numpy as np\n'), ((12793, 12824), 'numpy.poly1d', 'np.poly1d', (['[slope, y_intercept]'], {}), '([slope, y_intercept])\n', (12802, 12824), True, 'import numpy as np\n'), ((13398, 13412), 'pandas.concat', 'pd.concat', (['mix'], {}), '(mix)\n', (13407, 13412), True, 'import pandas as pd\n')]
|
import pytest
import numpy as np
import os
import pyarrow as pa
import pyarrow.feather as feather
import pandas as pd
from app.services.preprocessor import PreProcessor
from typing import List
@pytest.fixture
def preprocessor() -> PreProcessor:
return PreProcessor("datasets/csvs/train.csv", "datasets/csvs/building1.csv")
@pytest.fixture
def generic_csv() -> str:
arr = np.random.rand(20, 20)
path = "datasets/csvs/dummy.csv"
np.savetxt(path, arr)
yield path
os.remove(path)
@pytest.fixture
def generic_feathers() -> List[str]:
base_path = "datasets/gen"
files = []
n_files = 30
col_rows = 20
rows = [f"row{x}" for x in range(0, col_rows)]
columns = [f"column{x}" for x in range(0, col_rows)]
for number in range(0, n_files):
arr = np.random.rand(col_rows , col_rows)
df = pd.DataFrame(arr, index = rows, columns = columns)
file_path = f"{base_path}/gen_{number}.feather"
files.append(file_path)
feather.write_feather(df, file_path)
yield (files, n_files, col_rows)
for file in files:
os.remove(file)
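# Illustrative sketch (a hypothetical test, not in the original file) of how the
# generic_feathers fixture above might be consumed; it only relies on
# pyarrow.feather.read_feather returning a pandas DataFrame.
def test_generic_feathers_roundtrip(generic_feathers):
    files, n_files, col_rows = generic_feathers
    assert len(files) == n_files
    df = feather.read_feather(files[0])
    assert len(df) == col_rows
    assert "column0" in df.columns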
|
[
"numpy.random.rand",
"app.services.preprocessor.PreProcessor",
"numpy.savetxt",
"pandas.DataFrame",
"pyarrow.feather.write_feather",
"os.remove"
] |
[((260, 330), 'app.services.preprocessor.PreProcessor', 'PreProcessor', (['"""datasets/csvs/train.csv"""', '"""datasets/csvs/building1.csv"""'], {}), "('datasets/csvs/train.csv', 'datasets/csvs/building1.csv')\n", (272, 330), False, 'from app.services.preprocessor import PreProcessor\n'), ((384, 406), 'numpy.random.rand', 'np.random.rand', (['(20)', '(20)'], {}), '(20, 20)\n', (398, 406), True, 'import numpy as np\n'), ((448, 469), 'numpy.savetxt', 'np.savetxt', (['path', 'arr'], {}), '(path, arr)\n', (458, 469), True, 'import numpy as np\n'), ((491, 506), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (500, 506), False, 'import os\n'), ((804, 838), 'numpy.random.rand', 'np.random.rand', (['col_rows', 'col_rows'], {}), '(col_rows, col_rows)\n', (818, 838), True, 'import numpy as np\n'), ((853, 899), 'pandas.DataFrame', 'pd.DataFrame', (['arr'], {'index': 'rows', 'columns': 'columns'}), '(arr, index=rows, columns=columns)\n', (865, 899), True, 'import pandas as pd\n'), ((1000, 1036), 'pyarrow.feather.write_feather', 'feather.write_feather', (['df', 'file_path'], {}), '(df, file_path)\n', (1021, 1036), True, 'import pyarrow.feather as feather\n'), ((1108, 1123), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1117, 1123), False, 'import os\n')]
|
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import tensorflow as tf
import numpy as np
from xfdnn_compiler_tensorflow import TFFrontend
#from xfdnn.tools.compile.frontends.frontend_caffe import CaffeFrontend
from tensorflow.python.platform import gfile
import xdnn_opt
class xdnnRT:
def __init__(self, compiler, rtargs):
#print ("compiler args", cargs)
self._inputs = self.list_inputs_of_graph()
pydotGraph, schedule, self._out, _ = compiler.compile()
# print ("compiled pydot graph", pydotGraph)
# print ("compiled schedule", schedule)
opt = None
if rtargs.device == "CPU":
opt = xdnn_opt.CPUTransform( self._inputs, pydotGraph, schedule)
elif rtargs.device == "FPGA":
if rtargs.xclbin:
opt = xdnn_opt.FPGATransform( self._inputs, pydotGraph, schedule, rtargs.xclbin)
else:
raise AttributeError("Must specify path to xclbin when device = FPGA")
else:
raise AttributeError("Unsupported device type", rtargs.device)
#variables hold the inputs/consts of graph
self._variables = opt.variables
self._layers = opt.getLayers()
for l in self._layers:
l.setup()
def list_inputs_of_graph(self):
pass
def preprocess(self,inputs):
pass
def batch_classify(self, img_list, batch, preprocess) :
bctr = 0
ictr = 0
pred = None
prepdata = {}
prep = self._inputs[0]
print(len(img_list))
ctr = 0
pred = []
while ctr < len(img_list) :
ctrmax = min(ctr+batch, len(img_list))
pred.append(self.feed_forward(img_list[ctr:ctrmax], preprocess = preprocess))
ctr = ctrmax
if len(pred) == 0 : return []
elif len(pred) == 1 :
return pred[0]
return np.concatenate(pred)
def feed_forward(self, inputs, out=None, preprocess = None):
inp_dict = {}
if not preprocess:
preprocess = self.preprocess
inp_dict[self._inputs[0]] = preprocess(inputs)
for k, v in inp_dict.items():
self._variables[k] = v
for layer in self._layers:
layer_inputs = []
layer_inputs = [self._variables[inp] for inp in layer.inputs]
self._variables[layer.output] = layer.forward_exec( layer_inputs )
if out is None:
return self._variables[self._out]
return self._variables[out]
class TFxdnnRT(xdnnRT):
def __init__ ( self, cargs):
self._tfGraph = tf.GraphDef()
with gfile.FastGFile(cargs.networkfile, 'rb') as f:
self._tfGraph.ParseFromString(f.read())
compiler = TFFrontend(cargs)
xdnnRT.__init__(self, compiler, cargs)
def list_inputs_of_graph(self) :
res = []
for node in self._tfGraph.node :
if node.op == 'Placeholder' :
res.append(node.name)
return res
def preprocess(self, inputs):
if type(inputs) is not np.ndarray:
inputs = np.transpose(self.read_tensor_from_image_file(inputs), [0,3,1,2]) # assuming that there is only one input
return inputs
def read_tensor_from_image_file(self, file_name,
input_height=299,
input_width=299,
input_mean=0,
input_std=255):
input_name = "file_reader"
file_reader = tf.read_file(file_name, input_name)
if file_name.endswith(".png"):
image_reader = tf.image.decode_png(file_reader, channels=3, name="png_reader")
elif file_name.endswith(".gif"):
image_reader = tf.squeeze(
tf.image.decode_gif(file_reader, name="gif_reader"))
elif file_name.endswith(".bmp"):
image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
else:
image_reader = tf.image.decode_jpeg(
file_reader, channels=3, name="jpeg_reader")
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
with tf.Session() as sess :
result = sess.run(normalized)
return result
|
[
"tensorflow.image.decode_png",
"xfdnn_compiler_tensorflow.TFFrontend",
"tensorflow.image.decode_bmp",
"tensorflow.Session",
"tensorflow.image.resize_bilinear",
"xdnn_opt.CPUTransform",
"xdnn_opt.FPGATransform",
"tensorflow.GraphDef",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.image.decode_gif",
"numpy.concatenate",
"tensorflow.subtract",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.read_file",
"tensorflow.image.decode_jpeg"
] |
[((1993, 2013), 'numpy.concatenate', 'np.concatenate', (['pred'], {}), '(pred)\n', (2007, 2013), True, 'import numpy as np\n'), ((2743, 2756), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2754, 2756), True, 'import tensorflow as tf\n'), ((2889, 2906), 'xfdnn_compiler_tensorflow.TFFrontend', 'TFFrontend', (['cargs'], {}), '(cargs)\n', (2899, 2906), False, 'from xfdnn_compiler_tensorflow import TFFrontend\n'), ((3718, 3753), 'tensorflow.read_file', 'tf.read_file', (['file_name', 'input_name'], {}), '(file_name, input_name)\n', (3730, 3753), True, 'import tensorflow as tf\n'), ((4296, 4329), 'tensorflow.cast', 'tf.cast', (['image_reader', 'tf.float32'], {}), '(image_reader, tf.float32)\n', (4303, 4329), True, 'import tensorflow as tf\n'), ((4354, 4385), 'tensorflow.expand_dims', 'tf.expand_dims', (['float_caster', '(0)'], {}), '(float_caster, 0)\n', (4368, 4385), True, 'import tensorflow as tf\n'), ((4404, 4472), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['dims_expander', '[input_height, input_width]'], {}), '(dims_expander, [input_height, input_width])\n', (4428, 4472), True, 'import tensorflow as tf\n'), ((731, 788), 'xdnn_opt.CPUTransform', 'xdnn_opt.CPUTransform', (['self._inputs', 'pydotGraph', 'schedule'], {}), '(self._inputs, pydotGraph, schedule)\n', (752, 788), False, 'import xdnn_opt\n'), ((2770, 2810), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['cargs.networkfile', '"""rb"""'], {}), "(cargs.networkfile, 'rb')\n", (2785, 2810), False, 'from tensorflow.python.platform import gfile\n'), ((3820, 3883), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['file_reader'], {'channels': '(3)', 'name': '"""png_reader"""'}), "(file_reader, channels=3, name='png_reader')\n", (3839, 3883), True, 'import tensorflow as tf\n'), ((4504, 4538), 'tensorflow.subtract', 'tf.subtract', (['resized', '[input_mean]'], {}), '(resized, [input_mean])\n', (4515, 4538), True, 'import tensorflow as tf\n'), ((4566, 4578), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4576, 4578), True, 'import tensorflow as tf\n'), ((880, 953), 'xdnn_opt.FPGATransform', 'xdnn_opt.FPGATransform', (['self._inputs', 'pydotGraph', 'schedule', 'rtargs.xclbin'], {}), '(self._inputs, pydotGraph, schedule, rtargs.xclbin)\n', (902, 953), False, 'import xdnn_opt\n'), ((3976, 4027), 'tensorflow.image.decode_gif', 'tf.image.decode_gif', (['file_reader'], {'name': '"""gif_reader"""'}), "(file_reader, name='gif_reader')\n", (3995, 4027), True, 'import tensorflow as tf\n'), ((4097, 4148), 'tensorflow.image.decode_bmp', 'tf.image.decode_bmp', (['file_reader'], {'name': '"""bmp_reader"""'}), "(file_reader, name='bmp_reader')\n", (4116, 4148), True, 'import tensorflow as tf\n'), ((4190, 4255), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['file_reader'], {'channels': '(3)', 'name': '"""jpeg_reader"""'}), "(file_reader, channels=3, name='jpeg_reader')\n", (4210, 4255), True, 'import tensorflow as tf\n')]
|
from copy import deepcopy
import numpy as np
def complete_mol(self, labels):
"""
Take a cell and complete certain molecules
The objective is to end up with a unit cell where the molecules of interest
are complete. The rest of the atoms of the cell must remain intact. Note that
the input atoms are transformed and are the same as are present in the
output.
Parameters
----------
labels : int or list of ints
The number of the atoms from which the molecules are generated
Returns
-------
new_mol : Mol object
The now complete molecule
new_cell : Mol object
The cell with the completed molecule
"""
new_mol, scattered_mol = self.per_select(labels, old_pos=True)
new_cell_atoms = deepcopy(
[a for a in self.atoms if a not in scattered_mol])
new_cell = self.copy()
new_cell.atoms = new_cell_atoms
for atom in new_mol:
new_cell.append(atom.copy())
return new_mol, new_cell
def complete_cell(self):
"""
Return a cell where atoms have been translated to complete all molecules of
the cell
Returns
-------
out_cell : Mol object
The new untruncated cell
full_mol_l : list of Mol objects
Each molecule in the untruncated cell
"""
full_mol_l = []
remaining = self.copy()
while len(remaining) != 0:
full_mol, cell = remaining.complete_mol(0)
full_mol_l.append(full_mol)
remaining = cell
for atom in full_mol:
if atom in remaining:
remaining.remove(atom)
    # Conveniently, remaining is now an empty Mol
out_cell = remaining
for mol in full_mol_l:
out_cell.extend(mol)
return out_cell, full_mol_l
def supercell(self, trans):
"""
Return a supercell of I x J x K
Parameters
----------
trans : array-like of length 3
Multiplications of the primitive cell
Returns
-------
supercell : Mol object
New supercell with adjusted lattice vectors
"""
import fromage.utils.mol as mol_init
# make the input into a np array
trans = np.array(trans)
new_cell = self.empty_mol()
for a_mult in range(trans[0]):
for b_mult in range(trans[1]):
for c_mult in range(trans[2]):
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def centered_supercell(self, trans, from_origin=False):
"""
Make a bigger supercell out of an input cell.
The cell is multiplied positively and negatively through each lattice
vector so that the supercluster ends up being
(1+2*trans[0])*(1+2*trans[1])*(1+2*trans[2]) times larger. For example if the
input is 1,1,1 for a cubic unit cell, the output will be the original unit
cell surrounded by 26 other unit cells forming a total 3x3x3 cube.
Alternatively, the multiplication can be centered around the origin, a corner of the
unit cell, instead of the centre. In that case the supercluster ends up being
only (2*trans[0])*(2*trans[1])*(2*trans[2])
Parameters
----------
trans : numpy array of length 3
Multiplications of the primitive cell
from_origin : bool
Determines the kind of multiplication. True is corner of the cell as
the center, False is middle of the cell.
Returns
-------
mega_cell : Mol object
The resulting supercell
"""
import fromage.utils.mol as mol_init
trans_series = [0, 0, 0]
for i, tra in enumerate(trans):
if from_origin:
trans_series[i] = list(range(-tra, tra))
else:
trans_series[i] = list(range(-tra, tra + 1))
trans_series = np.array(trans_series)
new_cell = self.empty_mol()
for a_mult in trans_series[0]:
for b_mult in trans_series[1]:
for c_mult in trans_series[2]:
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def trans_from_rad(self, clust_rad):
"""
    Generate the translations necessary to encapsulate a sphere of a given radius
Parameters
----------
clust_rad : float
Radius defining a sphere
Returns
-------
trans_count : 3 x 1 numpy array
The translations required for the unit cell to contain the sphere
"""
# determine how many unit cells we need
vectors = deepcopy(self.vectors)
# vectors normal to faces
a_perp = np.cross(vectors[1], vectors[2])
b_perp = np.cross(vectors[2], vectors[0])
c_perp = np.cross(vectors[0], vectors[1])
# the three normalised unit vectors
perp = np.array([a_perp / np.linalg.norm(a_perp), b_perp /
np.linalg.norm(b_perp), c_perp / np.linalg.norm(c_perp)])
trans_count = np.array([1, 1, 1])
# distances from faces
distances = np.array([0.0, 0.0, 0.0])
new_vectors = deepcopy(vectors)
for comp in range(3):
while True:
trans_count[comp] += 1
distances[comp] = np.dot(new_vectors[comp], perp[comp])
new_vectors[comp] = trans_count[comp] * vectors[comp]
if distances[comp] > clust_rad:
break
trans_count -= np.array([1, 1, 1])
return trans_count
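# Illustrative sketch (not part of fromage): the face distance used above is the
# projection of one lattice vector onto the unit normal of the plane spanned by
# the other two; for a 5 Angstrom cubic cell it evaluates to 5.0.
def _example_face_distance():
    vectors = np.identity(3) * 5.0
    a_perp = np.cross(vectors[1], vectors[2])
    return float(np.dot(vectors[0], a_perp / np.linalg.norm(a_perp)))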
def make_cluster(self, clust_rad, mode='exc', central_mol=None):
"""
Generate a cluster of molecules from a primitive cell
This first makes a supercell of the correct size which will contain with
one additional buffer shell. Then the sphere is generated from this new
supercell by connectivity.
A central molecule can also be supplied which will turn the spheres
defining the clusters into the union of spheres stemming from each atom
of the central molecule.
Parameters
----------
clust_rad : float
Radius defining a sphere. All molecules with atoms in the sphere are
to be grabbed
mode : str
Switches between inclusive and exclusive selecting. Inclusive,
'inc', selects all molecules which have atoms within the radius.
Exclusive, 'exc', selects all molecules fully in the radius.
            Default: 'exc'
central_mol : Mol
If this is supplied, the central molecule will act as a kernel for
the cluster which will end up being of the appropriate shape.
Returns
-------
cluster : Mol object
Spherical cluster of molecules from their crystal positions
"""
import fromage.utils.mol as mol_init
# if there is a central mol, account for nearest neighbour molecules
# bleeding out of the original radius
if central_mol:
        # radius of the central molecule = distance of its farthest atom from the origin
        central_rad = 0
        for atom in central_mol:
            dis = atom.v_dist([0, 0, 0])
            if dis > central_rad:
                central_rad = dis
trans = self.trans_from_rad(clust_rad + central_rad)
# get the translations necessary to enclose the required mols
else:
trans = self.trans_from_rad(clust_rad)
# if the cluster is inclusive, then extra mols might be required from
# an additional layer of the supercell
if mode == 'inc':
trans += np.array([1, 1, 1]) # one buffer cell layer
supercell = self.centered_supercell(trans, from_origin=True)
seed_atoms = mol_init.Mol([])
# get seedatoms in the shape of the central mol if pertinent
if central_mol:
for atom_i in supercell:
for atom_j in central_mol:
if atom_i.dist(atom_j) < clust_rad:
seed_atoms.append(atom_i)
break
# get spherical seedatoms
else:
for atom in supercell:
if atom.v_dist([0, 0, 0]) < clust_rad:
seed_atoms.append(atom)
max_mol_len = 0
if mode == 'exc':
while len(seed_atoms) > 0:
mol = seed_atoms.select(0)
if len(mol) > max_mol_len:
max_mol_len = len(mol)
clust_atoms = mol_init.Mol([])
if len(mol) == max_mol_len:
clust_atoms += mol
for atom in mol:
seed_atoms.remove(atom)
if mode == 'inc':
clust_atoms = mol_init.Mol([])
max_mol_len = len(supercell.select(supercell.index(seed_atoms[0])))
while len(seed_atoms) > 0:
# The part of the mol detected in seed_atoms
mol_tmp = seed_atoms.select(0)
if len(mol_tmp) < max_mol_len:
# The whole mol, which could potentially include even more
# seed_atoms
mol = supercell.select(supercell.index(seed_atoms[0]))
else:
mol = mol_tmp
clust_atoms += mol
for atom in mol_tmp:
seed_atoms.remove(atom)
for atom in mol:
supercell.remove(atom)
# remove all atoms of the mol which are part of seed_atoms
try:
seed_atoms.remove(atom)
except ValueError:
pass
return clust_atoms
def centered_mols(self, labels, return_trans=False):
"""
Return the molecules translated at the origin with a corresponding cell
Parameters
----------
labels : int or list of ints
The labels of the atoms to select
    return_trans : bool
        If True, also return the translation vector (-centroid) that was applied
Returns
-------
mol : Mol object
The selected molecules with their centroid at the origin
mod_cell : Mol object
The new confined cell corresponding to the now translated molecules
"""
mol, mod_cell = self.complete_mol(labels)
centro = mol.centroid()
mol.translate(-centro)
mod_cell.translate(-centro)
mod_cell = mod_cell.confined()
if return_trans:
return mol, mod_cell, -centro
else:
return mol, mod_cell
def confined(self):
"""Move all atoms to fit inside the primitive cell"""
frac_mol = self.dir_to_frac_pos()
out_mol = frac_mol.frac_to_dir_pos()
return out_mol
|
[
"copy.deepcopy",
"numpy.cross",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot",
"fromage.utils.mol.Mol"
] |
[((769, 828), 'copy.deepcopy', 'deepcopy', (['[a for a in self.atoms if a not in scattered_mol]'], {}), '([a for a in self.atoms if a not in scattered_mol])\n', (777, 828), False, 'from copy import deepcopy\n'), ((2143, 2158), 'numpy.array', 'np.array', (['trans'], {}), '(trans)\n', (2151, 2158), True, 'import numpy as np\n'), ((4050, 4072), 'numpy.array', 'np.array', (['trans_series'], {}), '(trans_series)\n', (4058, 4072), True, 'import numpy as np\n'), ((5055, 5077), 'copy.deepcopy', 'deepcopy', (['self.vectors'], {}), '(self.vectors)\n', (5063, 5077), False, 'from copy import deepcopy\n'), ((5122, 5154), 'numpy.cross', 'np.cross', (['vectors[1]', 'vectors[2]'], {}), '(vectors[1], vectors[2])\n', (5130, 5154), True, 'import numpy as np\n'), ((5168, 5200), 'numpy.cross', 'np.cross', (['vectors[2]', 'vectors[0]'], {}), '(vectors[2], vectors[0])\n', (5176, 5200), True, 'import numpy as np\n'), ((5214, 5246), 'numpy.cross', 'np.cross', (['vectors[0]', 'vectors[1]'], {}), '(vectors[0], vectors[1])\n', (5222, 5246), True, 'import numpy as np\n'), ((5449, 5468), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (5457, 5468), True, 'import numpy as np\n'), ((5513, 5538), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5521, 5538), True, 'import numpy as np\n'), ((5558, 5575), 'copy.deepcopy', 'deepcopy', (['vectors'], {}), '(vectors)\n', (5566, 5575), False, 'from copy import deepcopy\n'), ((5877, 5896), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (5885, 5896), True, 'import numpy as np\n'), ((7923, 7939), 'fromage.utils.mol.Mol', 'mol_init.Mol', (['[]'], {}), '([])\n', (7935, 7939), True, 'import fromage.utils.mol as mol_init\n'), ((7795, 7814), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (7803, 7814), True, 'import numpy as np\n'), ((8814, 8830), 'fromage.utils.mol.Mol', 'mol_init.Mol', (['[]'], {}), '([])\n', (8826, 8830), True, 'import fromage.utils.mol as mol_init\n'), ((5688, 5725), 'numpy.dot', 'np.dot', (['new_vectors[comp]', 'perp[comp]'], {}), '(new_vectors[comp], perp[comp])\n', (5694, 5725), True, 'import numpy as np\n'), ((5318, 5340), 'numpy.linalg.norm', 'np.linalg.norm', (['a_perp'], {}), '(a_perp)\n', (5332, 5340), True, 'import numpy as np\n'), ((5372, 5394), 'numpy.linalg.norm', 'np.linalg.norm', (['b_perp'], {}), '(b_perp)\n', (5386, 5394), True, 'import numpy as np\n'), ((5405, 5427), 'numpy.linalg.norm', 'np.linalg.norm', (['c_perp'], {}), '(c_perp)\n', (5419, 5427), True, 'import numpy as np\n'), ((8609, 8625), 'fromage.utils.mol.Mol', 'mol_init.Mol', (['[]'], {}), '([])\n', (8621, 8625), True, 'import fromage.utils.mol as mol_init\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2020/2/15 16:10
# @Author : <NAME>
# @Email : <EMAIL>
# @File : utils.py
# @Software: PyCharm
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.core.framework import summary_pb2
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.average = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.average = self.sum / float(self.count)
def make_summary(name, val):
return summary_pb2.Summary(value=[summary_pb2.Summary.Value(tag=name, simple_value=val)])
def plot_accuracy(x,y,path):
"""
这是绘制精度的函数
:param x: x坐标数组
:param y: y坐标数组
:param path: 结果保存地址
:param mode: 模式,“train”代表训练损失,“val”为验证损失
"""
lengend_array = ["train_acc", "val_acc"]
train_accuracy,val_accuracy = y
plt.plot(x, train_accuracy, 'r-')
plt.plot(x, val_accuracy, 'b--')
plt.grid(True)
plt.xlim(0, x[-1]+2)
#plt.xticks(x)
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.legend(lengend_array,loc="best")
plt.savefig(path)
plt.close()
def plot_loss(x,y,path,mode="train"):
"""
这是绘制损失的函数
:param x: x坐标数组
:param y: y坐标数组
:param path: 结果保存地址
:param mode: 模式,“train”代表训练损失,“val”为验证损失
"""
if mode == "train":
lengend_array = ["train_loss","train_image_cls_loss","train_domain_cls_loss"]
else:
lengend_array = ["val_loss", "val_image_cls_loss", "val_domain_cls_loss"]
loss_results,image_cls_loss_results,domain_cls_loss_results = y
loss_results_min = np.max([np.min(loss_results) - 0.1,0])
image_cls_loss_results_min = np.max([np.min(image_cls_loss_results) - 0.1,0])
domain_cls_loss_results_min =np.max([np.min(domain_cls_loss_results) - 0.1,0])
y_min = np.min([loss_results_min,image_cls_loss_results_min,domain_cls_loss_results_min])
plt.plot(x, loss_results, 'r-')
plt.plot(x, image_cls_loss_results, 'b--')
plt.plot(x, domain_cls_loss_results, 'g-.')
plt.grid(True)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.xlim(0,x[-1]+2)
plt.ylim(ymin=y_min)
#plt.xticks(x)
plt.legend(lengend_array,loc="best")
plt.savefig(path)
plt.close()
def learning_rate_schedule(process,init_learning_rate = 0.01,alpha = 10.0 , beta = 0.75):
"""
这个学习率的变换函数
:param process: 训练进程比率,值在0-1之间
:param init_learning_rate: 初始学习率,默认为0.01
:param alpha: 参数alpha,默认为10
:param beta: 参数beta,默认为0.75
"""
return init_learning_rate /(1.0 + alpha * process)**beta
def grl_lambda_schedule(process,gamma=10.0):
"""
这是GRL的参数lambda的变换函数
:param process: 训练进程比率,值在0-1之间
:param gamma: 参数gamma,默认为10
"""
return 2.0 / (1.0+np.exp(-gamma*process)) - 1.0
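if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): how the two schedules above evolve
    # from the start (process=0) to the end (process=1) of training.
    for process in (0.0, 0.5, 1.0):
        lr = learning_rate_schedule(process, init_learning_rate=0.01)
        lam = grl_lambda_schedule(process)
        print("process=%.1f lr=%.6f grl_lambda=%.3f" % (process, lr, lam))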
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.core.framework.summary_pb2.Summary.Value",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend"
] |
[((1003, 1036), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_accuracy', '"""r-"""'], {}), "(x, train_accuracy, 'r-')\n", (1011, 1036), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1074), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_accuracy', '"""b--"""'], {}), "(x, val_accuracy, 'b--')\n", (1050, 1074), True, 'import matplotlib.pyplot as plt\n'), ((1080, 1094), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1088, 1094), True, 'import matplotlib.pyplot as plt\n'), ((1100, 1122), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(x[-1] + 2)'], {}), '(0, x[-1] + 2)\n', (1108, 1122), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1165), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (1156, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (1181, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1236), 'matplotlib.pyplot.legend', 'plt.legend', (['lengend_array'], {'loc': '"""best"""'}), "(lengend_array, loc='best')\n", (1209, 1236), True, 'import matplotlib.pyplot as plt\n'), ((1241, 1258), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (1252, 1258), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1275), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1273, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1981, 2068), 'numpy.min', 'np.min', (['[loss_results_min, image_cls_loss_results_min, domain_cls_loss_results_min]'], {}), '([loss_results_min, image_cls_loss_results_min,\n domain_cls_loss_results_min])\n', (1987, 2068), True, 'import numpy as np\n'), ((2068, 2099), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'loss_results', '"""r-"""'], {}), "(x, loss_results, 'r-')\n", (2076, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2147), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'image_cls_loss_results', '"""b--"""'], {}), "(x, image_cls_loss_results, 'b--')\n", (2113, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2153, 2196), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'domain_cls_loss_results', '"""g-."""'], {}), "(x, domain_cls_loss_results, 'g-.')\n", (2161, 2196), True, 'import matplotlib.pyplot as plt\n'), ((2202, 2216), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2210, 2216), True, 'import matplotlib.pyplot as plt\n'), ((2222, 2241), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2232, 2241), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2265), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2257, 2265), True, 'import matplotlib.pyplot as plt\n'), ((2271, 2293), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(x[-1] + 2)'], {}), '(0, x[-1] + 2)\n', (2279, 2293), True, 'import matplotlib.pyplot as plt\n'), ((2296, 2316), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': 'y_min'}), '(ymin=y_min)\n', (2304, 2316), True, 'import matplotlib.pyplot as plt\n'), ((2342, 2379), 'matplotlib.pyplot.legend', 'plt.legend', (['lengend_array'], {'loc': '"""best"""'}), "(lengend_array, loc='best')\n", (2352, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2384, 2401), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (2395, 2401), True, 'import matplotlib.pyplot as plt\n'), ((2407, 2418), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2416, 2418), True, 'import matplotlib.pyplot as plt\n'), ((681, 734), 
'tensorflow.core.framework.summary_pb2.Summary.Value', 'summary_pb2.Summary.Value', ([], {'tag': 'name', 'simple_value': 'val'}), '(tag=name, simple_value=val)\n', (706, 734), False, 'from tensorflow.core.framework import summary_pb2\n'), ((1770, 1790), 'numpy.min', 'np.min', (['loss_results'], {}), '(loss_results)\n', (1776, 1790), True, 'import numpy as np\n'), ((1843, 1873), 'numpy.min', 'np.min', (['image_cls_loss_results'], {}), '(image_cls_loss_results)\n', (1849, 1873), True, 'import numpy as np\n'), ((1926, 1957), 'numpy.min', 'np.min', (['domain_cls_loss_results'], {}), '(domain_cls_loss_results)\n', (1932, 1957), True, 'import numpy as np\n'), ((2939, 2963), 'numpy.exp', 'np.exp', (['(-gamma * process)'], {}), '(-gamma * process)\n', (2945, 2963), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import math
import numpy as np
def count_words(filename):
counter = collections.Counter()
with open(filename, "r") as fd:
for line in fd:
words = line.strip().split()
counter.update(words)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, counts = list(zip(*count_pairs))
return words, counts
def control_symbols(string):
if not string:
return []
else:
return string.strip().split(",")
def save_vocab(name, vocab):
if name.split(".")[-1] != "txt":
name = name + ".txt"
# pairs = sorted(vocab.items(), key=lambda x: (x[1], x[0]))
pairs = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
words, ids = list(zip(*pairs))
# total freq
T_freq = sum(ids)
with open(name, "w") as f:
for i, word in enumerate(words):
# f.write(word + " " + str(ids[i]) + "\n")
f.write(word + " " + "%.16f" % (ids[i] / T_freq) + "\n")
# write total freq
def cal_cdf_model(corpus, vocab):
pairs = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
words, ids = list(zip(*pairs))
freq_dict = {}
for word, id in zip(words, ids):
freq_dict[word] = id
T_freq = sum(ids)
data = []
debug = 0
with open(corpus, "r") as f:
for line in f.readlines():
line = line.split()
SUM = 0
for w in line:
p = freq_dict[w] / T_freq
if p != 0:
SUM += math.log(p)
SUM = -SUM
data.append(SUM)
# if SUM < 5.718:
# debug += 1
# print (SUM)
# data contains all sum log
# bins='auto'
v, base = np.histogram(data, bins=np.arange(1000))
print ("data:", data[:50])
print ("value", v[:50])
base = base.astype(np.float32)
print ("base:", base[:50])
print ("highest value:", base[-1])
print ("len of base:", len(base))
# print ("debug:", debug)
cdf = np.cumsum(v)
cdf = cdf / len(data)
cdf = cdf.astype(np.float32)
print ("cdf:", cdf, cdf.dtype)
print ("outputing cdf and bases.")
# res = {"cdf": cdf, "base": base}
np.savez(args.output + "-cdf_base.npz", cdf=cdf, base=base)
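def score_to_percentile(score, cdf, base):
    """Illustrative sketch (an assumption about downstream use, not part of the
    original script): map a sentence's negative log-probability onto the
    empirical CDF saved by cal_cdf_model above."""
    idx = np.searchsorted(base, score, side="right") - 1
    idx = min(max(idx, 0), len(cdf) - 1)
    return float(cdf[idx])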
def parse_args():
parser = argparse.ArgumentParser(description="Create vocabulary")
parser.add_argument("corpus", help="input corpus")
parser.add_argument("output", default="vocab.txt",
help="Output vocabulary name")
parser.add_argument("--limit", default=0, type=int, help="Vocabulary size")
parser.add_argument("--control", type=str, default="",
help="Add control symbols to vocabulary. "
"Control symbols are separated by comma.")
return parser.parse_args()
args=parse_args()
def main():
vocab = {}
limit = args.limit
count = 0
words, counts = count_words(args.corpus)
ctrl_symbols = control_symbols(args.control)
for sym in ctrl_symbols:
vocab[sym] = len(vocab)
for word, freq in zip(words, counts):
if limit and len(vocab) >= limit:
break
if word in vocab:
print("Warning: found duplicate token %s, ignored" % word)
continue
# vocab[word] = len(vocab)
# print(word, freq)
vocab[word] = freq
count += freq
save_vocab(args.output, vocab)
cal_cdf_model(args.corpus, vocab)
print("Total words: %d" % sum(counts))
print("Unique words: %d" % len(words))
print("Vocabulary coverage: %4.2f%%" % (100.0 * count / sum(counts)))
if __name__ == "__main__":
main()
|
[
"numpy.savez",
"argparse.ArgumentParser",
"math.log",
"collections.Counter",
"numpy.cumsum",
"numpy.arange"
] |
[((291, 312), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (310, 312), False, 'import collections\n'), ((2254, 2266), 'numpy.cumsum', 'np.cumsum', (['v'], {}), '(v)\n', (2263, 2266), True, 'import numpy as np\n'), ((2443, 2502), 'numpy.savez', 'np.savez', (["(args.output + '-cdf_base.npz')"], {'cdf': 'cdf', 'base': 'base'}), "(args.output + '-cdf_base.npz', cdf=cdf, base=base)\n", (2451, 2502), True, 'import numpy as np\n'), ((2537, 2593), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create vocabulary"""'}), "(description='Create vocabulary')\n", (2560, 2593), False, 'import argparse\n'), ((1995, 2010), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (2004, 2010), True, 'import numpy as np\n'), ((1757, 1768), 'math.log', 'math.log', (['p'], {}), '(p)\n', (1765, 1768), False, 'import math\n')]
|
# ---
# jupyter:
# jupytext:
# formats: jupyter_scripts//ipynb,scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # series_tools:
#
# set of tools that work with streamflow records.
# - Identify events.
# - Identify baseflow and runoff.
#
import pandas as pd
import numpy as np
# ## Digital filters
#
# Collection of functions to separate runoff from baseflow.
# +
def DigitalFilters(Q,tipo = 'Eckhart', a = 0.98, BFI = 0.8):
    '''Digital filters to separate baseflow from runoff in a continuous time series.
Parameters:
- tipo: type of filter to be used.
- Eckhart o 1.
- Nathan o 2.
- Chapman o 3.
- Q: pandas series with the streamflow records.
        - a: parameter for the filter.
- Eckhart: 0.98.
- Nathan: 0.8.
- Chapman: 0.8.
- BFI: 0.8 only applies for Eckhart filter.
Returns:
- Pandas DataFrame with the Runoff, Baseflow.'''
#Functions definitions.
def Nathan1990(Q, a = 0.8):
'''One parameter digital filter of Nathan and McMahon (1990)'''
R = np.zeros(Q.size)
c = 1
for q1,q2 in zip(Q[:-1], Q[1:]):
R[c] = a*R[c-1] + ((1+a)/2.)*(q2-q1)
if R[c]<0:
R[c] = 0
elif R[c]>q2:
R[c] = q2
c += 1
B = Q - R
return R, B
def Eckhart2005(Q, BFI=0.8, a = 0.98):
'''Two parameter Eckhart digital filter
Parameters:
- Q: np.ndarray with the streamflow records.
- BFI: The maximum amount of baseflow (%).
- a: parameter alpha (0.98)
Output:
- R: total runoff.
- B: total baseflow.'''
        # Separation
B = np.zeros(Q.size)
B[0] = Q[0]
c = 1
for q in Q[1:]:
            # Separation equation
B[c] = ((1.0-BFI)*a*B[c-1]+(1.0-a)*BFI*q)/(1.0-a*BFI)
            # Constraints
if B[c] > q:
B[c] = q
c+=1
R = Q - B
return R, B
def ChapmanMaxwell1996(Q, a = 0.98):
'''Digital filter proposed by chapman and maxwell (1996)'''
B = np.zeros(Q.size)
c = 1
for q in Q[1:]:
B[c] = (a / (2.-a))*B[c-1] + ((1.-a)/(2.-a))*q
c+=1
R = Q-B
return R,B
#Cal the filter
if tipo == 'Eckhart' or tipo == 1:
        R,B = Eckhart2005(Q.values, BFI=BFI, a=a)
elif tipo =='Nathan' or tipo == 2:
R,B = Nathan1990(Q.values, a,)
elif tipo == 'Chapman' or tipo ==3:
R,B = ChapmanMaxwell1996(Q.values, a)
#Returns the serie
return pd.DataFrame(np.vstack([R,B]).T, index = Q.index, columns = ['Runoff','Baseflow'])
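# Minimal usage sketch (synthetic data, illustrative only): separate an
# artificial hourly streamflow series with the filter defined above.
_dates = pd.date_range('2020-01-01', periods=240, freq='1h')
_q = pd.Series(5.0 + 3.0 * np.sin(np.linspace(0, 6 * np.pi, 240)), index=_dates)
_sep = DigitalFilters(_q, tipo='Eckhart', a=0.98, BFI=0.8)
_sep.head()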
# -
# ## Events selection functions
#
# Collection of functions to identify peaks in a series and the end of each peak recession.
# +
def Events_Get_Peaks(Q, Qmin = None, tw = pd.Timedelta('12h')):
    '''Find the peak values of the hydrographs of a series
Params:
- Q: Pandas serie with the records.
- Qmin: The minimum value of Q to be considered a peak.
if None takes the 99th percentile of the series as the min
        - tw: size of the time window used to eliminate surrounding maximum values'''
if Qmin is None:
Qmin = np.percentile(Q.values[np.isfinite(Q.values)], 99)
#Find the maximum
Qmax = Q[Q>Qmin]
QmaxCopy = Qmax.copy()
    # Search for the largest maxima (maximum maximorum)
Flag = True
PosMax = []
while Flag:
MaxIdx = Qmax.idxmax()
PosMax.append(MaxIdx)
Qmax[MaxIdx-tw:MaxIdx+tw] = -9
if Qmax.max() < Qmin: Flag = False
#Return the result
return QmaxCopy[PosMax].sort_index()
def Events_Get_End(Q, Qmax, minDif = 0.04, minDistance = None,maxSearch = 10, Window = '1h'):
'''Find the end of each selected event in order to know the
    duration of each recession event.
Parameters:
- Q: Pandas series with the records.
- Qmax: Pandas series with the peak streamflows.
- minDif: The minimum difference to consider that a recession is over.
Optional:
- minDistance: minimum temporal distance between the peak and the end.
- maxSearch: maximum number of iterations to search for the end.
        - Window: Size of the temporal window used to smooth the streamflow
records before the difference estimation (pandas format).
Returns:
        - Qend: The point indicating the end of the recession.'''
#Obtains the difference
X = Q.resample('1h').mean()
dX = X.values[1:] - X.values[:-1]
dX = pd.Series(dX, index=X.index[:-1])
#Obtains the points.
DatesEnds = []
Correct = []
for peakIndex in Qmax.index:
try:
a = dX[dX.index > peakIndex]
if minDistance is None:
DatesEnds.append(a[a>minDif].index[0])
else:
Dates = a[a>minDif].index
flag = True
c = 0
while flag:
distancia = Dates[c] - peakIndex
if distancia > minDistance:
DatesEnds.append(Dates[c])
flag= False
c += 1
if c>maxSearch: flag = False
Correct.append(0)
except:
DatesEnds.append(peakIndex)
Correct.append(1)
#Returns the pandas series with the values and end dates
Correct = np.array(Correct)
return pd.Series(Q[DatesEnds], index=DatesEnds), Qmax[Correct == 0]
# -
# ## Runoff analysis
# +
def Runoff_SeparateBaseflow(Qobs, Qsim):
'''From observed records obtain the baseflow and runoff streamflow records.
Parameters:
- Qobs: Observed record dt < 1h.
- Qsim: Simulated records dt < 1h.
Returns:
- Qh: Observed records at hourly scale.
- Qsh: Simulated records at a hourly scale.
- Qsep: Observed separated records at hourly scale'''
#Observed series to hourly scale.
Qh = Qobs.resample('1h').mean()
Qh[np.isnan(Qh)] = Qh.mean()
Qh[Qh<0] = Qh.mean()
Qsep = DigitalFilters(Qh, tipo = 'Nathan', a = 0.998)
#Pre-process of simulated series to hourly scale.
Qsh = Qsim.resample('1h').mean()
Qsh[np.isnan(Qsh)] = 0.0
#Return results
return Qh, Qsh, Qsep
def Runoff_FindEvents(Qobs, Qsim, minTime = 1, minConcav = None, minPeak = None):
'''Separates runoff from baseflow and finds the events.
Parameters:
- Qobs: Hourly obseved streamflow.
- Qsim: Hourly simulated streamflow.
- minTime: minimum duration of the event.
- minConcav: minimum concavity of the event.
- minPeak: minimum value of the peakflows.
Returns:
- pos1: pandas index lists with the initial positions.
- pos2: pandas index lists with the end positions.'''
#Obtain the positions of the start and
pos1, pos2 = __Runoff_Get_Events__(Qsim, np.percentile(Qobs, 20))
    pos1, pos2 = __Runoff_Del_Events__(Qobs, pos1, pos2, minTime=minTime, minConcav=minConcav, minPeak = minPeak)
#Returns results
return pos1, pos2
def Runoff_CompleteAnalysis(Area, Qobs, Rain, Qsep, pos1, pos2, N=None, Nant = None):
'''Obtains the DataFrame with the resume of the RC analysis.
Parameters:
- Area: the area of the basin in km2.
- Qobs: Hourly observed streamflow.
- Rain: Hourly rainfall.
- Qsep: Hourly dataFrame with the separated flows.
- pos1: pandas index lists with the initial positions.
- pos2: pandas index lists with the end positions.
- N: Number of days to eval the rainfall between p1-N: p2.
- Nant: Number of antecedent days to eval the rainfall between p1-Nant : p1-N.
Results:
- DataFrame with the columns: RC, RainEvent, RainBefore, RainInt, Qmax'''
#Search for N
if N is None:
#Time window based on the basin area.
N = Area**0.2
N = np.floor(N) // 2 * 2 + 1
if N<3: N = 3
if N>11: N = 11
Ndays = pd.Timedelta(str(N)+'d')
if Nant is None:
Nant = pd.Timedelta(str(N+3)+'d')
else:
Ndays = N
if Nant is None:
Nant = N + pd.Timedelta('3d')
#Lists of data
RC = []
RainTot = []
Date = []
Qmax = []
RainInt = []
RainAnt = []
#Get Values for events
for pi,pf in zip(pos1, pos2):
#General variables obtention
Runoff = Qsep['Runoff'][pi:pf+Ndays].sum()*3600.
Rainfall = (Rain[pi-Ndays:pf].sum()/1000.)*(Area*1e6)
#Runoff and streamflow List updates
Qmax.append(Qobs[pi:pf].max())
RC.append(Runoff / Rainfall)
#Rainfall list updates
RainTot.append(Rain[pi-Ndays:pf].sum())
RainInt.append(Rain[pi-Ndays:pf].max())
RainAnt.append(Rain[pi-Ndays-Nant:pi-Ndays].sum())
#Dates.
Date.append(pi)
#Converts to arrays
RC = np.array(RC)
RainTot = np.array(RainTot)
RainInt = np.array(RainInt)
RainAnt = np.array(RainAnt)
Date = np.array(Date)
Qmax = np.array(Qmax)
#Select the correct values
p1 = np.where(np.isfinite(RC))[0]
p2 = np.where((RC[p1]<=1.0) & (RC[p1]>0.0))[0]
    #Keep only the valid events
RC = RC[p1[p2]]
RainTot = RainTot[p1[p2]]
RainInt = RainInt[p1[p2]]
RainAnt = RainAnt[p1[p2]]
Date = Date[p1[p2]]
Qmax = Qmax[p1[p2]]
    #Remove spurious events
    pos = np.where((RC>0.04) & (RainTot<10))[0]
    #Filter the arrays again
RC = np.delete(RC, pos)
RainTot = np.delete(RainTot, pos)
RainInt = np.delete(RainInt, pos)
RainAnt = np.delete(RainAnt, pos)
Date = np.delete(Date, pos)
Qmax = np.delete(Qmax, pos)
#Turns things into a DataFrame
Data = pd.DataFrame(
np.vstack([RC, RainTot, RainAnt, RainInt, Qmax]).T,
index= Date,
columns=['RC', 'RainEvent', 'RainBefore','RainInt','Qmax'])
return Data
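# A minimal sketch (not part of the original notebook) of the unit bookkeeping behind the
# runoff coefficient computed in Runoff_CompleteAnalysis, assuming hourly runoff in m3/s,
# rainfall in mm and the basin area in km2:
#   runoff volume  [m3] = sum(Qrunoff [m3/s]) * 3600 [s/h]
#   rainfall volume[m3] = (sum(P [mm]) / 1000.) * (Area [km2] * 1e6 [m2/km2])
#   RC = runoff volume / rainfall volume   (dimensionless, expected in (0, 1])
def _rc_units_sketch(q_runoff_m3s, p_mm, area_km2):
    '''Hypothetical helper that mirrors the RC formula used above.'''
    runoff_vol = np.sum(q_runoff_m3s) * 3600.
    rain_vol = (np.sum(p_mm) / 1000.) * (area_km2 * 1e6)
    return runoff_vol / rain_vol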
def Runoff_groupByRain(D, groupby = 'RainEvent' , bins = None,
Vmin=None, Vmax=None, Nb = 10, logx = True):
'''Group the values of RC in function of a variable.
Parameters:
- D: pandas Dataframe with the results from the RC analysis.
- groupby: name of the column to use for the groups.
- Vmin: minimum value to set the groups.
- Vmax: max value to set the groups.
    - Nb: number of bins.
    - logx: use or not a logarithmic X axis.
    Results:
        - Dictionary with the RC by groups, P25, P50, P75, mean value of the variable
        for grouping, Variable for groups.
    #Whether to use a logarithmic X axis or not
if logx:
x = np.log(D[groupby])
else:
x = D[groupby]
    #Set max and min
    if Vmin is None: Vmin = x.min()
    if Vmax is None: Vmax = x.max()
    #Set the intervals
if bins is None:
b = np.linspace(Vmin, Vmax, Nb)
else:
b = bins
#Make the groups
DicStats = {'RC':[],'P25':[],'P75':[],'P50':[], 'X': [], groupby: []}
for i,j in zip(b[:-1], b[1:]):
p = np.where((x>=i) & (x<=j))[0]
if p.size > 0:
DicStats['RC'].append(D['RC'][p])
DicStats['P25'].append(np.percentile(D['RC'][p], 25))
DicStats['P50'].append(np.percentile(D['RC'][p], 50))
DicStats['P75'].append(np.percentile(D['RC'][p], 75))
DicStats['X'].append((i+j)/2.)
DicStats[groupby].append(x[p])
return DicStats
#-------------------------------------------------------------------------------------------
## Background functions.
def __Runoff_Get_Events__(Q, Umbral):
    '''Obtains the initial and end dates of the events related to
    a time series based on the results from the Asynch 190.
    Parameters:
        - Q: pandas series with the streamflow (simulated from asynch 190 no infiltration).
        - Umbral: streamflow threshold used to establish runoff occurrence.
Returns:
- pos1: initial date of each event.
- pos2: end date of each event'''
    #Threshold and positions with values over it
pos = np.where(Q.values > Umbral)[0]
#Positions start and end.
Dpos = pos[1:] - pos[:-1]
Dpos1 = pd.Series(Dpos, Q.index[pos[1:]])
pos1 = Dpos1[Dpos1>1].index
pos1 = pos1.insert(0, Q.index[pos][0])
pos1 = pos1[:-1]
Dpos2 = pd.Series(Dpos, Q.index[pos[:-1]])
pos2 = Dpos2[Dpos2>1].index
#returns results
return pos1, pos2
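# Minimal self-check (illustrative only, not part of the original notebook) of the event
# detection above: samples 3-5 and 9-10 exceed the threshold, so one closed event is
# expected from index 3 (start) to index 5 (end); the trailing block never closes and is
# dropped by the pos1[:-1] step. Assumes pandas is already imported as pd in this module.
def _runoff_get_events_demo():
    idx = pd.date_range('2020-01-01', periods=12, freq='h')
    q = pd.Series([0, 0, 0, 5, 6, 4, 0, 0, 0, 7, 8, 0], index=idx)
    p1, p2 = __Runoff_Get_Events__(q, Umbral=1)
    assert list(p1) == [idx[3]] and list(p2) == [idx[5]]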
def __Runoff_Get_eventsPeaks__(Q, pos1, pos2):
'''Obtains the peaks of the observed events selected by the
criteria of the asynch 190 model
    Parameters:
        - Q: Pandas series with the observed data.
- pos1: list with the start of the peaks.
- pos2: list with the end of the peaks.
Returns:
- List with the peaks corresponding to the events.'''
#Peak at each event
Peaks = []
for p1, p2 in zip(pos1, pos2):
Peaks.append(np.nanmax(Q[p1:p2].values))
return Peaks
def __Runoff_Del_Events__(Q, pos1, pos2, minTime = 2.5, minPeak = None, minConcav = None):
'''Eliminates events from the selected initial peaks based on different
    aspects such as min time of the event, min peak and the concavity.
Parameters:
- Q: pandas series with the observed streamflow.
- pos1: Pandas indexes with the start of the events.
- pos2: Pandas indexes with the end of the events.
- minTime: minimum time (days) of the duration of the hydrographs.
- minPeak: minim value of the peak at the hydrographs.
        - minConcav: minimum concavity for the hydrograph (suggested: 10).
Returns:
- starts: pandas index with the corrected starts.
- ends: pandas indexes with the corrected ends.'''
#Eliminates events based on their duration
if minTime is not None:
#Obtains the duration
Td = pos2 - pos1
Td = Td.total_seconds()/(3600*24)
Td = Td.values
#Eliminates
p = np.where(Td<minTime)[0]
pos1 = pos1.delete(p)
pos2 = pos2.delete(p)
#Eliminates events based on the peak flow
if minPeak is not None:
#Obtains peaks
Peaks = Series_Get_eventsPeaks(Q, pos1, pos2)
Peaks = np.array(Peaks)
#Eliminates
p = np.where(Peaks<minPeak)[0]
pos1 = pos1.delete(p)
pos2 = pos2.delete(p)
#Eliminates events based on the concavity criterion
if minConcav is not None:
        #Obtains the concavity series
Concav = Q.resample('5h').mean().diff(2)
Concav = Series_Get_eventsPeaks(Concav, pos1, pos2)
#Eliminates
p = np.where(np.array(Concav)<minConcav)[0]
pos1 = pos1.delete(p)
pos2 = pos2.delete(p)
#Returns the result
return pos1, pos2
# -
# ## Recession analysis
# +
#Function to obtain A, the fit error and the simulated recession for a given B
def Recession_NDF_method(l):
    '''l[0]: pandas Series with the streamflow data.
l[1]: parameter B between 0 and 5'''
# Function to obtains A for a given B (l[1])
def Estimate_A(Q,B,dt):
e1 = np.nansum((Q.values[:-1] - Q.values[1:]))
e2 = dt * np.nansum(((Q.values[:-1] - Q.values[1:])/2.)**B)
return e1/e2
# Estimates Q for the pair B and A
def Estimate_Q(Q, B, A):
'''Obtaines the estimated Q for a given A and B
Parameters:
- Qo: the initial value of the analyzed peak.
- t: Vector with the elapsed time.'''
#Convert time vector to elapsed time in seconds.
t = Q.index.astype('int64') / 1e9
t = (t.values - t.values[0])/3600.
Qo = Q.values[0]
# Obtains the estimted Qs
return Qo * (1 - ( (1.-B)*A*t / Qo**(1.-B) )) ** (1./(1.-B))
def Estimate_error(Qobs, Qsim):
        '''Estimates the relative volume error obtained with the pair
        A and B'''
Vsim = Qsim.sum()
Vobs = Qobs.sum()
return (Vsim - Vobs) / Vsim
#Obtains the time delta
dt = l[0].index[1] - l[0].index[0]
dt = dt.value / 1e9
#Estimates A
A = Estimate_A(l[0],l[1],dt)
    #Estimate Q
Qsim = Estimate_Q(l[0],l[1], A)
CountNaN = Qsim[np.isnan(Qsim)].size
#Estimate error
if CountNaN == 0:
E = Estimate_error(l[0],Qsim)
else:
E = 1000
return A, E, Qsim
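# Worked equation (for reference, not part of the original notebook): the NDF method above
# assumes the nonlinear-reservoir recession model dQ/dt = -A * Q**B. Separating variables
# and integrating from Q(0) = Qo gives
#   Q(t)**(1 - B) = Qo**(1 - B) - (1 - B) * A * t
# i.e.
#   Q(t) = Qo * (1 - (1 - B) * A * t / Qo**(1 - B)) ** (1 / (1 - B)),
# which is the expression returned by Estimate_Q (with t expressed in hours).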
# search B for recession
def Recession_Search_NDF(Q,Initial = 0, Long=1 ,process = 8, Window = 1, step = 0.01):
'''Search for the optimum value of B and A for a hydrograph
Parameters:
        - Initial: initial point of the recession; oscillates between 0 and 168 h.
        - Long: recession length; oscillates between 4 and 12 days.
- process: total number of processors to do the analysis.'''
    from multiprocessing import Pool  # required for the parallel grid search below
    #Movement of the initial and finish time
dis_i = pd.Timedelta(hours = Initial)
dis_f = pd.Timedelta(hours = 24*Long)
#Take a portion of the recession curve
X = Q[Q.idxmax()+dis_i:Q.idxmax()+dis_f+dis_i]
    # Exercise to obtain A and B for a streamflow record.
L = []
B = np.arange(0, 5., step)
for b in B:
L.append([X, b])
p = Pool(processes=process)
    Res = p.map(Recession_NDF_method, L)
p.close()
p.join()
#Error selection
Error = np.abs([i[1] for i in Res])
PosEr = np.argmin(Error)
#Return: B, A, E and Qsim
return B[PosEr], Res[PosEr][0], Error[PosEr], pd.Series(Res[PosEr][2], X.index)
# -
|
[
"pandas.Series",
"numpy.abs",
"numpy.where",
"numpy.delete",
"pandas.Timedelta",
"numpy.log",
"numpy.floor",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.isnan",
"numpy.vstack",
"numpy.isfinite",
"numpy.nanmax",
"numpy.argmin",
"numpy.percentile",
"numpy.nansum",
"numpy.arange"
] |
[((3125, 3144), 'pandas.Timedelta', 'pd.Timedelta', (['"""12h"""'], {}), "('12h')\n", (3137, 3144), True, 'import pandas as pd\n'), ((4823, 4856), 'pandas.Series', 'pd.Series', (['dX'], {'index': 'X.index[:-1]'}), '(dX, index=X.index[:-1])\n', (4832, 4856), True, 'import pandas as pd\n'), ((5690, 5707), 'numpy.array', 'np.array', (['Correct'], {}), '(Correct)\n', (5698, 5707), True, 'import numpy as np\n'), ((9215, 9227), 'numpy.array', 'np.array', (['RC'], {}), '(RC)\n', (9223, 9227), True, 'import numpy as np\n'), ((9242, 9259), 'numpy.array', 'np.array', (['RainTot'], {}), '(RainTot)\n', (9250, 9259), True, 'import numpy as np\n'), ((9274, 9291), 'numpy.array', 'np.array', (['RainInt'], {}), '(RainInt)\n', (9282, 9291), True, 'import numpy as np\n'), ((9306, 9323), 'numpy.array', 'np.array', (['RainAnt'], {}), '(RainAnt)\n', (9314, 9323), True, 'import numpy as np\n'), ((9335, 9349), 'numpy.array', 'np.array', (['Date'], {}), '(Date)\n', (9343, 9349), True, 'import numpy as np\n'), ((9361, 9375), 'numpy.array', 'np.array', (['Qmax'], {}), '(Qmax)\n', (9369, 9375), True, 'import numpy as np\n'), ((9765, 9783), 'numpy.delete', 'np.delete', (['RC', 'pos'], {}), '(RC, pos)\n', (9774, 9783), True, 'import numpy as np\n'), ((9798, 9821), 'numpy.delete', 'np.delete', (['RainTot', 'pos'], {}), '(RainTot, pos)\n', (9807, 9821), True, 'import numpy as np\n'), ((9836, 9859), 'numpy.delete', 'np.delete', (['RainInt', 'pos'], {}), '(RainInt, pos)\n', (9845, 9859), True, 'import numpy as np\n'), ((9874, 9897), 'numpy.delete', 'np.delete', (['RainAnt', 'pos'], {}), '(RainAnt, pos)\n', (9883, 9897), True, 'import numpy as np\n'), ((9909, 9929), 'numpy.delete', 'np.delete', (['Date', 'pos'], {}), '(Date, pos)\n', (9918, 9929), True, 'import numpy as np\n'), ((9941, 9961), 'numpy.delete', 'np.delete', (['Qmax', 'pos'], {}), '(Qmax, pos)\n', (9950, 9961), True, 'import numpy as np\n'), ((12409, 12442), 'pandas.Series', 'pd.Series', (['Dpos', 'Q.index[pos[1:]]'], {}), '(Dpos, Q.index[pos[1:]])\n', (12418, 12442), True, 'import pandas as pd\n'), ((12551, 12585), 'pandas.Series', 'pd.Series', (['Dpos', 'Q.index[pos[:-1]]'], {}), '(Dpos, Q.index[pos[:-1]])\n', (12560, 12585), True, 'import pandas as pd\n'), ((16982, 17009), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': 'Initial'}), '(hours=Initial)\n', (16994, 17009), True, 'import pandas as pd\n'), ((17024, 17053), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(24 * Long)'}), '(hours=24 * Long)\n', (17036, 17053), True, 'import pandas as pd\n'), ((17226, 17249), 'numpy.arange', 'np.arange', (['(0)', '(5.0)', 'step'], {}), '(0, 5.0, step)\n', (17235, 17249), True, 'import numpy as np\n'), ((17413, 17440), 'numpy.abs', 'np.abs', (['[i[1] for i in Res]'], {}), '([i[1] for i in Res])\n', (17419, 17440), True, 'import numpy as np\n'), ((17453, 17469), 'numpy.argmin', 'np.argmin', (['Error'], {}), '(Error)\n', (17462, 17469), True, 'import numpy as np\n'), ((1303, 1319), 'numpy.zeros', 'np.zeros', (['Q.size'], {}), '(Q.size)\n', (1311, 1319), True, 'import numpy as np\n'), ((1963, 1979), 'numpy.zeros', 'np.zeros', (['Q.size'], {}), '(Q.size)\n', (1971, 1979), True, 'import numpy as np\n'), ((2388, 2404), 'numpy.zeros', 'np.zeros', (['Q.size'], {}), '(Q.size)\n', (2396, 2404), True, 'import numpy as np\n'), ((5719, 5759), 'pandas.Series', 'pd.Series', (['Q[DatesEnds]'], {'index': 'DatesEnds'}), '(Q[DatesEnds], index=DatesEnds)\n', (5728, 5759), True, 'import pandas as pd\n'), ((6292, 6304), 'numpy.isnan', 'np.isnan', (['Qh'], {}), '(Qh)\n', (6300, 6304), 
True, 'import numpy as np\n'), ((6500, 6513), 'numpy.isnan', 'np.isnan', (['Qsh'], {}), '(Qsh)\n', (6508, 6513), True, 'import numpy as np\n'), ((7195, 7218), 'numpy.percentile', 'np.percentile', (['Qobs', '(20)'], {}), '(Qobs, 20)\n', (7208, 7218), True, 'import numpy as np\n'), ((9454, 9496), 'numpy.where', 'np.where', (['((RC[p1] <= 1.0) & (RC[p1] > 0.0))'], {}), '((RC[p1] <= 1.0) & (RC[p1] > 0.0))\n', (9462, 9496), True, 'import numpy as np\n'), ((9696, 9734), 'numpy.where', 'np.where', (['((RC > 0.04) & (RainTot < 10))'], {}), '((RC > 0.04) & (RainTot < 10))\n', (9704, 9734), True, 'import numpy as np\n'), ((10889, 10907), 'numpy.log', 'np.log', (['D[groupby]'], {}), '(D[groupby])\n', (10895, 10907), True, 'import numpy as np\n'), ((11089, 11116), 'numpy.linspace', 'np.linspace', (['Vmin', 'Vmax', 'Nb'], {}), '(Vmin, Vmax, Nb)\n', (11100, 11116), True, 'import numpy as np\n'), ((12306, 12333), 'numpy.where', 'np.where', (['(Q.values > Umbral)'], {}), '(Q.values > Umbral)\n', (12314, 12333), True, 'import numpy as np\n'), ((14453, 14468), 'numpy.array', 'np.array', (['Peaks'], {}), '(Peaks)\n', (14461, 14468), True, 'import numpy as np\n'), ((15274, 15313), 'numpy.nansum', 'np.nansum', (['(Q.values[:-1] - Q.values[1:])'], {}), '(Q.values[:-1] - Q.values[1:])\n', (15283, 15313), True, 'import numpy as np\n'), ((17550, 17583), 'pandas.Series', 'pd.Series', (['Res[PosEr][2]', 'X.index'], {}), '(Res[PosEr][2], X.index)\n', (17559, 17583), True, 'import pandas as pd\n'), ((2874, 2891), 'numpy.vstack', 'np.vstack', (['[R, B]'], {}), '([R, B])\n', (2883, 2891), True, 'import numpy as np\n'), ((9425, 9440), 'numpy.isfinite', 'np.isfinite', (['RC'], {}), '(RC)\n', (9436, 9440), True, 'import numpy as np\n'), ((10030, 10078), 'numpy.vstack', 'np.vstack', (['[RC, RainTot, RainAnt, RainInt, Qmax]'], {}), '([RC, RainTot, RainAnt, RainInt, Qmax])\n', (10039, 10078), True, 'import numpy as np\n'), ((11290, 11319), 'numpy.where', 'np.where', (['((x >= i) & (x <= j))'], {}), '((x >= i) & (x <= j))\n', (11298, 11319), True, 'import numpy as np\n'), ((13149, 13175), 'numpy.nanmax', 'np.nanmax', (['Q[p1:p2].values'], {}), '(Q[p1:p2].values)\n', (13158, 13175), True, 'import numpy as np\n'), ((14202, 14224), 'numpy.where', 'np.where', (['(Td < minTime)'], {}), '(Td < minTime)\n', (14210, 14224), True, 'import numpy as np\n'), ((14501, 14526), 'numpy.where', 'np.where', (['(Peaks < minPeak)'], {}), '(Peaks < minPeak)\n', (14509, 14526), True, 'import numpy as np\n'), ((15334, 15388), 'numpy.nansum', 'np.nansum', (['(((Q.values[:-1] - Q.values[1:]) / 2.0) ** B)'], {}), '(((Q.values[:-1] - Q.values[1:]) / 2.0) ** B)\n', (15343, 15388), True, 'import numpy as np\n'), ((16376, 16390), 'numpy.isnan', 'np.isnan', (['Qsim'], {}), '(Qsim)\n', (16384, 16390), True, 'import numpy as np\n'), ((3541, 3562), 'numpy.isfinite', 'np.isfinite', (['Q.values'], {}), '(Q.values)\n', (3552, 3562), True, 'import numpy as np\n'), ((8476, 8494), 'pandas.Timedelta', 'pd.Timedelta', (['"""3d"""'], {}), "('3d')\n", (8488, 8494), True, 'import pandas as pd\n'), ((11423, 11452), 'numpy.percentile', 'np.percentile', (["D['RC'][p]", '(25)'], {}), "(D['RC'][p], 25)\n", (11436, 11452), True, 'import numpy as np\n'), ((11489, 11518), 'numpy.percentile', 'np.percentile', (["D['RC'][p]", '(50)'], {}), "(D['RC'][p], 50)\n", (11502, 11518), True, 'import numpy as np\n'), ((11555, 11584), 'numpy.percentile', 'np.percentile', (["D['RC'][p]", '(75)'], {}), "(D['RC'][p], 75)\n", (11568, 11584), True, 'import numpy as np\n'), ((8209, 8220), 
'numpy.floor', 'np.floor', (['N'], {}), '(N)\n', (8217, 8220), True, 'import numpy as np\n'), ((14865, 14881), 'numpy.array', 'np.array', (['Concav'], {}), '(Concav)\n', (14873, 14881), True, 'import numpy as np\n')]
|
# +
import argparse
import os
import pickle
import sys
sys.path.append("..")
import numpy as np
import torchvision
import torchvision.transforms as T
import torch.utils.data as torch_data
from tqdm import tqdm
from models.classifiers import EvalCompoundResNet
# -
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-F', '--function', type=str, required=True, choices=['max_index', 'count_data'])
parser.add_argument('-O', '--output_path', type=str, required=True)
    parser.add_argument('--num_attr', type=int, default=8)
    parser.add_argument('--sample_per_category', type=int, default=100000)
parser.add_argument('--weight_path', type=str, default='/home/u5397696/interpolation/celebA-hq-classifier/')
parser.add_argument('--data_root', type=str, default='/home/u5397696/interpolation/interfacegan/data/tmp')
return parser.parse_args()
def max_index(args):
if not os.path.exists(args.output_path):
raise ValueError(f"{args.output_path} doesn't exist.")
with open(args.output_path, 'rb') as f:
data_index = pickle.load(f)
print(f'#attributes: {len(data_index)}')
max_val = -1e9
for i in range(len(data_index)):
max_p = np.max(data_index[i][0])
max_n = np.max(data_index[i][1])
max_val = np.max([max_val, max_p, max_n])
print(i, max_p, max_n)
print (f'Max index is {max_val}')
def count_data(args):
#if os.path.exists(args.output_path):
# raise ValueError(f"{args.output_path} has existed.")
t = T.Compose([T.Resize(224), T.ToTensor()])
dset = torchvision.datasets.ImageFolder(args.data_root, transform=t)
loader= torch_data.DataLoader(dset, batch_size=32, shuffle=False, num_workers=4, pin_memory=True)
print (f'Start processing {os.path.basename(args.data_root)}.')
m = EvalCompoundResNet(args.weight_path).cuda()
data_index = [[[],[]] for _ in range(args.num_attr)]
image_cnt = 0
for bid, (imgs, _) in enumerate(loader):
imgs = imgs.cuda()
preds = m.predict_quantize(imgs)
for iid, pred in enumerate(preds):
is_save = False
for ind in range(args.num_attr):
if pred[ind] == True and len(data_index[ind][0])<args.sample_per_category:
is_save = True
data_index[ind][0].append(image_cnt)
elif pred[ind] == False and len(data_index[ind][1])<args.sample_per_category:
is_save = True
data_index[ind][1].append(image_cnt)
if is_save:
image_cnt += 1
if bid % 10 == 0:
for i in range(args.num_attr):
print(i, len(data_index[i][0]), len(data_index[i][1]))
print(f'Processes {bid}/{len(loader)}.')
with open(args.output_path, 'wb') as f:
pickle.dump(data_index, f)
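# Layout of the pickled index written above (illustrative): data_index has one entry per
# attribute and data_index[attr] == [positive_ids, negative_ids], where positive_ids holds
# the running image counters predicted True for that attribute and negative_ids those
# predicted False, each capped at args.sample_per_category. A hypothetical reader:
def _load_index_sketch(path):
    '''Hypothetical helper (path is a placeholder) returning (n_pos, n_neg) per attribute.'''
    with open(path, 'rb') as f:
        data_index = pickle.load(f)
    return [(len(pos), len(neg)) for pos, neg in data_index]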
def main():
args = parse_args()
if args.function == 'max_index':
max_index(args)
elif args.function == 'count_data':
count_data(args)
if __name__ == '__main__':
main()
|
[
"os.path.exists",
"pickle.dump",
"argparse.ArgumentParser",
"pickle.load",
"numpy.max",
"torchvision.datasets.ImageFolder",
"models.classifiers.EvalCompoundResNet",
"os.path.basename",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"sys.path.append"
] |
[((55, 76), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (70, 76), False, 'import sys\n'), ((300, 325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (323, 325), False, 'import argparse\n'), ((1629, 1690), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', (['args.data_root'], {'transform': 't'}), '(args.data_root, transform=t)\n', (1661, 1690), False, 'import torchvision\n'), ((1704, 1797), 'torch.utils.data.DataLoader', 'torch_data.DataLoader', (['dset'], {'batch_size': '(32)', 'shuffle': '(False)', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(dset, batch_size=32, shuffle=False, num_workers=4,\n pin_memory=True)\n', (1725, 1797), True, 'import torch.utils.data as torch_data\n'), ((930, 962), 'os.path.exists', 'os.path.exists', (['args.output_path'], {}), '(args.output_path)\n', (944, 962), False, 'import os\n'), ((1101, 1115), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1112, 1115), False, 'import pickle\n'), ((1238, 1262), 'numpy.max', 'np.max', (['data_index[i][0]'], {}), '(data_index[i][0])\n', (1244, 1262), True, 'import numpy as np\n'), ((1279, 1303), 'numpy.max', 'np.max', (['data_index[i][1]'], {}), '(data_index[i][1])\n', (1285, 1303), True, 'import numpy as np\n'), ((1322, 1353), 'numpy.max', 'np.max', (['[max_val, max_p, max_n]'], {}), '([max_val, max_p, max_n])\n', (1328, 1353), True, 'import numpy as np\n'), ((1588, 1601), 'torchvision.transforms.Resize', 'T.Resize', (['(224)'], {}), '(224)\n', (1596, 1601), True, 'import torchvision.transforms as T\n'), ((1603, 1615), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1613, 1615), True, 'import torchvision.transforms as T\n'), ((1871, 1907), 'models.classifiers.EvalCompoundResNet', 'EvalCompoundResNet', (['args.weight_path'], {}), '(args.weight_path)\n', (1889, 1907), False, 'from models.classifiers import EvalCompoundResNet\n'), ((1826, 1858), 'os.path.basename', 'os.path.basename', (['args.data_root'], {}), '(args.data_root)\n', (1842, 1858), False, 'import os\n'), ((2918, 2944), 'pickle.dump', 'pickle.dump', (['data_index', 'f'], {}), '(data_index, f)\n', (2929, 2944), False, 'import pickle\n')]
|
import math
from math import pi
import numpy as np
import open3d as o3d
import matplotlib.pyplot as plt
import cv2
import toml
from .cameraparam import CameraParam
from .fitted_line import FittedLine
from .ransac_fit import ransac_line_fit, ransac_ground_fit
from .util import check_all_false
# TODO: output random seed used in ransac and open3d
# PCL pre-processing (the unit of these numerics is [m])
DOWNSAMPLE_VOXEL_SIZE = 0.003
DOWNSAMPLE_VOXEL_SIZE_GROUND = 0.005
# Ground fit
X_MIN = 0.
X_MAX = +1.2
Y_MIN = -0.8
Y_MAX = +0.8
GRID_SIZE = 0.080
GROUND_SEED_Z_MAX = 0.
GROUND_SEED_MARGIN = 0.080
GROUND_MARGIN = 0.030
SMOOTHING_KERNEL = GRID_SIZE * 0.5
# Clustering
# DBSCAN_EPS : Density parameter that is used to find neighbouring points
# DBSCAN_MINPOINTS : Minimum number of points to form a cluster
DBSCAN_EPS = 0.016
DBSCAN_MINPOINTS = 10
CLUSTER_MINPOINTS = 50
CMAP_CLUSTER = plt.get_cmap("tab20")
def set_pcl_fitter(toml_path):
dict_toml = toml.load(open(toml_path))
set_roll = float(dict_toml['General']['set_roll'])
set_pitch = float(dict_toml['General']['set_pitch'])
set_yaw = float(dict_toml['General']['set_yaw'])
camera_set_param = CameraParam()
camera_set_param.set_tf_rot_and_trans([set_roll, set_pitch, set_yaw], [0., 0., 0.])
return PCLFitter(camera_set_param, dict_toml)
class PCLFitter(object):
def __init__(self, camera_set_param=None, target_attribute=None):
self.depth_img = None
self.camera_param = None
self.grid_xyzw = None
if camera_set_param is None:
self.camera_set_param = CameraParam()
else:
self.camera_set_param = camera_set_param
if target_attribute is None:
self.set_parameters()
else:
self.set_target_attribute(target_attribute)
def set_target_attribute(self, dict_toml):
self.pcl_cutoff_dist = float(dict_toml['Selection']['pcl_cutoff_dist'])
self.target_max_dist = float(dict_toml['Selection']['target_max_dist'])
self.target_min_dist = float(dict_toml['Selection']['target_min_dist'])
self.target_max_len = float(dict_toml['Selection']['target_max_len'])
self.target_min_len = float(dict_toml['Selection']['target_min_len'])
self.target_max_tilt = float(dict_toml['Selection']['target_max_tilt'])
def set_parameters(self):
self.pcl_cutoff_dist = 1.1
self.target_max_dist = 0.85
self.target_min_dist = 0.3
self.target_min_len = 0.25
self.target_max_len = 0.40
self.target_max_tilt = 30.
def get_pcd_from_depth_img(self, depth_img, camera_param):
self.depth_img = depth_img
self.camera_param = camera_param
pcl_raw = self.tfm_pcl_cam2global(self.cvt_depth2pcl(self.depth_img, self.camera_param), camera_param)
pcd = self.downsample(pcl_raw, voxel_size=DOWNSAMPLE_VOXEL_SIZE)
return pcd
def fit_pcd(self, pcd, cluster_eps=DBSCAN_EPS, cluster_min_points=DBSCAN_MINPOINTS, verbose=True):
pcd_list = []
fitgeom_list = []
pcd_array = np.array(pcd.points, dtype=np.float32)
bflg_above_ground, xy_binidx, grid_xyzw, pcd_grounds_list = self.ground_fit(pcd_array)
pcd_grounds_ary_pre_downsample = np.asarray(pcd_grounds_list[2].points) # pcd_grounds = [pcd_out_of_bin, pcd_groundseed, pcd_ground]
pcd_grounds = self.downsample(pcd_grounds_ary_pre_downsample, voxel_size=DOWNSAMPLE_VOXEL_SIZE_GROUND)
ground_points_ary = np.asarray(pcd_grounds.points)
pcd_list += [ground_points_ary]
fitgeom_list.append(self.get_mesh_ground())
# TODO debug.error() send to cloud if above ground is all false
if check_all_false(bflg_above_ground):
return [], pcd_list, fitgeom_list, pcd_array, ground_points_ary
labels, cluster_pcd = self.clustering(pcd_array[bflg_above_ground],
eps=cluster_eps, min_points=cluster_min_points)
pcd_list.append(cluster_pcd)
line_list = self.line_fit(pcd_array[bflg_above_ground], labels)
self.merge_lines(line_list)
self.mark_multiline_clusters(line_list)
self.extend_lines_to_ground(line_list, grid_xyzw)
self.check_line_truncation(line_list)
self.final_selection(line_list)
if verbose:
self.print_line_info(line_list)
self.bkg_postprocess(line_list)
self.remove_noise_lines(line_list, grid_xyzw)
mesh_cylinders = self.get_line_fit_geometry(line_list)
fitgeom_list += mesh_cylinders
return line_list, pcd_list, fitgeom_list, pcd_array, ground_points_ary
def cvt_depth2pcl(self, depth_img, camera_param):
cx, cy = camera_param.center_xy
fx, fy = camera_param.focal_xy
DEPTH_MIN = 1e-3
arr_y = np.arange(depth_img.shape[0], dtype=np.float32)
arr_x = np.arange(depth_img.shape[1], dtype=np.float32)
val_x, val_y = np.meshgrid(arr_x, arr_y)
        # TODO: rewrite axis conversion explicitly (i.e. zense clockwise rotation)
tmp_x = +depth_img
tmp_y = +depth_img * (val_y - cy) * (1. / fy)
tmp_z = -depth_img * (val_x - cx) * (1. / fx)
filled = (depth_img > DEPTH_MIN) * (depth_img < self.pcl_cutoff_dist + 0.2)
filled_x = tmp_x[filled]
filled_y = tmp_y[filled]
filled_z = tmp_z[filled]
pcl = np.stack([filled_x, filled_y, filled_z], axis=-1)
return pcl
def tfm_pcl_cam2global(self, pcl_camframe, camera_param):
pcl_tmp = np.dot(pcl_camframe, camera_param.rot_mtx.transpose()) + camera_param.translation
pcl_global = np.dot(pcl_tmp, self.camera_set_param.rot_mtx.transpose())
return pcl_global
def cvt_to_2d_image_xyd(self, input_points, camera_param):
points = input_points.reshape(-1, 3)
points_tmp = np.dot(points, self.camera_set_param.inv_rot_mtx.transpose())
points_camframe = np.dot(points_tmp - camera_param.translation, camera_param.inv_rot_mtx.transpose())
cx, cy = camera_param.center_xy
fx, fy = camera_param.focal_xy
depth = +points_camframe[:, 0]
val_y = +points_camframe[:, 1] / depth * fy + cy
val_x = -points_camframe[:, 2] / depth * fx + cx
xyd = np.stack([val_x, val_y, depth], axis=-1)
return xyd.reshape(input_points.shape)
def downsample(self, pcl_raw, voxel_size):
pcd_raw = self.cvt_numpy2open3d(pcl_raw, color=[0., 0., 1.])
pcd = pcd_raw.voxel_down_sample(voxel_size=voxel_size)
return pcd
def cvt_numpy2open3d(self, pcl, color=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pcl.astype(np.float64))
if not color is None:
pcd.paint_uniform_color(color)
return pcd
def ground_fit(self, pcl):
x_nbin = int( (X_MAX - X_MIN) / float(GRID_SIZE) + 1e-3 )
y_nbin = int( (Y_MAX - Y_MIN) / float(GRID_SIZE) + 1e-3 )
x_edge = np.linspace(X_MIN, X_MIN + GRID_SIZE * x_nbin, x_nbin + 1).reshape(1, -1)
y_edge = np.linspace(Y_MIN, Y_MIN + GRID_SIZE * y_nbin, y_nbin + 1).reshape(1, -1)
x_ctr = (x_edge[0, 1:] + x_edge[0, :-1]) * 0.5
y_ctr = (y_edge[0, 1:] + y_edge[0, :-1]) * 0.5
pcl_tmp = pcl.reshape(-1, 1, 3)
x_binflg = (pcl_tmp[:, :, 0] >= x_edge[:, :-1]) * (pcl_tmp[:, :, 0] < x_edge[:, 1:])
y_binflg = (pcl_tmp[:, :, 1] >= y_edge[:, :-1]) * (pcl_tmp[:, :, 1] < y_edge[:, 1:])
x_binidx = np.argmax(x_binflg, axis=-1)
y_binidx = np.argmax(y_binflg, axis=-1)
x_binidx[(x_binflg.sum(axis=-1) == 0)] = -1
y_binidx[(y_binflg.sum(axis=-1) == 0)] = -1
xy_binidx = np.concatenate([x_binidx.reshape(-1,1), y_binidx.reshape(-1,1)], axis=-1)
        bflg_out_of_bin = (xy_binidx == -1).sum(-1).astype(bool)
bflg_in_bin = (bflg_out_of_bin == False)
grid_xyzw = np.zeros([x_nbin, y_nbin, 4], dtype=np.float64)
for i_x in range(x_nbin):
for i_y in range(y_nbin):
in_bin = (x_binidx == i_x) * (y_binidx == i_y)
pcl_in_bin = pcl[in_bin]
valid = (pcl_in_bin[:, 2] < GROUND_SEED_Z_MAX)
pcl_valid = pcl_in_bin[valid]
if pcl_valid.shape[0] == 0:
z_val = 0.
wgt = 0.1
else:
z_val = pcl_valid[:, 2].min()
wgt = 1.
grid_xyzw[i_x, i_y] = [x_ctr[i_x], y_ctr[i_y], z_val, wgt]
grid_xyzw = self.fill_empy_gridz(grid_xyzw, w_thres=0.1)
pcd_groundseed = self.cvt_numpy2open3d(grid_xyzw.reshape(-1, 4)[:, :3], color=[1., 0., 1.])
pcl_ground_seed_z = grid_xyzw[x_binidx, y_binidx, 2]
bflg_ground_seed = (pcl[:, 2] < (pcl_ground_seed_z + GROUND_SEED_MARGIN)) * bflg_in_bin
grid_xyzw = ransac_ground_fit(pcl[bflg_ground_seed], xy_binidx[bflg_ground_seed], grid_xyzw)
grid_xyzw = self.fill_empy_gridz(grid_xyzw, w_thres=1.)
grid_xyzw = self.smooth_ground(grid_xyzw, kernel_size=SMOOTHING_KERNEL)
self.grid_xyzw = grid_xyzw
bflg_in_range = (np.linalg.norm(pcl[:,:2], axis=-1) < self.pcl_cutoff_dist)
bflg_valid_points = bflg_in_range * bflg_in_bin
pcl_ground_z = grid_xyzw[x_binidx, y_binidx, 2]
bflg_ground = (pcl[:, 2] < (pcl_ground_z + GROUND_MARGIN)) * bflg_valid_points
bflg_above_ground = (bflg_ground == False) * bflg_valid_points
pcd_out_of_bin = self.cvt_numpy2open3d(pcl[bflg_valid_points == False], color=[0.3, 0., 0.5])
pcd_ground = self.cvt_numpy2open3d(pcl[bflg_ground], color=[0., 0., 0.5])
pcd_all = [pcd_out_of_bin, pcd_groundseed, pcd_ground]
return bflg_above_ground, xy_binidx, grid_xyzw, pcd_all
def fill_empy_gridz(self, grid_xyzw, w_thres=0.1):
filled = (grid_xyzw[:,:,3] > w_thres)
empty = (filled == False)
# print 'filled ', filled.shape, filled.sum()
# print 'empty ', empty.shape, empty.sum()
filled_xyzw = grid_xyzw[filled].reshape(-1, 1, 4)
empty_xyzw = grid_xyzw[empty].reshape(1, -1, 4)
# print 'filled_xyzw ', filled_xyzw.shape
# print 'empty_xyzw ', empty_xyzw.shape
dist_array = np.linalg.norm(filled_xyzw[:,:,:2] - empty_xyzw[:,:,:2], axis=-1)
# print 'dist_array ', dist_array.shape
if dist_array.shape[0] != 0:
nearest_filled = np.argmin(dist_array, axis=0)
grid_xyzw[empty, 2] = filled_xyzw[nearest_filled, 0, 2]
return grid_xyzw
def smooth_ground(self, grid_xyzw, kernel_size):
vect = grid_xyzw[:,:,:2].reshape(1, -1, 2) - grid_xyzw[:,:,:2].reshape(-1, 1, 2)
dsq = (vect ** 2).sum(axis=-1)
z_orig = grid_xyzw[:,:,2].reshape(-1)
wgt = grid_xyzw[:,:,3].reshape(-1)
coeff = 0.5 / kernel_size ** 2
fill_wgt = wgt * np.exp(-dsq * coeff)
z_smooth = (z_orig * fill_wgt).sum(axis=-1) / fill_wgt.sum(axis=-1)
grid_xyzw[:,:,2].reshape(-1)[:] = z_smooth
return grid_xyzw
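    # Worked equation (for reference): smooth_ground computes a weighted Gaussian average
    # over the grid cells,
    #   z_smooth_i = sum_j( z_j * w_j * exp(-d_ij**2 / (2 * kernel_size**2)) )
    #                / sum_j( w_j * exp(-d_ij**2 / (2 * kernel_size**2)) ),
    # since coeff = 0.5 / kernel_size**2, so low-confidence cells (small w_j) pull their
    # neighbours less than high-confidence ones.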
def get_mesh_ground(self):
return self.cvt_gridvtx2mesh(self.grid_xyzw) if self.grid_xyzw is not None else None
def cvt_gridvtx2mesh(self, grid_vtx, double_sided=True):
ngrid_x = grid_vtx.shape[0]
ngrid_y = grid_vtx.shape[1]
vertices = np.array(grid_vtx[:,:,:3].reshape(-1,3))
triangles = []
for i_x in range(grid_vtx.shape[0] - 1):
for i_y in range(grid_vtx.shape[1] - 1):
ivert_base = i_x * ngrid_y + i_y
triangles.append([ivert_base, ivert_base+ngrid_y, ivert_base+1])
triangles.append([ivert_base+ngrid_y+1, ivert_base+1, ivert_base+ngrid_y])
triangles = np.array(triangles)
if double_sided:
triangles = np.concatenate([triangles, triangles[:,::-1]], axis=0)
mesh = o3d.geometry.TriangleMesh()
mesh.vertices = o3d.utility.Vector3dVector(vertices)
mesh.triangles = o3d.utility.Vector3iVector(triangles)
mesh.paint_uniform_color([0.4, 0.4, 0.4])
mesh.compute_vertex_normals()
return mesh
def clustering(self, pcl, eps=DBSCAN_EPS, min_points=DBSCAN_MINPOINTS):
n_points = pcl.shape[0]
        print('Clustering {} points ...'.format(n_points))
pcd = self.cvt_numpy2open3d(pcl)
labels_orig = np.array(
pcd.cluster_dbscan(eps=eps, min_points=min_points, print_progress=False))
n_cluster = labels_orig.max() + 1
print('Found {} clusters.'.format(n_cluster))
cls_flg = (np.arange(n_cluster).reshape(-1,1) == labels_orig.reshape(1,-1))
n_points_in_cls = cls_flg.sum(axis=-1)
sortidx_cls = np.argsort(n_points_in_cls)[::-1]
labels = np.ones(n_points, dtype=np.int32) * -1
for i_cls in range(n_cluster):
labels[cls_flg[sortidx_cls[i_cls]]] = i_cls
colors = CMAP_CLUSTER(labels)
colors[labels < 0] = 0.8
pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])
return labels, pcd
def line_fit(self, pcl, labels):
MAX_ITER_LINEFIT = 3
RANSAC_N_ITER = 500
CUT_PERCENTILE = 0.8
DTHRES_INLIER = 0.020
MAX_ROOT_Z = 0.20
line_list = []
n_cluster = labels.max() + 1
print("Line fit on %d clusters ..." % n_cluster)
do_break = False
for i_cluster in range(n_cluster):
pcl_cluster = pcl[(labels == i_cluster)]
print("Cluster #{} : {} points".format(i_cluster, pcl_cluster.shape[0]))
pcl_to_fit = pcl_cluster
for i_iter in range(MAX_ITER_LINEFIT):
n_to_fit = pcl_to_fit.shape[0]
print(" - Iteration {} : {} points".format(i_iter, n_to_fit)),
if n_to_fit < CLUSTER_MINPOINTS:
print(" - Too small!")
if i_iter == 0:
do_break = True
break
length, tfm_mtx, is_outlier = ransac_line_fit(pcl_to_fit, n_iter=RANSAC_N_ITER, dthres_inlier=DTHRES_INLIER, cut_percentile=CUT_PERCENTILE, max_root_z=(MAX_ROOT_Z if i_iter==0 else -1.))
if tfm_mtx is None:
print(" - Bad fit!")
break
print(" - Good fit!")
line_list.append(FittedLine(length, tfm_mtx, i_cluster))
pcl_to_fit = pcl_to_fit[is_outlier]
if do_break:
break
print("Found {} lines.".format(len(line_list)))
return line_list
def merge_lines(self, line_list):
MERGE_THRES_COS = math.cos(15. * pi / 180.)
MERGE_THRES_DIST = 0.10
z_array = np.array([line.position[2] for line in line_list])
sorted_idx = np.argsort(z_array)
n_line = len(line_list)
for i_line in range(n_line):
line = line_list[sorted_idx[i_line]]
for i_line2 in range(i_line + 1, n_line):
line2 = line_list[sorted_idx[i_line2]]
if not line2.parent is None:
continue
to_line2 = line2.position - line.position_center
dist_to_line2 = np.linalg.norm(to_line2)
dir_to_line2 = to_line2 / dist_to_line2
cos_to_line2 = np.dot(dir_to_line2, line.direction)
if cos_to_line2 < MERGE_THRES_COS:
continue
if dist_to_line2 > MERGE_THRES_DIST + line.length * 0.5:
continue
line2.parent = line
def count_lines_in_cluster(self, line_list):
counts = {}
for line in line_list:
if not line.cluster_id in counts:
counts[line.cluster_id] = 0
counts[line.cluster_id] += 1
return counts
def mark_multiline_clusters(self, line_list):
counts = self.count_lines_in_cluster(line_list)
for line in line_list:
if counts[line.cluster_id] > 1:
line.is_multiline_cluster = True
def extend_lines_to_ground(self, line_list, grid_xyzw):
N_AVERAGE = 4
MAX_R = GRID_SIZE
MIN_SOLITARY_LEN = 0.100
MAX_EXTEND_LEN = 0.200
MAX_GROUNDED_EXTEND_LEN = 0.060
COSZ_THRESHOLD = math.cos(45. * pi / 180.)
flatten_grid_xyz = grid_xyzw[:,:,:3].reshape(-1, 3)
for line in line_list:
if not line.parent is None:
continue
if line.is_solitary and line.length < MIN_SOLITARY_LEN:
continue
if line.direction[2] < COSZ_THRESHOLD:
continue
flatten_grid_local_frame = line.tfm_to_local_frame(flatten_grid_xyz)
flatten_grid_r = np.linalg.norm(flatten_grid_local_frame[:,:2], axis=-1)
idx_sort = np.argsort(flatten_grid_r)[0:N_AVERAGE]
weight = np.clip((MAX_R - flatten_grid_r[idx_sort]) / MAX_R, 0., 1.)
weight_sum = weight.sum()
if not weight_sum > 0.:
continue
ground_z_local_frame = np.dot(flatten_grid_local_frame[idx_sort,2], weight) / weight_sum
# idx_min = idx_sort[0]
# if flatten_grid_r[idx_min] > MAX_R:
# continue
# ground_z_local_frame = flatten_grid_local_frame[idx_min, 2]
extend_len = -ground_z_local_frame
if extend_len > MAX_EXTEND_LEN:
continue
line.extend_root(extend_len)
line.is_grounded = (extend_len <= MAX_GROUNDED_EXTEND_LEN)
def is_in_image(self, xyd, image_shape):
TOP_MARGIN = 20
SIDE_MARGIN = 20
BOTTOM_MARGIN = 0
x_val = xyd[0]
y_val = xyd[1]
if (y_val > SIDE_MARGIN
and y_val < image_shape[0] - SIDE_MARGIN
and x_val > TOP_MARGIN
and x_val < image_shape[1] - BOTTOM_MARGIN):
return True
else:
return False
def check_line_truncation(self, line_list):
SEEK_MARGIN = [10, 50]
OPENING_ANGLE = 4.
SECTOR_COLOR = 1
DEPTH_MARGIN = 0.015
MAX_OCCLUDING_PIXELS = 5
sector_mask = np.zeros(self.depth_img.shape, dtype=np.uint8)
for line in line_list:
line.sector_mask = {}
line.occlusion_mask = {}
root_is_contained = 0
tip_is_contained = 0
is_occluded = False
sector_mask = sector_mask
xyd_ends = self.cvt_to_2d_image_xyd(line.position_ends, self.camera_param)
line.xyd_ends = xyd_ends
root_is_contained += self.is_in_image(xyd_ends[0], sector_mask.shape)
tip_is_contained += self.is_in_image(xyd_ends[1], sector_mask.shape)
if line.is_solitary and line.is_grounded:
root_to_tip_xy = (xyd_ends[1] - xyd_ends[0])[:2]
sector_angle = math.atan2(root_to_tip_xy[1], root_to_tip_xy[0]) / math.pi * 180.
sector_radius = int(np.linalg.norm(root_to_tip_xy) * 0.5 + (SEEK_MARGIN[1] + SEEK_MARGIN[0]) * 0.5)
center = (xyd_ends.sum(axis=0) * 0.5).astype(np.int32)
sector_mask[:] = 0
cv2.ellipse(sector_mask, (center[0], center[1]), (sector_radius, sector_radius), sector_angle, -OPENING_ANGLE * 0.5, +OPENING_ANGLE * 0.5, SECTOR_COLOR, SEEK_MARGIN[1] - SEEK_MARGIN[0])
# TODO: what if tip is right on ?
# TODO: handle cases where sector_mask goes out of image
depth_in_sector = self.depth_img * sector_mask
occlusion_mask = (depth_in_sector < xyd_ends[1, 2] + DEPTH_MARGIN) * (depth_in_sector > 0.)
# TODO: Handle cases where the sector is out of frame in one camera
if occlusion_mask.sum() > MAX_OCCLUDING_PIXELS:
is_occluded = True
            line.sector_mask = sector_mask.astype(bool)
line.occlusion_mask = occlusion_mask
line.tip_is_contained = (tip_is_contained != 0)
line.is_contained = ((root_is_contained * tip_is_contained) != 0)
line.is_occluded = is_occluded
def final_selection(self, line_list):
target_cosz_min = math.cos(self.target_max_tilt * pi / 180.)
for line in line_list:
if not (line.length > self.target_min_len and line.length < self.target_max_len):
continue
line_dist = line.xy_distance
if not (line_dist > self.target_min_dist and line_dist < self.target_max_dist):
continue
if line.direction[2] < target_cosz_min:
continue
line.is_final = True
def bkg_postprocess(self, line_list):
EXTEND_LEN = 1.
MIN_LEN = 0.2
target_cosz_min = math.cos(self.target_max_tilt * pi / 180.)
for line in line_list:
if line.is_good:
continue
if line.direction[2] < target_cosz_min:
continue
if line.length < MIN_LEN:
continue
if not (line.length < self.target_max_len) or not line.tip_is_contained:
line.extend_tip(EXTEND_LEN)
def remove_noise_lines(self, line_list, grid_xyzw):
MIN_LEN = 0.050
n_orig = len(line_list)
max_ground_z = np.max(grid_xyzw[:,:,2])
z_threshold = max_ground_z + 0.40
r_threshold = self.target_max_dist
n_remove = 0
for line in line_list:
if line.is_good:
continue
if ((line.xy_distance > r_threshold and line.position[2] > z_threshold)
or line.length < MIN_LEN):
line.is_ignored = True
n_remove += 1
print('Noise line removal : {} -> {}'.format(n_orig, n_orig - n_remove))
def print_line_info(self, line_list):
print('### Candidate line info #############################')
print(' Good flg=[sol, nmlc, ground, tip, ends, unoccl, final]')
print('-----------------------------------------------------')
for line in line_list:
# if not (line.is_solitary and not line.is_multiline_cluster and line.is_grounded):
if line.length < 0.200:
continue
flags = [
line.is_solitary,
not line.is_multiline_cluster,
line.is_grounded,
line.tip_is_contained,
line.is_contained,
not line.is_occluded,
line.is_final]
print(' {} flg={} len={:.3f} dist={:.3f} tilt={:.1f}deg'.format(line.is_good, flags, line.length, line.xy_distance, math.acos(line.direction[2]) / pi * 180.))
print('#####################################################')
def get_line_fit_geometry(self, line_list):
mesh_cylinders = []
for line in line_list:
# if line.is_ignored:
# continue
line_color = CMAP_CLUSTER(line.cluster_id)[:3]
if line.length <= 0.0:
print('`line.length` has non-positive value: {}'.format(line.length))
continue
mesh_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=0.005, height=line.length)
mesh_cylinder.compute_vertex_normals()
mesh_cylinder.paint_uniform_color(line_color)
mesh_cylinder.translate([0., 0., line.length * 0.5])
mesh_cylinder.transform(line.tfm_mtx)
mesh_cylinders.append(mesh_cylinder)
line.add_mesh(mesh_cylinder)
if False:
mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.010)
mesh_sphere.compute_vertex_normals()
mesh_sphere.paint_uniform_color(line_color)
mesh_sphere.transform(line.tfm_mtx)
mesh_cylinders.append(mesh_sphere)
line.add_mesh(mesh_sphere)
return mesh_cylinders
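# Hypothetical usage sketch (the TOML path is a placeholder and the depth image / camera
# parameters are assumed to come from the capture pipeline, not from this module):
def _fit_single_frame_sketch(depth_img, camera_param, toml_path='config/camera.toml'):
    '''Build a fitter from a config file and run the full pipeline on one depth frame.'''
    fitter = set_pcl_fitter(toml_path)
    pcd = fitter.get_pcd_from_depth_img(depth_img, camera_param)
    line_list, pcd_list, fitgeom_list, pcd_array, ground = fitter.fit_pcd(pcd, verbose=False)
    return line_list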
|
[
"numpy.clip",
"math.acos",
"math.cos",
"numpy.array",
"numpy.argsort",
"cv2.ellipse",
"numpy.linalg.norm",
"numpy.arange",
"open3d.geometry.TriangleMesh.create_cylinder",
"numpy.asarray",
"numpy.max",
"numpy.exp",
"numpy.stack",
"numpy.linspace",
"numpy.dot",
"open3d.geometry.TriangleMesh.create_sphere",
"numpy.concatenate",
"numpy.argmin",
"numpy.meshgrid",
"open3d.utility.Vector3iVector",
"numpy.ones",
"numpy.argmax",
"open3d.geometry.TriangleMesh",
"math.atan2",
"open3d.utility.Vector3dVector",
"matplotlib.pyplot.get_cmap",
"numpy.zeros",
"open3d.geometry.PointCloud"
] |
[((897, 918), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (909, 918), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3144), 'numpy.array', 'np.array', (['pcd.points'], {'dtype': 'np.float32'}), '(pcd.points, dtype=np.float32)\n', (3114, 3144), True, 'import numpy as np\n'), ((3281, 3319), 'numpy.asarray', 'np.asarray', (['pcd_grounds_list[2].points'], {}), '(pcd_grounds_list[2].points)\n', (3291, 3319), True, 'import numpy as np\n'), ((3520, 3550), 'numpy.asarray', 'np.asarray', (['pcd_grounds.points'], {}), '(pcd_grounds.points)\n', (3530, 3550), True, 'import numpy as np\n'), ((4830, 4877), 'numpy.arange', 'np.arange', (['depth_img.shape[0]'], {'dtype': 'np.float32'}), '(depth_img.shape[0], dtype=np.float32)\n', (4839, 4877), True, 'import numpy as np\n'), ((4894, 4941), 'numpy.arange', 'np.arange', (['depth_img.shape[1]'], {'dtype': 'np.float32'}), '(depth_img.shape[1], dtype=np.float32)\n', (4903, 4941), True, 'import numpy as np\n'), ((4965, 4990), 'numpy.meshgrid', 'np.meshgrid', (['arr_x', 'arr_y'], {}), '(arr_x, arr_y)\n', (4976, 4990), True, 'import numpy as np\n'), ((5410, 5459), 'numpy.stack', 'np.stack', (['[filled_x, filled_y, filled_z]'], {'axis': '(-1)'}), '([filled_x, filled_y, filled_z], axis=-1)\n', (5418, 5459), True, 'import numpy as np\n'), ((6299, 6339), 'numpy.stack', 'np.stack', (['[val_x, val_y, depth]'], {'axis': '(-1)'}), '([val_x, val_y, depth], axis=-1)\n', (6307, 6339), True, 'import numpy as np\n'), ((6652, 6677), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (6675, 6677), True, 'import open3d as o3d\n'), ((7546, 7574), 'numpy.argmax', 'np.argmax', (['x_binflg'], {'axis': '(-1)'}), '(x_binflg, axis=-1)\n', (7555, 7574), True, 'import numpy as np\n'), ((7594, 7622), 'numpy.argmax', 'np.argmax', (['y_binflg'], {'axis': '(-1)'}), '(y_binflg, axis=-1)\n', (7603, 7622), True, 'import numpy as np\n'), ((7959, 8006), 'numpy.zeros', 'np.zeros', (['[x_nbin, y_nbin, 4]'], {'dtype': 'np.float64'}), '([x_nbin, y_nbin, 4], dtype=np.float64)\n', (7967, 8006), True, 'import numpy as np\n'), ((10319, 10388), 'numpy.linalg.norm', 'np.linalg.norm', (['(filled_xyzw[:, :, :2] - empty_xyzw[:, :, :2])'], {'axis': '(-1)'}), '(filled_xyzw[:, :, :2] - empty_xyzw[:, :, :2], axis=-1)\n', (10333, 10388), True, 'import numpy as np\n'), ((11814, 11833), 'numpy.array', 'np.array', (['triangles'], {}), '(triangles)\n', (11822, 11833), True, 'import numpy as np\n'), ((11953, 11980), 'open3d.geometry.TriangleMesh', 'o3d.geometry.TriangleMesh', ([], {}), '()\n', (11978, 11980), True, 'import open3d as o3d\n'), ((12005, 12041), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['vertices'], {}), '(vertices)\n', (12031, 12041), True, 'import open3d as o3d\n'), ((12067, 12104), 'open3d.utility.Vector3iVector', 'o3d.utility.Vector3iVector', (['triangles'], {}), '(triangles)\n', (12093, 12104), True, 'import open3d as o3d\n'), ((13067, 13108), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors[:, :3]'], {}), '(colors[:, :3])\n', (13093, 13108), True, 'import open3d as o3d\n'), ((14715, 14742), 'math.cos', 'math.cos', (['(15.0 * pi / 180.0)'], {}), '(15.0 * pi / 180.0)\n', (14723, 14742), False, 'import math\n'), ((14792, 14842), 'numpy.array', 'np.array', (['[line.position[2] for line in line_list]'], {}), '([line.position[2] for line in line_list])\n', (14800, 14842), True, 'import numpy as np\n'), ((14864, 14883), 'numpy.argsort', 'np.argsort', (['z_array'], {}), '(z_array)\n', (14874, 
14883), True, 'import numpy as np\n'), ((16374, 16401), 'math.cos', 'math.cos', (['(45.0 * pi / 180.0)'], {}), '(45.0 * pi / 180.0)\n', (16382, 16401), False, 'import math\n'), ((18265, 18311), 'numpy.zeros', 'np.zeros', (['self.depth_img.shape'], {'dtype': 'np.uint8'}), '(self.depth_img.shape, dtype=np.uint8)\n', (18273, 18311), True, 'import numpy as np\n'), ((20348, 20391), 'math.cos', 'math.cos', (['(self.target_max_tilt * pi / 180.0)'], {}), '(self.target_max_tilt * pi / 180.0)\n', (20356, 20391), False, 'import math\n'), ((20925, 20968), 'math.cos', 'math.cos', (['(self.target_max_tilt * pi / 180.0)'], {}), '(self.target_max_tilt * pi / 180.0)\n', (20933, 20968), False, 'import math\n'), ((21459, 21485), 'numpy.max', 'np.max', (['grid_xyzw[:, :, 2]'], {}), '(grid_xyzw[:, :, 2])\n', (21465, 21485), True, 'import numpy as np\n'), ((9205, 9240), 'numpy.linalg.norm', 'np.linalg.norm', (['pcl[:, :2]'], {'axis': '(-1)'}), '(pcl[:, :2], axis=-1)\n', (9219, 9240), True, 'import numpy as np\n'), ((10498, 10527), 'numpy.argmin', 'np.argmin', (['dist_array'], {'axis': '(0)'}), '(dist_array, axis=0)\n', (10507, 10527), True, 'import numpy as np\n'), ((10956, 10976), 'numpy.exp', 'np.exp', (['(-dsq * coeff)'], {}), '(-dsq * coeff)\n', (10962, 10976), True, 'import numpy as np\n'), ((11883, 11938), 'numpy.concatenate', 'np.concatenate', (['[triangles, triangles[:, ::-1]]'], {'axis': '(0)'}), '([triangles, triangles[:, ::-1]], axis=0)\n', (11897, 11938), True, 'import numpy as np\n'), ((12790, 12817), 'numpy.argsort', 'np.argsort', (['n_points_in_cls'], {}), '(n_points_in_cls)\n', (12800, 12817), True, 'import numpy as np\n'), ((12841, 12874), 'numpy.ones', 'np.ones', (['n_points'], {'dtype': 'np.int32'}), '(n_points, dtype=np.int32)\n', (12848, 12874), True, 'import numpy as np\n'), ((16835, 16891), 'numpy.linalg.norm', 'np.linalg.norm', (['flatten_grid_local_frame[:, :2]'], {'axis': '(-1)'}), '(flatten_grid_local_frame[:, :2], axis=-1)\n', (16849, 16891), True, 'import numpy as np\n'), ((16975, 17036), 'numpy.clip', 'np.clip', (['((MAX_R - flatten_grid_r[idx_sort]) / MAX_R)', '(0.0)', '(1.0)'], {}), '((MAX_R - flatten_grid_r[idx_sort]) / MAX_R, 0.0, 1.0)\n', (16982, 17036), True, 'import numpy as np\n'), ((23325, 23400), 'open3d.geometry.TriangleMesh.create_cylinder', 'o3d.geometry.TriangleMesh.create_cylinder', ([], {'radius': '(0.005)', 'height': 'line.length'}), '(radius=0.005, height=line.length)\n', (23366, 23400), True, 'import open3d as o3d\n'), ((7024, 7082), 'numpy.linspace', 'np.linspace', (['X_MIN', '(X_MIN + GRID_SIZE * x_nbin)', '(x_nbin + 1)'], {}), '(X_MIN, X_MIN + GRID_SIZE * x_nbin, x_nbin + 1)\n', (7035, 7082), True, 'import numpy as np\n'), ((7115, 7173), 'numpy.linspace', 'np.linspace', (['Y_MIN', '(Y_MIN + GRID_SIZE * y_nbin)', '(y_nbin + 1)'], {}), '(Y_MIN, Y_MIN + GRID_SIZE * y_nbin, y_nbin + 1)\n', (7126, 7173), True, 'import numpy as np\n'), ((15283, 15307), 'numpy.linalg.norm', 'np.linalg.norm', (['to_line2'], {}), '(to_line2)\n', (15297, 15307), True, 'import numpy as np\n'), ((15395, 15431), 'numpy.dot', 'np.dot', (['dir_to_line2', 'line.direction'], {}), '(dir_to_line2, line.direction)\n', (15401, 15431), True, 'import numpy as np\n'), ((16914, 16940), 'numpy.argsort', 'np.argsort', (['flatten_grid_r'], {}), '(flatten_grid_r)\n', (16924, 16940), True, 'import numpy as np\n'), ((17169, 17222), 'numpy.dot', 'np.dot', (['flatten_grid_local_frame[idx_sort, 2]', 'weight'], {}), '(flatten_grid_local_frame[idx_sort, 2], weight)\n', (17175, 17222), True, 'import numpy as 
np\n'), ((19295, 19489), 'cv2.ellipse', 'cv2.ellipse', (['sector_mask', '(center[0], center[1])', '(sector_radius, sector_radius)', 'sector_angle', '(-OPENING_ANGLE * 0.5)', '(+OPENING_ANGLE * 0.5)', 'SECTOR_COLOR', '(SEEK_MARGIN[1] - SEEK_MARGIN[0])'], {}), '(sector_mask, (center[0], center[1]), (sector_radius,\n sector_radius), sector_angle, -OPENING_ANGLE * 0.5, +OPENING_ANGLE * \n 0.5, SECTOR_COLOR, SEEK_MARGIN[1] - SEEK_MARGIN[0])\n', (19306, 19489), False, 'import cv2\n'), ((23768, 23820), 'open3d.geometry.TriangleMesh.create_sphere', 'o3d.geometry.TriangleMesh.create_sphere', ([], {'radius': '(0.01)'}), '(radius=0.01)\n', (23807, 23820), True, 'import open3d as o3d\n'), ((12656, 12676), 'numpy.arange', 'np.arange', (['n_cluster'], {}), '(n_cluster)\n', (12665, 12676), True, 'import numpy as np\n'), ((18991, 19039), 'math.atan2', 'math.atan2', (['root_to_tip_xy[1]', 'root_to_tip_xy[0]'], {}), '(root_to_tip_xy[1], root_to_tip_xy[0])\n', (19001, 19039), False, 'import math\n'), ((19093, 19123), 'numpy.linalg.norm', 'np.linalg.norm', (['root_to_tip_xy'], {}), '(root_to_tip_xy)\n', (19107, 19123), True, 'import numpy as np\n'), ((22809, 22837), 'math.acos', 'math.acos', (['line.direction[2]'], {}), '(line.direction[2])\n', (22818, 22837), False, 'import math\n')]
|
import cv2
import os
from os import listdir, makedirs
from os.path import isfile, join, exists
import numpy as np
import time
import math
DEBUG = True
FACTOR = 2
RESO_X = int(576 / FACTOR)
RESO_Y = int(640 / FACTOR)
CONF_VAL = 0
THRESHOLD = 0
UPPER_BOUND = 230
LOWER_BOUND = 150
def get_file_index(filename):
index = int(filename.split('.')[0])
return index
def create_windows():
cv2.namedWindow("RGB", cv2.WINDOW_NORMAL)
cv2.namedWindow("Depth", cv2.WINDOW_NORMAL)
cv2.resizeWindow("RGB", RESO_X, RESO_Y)
cv2.resizeWindow("Depth", RESO_X, RESO_Y)
def load_yolo(model_folder):
# load the COCO class labels our YOLO model was trained on
labelsPath = model_folder + "coco.names"
LABELS = open(labelsPath).read().strip().split("\n")
weightsPath = model_folder + "yolov3-spp.weights"
configPath = model_folder + "yolov3-spp.cfg"
print("[INFO] loading YOLO from disk...")
if DEBUG:
print("label: {}\nweights: {}\nconfig: {}".format(
labelsPath, weightsPath, configPath))
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
    # getUnconnectedOutLayers() returns an Nx1 array in older OpenCV and a flat array in
    # newer releases; flatten so both layouts work
    ln = [ln[int(i) - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
return net, ln, LABELS
def process_frame(frame, net, ln, LABELS):
# get frame height and width
(H, W) = frame.shape[:2]
# construct a blob from the input frame and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes
# and associated probabilities
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start_time = time.time()
layerOutputs = net.forward(ln)
duration = time.time() - start_time
if DEBUG:
print("[INFO] processed within {}s".format(round(duration, 2)))
# initialize our lists of detected bounding boxes, confidences,
# and class IDs, respectively
boxes = []
confidences = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability)
# of the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > CONF_VAL and LABELS[classID] == "cell phone":
# scale the bounding box coordinates back relative to
# the size of the image, keeping in mind that YOLO
# actually returns the center (x, y)-coordinates of
# the bounding box followed by the boxes' width and
# height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top
# and and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates and confidences
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
return boxes, confidences
def detect_object(experiment_name, save_images=False):
rgb_folder = "Experiment_Frames/" + experiment_name + "/rgb_frames/"
depth_folder = "Experiment_Frames/" + experiment_name + "/depth_frames/"
model_folder = "yolo-coco/"
output_folder = "Experiment_Output/" + experiment_name + "/"
# make the folders if not exist
if not exists(rgb_folder):
makedirs(rgb_folder)
if not exists(depth_folder):
makedirs(depth_folder)
if not exists(output_folder):
makedirs(output_folder)
if not exists(output_folder + 'depth/'):
makedirs(output_folder + 'depth/')
if not exists(output_folder + 'rgb/'):
makedirs(output_folder + 'rgb/')
# load rgb images
print("[INFO] loading rgb images from disk...")
img_files = [f for f in listdir(rgb_folder) if isfile(join(rgb_folder, f))]
img_files = sorted(img_files, key=get_file_index)
# load image net
net, ln, LABELS = load_yolo(model_folder)
out_file = open(output_folder + "/" + "positions.txt", "w")
# process each frame
for img_file in img_files:
if DEBUG:
print("[INFO] processing image {}".format(img_file))
# read rgb frame
frame = cv2.imread(rgb_folder + "/" + img_file, cv2.IMREAD_COLOR)
# read depth frame
depth = cv2.imread(depth_folder + "/" + img_file)
# rotate 90 degree for phone images
# frame = cv.rotate(frame, rotateCode=cv.ROTATE_90_CLOCKWISE)
# process using YOLO
boxes, confidences = process_frame(frame, net, ln, LABELS)
# suppress boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONF_VAL, THRESHOLD)
# ensure at least one detection exists
if len(idxs) > 0:
# get first box
i = idxs.flatten()[0]
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the frame
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.rectangle(depth, (x, y), (x + w, y + h), (255, 0, 0), 2)
if save_images:
# display and save image
cv2.imshow("RGB", frame)
cv2.imwrite(output_folder +
"rgb/" + img_file, frame)
cv2.imshow("Depth", depth)
cv2.imwrite(output_folder +
"depth/" + img_file, depth)
            # get centroid of the bounding box
centroid_x = x + int(w / 2)
centroid_y = y + int(h / 2)
# get average depth within the bounding box
            # image arrays are indexed [row, col] = [y, x]
            depth_pixels = depth[y: y+h, x: x+w, 0]
depth_pixels = depth_pixels.flatten()
mask = (depth_pixels > LOWER_BOUND) & (depth_pixels < UPPER_BOUND)
depth_pixels = depth_pixels[mask]
pixel_mean = np.mean(depth_pixels)
# save timestamp and position
if not math.isnan(pixel_mean):
timestamp = img_file.split('.')[0]
out_file.write("{},{},{},{}\n".format(
timestamp, centroid_x, centroid_y, round(pixel_mean, 4)
))
if DEBUG:
print("point is ({}, {}, {})".format(
centroid_x, centroid_y, round(pixel_mean, 4)))
key = cv2.waitKey(50)
if key != -1:
cv2.destroyAllWindows()
break
out_file.close()
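# Hypothetical entry point (the experiment name below is a placeholder, not part of the
# original script): process the recorded frames of one experiment and write positions.txt.
if __name__ == "__main__":
    create_windows()
    detect_object("sample_experiment", save_images=True)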
|
[
"cv2.rectangle",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.dnn.NMSBoxes",
"os.path.exists",
"numpy.mean",
"cv2.resizeWindow",
"os.listdir",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"numpy.argmax",
"time.time",
"cv2.namedWindow",
"cv2.imread",
"cv2.imwrite",
"os.makedirs",
"os.path.join",
"cv2.dnn.readNetFromDarknet",
"math.isnan"
] |
[((400, 441), 'cv2.namedWindow', 'cv2.namedWindow', (['"""RGB"""', 'cv2.WINDOW_NORMAL'], {}), "('RGB', cv2.WINDOW_NORMAL)\n", (415, 441), False, 'import cv2\n'), ((446, 489), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Depth"""', 'cv2.WINDOW_NORMAL'], {}), "('Depth', cv2.WINDOW_NORMAL)\n", (461, 489), False, 'import cv2\n'), ((494, 533), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""RGB"""', 'RESO_X', 'RESO_Y'], {}), "('RGB', RESO_X, RESO_Y)\n", (510, 533), False, 'import cv2\n'), ((538, 579), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""Depth"""', 'RESO_X', 'RESO_Y'], {}), "('Depth', RESO_X, RESO_Y)\n", (554, 579), False, 'import cv2\n'), ((1058, 1109), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['configPath', 'weightsPath'], {}), '(configPath, weightsPath)\n', (1084, 1109), False, 'import cv2\n'), ((1523, 1599), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1 / 255.0)', '(416, 416)'], {'swapRB': '(True)', 'crop': '(False)'}), '(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n', (1544, 1599), False, 'import cv2\n'), ((1673, 1684), 'time.time', 'time.time', ([], {}), '()\n', (1682, 1684), False, 'import time\n'), ((1735, 1746), 'time.time', 'time.time', ([], {}), '()\n', (1744, 1746), False, 'import time\n'), ((3807, 3825), 'os.path.exists', 'exists', (['rgb_folder'], {}), '(rgb_folder)\n', (3813, 3825), False, 'from os.path import isfile, join, exists\n'), ((3835, 3855), 'os.makedirs', 'makedirs', (['rgb_folder'], {}), '(rgb_folder)\n', (3843, 3855), False, 'from os import listdir, makedirs\n'), ((3867, 3887), 'os.path.exists', 'exists', (['depth_folder'], {}), '(depth_folder)\n', (3873, 3887), False, 'from os.path import isfile, join, exists\n'), ((3897, 3919), 'os.makedirs', 'makedirs', (['depth_folder'], {}), '(depth_folder)\n', (3905, 3919), False, 'from os import listdir, makedirs\n'), ((3931, 3952), 'os.path.exists', 'exists', (['output_folder'], {}), '(output_folder)\n', (3937, 3952), False, 'from os.path import isfile, join, exists\n'), ((3962, 3985), 'os.makedirs', 'makedirs', (['output_folder'], {}), '(output_folder)\n', (3970, 3985), False, 'from os import listdir, makedirs\n'), ((3997, 4029), 'os.path.exists', 'exists', (["(output_folder + 'depth/')"], {}), "(output_folder + 'depth/')\n", (4003, 4029), False, 'from os.path import isfile, join, exists\n'), ((4039, 4073), 'os.makedirs', 'makedirs', (["(output_folder + 'depth/')"], {}), "(output_folder + 'depth/')\n", (4047, 4073), False, 'from os import listdir, makedirs\n'), ((4085, 4115), 'os.path.exists', 'exists', (["(output_folder + 'rgb/')"], {}), "(output_folder + 'rgb/')\n", (4091, 4115), False, 'from os.path import isfile, join, exists\n'), ((4125, 4157), 'os.makedirs', 'makedirs', (["(output_folder + 'rgb/')"], {}), "(output_folder + 'rgb/')\n", (4133, 4157), False, 'from os import listdir, makedirs\n'), ((4682, 4739), 'cv2.imread', 'cv2.imread', (["(rgb_folder + '/' + img_file)", 'cv2.IMREAD_COLOR'], {}), "(rgb_folder + '/' + img_file, cv2.IMREAD_COLOR)\n", (4692, 4739), False, 'import cv2\n'), ((4784, 4825), 'cv2.imread', 'cv2.imread', (["(depth_folder + '/' + img_file)"], {}), "(depth_folder + '/' + img_file)\n", (4794, 4825), False, 'import cv2\n'), ((5078, 5135), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', 'CONF_VAL', 'THRESHOLD'], {}), '(boxes, confidences, CONF_VAL, THRESHOLD)\n', (5094, 5135), False, 'import cv2\n'), ((6888, 6903), 'cv2.waitKey', 'cv2.waitKey', (['(50)'], {}), '(50)\n', (6899, 6903), False, 'import cv2\n'), ((2309, 2326), 
'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (2318, 2326), True, 'import numpy as np\n'), ((4261, 4280), 'os.listdir', 'listdir', (['rgb_folder'], {}), '(rgb_folder)\n', (4268, 4280), False, 'from os import listdir, makedirs\n'), ((5499, 5559), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (5512, 5559), False, 'import cv2\n'), ((5572, 5632), 'cv2.rectangle', 'cv2.rectangle', (['depth', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(depth, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (5585, 5632), False, 'import cv2\n'), ((6420, 6441), 'numpy.mean', 'np.mean', (['depth_pixels'], {}), '(depth_pixels)\n', (6427, 6441), True, 'import numpy as np\n'), ((6938, 6961), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6959, 6961), False, 'import cv2\n'), ((4291, 4310), 'os.path.join', 'join', (['rgb_folder', 'f'], {}), '(rgb_folder, f)\n', (4295, 4310), False, 'from os.path import isfile, join, exists\n'), ((5719, 5743), 'cv2.imshow', 'cv2.imshow', (['"""RGB"""', 'frame'], {}), "('RGB', frame)\n", (5729, 5743), False, 'import cv2\n'), ((5760, 5813), 'cv2.imwrite', 'cv2.imwrite', (["(output_folder + 'rgb/' + img_file)", 'frame'], {}), "(output_folder + 'rgb/' + img_file, frame)\n", (5771, 5813), False, 'import cv2\n'), ((5858, 5884), 'cv2.imshow', 'cv2.imshow', (['"""Depth"""', 'depth'], {}), "('Depth', depth)\n", (5868, 5884), False, 'import cv2\n'), ((5901, 5956), 'cv2.imwrite', 'cv2.imwrite', (["(output_folder + 'depth/' + img_file)", 'depth'], {}), "(output_folder + 'depth/' + img_file, depth)\n", (5912, 5956), False, 'import cv2\n'), ((6504, 6526), 'math.isnan', 'math.isnan', (['pixel_mean'], {}), '(pixel_mean)\n', (6514, 6526), False, 'import math\n'), ((2913, 2935), 'numpy.array', 'np.array', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (2921, 2935), True, 'import numpy as np\n')]
|
# coding: utf-8
# # Using Dropout
# Let's see how we can use dropout for early stopping
from concept_dependency_graph import ConceptDependencyGraph
import data_generator as dg
from student import *
import simple_mdp as sm
import dynamics_model_class as dmc
import numpy as np
import dataset_utils
import tensorflow as tf
import tflearn
import copy
import time
def main():
n_concepts = 4
use_student2 = True
student2_str = '2' if use_student2 else ''
learn_prob = 0.5
lp_str = '-lp{}'.format(int(learn_prob*100)) if not use_student2 else ''
n_students = 100000
seqlen = 7
filter_mastery = True
filter_str = '' if not filter_mastery else '-filtered'
policy = 'random'
filename = 'test{}-n{}-l{}{}-{}{}.pickle'.format(student2_str, n_students, seqlen,
lp_str, policy, filter_str)
#concept_tree = sm.create_custom_dependency()
concept_tree = ConceptDependencyGraph()
concept_tree.init_default_tree(n_concepts)
if not use_student2:
test_student = Student(n=n_concepts,p_trans_satisfied=learn_prob, p_trans_not_satisfied=0.0, p_get_ex_correct_if_concepts_learned=1.0)
else:
test_student = Student2(n_concepts)
print(filename)
# load toy data
data = dataset_utils.load_data(filename='{}{}'.format(dg.SYN_DATA_DIR, filename))
print('Average posttest: {}'.format(sm.expected_reward(data)))
print('Percent of full posttest score: {}'.format(sm.percent_complete(data)))
print('Percent of all seen: {}'.format(sm.percent_all_seen(data)))
input_data_, output_mask_, target_data_ = dataset_utils.preprocess_data_for_rnn(data)
train_data = (input_data_[:,:,:], output_mask_[:,:,:], target_data_[:,:,:])
print(input_data_.shape)
print(output_mask_.shape)
print(target_data_.shape)
# test_model hidden=16
# test_model_mid hidden=10
# test_model_small hidden=5
# test_model_tiny hidden=3
model_id = "test2_model_small"
dropouts = np.array([1.0])
n_dropouts = dropouts.shape[0]
total_epochs = 14
reps = 20
class ExtractCallback(tflearn.callbacks.Callback):
def __init__(self):
self.tstates = []
def on_epoch_end(self, training_state):
self.tstates.append(copy.copy(training_state))
def test_dropout_losses():
losses = np.zeros((n_dropouts,reps,total_epochs))
val_losses = np.zeros((n_dropouts, reps,total_epochs))
for d in range(n_dropouts):
dropout = dropouts[d]
for r in range(reps):
print('----------------------------------------')
print('---------- Dropout {:3.1f} Rep {:2d} ----------'.format(dropout, r+1))
print('----------------------------------------')
ecall = ExtractCallback()
dmodel = dmc.DynamicsModel(model_id=model_id, timesteps=seqlen, dropout=dropout, load_checkpoint=False)
dmodel.train(train_data, n_epoch=total_epochs, callbacks=ecall, shuffle=False, load_checkpoint=False)
losses[d,r,:] = np.array([s.global_loss for s in ecall.tstates])
val_losses[d,r,:] = np.array([s.val_loss for s in ecall.tstates])
return losses, val_losses
losses, val_losses = test_dropout_losses()
np.savez("dropoutput",dropouts=dropouts, losses=losses, vals=val_losses)
if __name__ == '__main__':
starttime = time.time()
np.random.seed()
main()
endtime = time.time()
print('Time elapsed {}s'.format(endtime-starttime))
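# Illustrative note (not part of the original script): np.savez above writes
# "dropoutput.npz"; the saved results can be inspected later along these lines:
#   data = np.load("dropoutput.npz")
#   data["losses"].shape   # (n_dropouts, reps, total_epochs) training losses
#   data["vals"].shape     # validation losses with the same shape
#   data["dropouts"]       # the dropout settings that were tested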
|
[
"numpy.savez",
"dynamics_model_class.DynamicsModel",
"dataset_utils.preprocess_data_for_rnn",
"concept_dependency_graph.ConceptDependencyGraph",
"numpy.array",
"numpy.zeros",
"simple_mdp.percent_all_seen",
"numpy.random.seed",
"simple_mdp.percent_complete",
"copy.copy",
"time.time",
"simple_mdp.expected_reward"
] |
[((951, 975), 'concept_dependency_graph.ConceptDependencyGraph', 'ConceptDependencyGraph', ([], {}), '()\n', (973, 975), False, 'from concept_dependency_graph import ConceptDependencyGraph\n'), ((1640, 1683), 'dataset_utils.preprocess_data_for_rnn', 'dataset_utils.preprocess_data_for_rnn', (['data'], {}), '(data)\n', (1677, 1683), False, 'import dataset_utils\n'), ((2026, 2041), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (2034, 2041), True, 'import numpy as np\n'), ((3349, 3422), 'numpy.savez', 'np.savez', (['"""dropoutput"""'], {'dropouts': 'dropouts', 'losses': 'losses', 'vals': 'val_losses'}), "('dropoutput', dropouts=dropouts, losses=losses, vals=val_losses)\n", (3357, 3422), True, 'import numpy as np\n'), ((3466, 3477), 'time.time', 'time.time', ([], {}), '()\n', (3475, 3477), False, 'import time\n'), ((3483, 3499), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (3497, 3499), True, 'import numpy as np\n'), ((3531, 3542), 'time.time', 'time.time', ([], {}), '()\n', (3540, 3542), False, 'import time\n'), ((2383, 2425), 'numpy.zeros', 'np.zeros', (['(n_dropouts, reps, total_epochs)'], {}), '((n_dropouts, reps, total_epochs))\n', (2391, 2425), True, 'import numpy as np\n'), ((2445, 2487), 'numpy.zeros', 'np.zeros', (['(n_dropouts, reps, total_epochs)'], {}), '((n_dropouts, reps, total_epochs))\n', (2453, 2487), True, 'import numpy as np\n'), ((1414, 1438), 'simple_mdp.expected_reward', 'sm.expected_reward', (['data'], {}), '(data)\n', (1432, 1438), True, 'import simple_mdp as sm\n'), ((1495, 1520), 'simple_mdp.percent_complete', 'sm.percent_complete', (['data'], {}), '(data)\n', (1514, 1520), True, 'import simple_mdp as sm\n'), ((1566, 1591), 'simple_mdp.percent_all_seen', 'sm.percent_all_seen', (['data'], {}), '(data)\n', (1585, 1591), True, 'import simple_mdp as sm\n'), ((2307, 2332), 'copy.copy', 'copy.copy', (['training_state'], {}), '(training_state)\n', (2316, 2332), False, 'import copy\n'), ((2885, 2983), 'dynamics_model_class.DynamicsModel', 'dmc.DynamicsModel', ([], {'model_id': 'model_id', 'timesteps': 'seqlen', 'dropout': 'dropout', 'load_checkpoint': '(False)'}), '(model_id=model_id, timesteps=seqlen, dropout=dropout,\n load_checkpoint=False)\n', (2902, 2983), True, 'import dynamics_model_class as dmc\n'), ((3130, 3178), 'numpy.array', 'np.array', (['[s.global_loss for s in ecall.tstates]'], {}), '([s.global_loss for s in ecall.tstates])\n', (3138, 3178), True, 'import numpy as np\n'), ((3215, 3260), 'numpy.array', 'np.array', (['[s.val_loss for s in ecall.tstates]'], {}), '([s.val_loss for s in ecall.tstates])\n', (3223, 3260), True, 'import numpy as np\n')]
|
from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
import sys
from zoneout_wrapper import ZoneoutWrapper
class SequencePredictor():
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors
"""
self.inputs_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="x")
self.labels_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="y")
self.dropout_placeholder = tf.placeholder(tf.float32)
def create_feed_dict(self, inputs_batch, labels_batch=None, initial_state=None, keep_prob=1.0):
"""Creates the feed_dict for the model.
NOTE: You do not have to do anything here.
"""
feed_dict = {
self.inputs_placeholder: inputs_batch,
self.dropout_placeholder: keep_prob,
}
if labels_batch is not None:
feed_dict[self.labels_placeholder] = labels_batch
if initial_state is not None:
feed_dict[self.in_state] = initial_state
return feed_dict
def add_embedding(self):
""" Creates one-hot encoding for the input. No embedding is used as of now
"""
embedding = tf.one_hot(self.inputs_placeholder, self.config.num_classes)
return embedding
def add_prediction_op(self):
""" Get the input from the embedding layer
"""
x = self.add_embedding()
""" Create a RNN first & define a placeholder for the initial state
"""
if self.config.model_type == "gru":
cell = tf.nn.rnn_cell.GRUCell(self.config.hidden_size)
elif self.config.model_type == "rnn":
cell = tf.nn.rnn_cell.BasicRNNCell(self.config.hidden_size)
else:
raise Exception("Unsuppoprted model type...")
if self.config.regularization == "dropout":
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.dropout_placeholder)
elif self.config.regularization == "zoneout":
cell = ZoneoutWrapper(cell, zoneout_prob=self.dropout_placeholder)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.config.num_layers, state_is_tuple=False)
batch_size = tf.shape(x)[0]
dynamic_max_length = tf.shape(x)[1]
zero_state = cell.zero_state(batch_size, tf.float32)
self.in_state = tf.placeholder_with_default(zero_state, [None, cell.state_size])
""" First find the sequence length and then use it to run the model
"""
#length = tf.reduce_sum(tf.reduce_max(tf.sign(x), 2), 1)
output, self.out_state = tf.nn.dynamic_rnn(cell, x, initial_state=self.in_state)
output = tf.reshape(output, shape=[-1, self.config.hidden_size])
""" Pass it through a linear + Softmax layer to get the predictions
"""
xavier_init = tf.contrib.layers.xavier_initializer()
W = tf.get_variable("W", shape=[self.config.hidden_size, self.config.num_classes], initializer=xavier_init )
b1 = tf.get_variable("b1", shape=[self.config.num_classes], initializer=xavier_init )
preds = tf.add(tf.matmul(output,W),b1)
preds = tf.reshape(preds, shape=[batch_size,dynamic_max_length, self.config.num_classes])
return preds
def add_loss_op(self, preds):
loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels_placeholder, logits=preds) )
        scaled_loss = loss / np.log(2)  # convert cross-entropy from nats to bits
        tf.summary.scalar('loss', scaled_loss)
return scaled_loss
def add_training_op(self, loss):
"""Sets up the training Ops.
"""
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(self.config.lr)
train_op = optimizer.minimize(loss, global_step=global_step)
return global_step, train_op
def loss_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None):
feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=1.0)
loss, out_state = sess.run([self.loss,self.out_state], feed_dict=feed)
return loss, out_state
def train_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None, dropout=1.0):
feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=dropout)
_, loss,out_state,_step, summary = sess.run([self.train_op, self.loss, self.out_state, self.global_step, self.merged_summaries], feed_dict=feed)
return loss, out_state, _step, summary
def build(self):
self.add_placeholders()
self.pred = self.add_prediction_op()
self.loss = self.add_loss_op(self.pred)
self.global_step, self.train_op = self.add_training_op(self.loss)
self.merged_summaries = tf.summary.merge_all()
def __init__(self, config):
self.config = config
self.build()
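# Illustrative usage sketch (not part of the original module). The `config`
# object below is a stand-in carrying only the attributes this class reads;
# the real project constructs it elsewhere.
#   from types import SimpleNamespace
#   config = SimpleNamespace(max_length=100, num_classes=256, hidden_size=128,
#                            model_type='gru', regularization='dropout',
#                            num_layers=2, lr=1e-3)
#   model = SequencePredictor(config)  # builds placeholders, graph and train op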
|
[
"tensorflow.shape",
"tensorflow.get_variable",
"numpy.log",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.placeholder",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.nn.dynamic_rnn",
"tensorflow.matmul",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.one_hot",
"tensorflow.summary.merge_all",
"tensorflow.Variable",
"tensorflow.reshape",
"tensorflow.nn.rnn_cell.MultiRNNCell",
"tensorflow.nn.rnn_cell.BasicRNNCell",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.placeholder_with_default",
"zoneout_wrapper.ZoneoutWrapper"
] |
[((338, 410), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, self.config.max_length)', 'name': '"""x"""'}), "(tf.int32, shape=(None, self.config.max_length), name='x')\n", (352, 410), True, 'import tensorflow as tf\n'), ((445, 517), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, self.config.max_length)', 'name': '"""y"""'}), "(tf.int32, shape=(None, self.config.max_length), name='y')\n", (459, 517), True, 'import tensorflow as tf\n'), ((553, 579), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (567, 579), True, 'import tensorflow as tf\n'), ((1291, 1351), 'tensorflow.one_hot', 'tf.one_hot', (['self.inputs_placeholder', 'self.config.num_classes'], {}), '(self.inputs_placeholder, self.config.num_classes)\n', (1301, 1351), True, 'import tensorflow as tf\n'), ((2198, 2285), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['([cell] * self.config.num_layers)'], {'state_is_tuple': '(False)'}), '([cell] * self.config.num_layers, state_is_tuple\n =False)\n', (2225, 2285), True, 'import tensorflow as tf\n'), ((2448, 2512), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['zero_state', '[None, cell.state_size]'], {}), '(zero_state, [None, cell.state_size])\n', (2475, 2512), True, 'import tensorflow as tf\n'), ((2700, 2755), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'x'], {'initial_state': 'self.in_state'}), '(cell, x, initial_state=self.in_state)\n', (2717, 2755), True, 'import tensorflow as tf\n'), ((2773, 2828), 'tensorflow.reshape', 'tf.reshape', (['output'], {'shape': '[-1, self.config.hidden_size]'}), '(output, shape=[-1, self.config.hidden_size])\n', (2783, 2828), True, 'import tensorflow as tf\n'), ((2940, 2978), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (2976, 2978), True, 'import tensorflow as tf\n'), ((2991, 3099), 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""'], {'shape': '[self.config.hidden_size, self.config.num_classes]', 'initializer': 'xavier_init'}), "('W', shape=[self.config.hidden_size, self.config.\n num_classes], initializer=xavier_init)\n", (3006, 3099), True, 'import tensorflow as tf\n'), ((3109, 3188), 'tensorflow.get_variable', 'tf.get_variable', (['"""b1"""'], {'shape': '[self.config.num_classes]', 'initializer': 'xavier_init'}), "('b1', shape=[self.config.num_classes], initializer=xavier_init)\n", (3124, 3188), True, 'import tensorflow as tf\n'), ((3253, 3340), 'tensorflow.reshape', 'tf.reshape', (['preds'], {'shape': '[batch_size, dynamic_max_length, self.config.num_classes]'}), '(preds, shape=[batch_size, dynamic_max_length, self.config.\n num_classes])\n', (3263, 3340), True, 'import tensorflow as tf\n'), ((3562, 3600), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'scaled_loss'], {}), "('loss', scaled_loss)\n", (3579, 3600), True, 'import tensorflow as tf\n'), ((3738, 3805), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'tf.int32', 'trainable': '(False)', 'name': '"""global_step"""'}), "(0, dtype=tf.int32, trainable=False, name='global_step')\n", (3749, 3805), True, 'import tensorflow as tf\n'), ((3827, 3865), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.config.lr'], {}), '(self.config.lr)\n', (3849, 3865), True, 'import tensorflow as tf\n'), ((4992, 5014), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5012, 5014), True, 'import tensorflow as tf\n'), 
((1660, 1707), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['self.config.hidden_size'], {}), '(self.config.hidden_size)\n', (1682, 1707), True, 'import tensorflow as tf\n'), ((1970, 2048), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['cell'], {'output_keep_prob': 'self.dropout_placeholder'}), '(cell, output_keep_prob=self.dropout_placeholder)\n', (1999, 2048), True, 'import tensorflow as tf\n'), ((2303, 2314), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2311, 2314), True, 'import tensorflow as tf\n'), ((2347, 2358), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2355, 2358), True, 'import tensorflow as tf\n'), ((3213, 3233), 'tensorflow.matmul', 'tf.matmul', (['output', 'W'], {}), '(output, W)\n', (3222, 3233), True, 'import tensorflow as tf\n'), ((3422, 3519), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'self.labels_placeholder', 'logits': 'preds'}), '(labels=self.\n labels_placeholder, logits=preds)\n', (3468, 3519), True, 'import tensorflow as tf\n'), ((3544, 3553), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3550, 3553), True, 'import numpy as np\n'), ((1773, 1825), 'tensorflow.nn.rnn_cell.BasicRNNCell', 'tf.nn.rnn_cell.BasicRNNCell', (['self.config.hidden_size'], {}), '(self.config.hidden_size)\n', (1800, 1825), True, 'import tensorflow as tf\n'), ((2122, 2181), 'zoneout_wrapper.ZoneoutWrapper', 'ZoneoutWrapper', (['cell'], {'zoneout_prob': 'self.dropout_placeholder'}), '(cell, zoneout_prob=self.dropout_placeholder)\n', (2136, 2181), False, 'from zoneout_wrapper import ZoneoutWrapper\n')]
|
import os
import logging
import json
from nnattack.variables import auto_var, get_file_name
from params import (
compare_attacks,
compare_defense,
#compare_nns,
nn_k1_robustness,
nn_k3_robustness,
nn_k1_approx_robustness_figs,
dt_robustness_figs,
rf_robustness_figs,
nn_k1_robustness_figs,
nn_k3_robustness_figs,
dt_robustness,
rf_robustness,
mlp_ap_robustness,
mlp_at_robustness,
lr_ap_robustness,
lr_at_robustness,
nn1_def,
nn3_def,
dt_def,
rf_def,
lr_def,
mlp_def,
)
from main import eps_accuracy
logging.basicConfig(level=logging.DEBUG)
DEBUG = True if os.environ.get('DEBUG', False) else False
def main():
experiments = [
compare_attacks(),
compare_defense(),
#nn_k1_robustness_figs(),
#nn_k3_robustness_figs(),
#rf_robustness_figs(),
#dt_robustness_figs(),
dt_robustness(),
rf_robustness(),
nn_k3_robustness(),
nn_k1_robustness(),
#mlp_ap_robustness(),
#mlp_at_robustness(),
#lr_ap_robustness(),
#lr_at_robustness(),
#nn1_def(),
#nn3_def(),
#dt_def(),
#rf_def(),
#lr_def(),
#mlp_def(),
]
grid_params = []
for exp in experiments:
exp_fn, _, grid_param, run_param = exp()
if isinstance(grid_param, list):
grid_params.extend(grid_param)
else:
grid_params.append(grid_param)
if DEBUG:
run_param['n_jobs'] = 1
run_param['allow_failure'] = False
else:
run_param['n_jobs'] = 4
run_param['allow_failure'] = True
auto_var.run_grid_params(exp_fn, grid_params, **run_param)
#auto_var.run_grid_params(delete_file, grid_params, n_jobs=1,
# with_hook=False, allow_failure=False)
#auto_var.run_grid_params(celery_run, grid_params, n_jobs=1,
# allow_failure=False)
#auto_var.run_grid_params(temp_fix, grid_params, n_jobs=6,
# allow_failure=False, with_hook=False)
def delete_file(auto_var):
os.unlink(get_file_name(auto_var) + '.json')
def celery_run(auto_var):
    # NOTE: `run_exp` is not defined or imported in this module; it is assumed
    # to be a Celery task provided elsewhere in the project.
    run_exp.delay(auto_var.var_value)
from main import set_random_seed
import numpy as np
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
def temp_fix(auto_var):
file_name = get_file_name(auto_var)
print(file_name)
if os.path.exists("%s.json" % file_name):
with open("%s.json" % file_name, "r") as f:
ret = json.load(f)
if "tst_score" in ret:
return
else:
return
random_state = set_random_seed(auto_var)
ord = auto_var.get_var("ord")
X, y, eps_list = auto_var.get_var("dataset")
idxs = np.arange(len(X))
random_state.shuffle(idxs)
trnX, tstX, trny, tsty = X[idxs[:-200]], X[idxs[-200:]], y[idxs[:-200]], y[idxs[-200:]]
scaler = MinMaxScaler()
trnX = scaler.fit_transform(trnX)
tstX = scaler.transform(tstX)
lbl_enc = OneHotEncoder(categories=[np.sort(np.unique(y))], sparse=False)
#lbl_enc = OneHotEncoder(sparse=False)
lbl_enc.fit(trny.reshape(-1, 1))
auto_var.set_intermidiate_variable("lbl_enc", lbl_enc)
results = []
auto_var.set_intermidiate_variable("trnX", trnX)
auto_var.set_intermidiate_variable("trny", trny)
model_name = auto_var.get_variable_value("model")
attack_name = auto_var.get_variable_value("attack")
if 'adv_rf' in model_name:
pre_model = auto_var.get_var_with_argument('model', model_name[4:])
pre_model.fit(trnX, trny)
if 'blackbox' in attack_name:
auto_var.set_intermidiate_variable("model", pre_model)
elif 'adv_nn' in model_name and 'blackbox' in attack_name:
pre_model = auto_var.get_var_with_argument('model', model_name[4:])
pre_model.fit(trnX, trny)
auto_var.set_intermidiate_variable("model", pre_model)
model = auto_var.get_var("model")
auto_var.set_intermidiate_variable("model", model)
model.fit(trnX, trny)
pred = model.predict(tstX)
ori_tstX, ori_tsty = tstX, tsty # len = 200
idxs = np.where(pred == tsty)[0]
random_state.shuffle(idxs)
augX = None
if ('adv' in model_name) or ('advPruning' in model_name) or ('robustv2' in model_name):
assert hasattr(model, 'augX')
auto_var.set_intermidiate_variable("trnX", model.augX)
auto_var.set_intermidiate_variable("trny", model.augy)
augX, augy = model.augX, model.augy
ret['tst_score'] = (model.predict(ori_tstX) == ori_tsty).mean()
with open("%s.json" % file_name, "w") as f:
json.dump(ret, f)
if __name__ == "__main__":
main()
|
[
"nnattack.variables.auto_var.set_intermidiate_variable",
"nnattack.variables.auto_var.get_var",
"nnattack.variables.auto_var.run_grid_params",
"os.path.exists",
"numpy.where",
"params.dt_robustness",
"sklearn.preprocessing.MinMaxScaler",
"nnattack.variables.get_file_name",
"nnattack.variables.auto_var.get_variable_value",
"params.nn_k3_robustness",
"params.compare_defense",
"nnattack.variables.auto_var.get_var_with_argument",
"params.compare_attacks",
"logging.basicConfig",
"numpy.unique",
"os.environ.get",
"params.nn_k1_robustness",
"params.rf_robustness",
"main.set_random_seed",
"json.load",
"json.dump"
] |
[((595, 635), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (614, 635), False, 'import logging\n'), ((653, 683), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""', '(False)'], {}), "('DEBUG', False)\n", (667, 683), False, 'import os\n'), ((1680, 1738), 'nnattack.variables.auto_var.run_grid_params', 'auto_var.run_grid_params', (['exp_fn', 'grid_params'], {}), '(exp_fn, grid_params, **run_param)\n', (1704, 1738), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2419, 2442), 'nnattack.variables.get_file_name', 'get_file_name', (['auto_var'], {}), '(auto_var)\n', (2432, 2442), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2471, 2508), 'os.path.exists', 'os.path.exists', (["('%s.json' % file_name)"], {}), "('%s.json' % file_name)\n", (2485, 2508), False, 'import os\n'), ((2688, 2713), 'main.set_random_seed', 'set_random_seed', (['auto_var'], {}), '(auto_var)\n', (2703, 2713), False, 'from main import set_random_seed\n'), ((2724, 2747), 'nnattack.variables.auto_var.get_var', 'auto_var.get_var', (['"""ord"""'], {}), "('ord')\n", (2740, 2747), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2770, 2797), 'nnattack.variables.auto_var.get_var', 'auto_var.get_var', (['"""dataset"""'], {}), "('dataset')\n", (2786, 2797), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2964, 2978), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2976, 2978), False, 'from sklearn.preprocessing import OneHotEncoder, MinMaxScaler\n'), ((3215, 3269), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""lbl_enc"""', 'lbl_enc'], {}), "('lbl_enc', lbl_enc)\n", (3249, 3269), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3293, 3341), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""trnX"""', 'trnX'], {}), "('trnX', trnX)\n", (3327, 3341), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3346, 3394), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""trny"""', 'trny'], {}), "('trny', trny)\n", (3380, 3394), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3413, 3449), 'nnattack.variables.auto_var.get_variable_value', 'auto_var.get_variable_value', (['"""model"""'], {}), "('model')\n", (3440, 3449), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3468, 3505), 'nnattack.variables.auto_var.get_variable_value', 'auto_var.get_variable_value', (['"""attack"""'], {}), "('attack')\n", (3495, 3505), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4001, 4026), 'nnattack.variables.auto_var.get_var', 'auto_var.get_var', (['"""model"""'], {}), "('model')\n", (4017, 4026), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4031, 4081), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""model"""', 'model'], {}), "('model', model)\n", (4065, 4081), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((736, 753), 'params.compare_attacks', 'compare_attacks', ([], {}), '()\n', (751, 753), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, 
mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((763, 780), 'params.compare_defense', 'compare_defense', ([], {}), '()\n', (778, 780), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((922, 937), 'params.dt_robustness', 'dt_robustness', ([], {}), '()\n', (935, 937), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((947, 962), 'params.rf_robustness', 'rf_robustness', ([], {}), '()\n', (960, 962), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((972, 990), 'params.nn_k3_robustness', 'nn_k3_robustness', ([], {}), '()\n', (988, 990), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((1000, 1018), 'params.nn_k1_robustness', 'nn_k1_robustness', ([], {}), '()\n', (1016, 1018), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((3557, 3612), 'nnattack.variables.auto_var.get_var_with_argument', 'auto_var.get_var_with_argument', (['"""model"""', 'model_name[4:]'], {}), "('model', model_name[4:])\n", (3587, 3612), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4199, 4221), 'numpy.where', 'np.where', (['(pred == tsty)'], {}), '(pred == tsty)\n', (4207, 4221), True, 'import numpy as np\n'), ((4411, 4465), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""trnX"""', 'model.augX'], {}), "('trnX', model.augX)\n", (4445, 4465), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4474, 4528), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""trny"""', 'model.augy'], {}), "('trny', model.augy)\n", (4508, 4528), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4698, 4715), 'json.dump', 'json.dump', (['ret', 'f'], {}), '(ret, f)\n', (4707, 4715), False, 'import json\n'), ((2163, 2186), 'nnattack.variables.get_file_name', 'get_file_name', (['auto_var'], {}), '(auto_var)\n', (2176, 
2186), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2580, 2592), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2589, 2592), False, 'import json\n'), ((3697, 3751), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""model"""', 'pre_model'], {}), "('model', pre_model)\n", (3731, 3751), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3835, 3890), 'nnattack.variables.auto_var.get_var_with_argument', 'auto_var.get_var_with_argument', (['"""model"""', 'model_name[4:]'], {}), "('model', model_name[4:])\n", (3865, 3890), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3933, 3987), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""model"""', 'pre_model'], {}), "('model', pre_model)\n", (3967, 3987), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3100, 3112), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3109, 3112), True, 'import numpy as np\n')]
|
import torch
import os
from tqdm import tqdm
import numpy as np
from multiprocessing.pool import Pool
from itertools import islice, cycle
from utils.logging import logger
from utils.misc import ensure_dir
class Vocab(object):
def __init__(self):
self.tok2idx = {}
self.idx2tok = []
self.add('<pad>') # PAD index is 0
self.add('<unk>') # UNK index is 1
self.add('<bos>') # BOS index is 2
self.add('<eos>') # EOS index is 3
def __len__(self):
return len(self.idx2tok)
def add(self, token):
if token not in self.tok2idx:
self.tok2idx[token] = len(self.idx2tok)
self.idx2tok.append(token)
def encode(self, token):
return self.tok2idx.get(token, self.tok2idx['<unk>'])
def decode(self, token_id):
assert token_id < len(self.idx2tok), \
'token id must be less than %d, got %d' % (len(self.idx2tok), token_id)
return self.idx2tok[token_id]
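# Quick illustration of the Vocab API (hypothetical tokens, not in the original
# file). The first four ids are reserved for the special symbols added above.
#   v = Vocab()
#   v.add('hello')
#   v.encode('hello')    # -> 4
#   v.encode('unseen')   # -> 1, i.e. '<unk>'
#   v.decode(4)          # -> 'hello'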
def split_corpus(path, shard_size):
with open(path, "r") as f:
if shard_size <= 0:
yield f.readlines()
else:
while True:
shard = list(islice(f, shard_size))
if not shard:
break
yield shard
def build_vocab(src_file, max_vocab_size=0):
with open(src_file, 'r') as f:
tokens = f.read().split()
freq_dict = {}
for t in tokens:
freq_dict[t] = freq_dict.get(t, 0) + 1
tokens = sorted(
list(freq_dict.items()),
key=lambda x: x[1],
reverse=True
)
vsize = max_vocab_size if max_vocab_size > 0 else len(tokens)
vocab = [t[0] for t in tokens[:vsize]]
ret = Vocab()
for t in vocab:
ret.add(t)
return ret
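# Example of building and using a vocabulary (illustrative; 'corpus.txt' is a
# hypothetical path, not referenced elsewhere in this module):
#   vocab = build_vocab('corpus.txt', max_vocab_size=30000)
#   ids = [vocab.encode(tok) for tok in 'some example sentence'.split()]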
def _worker(args):
src, tgt, feat_ext, vocab = args
if tgt == '':
return None
try:
return feat_ext(src), tgt, [vocab.encode(x) for x in ('<bos> '+tgt+' <eos>').split()]
except Exception as e:
return None
def build_shards(src_dir, save_dir, src_file, tgt_file, vocab,
shard_size, feat_ext, mode='train', feats=None
):
src_shards = split_corpus(src_file, shard_size)
tgt_shards = split_corpus(tgt_file, shard_size)
ensure_dir(save_dir)
shard_index = 0
for src_shard, tgt_shard in zip(src_shards, tgt_shards):
logger.info('Building %s shard %d' % (mode, shard_index))
audio_paths = [os.path.join(src_dir, p.strip()) for p in src_shard]
assert all([os.path.exists(p) for p in audio_paths]), \
"following audio files not found: %s" % \
' '.join([p.strip() for p in audio_paths if not os.path.exists(p)])
targets = [t.strip() for t in tgt_shard]
src_tgt_pairs = list(zip(audio_paths, targets, cycle([feat_ext]), cycle([vocab])))
with Pool(50) as p:
result = list(tqdm(p.imap(_worker, src_tgt_pairs), total=len(src_tgt_pairs)))
result = [r for r in result if r is not None]
audio_feats, transcriptions, indices = zip(*result)
shard = {
'src': np.asarray(audio_feats),
'tgt': np.asarray(transcriptions),
'indices': np.asarray([np.asarray(x).reshape(-1,1) for x in indices]),
'feats': feats
}
shard_path = os.path.join(save_dir, '%s.%05d.pt' % (mode, shard_index))
logger.info('Saving shard %d to %s' % (shard_index, shard_path))
torch.save(shard, shard_path)
shard_index += 1
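# Illustrative end-to-end call (all paths and `my_feat_ext` are hypothetical
# placeholders; `feat_ext` only needs to map an audio path to a feature array):
#   vocab = build_vocab('data/train.tgt', max_vocab_size=30000)
#   build_shards(src_dir='data/audio', save_dir='data/shards',
#                src_file='data/train.src', tgt_file='data/train.tgt',
#                vocab=vocab, shard_size=1000, feat_ext=my_feat_ext, mode='train')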
|
[
"os.path.exists",
"itertools.cycle",
"itertools.islice",
"utils.misc.ensure_dir",
"os.path.join",
"numpy.asarray",
"torch.save",
"multiprocessing.pool.Pool",
"utils.logging.logger.info"
] |
[((2338, 2358), 'utils.misc.ensure_dir', 'ensure_dir', (['save_dir'], {}), '(save_dir)\n', (2348, 2358), False, 'from utils.misc import ensure_dir\n'), ((2449, 2506), 'utils.logging.logger.info', 'logger.info', (["('Building %s shard %d' % (mode, shard_index))"], {}), "('Building %s shard %d' % (mode, shard_index))\n", (2460, 2506), False, 'from utils.logging import logger\n'), ((3417, 3475), 'os.path.join', 'os.path.join', (['save_dir', "('%s.%05d.pt' % (mode, shard_index))"], {}), "(save_dir, '%s.%05d.pt' % (mode, shard_index))\n", (3429, 3475), False, 'import os\n'), ((3484, 3548), 'utils.logging.logger.info', 'logger.info', (["('Saving shard %d to %s' % (shard_index, shard_path))"], {}), "('Saving shard %d to %s' % (shard_index, shard_path))\n", (3495, 3548), False, 'from utils.logging import logger\n'), ((3557, 3586), 'torch.save', 'torch.save', (['shard', 'shard_path'], {}), '(shard, shard_path)\n', (3567, 3586), False, 'import torch\n'), ((2936, 2944), 'multiprocessing.pool.Pool', 'Pool', (['(50)'], {}), '(50)\n', (2940, 2944), False, 'from multiprocessing.pool import Pool\n'), ((3201, 3224), 'numpy.asarray', 'np.asarray', (['audio_feats'], {}), '(audio_feats)\n', (3211, 3224), True, 'import numpy as np\n'), ((3246, 3272), 'numpy.asarray', 'np.asarray', (['transcriptions'], {}), '(transcriptions)\n', (3256, 3272), True, 'import numpy as np\n'), ((2603, 2620), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (2617, 2620), False, 'import os\n'), ((2886, 2903), 'itertools.cycle', 'cycle', (['[feat_ext]'], {}), '([feat_ext])\n', (2891, 2903), False, 'from itertools import islice, cycle\n'), ((2905, 2919), 'itertools.cycle', 'cycle', (['[vocab]'], {}), '([vocab])\n', (2910, 2919), False, 'from itertools import islice, cycle\n'), ((1179, 1200), 'itertools.islice', 'islice', (['f', 'shard_size'], {}), '(f, shard_size)\n', (1185, 1200), False, 'from itertools import islice, cycle\n'), ((2761, 2778), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (2775, 2778), False, 'import os\n'), ((3310, 3323), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3320, 3323), True, 'import numpy as np\n')]
|
import os
import time
import argparse
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from config import get_config, export_config
from model.textcnn import TextCNN
from model.textrnn import TextRNN
from sklearn.model_selection import train_test_split
from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator
parser = argparse.ArgumentParser(description='train/test movie review classification model')
parser.add_argument('--checkpoint', type=str, help='pre-trained model', default=None)
parser.add_argument('--refine_data', type=bool, help='solving data imbalance problem', default=False)
args = parser.parse_args()
# parsed args
checkpoint = args.checkpoint
refine_data = args.refine_data
# Configuration
config, _ = get_config()
np.random.seed(config.seed)
tf.set_random_seed(config.seed)
def data_distribution(y_, size=10, img='dist.png'):
"""
    Plots the distribution of movie rates as a bar chart.
:param y_: rate data, numpy array
:param size: classes, int
:param img: save to, str
:return: numpy array
"""
from matplotlib import pyplot as plt
# showing data distribution
y_dist = np.zeros((10,), dtype=np.int32)
for y in tqdm(y_):
if size == 1:
y_dist[y - 1] += 1
else:
y_dist[np.argmax(y, axis=-1)] += 1
plt.figure(figsize=(10, 8))
plt.xlabel('rate')
plt.ylabel('frequency')
plt.grid(True)
    plt.bar(range(size), y_dist, width=.35, align='center', alpha=.5, label='rate frequency')
plt.xticks(range(10), list(range(1, 11)))
plt.savefig(img)
plt.show()
return y_dist
def data_confusion_matrix(y_pred, y_true, labels, normalize=True):
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
"""
    0-2:  bad
    3-6:  normal
    7-10: good
"""
def labeling(y):
if 0 <= y < 3:
return 0
elif 3 <= y < 7:
return 1
else:
return 2
y_pred = np.array([labeling(y) for y in y_pred])
y_true = np.array([labeling(y[0]) for y in y_true])[:-20]
assert y_pred.shape[0] == y_true.shape[0]
cnf_mat = confusion_matrix(y_pred, y_true)
np.set_printoptions(precision=2)
if normalize:
cnf_mat = cnf_mat.astype('float') / cnf_mat.sum(axis=1)[:, np.newaxis]
plt.figure()
plt.imshow(cnf_mat, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
thresh = cnf_mat.max() / 2.
for i, j in itertools.product(range(cnf_mat.shape[0]), range(cnf_mat.shape[1])):
plt.text(j, i, format(cnf_mat[i, j], '.2f'),
horizontalalignment="center",
color="white" if cnf_mat[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig("./confusion_matrix.png")
plt.show()
def load_trained_embeds(embed_mode='char'):
"""
:param embed_mode: embedding mode, str
:return: embedding vector, numpy array
"""
if embed_mode == 'd2v':
vec = Doc2VecEmbeddings(config.d2v_model, config.embed_size) # Doc2Vec Loader
if config.verbose:
print("[+] Doc2Vec loaded! Total %d pre-trained sentences, %d dims" % (len(vec), config.embed_size))
elif embed_mode == 'w2v':
        vec = Word2VecEmbeddings(config.w2v_model, config.embed_size)  # Word2Vec Loader
if config.verbose:
print("[+] Word2Vec loaded! Total %d pre-trained words, %d dims" % (len(vec), config.embed_size))
else:
vec = Char2VecEmbeddings()
if config.verbose:
print("[+] Using Char2Vec, %d dims" % config.embed_size)
return vec
if __name__ == '__main__':
embed_type = config.use_pre_trained_embeds
# Stage 1 : loading trained embeddings
vectors = load_trained_embeds(embed_type)
# Stage 2 : loading tokenize data
if config.use_pre_trained_embeds == 'c2v': # Char2Vec
if os.path.isfile(config.processed_dataset):
ds = DataLoader(file=config.processed_dataset,
fn_to_save=None,
load_from='db',
n_classes=config.n_classes,
analyzer='char',
is_analyzed=True,
use_save=False,
config=config) # DataSet Loader
else:
ds = DataLoader(file=None,
fn_to_save=config.processed_dataset,
load_from='db',
n_classes=config.n_classes,
analyzer='char',
is_analyzed=False,
use_save=True,
config=config) # DataSet Loader
ds_len = len(ds)
x_data = np.zeros((ds_len, config.sequence_length), dtype=np.uint8)
sen_len = list()
min_length, max_length, avg_length = config.sequence_length, 0, 0
for i in tqdm(range(ds_len)):
sentence = ' '.join(ds.sentences[i]).strip('\n')
sentence_length = len(sentence)
if sentence_length < min_length:
min_length = sentence_length
if sentence_length > max_length:
max_length = sentence_length
sen_len.append(sentence_length)
sent = vectors.decompose_str_as_one_hot(sentence,
warning=False)[:config.sequence_length]
x_data[i] = np.pad(sent, (0, config.sequence_length - len(sent)), 'constant', constant_values=0)
if config.verbose:
print("[*] Total %d samples (training)" % x_data.shape[0])
print(" [*] min length of reviews : %d" % min_length)
print(" [*] max length of reviews : %d" % max_length)
avg_length = sum(sen_len) / x_data.shape[0]
print(" [*] avg length of reviews : %d" % avg_length)
else: # Word2Vec / Doc2Vec
ds = DataLoader(file=config.processed_dataset,
n_classes=config.n_classes,
analyzer=None,
is_analyzed=True,
use_save=False,
config=config) # DataSet Loader
ds_len = len(ds)
x_data = np.zeros((ds_len, config.sequence_length), dtype=np.int32)
for i in tqdm(range(ds_len)):
sent = ds.sentences[i][:config.sequence_length]
x_data[i] = np.pad(vectors.words_to_index(sent),
(0, config.sequence_length - len(sent)), 'constant', constant_values=config.vocab_size)
y_data = np.array(ds.labels).reshape(-1, config.n_classes)
ds = None
if config.verbose:
print("[*] sentence to %s index conversion finish!" % config.use_pre_trained_embeds)
if refine_data:
# resizing the amount of rate-10 data
# 2.5M to 500K # downsize to 20%
if not config.n_classes == 1:
rate_10_idx = [idx for idx, y in tqdm(enumerate(y_data)) if np.argmax(y, axis=-1) == 9]
else:
rate_10_idx = [idx for idx, y in tqdm(enumerate(y_data)) if y == 10]
rand_idx = np.random.choice(rate_10_idx, 4 * len(rate_10_idx) // 5)
x_data = np.delete(x_data, rand_idx, axis=0).reshape(-1, config.sequence_length)
y_data = np.delete(y_data, rand_idx, axis=0).reshape(-1, config.n_classes)
if config.verbose:
print("[*] refined comment : ", x_data.shape)
print("[*] refined rate : ", y_data.shape)
# shuffle/split data
x_train, x_valid, y_train, y_valid = train_test_split(x_data, y_data, random_state=config.seed,
test_size=config.test_size, shuffle=True)
if config.verbose:
print("[*] train/test %d/%d(%.1f/%.1f) split!" % (len(y_train), len(y_valid),
1. - config.test_size, config.test_size))
del x_data, y_data
data_size = x_train.shape[0]
# DataSet Iterator
di = DataIterator(x=x_train, y=y_train, batch_size=config.batch_size)
if config.device == 'gpu':
dev_config = tf.ConfigProto()
dev_config.gpu_options.allow_growth = True
else:
dev_config = None
with tf.Session(config=dev_config) as s:
if config.model == 'charcnn':
# Model Loaded
model = TextCNN(s=s,
mode=config.mode,
w2v_embeds=vectors.embeds if not embed_type == 'c2v' else None,
n_classes=config.n_classes,
optimizer=config.optimizer,
kernel_sizes=config.kernel_size,
n_filters=config.filter_size,
n_dims=config.embed_size,
vocab_size=config.character_size if embed_type == 'c2v' else config.vocab_size + 1,
sequence_length=config.sequence_length,
lr=config.lr,
lr_decay=config.lr_decay,
lr_lower_boundary=config.lr_lower_boundary,
fc_unit=config.fc_unit,
th=config.act_threshold,
grad_clip=config.grad_clip,
summary=config.pretrained,
score_function=config.score_function,
use_se_module=config.use_se_module,
se_radio=config.se_ratio,
se_type=config.se_type,
use_multi_channel=config.use_multi_channel)
elif config.model == 'charrnn':
model = TextRNN(s=s,
mode=config.mode,
w2v_embeds=vectors.embeds if not embed_type == 'c2v' else None,
n_classes=config.n_classes,
optimizer=config.optimizer,
n_gru_cells=config.n_gru_cells,
n_gru_layers=config.n_gru_layers,
n_attention_size=config.n_attention_size,
n_dims=config.embed_size,
vocab_size=config.character_size if embed_type == 'c2v' else config.vocab_size + 1,
sequence_length=config.sequence_length,
lr=config.lr,
lr_decay=config.lr_decay,
lr_lower_boundary=config.lr_lower_boundary,
fc_unit=config.fc_unit,
grad_clip=config.grad_clip,
summary=config.pretrained)
else:
raise NotImplementedError("[-] Not Implemented Yet")
if config.verbose:
print("[+] %s model loaded" % config.model)
# Initializing
s.run(tf.global_variables_initializer())
# exporting config
export_config()
# loading checkpoint
global_step = 0
if checkpoint:
print("[*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(config.pretrained)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model.saver.restore(s, ckpt.model_checkpoint_path)
global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("[+] global step : %d" % global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
start_time = time.time()
if config.is_train:
best_loss = 1e1 # initial value
batch_size = config.batch_size
            # assign() only takes effect when the returned op is run in the session
            s.run(model.global_step.assign(tf.constant(global_step)))
restored_epochs = global_step // (data_size // batch_size)
for epoch in range(restored_epochs, config.epochs):
for x_tr, y_tr in di.iterate():
# training
_, loss, acc = s.run([model.train_op, model.loss, model.accuracy],
feed_dict={
model.x: x_tr,
model.y: y_tr,
model.do_rate: config.drop_out,
})
if global_step and global_step % config.logging_step == 0:
# validation
rand_idx = np.random.choice(np.arange(len(y_valid)), len(y_valid) // 20) # 5% of valid data
x_va, y_va = x_valid[rand_idx], y_valid[rand_idx]
valid_loss, valid_acc = 0., 0.
valid_iter = len(y_va) // batch_size
for i in tqdm(range(0, valid_iter)):
v_loss, v_acc = s.run([model.loss, model.accuracy],
feed_dict={
model.x: x_va[batch_size * i:batch_size * (i + 1)],
model.y: y_va[batch_size * i:batch_size * (i + 1)],
model.do_rate: .0,
})
valid_acc += v_acc
valid_loss += v_loss
valid_loss /= valid_iter
valid_acc /= valid_iter
print("[*] epoch %03d global step %07d" % (epoch, global_step),
" train_loss : {:.8f} train_acc : {:.4f}".format(loss, acc),
" valid_loss : {:.8f} valid_acc : {:.4f}".format(valid_loss, valid_acc))
# summary
summary = s.run(model.merged,
feed_dict={
model.x: x_tr,
model.y: y_tr,
model.do_rate: .0,
})
# Summary saver
model.writer.add_summary(summary, global_step)
# Model save
model.saver.save(s, config.pretrained + '%s.ckpt' % config.model,
global_step=global_step)
if valid_loss < best_loss:
print("[+] model improved {:.7f} to {:.7f}".format(best_loss, valid_loss))
best_loss = valid_loss
model.best_saver.save(s, config.pretrained + '%s-best_loss.ckpt' % config.model,
global_step=global_step)
print()
                    # as above, the assign_add op must be run to update the variable
                    s.run(model.global_step.assign_add(tf.constant(1)))
global_step += 1
end_time = time.time()
print("[+] Training Done! Elapsed {:.8f}s".format(end_time - start_time))
else: # test
x_train, y_train = None, None
x_va, y_va = x_valid, y_valid
valid_loss, valid_acc = 0., 0.
batch_size = config.batch_size
valid_iter = len(y_va) // config.batch_size
v_rates = []
for i in tqdm(range(0, valid_iter)):
v_loss, v_acc, v_rate = s.run([model.loss, model.accuracy, model.rates],
feed_dict={
model.x: x_va[batch_size * i:batch_size * (i + 1)],
model.y: y_va[batch_size * i:batch_size * (i + 1)],
model.do_rate: .0,
})
valid_acc += v_acc
valid_loss += v_loss
for j in v_rate:
v_rates.append(j)
valid_loss /= valid_iter
valid_acc /= valid_iter
print("[+] Validation Result (%s model %d global steps), total %d samples" %
(config.model, global_step, x_valid.shape[0]))
print(" => valid_loss (MSE) : {:.8f} valid_acc (th=1.0) : {:.4f}".format(valid_loss, valid_acc))
"""
with open('pred.txt', 'w') as f:
f.writelines([str("{:.4f}\n".format(rate[0])) for rate in v_rates])
"""
# confusion matrix
data_confusion_matrix(v_rates, y_va, ["bad", "normal", "good"])
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"model.textcnn.TextCNN",
"numpy.array",
"tensorflow.set_random_seed",
"dataloader.DataLoader",
"model.textrnn.TextRNN",
"matplotlib.pyplot.imshow",
"config.export_config",
"dataloader.Doc2VecEmbeddings",
"argparse.ArgumentParser",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"tensorflow.Session",
"matplotlib.pyplot.yticks",
"numpy.random.seed",
"dataloader.Char2VecEmbeddings",
"dataloader.DataIterator",
"tensorflow.ConfigProto",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"sklearn.model_selection.train_test_split",
"numpy.argmax",
"os.path.isfile",
"tensorflow.train.get_checkpoint_state",
"dataloader.Word2VecEmbeddings",
"matplotlib.pyplot.title",
"time.time",
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"tqdm.tqdm",
"matplotlib.pyplot.colorbar",
"tensorflow.global_variables_initializer",
"config.get_config",
"numpy.zeros",
"matplotlib.pyplot.figure",
"tensorflow.constant",
"matplotlib.pyplot.tight_layout"
] |
[((388, 476), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train/test movie review classification model"""'}), "(description=\n 'train/test movie review classification model')\n", (411, 476), False, 'import argparse\n'), ((791, 803), 'config.get_config', 'get_config', ([], {}), '()\n', (801, 803), False, 'from config import get_config, export_config\n'), ((805, 832), 'numpy.random.seed', 'np.random.seed', (['config.seed'], {}), '(config.seed)\n', (819, 832), True, 'import numpy as np\n'), ((833, 864), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['config.seed'], {}), '(config.seed)\n', (851, 864), True, 'import tensorflow as tf\n'), ((1192, 1223), 'numpy.zeros', 'np.zeros', (['(10,)'], {'dtype': 'np.int32'}), '((10,), dtype=np.int32)\n', (1200, 1223), True, 'import numpy as np\n'), ((1237, 1245), 'tqdm.tqdm', 'tqdm', (['y_'], {}), '(y_)\n', (1241, 1245), False, 'from tqdm import tqdm\n'), ((1366, 1393), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1376, 1393), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1417), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rate"""'], {}), "('rate')\n", (1409, 1417), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1445), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {}), "('frequency')\n", (1432, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1464), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1458, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1621), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img'], {}), '(img)\n', (1616, 1621), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1634, 1636), True, 'import matplotlib.pyplot as plt\n'), ((2217, 2249), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_pred', 'y_true'], {}), '(y_pred, y_true)\n', (2233, 2249), False, 'from sklearn.metrics import confusion_matrix\n'), ((2254, 2286), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (2273, 2286), True, 'import numpy as np\n'), ((2390, 2402), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2400, 2402), True, 'import matplotlib.pyplot as plt\n'), ((2408, 2471), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cnf_mat'], {'interpolation': '"""nearest"""', 'cmap': 'plt.cm.Blues'}), "(cnf_mat, interpolation='nearest', cmap=plt.cm.Blues)\n", (2418, 2471), True, 'import matplotlib.pyplot as plt\n'), ((2476, 2505), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion Matrix"""'], {}), "('Confusion Matrix')\n", (2485, 2505), True, 'import matplotlib.pyplot as plt\n'), ((2510, 2524), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2522, 2524), True, 'import matplotlib.pyplot as plt\n'), ((2570, 2613), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'labels'], {'rotation': '(45)'}), '(tick_marks, labels, rotation=45)\n', (2580, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2648), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'labels'], {}), '(tick_marks, labels)\n', (2628, 2648), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2967), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (2953, 2967), True, 'import matplotlib.pyplot as plt\n'), ((2972, 3001), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (2982, 3001), True, 
'import matplotlib.pyplot as plt\n'), ((3006, 3024), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3022, 3024), True, 'import matplotlib.pyplot as plt\n'), ((3030, 3067), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./confusion_matrix.png"""'], {}), "('./confusion_matrix.png')\n", (3041, 3067), True, 'import matplotlib.pyplot as plt\n'), ((3073, 3083), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3081, 3083), True, 'import matplotlib.pyplot as plt\n'), ((7918, 8023), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_data', 'y_data'], {'random_state': 'config.seed', 'test_size': 'config.test_size', 'shuffle': '(True)'}), '(x_data, y_data, random_state=config.seed, test_size=config\n .test_size, shuffle=True)\n', (7934, 8023), False, 'from sklearn.model_selection import train_test_split\n'), ((8377, 8441), 'dataloader.DataIterator', 'DataIterator', ([], {'x': 'x_train', 'y': 'y_train', 'batch_size': 'config.batch_size'}), '(x=x_train, y=y_train, batch_size=config.batch_size)\n', (8389, 8441), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((3274, 3328), 'dataloader.Doc2VecEmbeddings', 'Doc2VecEmbeddings', (['config.d2v_model', 'config.embed_size'], {}), '(config.d2v_model, config.embed_size)\n', (3291, 3328), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((4174, 4214), 'os.path.isfile', 'os.path.isfile', (['config.processed_dataset'], {}), '(config.processed_dataset)\n', (4188, 4214), False, 'import os\n'), ((5074, 5132), 'numpy.zeros', 'np.zeros', (['(ds_len, config.sequence_length)'], {'dtype': 'np.uint8'}), '((ds_len, config.sequence_length), dtype=np.uint8)\n', (5082, 5132), True, 'import numpy as np\n'), ((6267, 6404), 'dataloader.DataLoader', 'DataLoader', ([], {'file': 'config.processed_dataset', 'n_classes': 'config.n_classes', 'analyzer': 'None', 'is_analyzed': '(True)', 'use_save': '(False)', 'config': 'config'}), '(file=config.processed_dataset, n_classes=config.n_classes,\n analyzer=None, is_analyzed=True, use_save=False, config=config)\n', (6277, 6404), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((6583, 6641), 'numpy.zeros', 'np.zeros', (['(ds_len, config.sequence_length)'], {'dtype': 'np.int32'}), '((ds_len, config.sequence_length), dtype=np.int32)\n', (6591, 6641), True, 'import numpy as np\n'), ((8495, 8511), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (8509, 8511), True, 'import tensorflow as tf\n'), ((8609, 8638), 'tensorflow.Session', 'tf.Session', ([], {'config': 'dev_config'}), '(config=dev_config)\n', (8619, 8638), True, 'import tensorflow as tf\n'), ((11390, 11405), 'config.export_config', 'export_config', ([], {}), '()\n', (11403, 11405), False, 'from config import get_config, export_config\n'), ((12033, 12044), 'time.time', 'time.time', ([], {}), '()\n', (12042, 12044), False, 'import time\n'), ((3531, 3586), 'dataloader.Word2VecEmbeddings', 'Word2VecEmbeddings', (['config.w2v_model', 'config.embed_size'], {}), '(config.w2v_model, config.embed_size)\n', (3549, 3586), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((3767, 3787), 'dataloader.Char2VecEmbeddings', 'Char2VecEmbeddings', ([], {}), '()\n', (3785, 3787), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, 
Char2VecEmbeddings, DataLoader, DataIterator\n'), ((4233, 4410), 'dataloader.DataLoader', 'DataLoader', ([], {'file': 'config.processed_dataset', 'fn_to_save': 'None', 'load_from': '"""db"""', 'n_classes': 'config.n_classes', 'analyzer': '"""char"""', 'is_analyzed': '(True)', 'use_save': '(False)', 'config': 'config'}), "(file=config.processed_dataset, fn_to_save=None, load_from='db',\n n_classes=config.n_classes, analyzer='char', is_analyzed=True, use_save\n =False, config=config)\n", (4243, 4410), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((4647, 4823), 'dataloader.DataLoader', 'DataLoader', ([], {'file': 'None', 'fn_to_save': 'config.processed_dataset', 'load_from': '"""db"""', 'n_classes': 'config.n_classes', 'analyzer': '"""char"""', 'is_analyzed': '(False)', 'use_save': '(True)', 'config': 'config'}), "(file=None, fn_to_save=config.processed_dataset, load_from='db',\n n_classes=config.n_classes, analyzer='char', is_analyzed=False,\n use_save=True, config=config)\n", (4657, 4823), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((6934, 6953), 'numpy.array', 'np.array', (['ds.labels'], {}), '(ds.labels)\n', (6942, 6953), True, 'import numpy as np\n'), ((8730, 9494), 'model.textcnn.TextCNN', 'TextCNN', ([], {'s': 's', 'mode': 'config.mode', 'w2v_embeds': "(vectors.embeds if not embed_type == 'c2v' else None)", 'n_classes': 'config.n_classes', 'optimizer': 'config.optimizer', 'kernel_sizes': 'config.kernel_size', 'n_filters': 'config.filter_size', 'n_dims': 'config.embed_size', 'vocab_size': "(config.character_size if embed_type == 'c2v' else config.vocab_size + 1)", 'sequence_length': 'config.sequence_length', 'lr': 'config.lr', 'lr_decay': 'config.lr_decay', 'lr_lower_boundary': 'config.lr_lower_boundary', 'fc_unit': 'config.fc_unit', 'th': 'config.act_threshold', 'grad_clip': 'config.grad_clip', 'summary': 'config.pretrained', 'score_function': 'config.score_function', 'use_se_module': 'config.use_se_module', 'se_radio': 'config.se_ratio', 'se_type': 'config.se_type', 'use_multi_channel': 'config.use_multi_channel'}), "(s=s, mode=config.mode, w2v_embeds=vectors.embeds if not embed_type ==\n 'c2v' else None, n_classes=config.n_classes, optimizer=config.optimizer,\n kernel_sizes=config.kernel_size, n_filters=config.filter_size, n_dims=\n config.embed_size, vocab_size=config.character_size if embed_type ==\n 'c2v' else config.vocab_size + 1, sequence_length=config.\n sequence_length, lr=config.lr, lr_decay=config.lr_decay,\n lr_lower_boundary=config.lr_lower_boundary, fc_unit=config.fc_unit, th=\n config.act_threshold, grad_clip=config.grad_clip, summary=config.\n pretrained, score_function=config.score_function, use_se_module=config.\n use_se_module, se_radio=config.se_ratio, se_type=config.se_type,\n use_multi_channel=config.use_multi_channel)\n", (8737, 9494), False, 'from model.textcnn import TextCNN\n'), ((11319, 11352), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11350, 11352), True, 'import tensorflow as tf\n'), ((11551, 11599), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['config.pretrained'], {}), '(config.pretrained)\n', (11580, 11599), True, 'import tensorflow as tf\n'), ((15486, 15497), 'time.time', 'time.time', ([], {}), '()\n', (15495, 15497), False, 'import time\n'), ((1333, 1354), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, 
axis=-1)\n', (1342, 1354), True, 'import numpy as np\n'), ((7552, 7587), 'numpy.delete', 'np.delete', (['x_data', 'rand_idx'], {'axis': '(0)'}), '(x_data, rand_idx, axis=0)\n', (7561, 7587), True, 'import numpy as np\n'), ((7641, 7676), 'numpy.delete', 'np.delete', (['y_data', 'rand_idx'], {'axis': '(0)'}), '(y_data, rand_idx, axis=0)\n', (7650, 7676), True, 'import numpy as np\n'), ((10098, 10703), 'model.textrnn.TextRNN', 'TextRNN', ([], {'s': 's', 'mode': 'config.mode', 'w2v_embeds': "(vectors.embeds if not embed_type == 'c2v' else None)", 'n_classes': 'config.n_classes', 'optimizer': 'config.optimizer', 'n_gru_cells': 'config.n_gru_cells', 'n_gru_layers': 'config.n_gru_layers', 'n_attention_size': 'config.n_attention_size', 'n_dims': 'config.embed_size', 'vocab_size': "(config.character_size if embed_type == 'c2v' else config.vocab_size + 1)", 'sequence_length': 'config.sequence_length', 'lr': 'config.lr', 'lr_decay': 'config.lr_decay', 'lr_lower_boundary': 'config.lr_lower_boundary', 'fc_unit': 'config.fc_unit', 'grad_clip': 'config.grad_clip', 'summary': 'config.pretrained'}), "(s=s, mode=config.mode, w2v_embeds=vectors.embeds if not embed_type ==\n 'c2v' else None, n_classes=config.n_classes, optimizer=config.optimizer,\n n_gru_cells=config.n_gru_cells, n_gru_layers=config.n_gru_layers,\n n_attention_size=config.n_attention_size, n_dims=config.embed_size,\n vocab_size=config.character_size if embed_type == 'c2v' else config.\n vocab_size + 1, sequence_length=config.sequence_length, lr=config.lr,\n lr_decay=config.lr_decay, lr_lower_boundary=config.lr_lower_boundary,\n fc_unit=config.fc_unit, grad_clip=config.grad_clip, summary=config.\n pretrained)\n", (10105, 10703), False, 'from model.textrnn import TextRNN\n'), ((12199, 12223), 'tensorflow.constant', 'tf.constant', (['global_step'], {}), '(global_step)\n', (12210, 12223), True, 'import tensorflow as tf\n'), ((7334, 7355), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (7343, 7355), True, 'import numpy as np\n'), ((15409, 15423), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (15420, 15423), True, 'import tensorflow as tf\n')]
|
import numpy as np
import microdf as mdf
def gini(df, col, w=None, negatives=None):
"""Calculates Gini index.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:param negatives: An optional string indicating how to treat negative
values of x:
'zero' replaces negative values with zeroes.
'shift' subtracts the minimum value from all values of x,
when this minimum is negative. That is, it adds the absolute
minimum value.
Defaults to None, which leaves negative values as they are.
:returns: A float, the Gini index.
"""
# Requires float numpy arrays (not pandas Series or lists) to work.
x = np.array(df[col]).astype("float")
if negatives == "zero":
x[x < 0] = 0
if negatives == "shift" and np.amin(x) < 0:
x -= np.amin(x)
if w is not None:
w = np.array(df[w]).astype("float")
sorted_indices = np.argsort(x)
sorted_x = x[sorted_indices]
sorted_w = w[sorted_indices]
cumw = np.cumsum(sorted_w)
cumxw = np.cumsum(sorted_x * sorted_w)
return np.sum(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:]) / (
cumxw[-1] * cumw[-1]
)
else:
sorted_x = np.sort(x)
n = len(x)
cumxw = np.cumsum(sorted_x)
# The above formula, with all weights equal to 1 simplifies to:
return (n + 1 - 2 * np.sum(cumxw) / cumxw[-1]) / n
def top_x_pct_share(df, col, top_x_pct, w=None):
"""Calculates top x% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param top_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1, 0.001.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top x%.
"""
threshold = mdf.weighted_quantile(df, col, w, 1 - top_x_pct)
top_x_pct_sum = mdf.weighted_sum(df[df[col] >= threshold], col, w)
total_sum = mdf.weighted_sum(df, col, w)
return top_x_pct_sum / total_sum
def bottom_x_pct_share(df, col, bottom_x_pct, w=None):
"""Calculates bottom x% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
    :param bottom_x_pct: Decimal between 0 and 1 of the bottom %, e.g. 0.1, 0.001.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the bottom x%.
"""
    return 1 - top_x_pct_share(df, col, 1 - bottom_x_pct, w)
def bottom_50_pct_share(df, col, w=None):
"""Calculates bottom 50% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the bottom 50%.
"""
return bottom_x_pct_share(df, col, 0.5, w)
def top_50_pct_share(df, col, w=None):
"""Calculates top 50% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 50%.
"""
return top_x_pct_share(df, col, 0.5, w)
def top_10_pct_share(df, col, w=None):
"""Calculates top 10% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 10%.
"""
return top_x_pct_share(df, col, 0.1, w)
def top_1_pct_share(df, col, w=None):
"""Calculates top 1% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 1%.
"""
return top_x_pct_share(df, col, 0.01, w)
def top_0_1_pct_share(df, col, w=None):
"""Calculates top 0.1% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 0.1%.
"""
return top_x_pct_share(df, col, 0.001, w)
def t10_b50(df, col, w=None):
"""Calculates ratio between the top 10% and bottom 50% shares.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 10% divided by
the share of w-weighted val held by the bottom 50%.
"""
return top_10_pct_share(df, col, w) / bottom_50_pct_share(df, col, w)
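# Minimal usage sketch (not part of the original module): a plain dict stands in
# for a DataFrame here, and the column names "income" and "weight" are
# illustrative only. With equal weights the weighted Gini matches the unweighted one.
if __name__ == "__main__":
    toy = {"income": [1.0, 2.0, 3.0, 10.0], "weight": [1.0, 1.0, 1.0, 1.0]}
    print(gini(toy, "income"))              # unweighted Gini, 0.4375 for this toy data
    print(gini(toy, "income", w="weight"))  # weighted Gini, same value with equal weights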
|
[
"numpy.amin",
"numpy.sort",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"microdf.weighted_sum",
"numpy.cumsum",
"microdf.weighted_quantile"
] |
[((1888, 1936), 'microdf.weighted_quantile', 'mdf.weighted_quantile', (['df', 'col', 'w', '(1 - top_x_pct)'], {}), '(df, col, w, 1 - top_x_pct)\n', (1909, 1936), True, 'import microdf as mdf\n'), ((1957, 2007), 'microdf.weighted_sum', 'mdf.weighted_sum', (['df[df[col] >= threshold]', 'col', 'w'], {}), '(df[df[col] >= threshold], col, w)\n', (1973, 2007), True, 'import microdf as mdf\n'), ((2024, 2052), 'microdf.weighted_sum', 'mdf.weighted_sum', (['df', 'col', 'w'], {}), '(df, col, w)\n', (2040, 2052), True, 'import microdf as mdf\n'), ((895, 905), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (902, 905), True, 'import numpy as np\n'), ((997, 1010), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (1007, 1010), True, 'import numpy as np\n'), ((1100, 1119), 'numpy.cumsum', 'np.cumsum', (['sorted_w'], {}), '(sorted_w)\n', (1109, 1119), True, 'import numpy as np\n'), ((1136, 1166), 'numpy.cumsum', 'np.cumsum', (['(sorted_x * sorted_w)'], {}), '(sorted_x * sorted_w)\n', (1145, 1166), True, 'import numpy as np\n'), ((1312, 1322), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (1319, 1322), True, 'import numpy as np\n'), ((1358, 1377), 'numpy.cumsum', 'np.cumsum', (['sorted_x'], {}), '(sorted_x)\n', (1367, 1377), True, 'import numpy as np\n'), ((751, 768), 'numpy.array', 'np.array', (['df[col]'], {}), '(df[col])\n', (759, 768), True, 'import numpy as np\n'), ((866, 876), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (873, 876), True, 'import numpy as np\n'), ((1182, 1235), 'numpy.sum', 'np.sum', (['(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:])'], {}), '(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:])\n', (1188, 1235), True, 'import numpy as np\n'), ((940, 955), 'numpy.array', 'np.array', (['df[w]'], {}), '(df[w])\n', (948, 955), True, 'import numpy as np\n'), ((1478, 1491), 'numpy.sum', 'np.sum', (['cumxw'], {}), '(cumxw)\n', (1484, 1491), True, 'import numpy as np\n')]
|
import math
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest
import numba.cuda.random
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
from numba.cuda.random import \
xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, \
xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64
from numba.core import config
# Distributions
UNIFORM = 1
NORMAL = 2
@cuda.jit
def rng_kernel_float32(states, out, count, distribution):
thread_id = cuda.grid(1)
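    # Each thread draws `count` values from its own RNG stream and writes them
    # into a contiguous block of `out`.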
for i in range(count):
if distribution == UNIFORM:
out[thread_id * count + i] = xoroshiro128p_uniform_float32(states, thread_id)
elif distribution == NORMAL:
out[thread_id * count + i] = xoroshiro128p_normal_float32(states, thread_id)
@cuda.jit
def rng_kernel_float64(states, out, count, distribution):
thread_id = cuda.grid(1)
for i in range(count):
if distribution == UNIFORM:
out[thread_id * count + i] = xoroshiro128p_uniform_float64(states, thread_id)
elif distribution == NORMAL:
out[thread_id * count + i] = xoroshiro128p_normal_float64(states, thread_id)
class TestCudaRandomXoroshiro128p(CUDATestCase):
def test_create(self):
states = cuda.random.create_xoroshiro128p_states(10, seed=1)
s = states.copy_to_host()
self.assertEqual(len(np.unique(s)), 10)
def test_create_subsequence_start(self):
states = cuda.random.create_xoroshiro128p_states(10, seed=1)
s1 = states.copy_to_host()
states = cuda.random.create_xoroshiro128p_states(10, seed=1,
subsequence_start=3)
s2 = states.copy_to_host()
# Starting seeds should match up with offset of 3
np.testing.assert_array_equal(s1[3:], s2[:-3])
def test_create_stream(self):
stream = cuda.stream()
states = cuda.random.create_xoroshiro128p_states(10, seed=1, stream=stream)
s = states.copy_to_host()
self.assertEqual(len(np.unique(s)), 10)
def check_uniform(self, kernel_func, dtype):
states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
out = np.zeros(2 * 32 * 32, dtype=np.float32)
kernel_func[2, 32](states, out, 32, UNIFORM)
self.assertAlmostEqual(out.min(), 0.0, delta=1e-3)
self.assertAlmostEqual(out.max(), 1.0, delta=1e-3)
self.assertAlmostEqual(out.mean(), 0.5, delta=1.5e-2)
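        # A Uniform(0, 1) sample has standard deviation 1/sqrt(12) = 1/(2*sqrt(3)), about 0.2887.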
self.assertAlmostEqual(out.std(), 1.0/(2*math.sqrt(3)), delta=6e-3)
def test_uniform_float32(self):
self.check_uniform(rng_kernel_float32, np.float32)
@skip_on_cudasim('skip test for speed under cudasim')
def test_uniform_float64(self):
self.check_uniform(rng_kernel_float64, np.float64)
def check_normal(self, kernel_func, dtype):
states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
out = np.zeros(2 * 32 * 32, dtype=dtype)
kernel_func[2, 32](states, out, 32, NORMAL)
self.assertAlmostEqual(out.mean(), 0.0, delta=4e-3)
self.assertAlmostEqual(out.std(), 1.0, delta=2e-3)
def test_normal_float32(self):
self.check_normal(rng_kernel_float32, np.float32)
@skip_on_cudasim('skip test for speed under cudasim')
def test_normal_float64(self):
self.check_normal(rng_kernel_float64, np.float64)
if __name__ == '__main__':
unittest.main()
|
[
"numba.cuda.random.create_xoroshiro128p_states",
"numba.cuda.random.xoroshiro128p_normal_float64",
"numba.cuda.random.xoroshiro128p_uniform_float32",
"numpy.unique",
"numba.cuda.grid",
"numba.cuda.random.xoroshiro128p_normal_float32",
"math.sqrt",
"numba.cuda.random.xoroshiro128p_uniform_float64",
"numba.cuda.stream",
"numba.cuda.testing.unittest.main",
"numpy.zeros",
"numba.cuda.testing.skip_on_cudasim",
"numpy.testing.assert_array_equal"
] |
[((512, 524), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (521, 524), False, 'from numba import cuda, float32\n'), ((891, 903), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (900, 903), False, 'from numba import cuda, float32\n'), ((2636, 2688), 'numba.cuda.testing.skip_on_cudasim', 'skip_on_cudasim', (['"""skip test for speed under cudasim"""'], {}), "('skip test for speed under cudasim')\n", (2651, 2688), False, 'from numba.cuda.testing import skip_on_cudasim, CUDATestCase\n'), ((3228, 3280), 'numba.cuda.testing.skip_on_cudasim', 'skip_on_cudasim', (['"""skip test for speed under cudasim"""'], {}), "('skip test for speed under cudasim')\n", (3243, 3280), False, 'from numba.cuda.testing import skip_on_cudasim, CUDATestCase\n'), ((3406, 3421), 'numba.cuda.testing.unittest.main', 'unittest.main', ([], {}), '()\n', (3419, 3421), False, 'from numba.cuda.testing import unittest\n'), ((1279, 1330), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(10)'], {'seed': '(1)'}), '(10, seed=1)\n', (1318, 1330), False, 'from numba import cuda, float32\n'), ((1476, 1527), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(10)'], {'seed': '(1)'}), '(10, seed=1)\n', (1515, 1527), False, 'from numba import cuda, float32\n'), ((1581, 1653), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(10)'], {'seed': '(1)', 'subsequence_start': '(3)'}), '(10, seed=1, subsequence_start=3)\n', (1620, 1653), False, 'from numba import cuda, float32\n'), ((1768, 1814), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['s1[3:]', 's2[:-3]'], {}), '(s1[3:], s2[:-3])\n', (1797, 1814), True, 'import numpy as np\n'), ((1867, 1880), 'numba.cuda.stream', 'cuda.stream', ([], {}), '()\n', (1878, 1880), False, 'from numba import cuda, float32\n'), ((1898, 1964), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(10)'], {'seed': '(1)', 'stream': 'stream'}), '(10, seed=1, stream=stream)\n', (1937, 1964), False, 'from numba import cuda, float32\n'), ((2114, 2169), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(32 * 2)'], {'seed': '(1)'}), '(32 * 2, seed=1)\n', (2153, 2169), False, 'from numba import cuda, float32\n'), ((2184, 2223), 'numpy.zeros', 'np.zeros', (['(2 * 32 * 32)'], {'dtype': 'np.float32'}), '(2 * 32 * 32, dtype=np.float32)\n', (2192, 2223), True, 'import numpy as np\n'), ((2850, 2905), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(32 * 2)'], {'seed': '(1)'}), '(32 * 2, seed=1)\n', (2889, 2905), False, 'from numba import cuda, float32\n'), ((2920, 2954), 'numpy.zeros', 'np.zeros', (['(2 * 32 * 32)'], {'dtype': 'dtype'}), '(2 * 32 * 32, dtype=dtype)\n', (2928, 2954), True, 'import numpy as np\n'), ((630, 678), 'numba.cuda.random.xoroshiro128p_uniform_float32', 'xoroshiro128p_uniform_float32', (['states', 'thread_id'], {}), '(states, thread_id)\n', (659, 678), False, 'from numba.cuda.random import xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((1009, 1057), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', (['states', 'thread_id'], {}), '(states, thread_id)\n', (1038, 1057), False, 'from numba.cuda.random import xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, 
xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((757, 804), 'numba.cuda.random.xoroshiro128p_normal_float32', 'xoroshiro128p_normal_float32', (['states', 'thread_id'], {}), '(states, thread_id)\n', (785, 804), False, 'from numba.cuda.random import xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((1136, 1183), 'numba.cuda.random.xoroshiro128p_normal_float64', 'xoroshiro128p_normal_float64', (['states', 'thread_id'], {}), '(states, thread_id)\n', (1164, 1183), False, 'from numba.cuda.random import xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((1394, 1406), 'numpy.unique', 'np.unique', (['s'], {}), '(s)\n', (1403, 1406), True, 'import numpy as np\n'), ((2028, 2040), 'numpy.unique', 'np.unique', (['s'], {}), '(s)\n', (2037, 2040), True, 'import numpy as np\n'), ((2507, 2519), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (2516, 2519), False, 'import math\n')]
|
# -*- coding: utf-8 -*-
# Functions and Script to extract data
import blocksci
import pandas as pd
import numpy as np
import networkx as nx
import multiprocessing as mp
import itertools
import random
import time
import string
import pickle
import csv
import gc
import os, sys
from functools import partial
#***********CLASSES AND FUNCTIONS***********
# Class that creates a blockchain partition (dictionary) given a date range and partition type (blocks, days, weeks)
class BchainPartition():
def __init__(self,chain,start_timestamp,end_timestamp,ptype='blocks',sample_size=10):
blocks=chain.range(start=start_timestamp,end=end_timestamp)
self.block_h=blocks.height
print('Start_block: {}'.format(self.block_h[0]))
print('End_block: {}'.format(self.block_h[-1]))
        if sample_size>0: #Samples blocks uniformly at random from the block range
sample_list=list(np.random.choice(self.block_h,sample_size))
sample_blocks=[chain[ix_b] for ix_b in sample_list]
txs=[b.txes for b in sample_blocks]
self.partition={h:[t for t in t_l] for h,t_l in zip(sample_list,txs)}
self.no_parts=len(sample_blocks)
else:
if ptype=='blocks':
self.partition={b.height:[tx for tx in b.txes] for b in blocks}
self.no_parts=np.int32(len(blocks))
print('Number of Blocks: {} '.format(len(blocks)))
print('Highest block height: {}'.format(blocks[-1].height))
                print('Number of Transactions: {} '.format(sum(len(t_l) for t_l in self.partition.values())))
# ***TODO: Create partition for other types of partitions (use tx.block_time)
# Function that takes blockchain partition and outputs pandas data frame with features
# for the graph defined by each split in the partition
def partition_data(chainpartiton,directory,filename):
# Dictionary with partition
partition=chainpartiton.partition
partindex=partition.keys()
parts=partition.values()
data_tuples=[]
graphs=[]
print('Number of parts: {}'.format(len(partindex)))
tuples=[(index,part) for index,part in zip(partindex,parts)]
no_parts=len(tuples)
processed=0
for t in tuples:
data_i,columns_i,graph_i=graph_features(t,slice_type='blocks')
with open(filename,'a') as f:
writer = csv.writer(f, delimiter=',')
if len(data_tuples)==0: # Write column names on first pass
writer.writerow(columns_i)
writer.writerow(data_i)
# Save graph
nx.write_gpickle(graph_i,directory+str(graph_i.graph['graph_id'])+'.gpickle')
data_tuples.append((data_i,columns_i))
graphs.append(graph_i)
processed+=1
progress=(processed/no_parts)*100
#sys.stdout.write("Download progress: %d%% \r" % (progress) )
sys.stdout.write("Download progress: {:07.4f} \r".format(progress) )
sys.stdout.flush()
'''
chunksize=len(tuples)%ncpu
with mp.Pool(processes=ncpu) as pool:
data_tuples=pool.map(graph_features,tuples,chunksize)
'''
    columns=data_tuples[0][1] #This value is being re-written. This design choice is to maintain consistency with columns.
data=[i for i,j in data_tuples]
data=np.array(data)
df=pd.DataFrame(data=data[:,:],columns=columns)
return (df,graphs)
# Function that receives a chain part (list of transactions), generates transaction graph and calculates statistics
def graph_features(chain_part_tuple,slice_type='blocks'):
index=chain_part_tuple[0]
chain_part=chain_part_tuple[1]
block_height=chain_part[-1].block_height
graph=block_graph(chain_part,index,slice_type)
nx.info(graph)
nodes=graph.nodes(data=True)
edges=graph.edges(data=True)
data=[index]
columns=['block_height']
# Number of Nodes
no_nodes=nx.number_of_nodes(graph)
data.append(no_nodes)
columns.append('no_nodes')
# Number of Edges (address to address transactions)
no_edges=nx.number_of_edges(graph)
data.append(no_edges)
columns.append('no_edges')
# Total value transacted
total_value=np.sum(np.array([a['value'] for n1,n2,a in edges]))
data.append(total_value)
columns.append('value_transacted')
# Total Density
density=nx.density(graph)
data.append(density)
columns.append('total_density')
    # Self loops (addresses transacting with themselves), counted with nx.number_of_selfloops(G)
nodes_self=nx.number_of_selfloops(graph)
data.append(nodes_self)
columns.append('nodes_self')
    # Value transacted over self loops (from nx.selfloop_edges(G))
values=np.array([a['value'] for n1,n2,a in nx.selfloop_edges(graph,data=True)])
selfloop_value=np.sum(values)
data.append(selfloop_value)
columns.append('selfloop_value')
# Number of transactions to old addresses
old_nodes=[n for n,a in nodes if a['block_created']<block_height]
edges_to_old=graph.in_edges(old_nodes,data=True)
data.append(len(edges_to_old))
columns.append('old_nodes_in')
# Ratio of transactions to old addresses to total transactions
ratio_oldin_totalin=len(edges_to_old)/(no_edges+1)
data.append(ratio_oldin_totalin)
columns.append('ratio_oldin_totalin')
# Value of transactions to old addresses
value_to_old=[a['value'] for n1,n2,a in edges_to_old]
data.append(np.sum(np.array(value_to_old)))
columns.append('value_to_old')
# Old address density
old_graph=nx.induced_subgraph(graph,old_nodes)
old_density=nx.density(old_graph)
data.append(old_density)
columns.append('old_density')
# ***TODO*** (Aggregated graph analysis)
# Accumulated reuse
# Dominance (Agg graph or new vs. old dominance)
#https://networkx.github.io/documentation/stable/reference/algorithms/dominance.html
    # Common ancestors (as with dominance, the address ancestor path should be proportional
    # to the blockchain length if address reuse is minimal)
#***********
#print('{} Processed'.format(index))
return (data,columns,graph)
# Function that creates the transaction graph for a given list of transactions
def block_graph(txs,index,slice_type):
# Create graph and process
graph = nx.MultiDiGraph(graph_id=index,slice_type=slice_type)
nodes=[]
edges=[]
# Extract transactions information
init_block=txs[0].block.height
txs_dic={tx.index:tx for tx in txs}
txs_ix=list(txs_dic.keys())
txs_ix.sort()
start_ix=txs_ix[0]
end_ix=txs_ix[-1]
# Generate edges to input to graph
    # TODO: Re-write for pre-processing: see the last answer (using queues) at https://stackoverflow.com/questions/33107019/multiple-threads-writing-to-the-same-csv-in-python
'''
with mp.Pool(processes=ncpu) as pool:
edges=pool.map(extract_nodes_edges,txs,chunksize)
'''
for tx in txs:
edges_i,nodes_i=extract_nodes_edges(tx)
nodes.append(nodes_i)
edges.append(edges_i)
nodes=list(itertools.chain.from_iterable(nodes))
edges=list(itertools.chain.from_iterable(edges))
# Input to graph
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
#print('Generated Graph for Block starting at:{}'.format(init_block))
return graph
# Function that receives a transaction and generates nodes and edges from addresses in transaction
def extract_nodes_edges(transaction):
# Initialize values and get info from transaction
edges=[]
output_value=transaction.output_value
block_height=transaction.block_height
tx_id=transaction.index
# Get inputs, types and values
inputs=transaction.inputs.address
input_val=transaction.inputs.value
input_nodes=[(inp.address_num,{'raw_type':inp.raw_type,'block_created':inp.first_tx.block.height})for inp in inputs]
# Get outputs and types
outputs=transaction.outputs.address
output_nodes=[(out.address_num,{'raw_type':out.raw_type,'block_created':out.first_tx.block.height})for out in outputs]
# ****TODO: Add address balance as attribute to node****
# Create nodes
nodes=input_nodes+output_nodes
# Create edges (NetworkX will automatically create nodes when given edges)
for i in range(len(inputs)):
value=input_val[i]
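        # Split each input's value evenly across all outputs; the true per-output flow is unknown.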
prop_value=value/len(outputs)
for o in range(len(outputs)):
            edge=(inputs[i].address_num,outputs[o].address_num,{'value':prop_value,'tx_id':tx_id})
edges.append(edge)
return edges,nodes
#***********SCRIPT***********
# Point to parsed blockchain data
ncpu=mp.cpu_count()
chain = blocksci.Blockchain("/home/ubuntu/bitcoin")
types=blocksci.address_type.types
total_blocks=chain.blocks
print('Total Blocks up to {}: {} '.format(total_blocks[-1].time,len(total_blocks)))
#---SCRIPT: generates data for graphs in each part of the partition
# Create directories and files to store graphs and dataframe
# Generate an extraction ID (a random 6-character alphanumeric string)
extraction_id = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(6)])
print('Extraction id: {}'.format(extraction_id))
#---Save Dataframes
# Create directory and save
start='2010-02-01 00:00:00'
end='2018-02-01 11:59:59'
blocks=chain.range(start=start,end=end)
sample_size=35000
start_c=start
start_c=start_c.replace('-','_').replace(' ','_').replace(':','_')
end_c=end
end_c=end_c.replace('-','_').replace(' ','_').replace(':','_')
directory='extractions/'+extraction_id+'-'+str(sample_size)+'-blocks-'+start_c+'-'+end_c+'/graphs'+'/'
if not os.path.exists(directory):
os.makedirs(directory)
# Create Filename and save
filename='extractions/'+extraction_id+'-'+str(sample_size)+'-blocks-'+start_c+'-'+end_c+'/'+extraction_id+'-'+str(sample_size)+'-blocks-'+start_c+'-'+end_c+'.csv'
start_time=time.time()
partition=BchainPartition(chain,start,end,sample_size=sample_size)
df,graphs=partition_data(partition,directory,filename)
df.head()
end_time=time.time()
print('Time taken={}'.format(end_time-start_time))
print('\n***EXTRACTION COMPLETED SUCCESSFULLY***')
|
[
"networkx.number_of_selfloops",
"networkx.induced_subgraph",
"multiprocessing.cpu_count",
"numpy.array",
"blocksci.Blockchain",
"os.path.exists",
"networkx.info",
"itertools.chain.from_iterable",
"networkx.number_of_nodes",
"pandas.DataFrame",
"sys.stdout.flush",
"networkx.MultiDiGraph",
"random.choice",
"numpy.random.choice",
"csv.writer",
"time.time",
"networkx.number_of_edges",
"os.makedirs",
"networkx.selfloop_edges",
"numpy.sum",
"networkx.density"
] |
[((8556, 8570), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (8568, 8570), True, 'import multiprocessing as mp\n'), ((8579, 8622), 'blocksci.Blockchain', 'blocksci.Blockchain', (['"""/home/ubuntu/bitcoin"""'], {}), "('/home/ubuntu/bitcoin')\n", (8598, 8622), False, 'import blocksci\n'), ((9788, 9799), 'time.time', 'time.time', ([], {}), '()\n', (9797, 9799), False, 'import time\n'), ((9942, 9953), 'time.time', 'time.time', ([], {}), '()\n', (9951, 9953), False, 'import time\n'), ((3245, 3259), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3253, 3259), True, 'import numpy as np\n'), ((3267, 3313), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data[:, :]', 'columns': 'columns'}), '(data=data[:, :], columns=columns)\n', (3279, 3313), True, 'import pandas as pd\n'), ((3679, 3693), 'networkx.info', 'nx.info', (['graph'], {}), '(graph)\n', (3686, 3693), True, 'import networkx as nx\n'), ((3842, 3867), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['graph'], {}), '(graph)\n', (3860, 3867), True, 'import networkx as nx\n'), ((3995, 4020), 'networkx.number_of_edges', 'nx.number_of_edges', (['graph'], {}), '(graph)\n', (4013, 4020), True, 'import networkx as nx\n'), ((4277, 4294), 'networkx.density', 'nx.density', (['graph'], {}), '(graph)\n', (4287, 4294), True, 'import networkx as nx\n'), ((4457, 4486), 'networkx.number_of_selfloops', 'nx.number_of_selfloops', (['graph'], {}), '(graph)\n', (4479, 4486), True, 'import networkx as nx\n'), ((4702, 4716), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (4708, 4716), True, 'import numpy as np\n'), ((5460, 5497), 'networkx.induced_subgraph', 'nx.induced_subgraph', (['graph', 'old_nodes'], {}), '(graph, old_nodes)\n', (5479, 5497), True, 'import networkx as nx\n'), ((5513, 5534), 'networkx.density', 'nx.density', (['old_graph'], {}), '(old_graph)\n', (5523, 5534), True, 'import networkx as nx\n'), ((6215, 6269), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {'graph_id': 'index', 'slice_type': 'slice_type'}), '(graph_id=index, slice_type=slice_type)\n', (6230, 6269), True, 'import networkx as nx\n'), ((9529, 9554), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (9543, 9554), False, 'import os, sys\n'), ((9560, 9582), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (9571, 9582), False, 'import os, sys\n'), ((2905, 2923), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2921, 2923), False, 'import os, sys\n'), ((4131, 4176), 'numpy.array', 'np.array', (["[a['value'] for n1, n2, a in edges]"], {}), "([a['value'] for n1, n2, a in edges])\n", (4139, 4176), True, 'import numpy as np\n'), ((6971, 7007), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['nodes'], {}), '(nodes)\n', (7000, 7007), False, 'import itertools\n'), ((7024, 7060), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['edges'], {}), '(edges)\n', (7053, 7060), False, 'import itertools\n'), ((8977, 9028), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (8990, 9028), False, 'import random\n'), ((2318, 2346), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (2328, 2346), False, 'import csv\n'), ((5358, 5380), 'numpy.array', 'np.array', (['value_to_old'], {}), '(value_to_old)\n', (5366, 5380), True, 'import numpy as np\n'), ((898, 941), 'numpy.random.choice', 'np.random.choice', (['self.block_h', 'sample_size'], {}), '(self.block_h, 
sample_size)\n', (914, 941), True, 'import numpy as np\n'), ((4646, 4681), 'networkx.selfloop_edges', 'nx.selfloop_edges', (['graph'], {'data': '(True)'}), '(graph, data=True)\n', (4663, 4681), True, 'import networkx as nx\n')]
|
import datetime
import numpy as np
import libpySat as pySat
from astropy import _erfa as erfa
from scipy.misc import derivative
from scipy import interpolate
class TransformPolarMotion:
def __init__(self,fxp,fyp):
self.fxp=fxp
self.fyp=fyp
self.epochSave = datetime.datetime.now()
self.rotSave = np.matrix(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
self.sprime=0.0
def __getPolarMotion(self, epoch: datetime.datetime):
"""
:param epoch:
:return: polar motion: x,y [mas]
"""
mjd=pySat.UTC2MJD(epoch)
return self.fxp(mjd),self.fyp(mjd)
def __getPolarMotionDot(self, epoch: datetime.datetime):
"""
:param epoch:
:return: polar motion: x,y [mas/s]
"""
mjd=pySat.UTC2MJD(epoch)
xpdot=derivative(self.fxp,mjd,dx=1e-3,n=1)
ypdot = derivative(self.fyp, mjd, dx=1e-3,n=1)
return xpdot,ypdot
def getMatrix_PolarMotion(self,epoch:datetime.datetime):
"""
Get the polar motion matrix. Relates ITRF to TIRS.
:param epoch:
:return:
"""
if (epoch !=self.epochSave):
xp,yp = self.__getPolarMotion(epoch)
# TODO: Implementation of tidal and libration terms for polar motion...
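            # Convert the polar motion components from arcseconds to radians before building the rotation.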
xp*=np.pi/180.0/3600.0
yp*=np.pi/180.0/3600.0
sp= self.__getTIO(epoch)
#print(xp,yp,sp)
rxy= np.matmul(pySat.RotationMatrix3DY(xp),pySat.RotationMatrix3DX(yp))
rs=pySat.RotationMatrix3DZ(-sp)
self.rotSave=np.matmul(rs,rxy)
self.epochSave = epoch
return self.rotSave
else:
return self.rotSave
def __getTIO(self, epoch:datetime.datetime ):
"""
Gets the Terrestrial Intermediate Origin (TIO) locator s'
Terrestrial Intermediate Ref Sys (TIRS) defined by TIO and CIP.
        TIRS is related to CIRS by the Earth Rotation Angle.
:param epoch:
:return:
"""
mjd = pySat.pySatTime.UTC2MJD(epoch)
self.sprime=erfa.sp00(2400000.5,mjd)
return self.sprime
def getMatrix_PolarMotionDot(self,epoch:datetime.datetime):
"""
Get the polar motion matrix. Relates ITRF to TIRS.
:param epoch:
:return:
"""
# TODO: Implementation of tidal and libration terms for polar motion...
xp, yp = self.__getPolarMotion(epoch)
xpDot,ypDot = self.__getPolarMotionDot(epoch)
xp *= np.pi / 180.0 / 3600.0
yp *= np.pi / 180.0 / 3600.0
xpDot*=np.pi/180.0/3600.0
ypDot*=np.pi/180.0/3600.0
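        # Rate of the TIO locator s': roughly -47 microarcseconds per Julian century, expressed here in rad/s.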
spDot = -47.0 / 1.0e6 / 3600.0 / 180.0 * np.pi / 86400.0 / 36525.0
sp = self.__getTIO(epoch)
print('Pmotion dot:',xpDot,ypDot,spDot)
rxy= np.matmul(pySat.RotationMatrix3DY(xp),pySat.RotationMatrix3DX(yp))
rxyDot = np.matmul(xpDot* pySat.RotationMatrix3DY(xp), pySat.RotationMatrix3DX(yp)) \
+np.matmul( pySat.RotationMatrix3DY(xp),ypDot* pySat.RotationMatrix3DX(yp))
rs=pySat.RotationMatrix3DZ(-sp)
rsDot=-spDot*pySat.RotationMatrix3DZ(-sp)
return np.matmul(rsDot,rxy) + np.matmul(rs,rxyDot)
|
[
"astropy._erfa.sp00",
"libpySat.UTC2MJD",
"libpySat.pySatTime.UTC2MJD",
"scipy.misc.derivative",
"datetime.datetime.now",
"libpySat.RotationMatrix3DZ",
"libpySat.RotationMatrix3DY",
"numpy.matmul",
"libpySat.RotationMatrix3DX",
"numpy.matrix"
] |
[((288, 311), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (309, 311), False, 'import datetime\n'), ((335, 392), 'numpy.matrix', 'np.matrix', (['([0, 0, 0], [0, 0, 0], [0, 0, 0])'], {'dtype': 'float'}), '(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)\n', (344, 392), True, 'import numpy as np\n'), ((577, 597), 'libpySat.UTC2MJD', 'pySat.UTC2MJD', (['epoch'], {}), '(epoch)\n', (590, 597), True, 'import libpySat as pySat\n'), ((806, 826), 'libpySat.UTC2MJD', 'pySat.UTC2MJD', (['epoch'], {}), '(epoch)\n', (819, 826), True, 'import libpySat as pySat\n'), ((841, 881), 'scipy.misc.derivative', 'derivative', (['self.fxp', 'mjd'], {'dx': '(0.001)', 'n': '(1)'}), '(self.fxp, mjd, dx=0.001, n=1)\n', (851, 881), False, 'from scipy.misc import derivative\n'), ((894, 934), 'scipy.misc.derivative', 'derivative', (['self.fyp', 'mjd'], {'dx': '(0.001)', 'n': '(1)'}), '(self.fyp, mjd, dx=0.001, n=1)\n', (904, 934), False, 'from scipy.misc import derivative\n'), ((2056, 2086), 'libpySat.pySatTime.UTC2MJD', 'pySat.pySatTime.UTC2MJD', (['epoch'], {}), '(epoch)\n', (2079, 2086), True, 'import libpySat as pySat\n'), ((2107, 2132), 'astropy._erfa.sp00', 'erfa.sp00', (['(2400000.5)', 'mjd'], {}), '(2400000.5, mjd)\n', (2116, 2132), True, 'from astropy import _erfa as erfa\n'), ((3106, 3134), 'libpySat.RotationMatrix3DZ', 'pySat.RotationMatrix3DZ', (['(-sp)'], {}), '(-sp)\n', (3129, 3134), True, 'import libpySat as pySat\n'), ((1549, 1577), 'libpySat.RotationMatrix3DZ', 'pySat.RotationMatrix3DZ', (['(-sp)'], {}), '(-sp)\n', (1572, 1577), True, 'import libpySat as pySat\n'), ((1603, 1621), 'numpy.matmul', 'np.matmul', (['rs', 'rxy'], {}), '(rs, rxy)\n', (1612, 1621), True, 'import numpy as np\n'), ((2851, 2878), 'libpySat.RotationMatrix3DY', 'pySat.RotationMatrix3DY', (['xp'], {}), '(xp)\n', (2874, 2878), True, 'import libpySat as pySat\n'), ((2879, 2906), 'libpySat.RotationMatrix3DX', 'pySat.RotationMatrix3DX', (['yp'], {}), '(yp)\n', (2902, 2906), True, 'import libpySat as pySat\n'), ((3156, 3184), 'libpySat.RotationMatrix3DZ', 'pySat.RotationMatrix3DZ', (['(-sp)'], {}), '(-sp)\n', (3179, 3184), True, 'import libpySat as pySat\n'), ((3201, 3222), 'numpy.matmul', 'np.matmul', (['rsDot', 'rxy'], {}), '(rsDot, rxy)\n', (3210, 3222), True, 'import numpy as np\n'), ((3224, 3245), 'numpy.matmul', 'np.matmul', (['rs', 'rxyDot'], {}), '(rs, rxyDot)\n', (3233, 3245), True, 'import numpy as np\n'), ((1477, 1504), 'libpySat.RotationMatrix3DY', 'pySat.RotationMatrix3DY', (['xp'], {}), '(xp)\n', (1500, 1504), True, 'import libpySat as pySat\n'), ((1505, 1532), 'libpySat.RotationMatrix3DX', 'pySat.RotationMatrix3DX', (['yp'], {}), '(yp)\n', (1528, 1532), True, 'import libpySat as pySat\n'), ((2971, 2998), 'libpySat.RotationMatrix3DX', 'pySat.RotationMatrix3DX', (['yp'], {}), '(yp)\n', (2994, 2998), True, 'import libpySat as pySat\n'), ((3031, 3058), 'libpySat.RotationMatrix3DY', 'pySat.RotationMatrix3DY', (['xp'], {}), '(xp)\n', (3054, 3058), True, 'import libpySat as pySat\n'), ((2942, 2969), 'libpySat.RotationMatrix3DY', 'pySat.RotationMatrix3DY', (['xp'], {}), '(xp)\n', (2965, 2969), True, 'import libpySat as pySat\n'), ((3066, 3093), 'libpySat.RotationMatrix3DX', 'pySat.RotationMatrix3DX', (['yp'], {}), '(yp)\n', (3089, 3093), True, 'import libpySat as pySat\n')]
|
import numpy as np
import pandas as pd
class WetChickenBaselinePolicy:
def __init__(self, env, gamma, method='heuristic', epsilon=0.1, convergence=0.1, learning_rate=0.1, max_nb_it=999,
order_epsilon=3, order_learning_rate=3):
self.env = env
self.gamma = gamma
self.nb_states = env.width * env.length
self.nb_actions = 5
self.pi = np.ones((self.nb_states, self.nb_actions)) / self.nb_actions
self.epsilon = epsilon
self.convergence = convergence
self.learning_rate = learning_rate
self.method = method
self.max_nb_it = max_nb_it
self.order_epsilon = order_epsilon
self.order_learning_rate = order_learning_rate
self.compute_baseline()
def compute_baseline(self):
if self.method == 'fixed_learning':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
while nb_it < self.max_nb_it:
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
state, reward, next_state = self.env.step(action)
old_q = q.copy()
q[state, action] += self.learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = self.epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - self.epsilon
nb_it += 1
elif self.method == 'variable_learning':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
while nb_it < self.max_nb_it:
nb_it += 1
epsilon = self.epsilon * 1 / nb_it ** (1 / self.order_epsilon)
learning_rate = self.learning_rate * 1 / nb_it ** (1 / self.order_learning_rate)
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
state, reward, next_state = self.env.step(action)
old_q = q.copy()
q[state, action] += learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - epsilon
elif self.method == 'state_count_dependent_variable':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
count_state_action = np.zeros((self.nb_states, self.nb_actions))
while nb_it < self.max_nb_it:
nb_it += 1
epsilon = self.epsilon * 1 / nb_it ** (1 / self.order_epsilon)
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
count_state_action[state, action] += 1
learning_rate = self.learning_rate * 1 / count_state_action[state, action] ** (
1 / self.order_learning_rate)
state, reward, next_state = self.env.step(action)
old_q = q.copy()
q[state, action] += learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - epsilon
elif self.method == 'heuristic':
# Try to get to in the middle of the river and then paddle as strong as possible against the stream
# I.e. try to get to state (2,2), as a number 12, and then choose action 2
pi = np.zeros((self.nb_states, self.nb_actions))
for state in range(self.nb_states):
for action in range(self.nb_actions):
x, y = int(state / self.nb_actions), state % self.nb_actions
if x > 2:
pi[state, 2] = 1 # We are too close to the waterfall ==> paddle as strong as possible
elif y < 2:
pi[state, 4] = 1 # We are not in immediate danger, but too close to the left ==> go right
elif y > 2:
pi[state, 3] = 1 # We are not in immediate danger, but too close to the right ==> go left
elif x == 2:
                        pi[state, 2] = 1 # We are perfect now, try to keep the position by paddling as strong as possible
elif x == 1:
pi[state, 1] = 1 # Close to perfect, just paddle a bit
else:
pi[state, 0] = 1 # Right lane, but too high up, just drift with the river
self.pi = (1 - self.epsilon) * pi + self.epsilon * self.pi
else:
print(
                f'Method {self.method} is not available. Acceptable methods are: \'fixed_learning\', \'variable_learning\', \'state_count_dependent_variable\' and \'heuristic\'.')
class ContinuousWetChickenHeuristic:
def __init__(self, epsilon):
self.epsilon = epsilon
def pi(self, state):
x, y = state[0], state[1]
pi = np.zeros(5)
if x > 2.5:
pi[2] = 1 # We are too close to the waterfall ==> paddle as strong as possible
elif y < 2:
pi[4] = 1 # We are not in immediate danger, but too close to the left ==> go right
elif y > 3:
pi[3] = 1 # We are not in immediate danger, but too close to the right ==> go left
elif x > 2:
            pi[2] = 1 # We are perfect now, try to keep the position by paddling as strong as possible
elif x > 1:
pi[1] = 1 # Close to perfect, just paddle a bit
else:
pi[0] = 1 # Right lane, but too high up, just drift with the river
pi = (1 - self.epsilon) * pi + self.epsilon * 1/5
return pi
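# Minimal usage sketch (not part of the original module): query the continuous
# heuristic at an assumed state (x=1.5, y=0.5); since y < 2 most of the
# probability mass goes to action 4 ("go right").
if __name__ == "__main__":
    heuristic = ContinuousWetChickenHeuristic(epsilon=0.1)
    print(heuristic.pi((1.5, 0.5)))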
|
[
"numpy.ones",
"numpy.random.choice",
"numpy.argmax",
"numpy.max",
"numpy.zeros"
] |
[((7130, 7141), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (7138, 7141), True, 'import numpy as np\n'), ((394, 436), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (401, 436), True, 'import numpy as np\n'), ((859, 902), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (867, 902), True, 'import numpy as np\n'), ((1236, 1288), 'numpy.random.choice', 'np.random.choice', (['self.pi.shape[1]'], {'p': 'self.pi[state]'}), '(self.pi.shape[1], p=self.pi[state])\n', (1252, 1288), True, 'import numpy as np\n'), ((1841, 1884), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (1849, 1884), True, 'import numpy as np\n'), ((2421, 2473), 'numpy.random.choice', 'np.random.choice', (['self.pi.shape[1]'], {'p': 'self.pi[state]'}), '(self.pi.shape[1], p=self.pi[state])\n', (2437, 2473), True, 'import numpy as np\n'), ((2984, 3027), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (2992, 3027), True, 'import numpy as np\n'), ((919, 961), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (926, 961), True, 'import numpy as np\n'), ((1579, 1621), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (1586, 1621), True, 'import numpy as np\n'), ((3564, 3616), 'numpy.random.choice', 'np.random.choice', (['self.pi.shape[1]'], {'p': 'self.pi[state]'}), '(self.pi.shape[1], p=self.pi[state])\n', (3580, 3616), True, 'import numpy as np\n'), ((4140, 4183), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (4148, 4183), True, 'import numpy as np\n'), ((4483, 4526), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (4491, 4526), True, 'import numpy as np\n'), ((1705, 1723), 'numpy.argmax', 'np.argmax', (['q[s, :]'], {}), '(q[s, :])\n', (1714, 1723), True, 'import numpy as np\n'), ((1901, 1943), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (1908, 1943), True, 'import numpy as np\n'), ((2754, 2796), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (2761, 2796), True, 'import numpy as np\n'), ((4700, 4752), 'numpy.random.choice', 'np.random.choice', (['self.pi.shape[1]'], {'p': 'self.pi[state]'}), '(self.pi.shape[1], p=self.pi[state])\n', (4716, 4752), True, 'import numpy as np\n'), ((5656, 5699), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (5664, 5699), True, 'import numpy as np\n'), ((1493, 1517), 'numpy.max', 'np.max', (['q[next_state, :]'], {}), '(q[next_state, :])\n', (1499, 1517), True, 'import numpy as np\n'), ((2880, 2898), 'numpy.argmax', 'np.argmax', (['q[s, :]'], {}), '(q[s, :])\n', (2889, 2898), True, 'import numpy as np\n'), ((3044, 3086), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (3051, 3086), True, 'import numpy as np\n'), ((3897, 3939), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (3904, 3939), True, 'import numpy as np\n'), ((2673, 2697), 'numpy.max', 'np.max', (['q[next_state, :]'], 
{}), '(q[next_state, :])\n', (2679, 2697), True, 'import numpy as np\n'), ((4023, 4041), 'numpy.argmax', 'np.argmax', (['q[s, :]'], {}), '(q[s, :])\n', (4032, 4041), True, 'import numpy as np\n'), ((4200, 4242), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (4207, 4242), True, 'import numpy as np\n'), ((5238, 5280), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (5245, 5280), True, 'import numpy as np\n'), ((3816, 3840), 'numpy.max', 'np.max', (['q[next_state, :]'], {}), '(q[next_state, :])\n', (3822, 3840), True, 'import numpy as np\n'), ((5364, 5382), 'numpy.argmax', 'np.argmax', (['q[s, :]'], {}), '(q[s, :])\n', (5373, 5382), True, 'import numpy as np\n'), ((5157, 5181), 'numpy.max', 'np.max', (['q[next_state, :]'], {}), '(q[next_state, :])\n', (5163, 5181), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import networkx as nx
import numpy
plt.ion()
# test
def plot_neural_network(mek):
G = nx.DiGraph(numpy.transpose(mek.nn.links))
mylabels = dict(zip(range(len(mek.nn.neurons)),
[to_string(i)+'\n#'
+ str(ix)+'' for (ix, i) in enumerate(mek.nn.neurons)]))
G = nx.relabel_nodes(G, mylabels)
pos = nx.layout.spring_layout(G, k=2)
epos = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] > 0.5]
eneg = [(u, v) for (u, v, d) in G.edges(data=True) if d['weight'] <= -0.5]
arrowsize = 50
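    # Strongly positive links use the Blues colormap, strongly negative links use Reds.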
colorspos = numpy.arange(len(epos))/5.0+4.0*len(epos)/5.0
colorsneg = numpy.arange(len(eneg))/5.0+4.0*len(eneg)/5.0
nx.draw_networkx_edges(G, pos, edgelist=epos, edge_color=colorspos,
width=3, arrowsize=arrowsize, alpha=1, arrowstyle='->', edge_cmap=plt.cm.Blues)
nx.draw_networkx_edges(G, pos, edgelist=eneg,
width=2, arrowsize=arrowsize, alpha=1, edge_color=colorsneg, arrowstyle='->', edge_cmap=plt.cm.Reds)
nodes = nx.draw_networkx_nodes(
G, pos, node_size=1500, node_color='gray', alpha=1)
nx.draw_networkx_labels(G, pos, font_size=10,
font_family='sans-serif', font_weight='bold')
ax = plt.gca()
ax.set_axis_off()
plt.show()
def to_string(name):
out = ""
for i in name:
out = out + str(i)
return(out)
|
[
"networkx.layout.spring_layout",
"networkx.relabel_nodes",
"matplotlib.pyplot.gca",
"networkx.draw_networkx_nodes",
"networkx.draw_networkx_labels",
"matplotlib.pyplot.ion",
"numpy.transpose",
"networkx.draw_networkx_edges",
"matplotlib.pyplot.show"
] |
[((68, 77), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (75, 77), True, 'import matplotlib.pyplot as plt\n'), ((354, 383), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['G', 'mylabels'], {}), '(G, mylabels)\n', (370, 383), True, 'import networkx as nx\n'), ((394, 425), 'networkx.layout.spring_layout', 'nx.layout.spring_layout', (['G'], {'k': '(2)'}), '(G, k=2)\n', (417, 425), True, 'import networkx as nx\n'), ((731, 882), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'edgelist': 'epos', 'edge_color': 'colorspos', 'width': '(3)', 'arrowsize': 'arrowsize', 'alpha': '(1)', 'arrowstyle': '"""->"""', 'edge_cmap': 'plt.cm.Blues'}), "(G, pos, edgelist=epos, edge_color=colorspos, width=3,\n arrowsize=arrowsize, alpha=1, arrowstyle='->', edge_cmap=plt.cm.Blues)\n", (753, 882), True, 'import networkx as nx\n'), ((910, 1060), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {'edgelist': 'eneg', 'width': '(2)', 'arrowsize': 'arrowsize', 'alpha': '(1)', 'edge_color': 'colorsneg', 'arrowstyle': '"""->"""', 'edge_cmap': 'plt.cm.Reds'}), "(G, pos, edgelist=eneg, width=2, arrowsize=arrowsize,\n alpha=1, edge_color=colorsneg, arrowstyle='->', edge_cmap=plt.cm.Reds)\n", (932, 1060), True, 'import networkx as nx\n'), ((1097, 1171), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'node_size': '(1500)', 'node_color': '"""gray"""', 'alpha': '(1)'}), "(G, pos, node_size=1500, node_color='gray', alpha=1)\n", (1119, 1171), True, 'import networkx as nx\n'), ((1185, 1280), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos'], {'font_size': '(10)', 'font_family': '"""sans-serif"""', 'font_weight': '"""bold"""'}), "(G, pos, font_size=10, font_family='sans-serif',\n font_weight='bold')\n", (1208, 1280), True, 'import networkx as nx\n'), ((1314, 1323), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1321, 1323), True, 'import matplotlib.pyplot as plt\n'), ((1350, 1360), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1358, 1360), True, 'import matplotlib.pyplot as plt\n'), ((136, 165), 'numpy.transpose', 'numpy.transpose', (['mek.nn.links'], {}), '(mek.nn.links)\n', (151, 165), False, 'import numpy\n')]
|
"""
Overview
--------
general info about this module
Classes and Inheritance Structure
----------------------------------------------
.. inheritance-diagram::
Summary
---------
.. autosummary::
list of the module you want
Module API
----------
"""
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
                      zip, round, input, int, pow, object, map)
__author__ = "<NAME>"
import ast
import decorator
from datetime import datetime, date, time
from astropy.time import Time as astropyTime
from astropy.time import TimeDelta as astropyTimeDelta
from astropy.coordinates import Angle as astropyAngle
from .catalog import BasicCatalog
import numpy as np
@decorator.decorator
def check_par_list(func,par_list,*args, **kwargs):
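    # Decorator helper: verify every entry of par_list is a Parameter before delegating to the wrapped function.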
for par in par_list:
if isinstance(par,Parameter):
pass
else:
raise RuntimeError('each parameter in the par_list has to be an instance of Parameters')
return func(par_list, *args, **kwargs)
class ParameterGroup(object):
def __init__(self,par_list,name,exclusive=True,def_selected=None,selected=None):
self.name=name
self._par_list=par_list
self._check_pars(par_list)
        self.exclusive=exclusive
self.msk = np.ones(len(par_list), dtype=np.bool)
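        # Boolean mask marking which parameters of the group are currently selected.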
if exclusive==True:
self.msk[::]=False
if def_selected is None:
                self.msk[0]=True
if def_selected is not None:
self.select(def_selected)
if selected is not None:
self.select(selected)
@property
def par_list(self):
return self._par_list
@property
def names(self):
return [p.name for p in self._par_list]
def select(self,name):
if isinstance(name,Parameter):
            name=name.value
for ID,p in enumerate(self._par_list):
if p.name==name:
self.msk[ID]=True
self._selected=self._par_list[ID].name
if self.msk.sum()>1 and self.exclusive==True:
            raise RuntimeError('only one parameter can be selected in mutually exclusive groups')
def _check_pars(self, par_list):
for p in par_list:
if isinstance(p,Parameter):
pass
elif isinstance(p,ParameterRange):
pass
else:
                raise RuntimeError('you can group Parameters or ParameterRanges, found',type(p))
def to_list(self):
_l=[]
for p in self._par_list:
if isinstance(p,Parameter):
_l.append(p)
elif isinstance(p,ParameterRange):
_l.extend(p.to_list())
return _l
def add_par(self,par):
self.par_list.append(par)
self.msk=np.append(self.msk,False)
def build_selector(self,name):
return Parameter(name, allowed_values=self.names)
class ParameterRange(object):
def __init__(self,p1,p2,name):
self._check_pars(p1,p2)
self.name=name
self.p1=p1
self.p2=p2
def _check_pars(self,p1,p2):
if type(p1)!=type(p2):
            raise RuntimeError('pars must be of the same type')
for p in (p1,p2):
try:
assert (isinstance(p,Parameter))
except:
                raise RuntimeError('both p1 and p2 must be Parameter objects, found',type(p))
def to_list(self):
return [self.p1,self.p2]
class ParameterTuple(object):
def __init__(self,p_list,name):
self._check_pars(p_list)
self.name=name
self.p_list=tuple(p_list)
def _check_pars(self,p_list):
if any( type(x)!=type(p_list[0]) for x in p_list):
            raise RuntimeError('pars must be of the same type')
for p in (p_list):
try:
assert (isinstance(p,Parameter))
except:
                raise RuntimeError('all elements must be Parameter objects, found',type(p))
def to_list(self):
return self.p_list
class Parameter(object):
def __init__(self,value=None,units=None,name=None,allowed_units=[],check_value=None,allowed_values=None,units_name=None):
self.check_value=check_value
self._allowed_units = allowed_units
self._allowed_values = allowed_values
self.name = name
self.units=units
self.value = value
self.units_name=units_name
#self._wtform_dict=wtform_dict
@property
def value(self):
return self._value
@value.setter
def value(self,v):
#print ('set',self.name,v,self._allowed_values)
if v is not None:
if self.check_value is not None:
self.check_value(v, units=self.units,name=self.name)
if self._allowed_values is not None:
if v not in self._allowed_values:
raise RuntimeError('value',v,'not allowed, allowed=',self._allowed_values)
#print('set->',self.name,v,type(v))
            if isinstance(v, str):
self._value=v.strip()
else:
self._value = v
else:
self._value=None
@property
def units(self):
return self._units
@units.setter
def units(self,units):
if self._allowed_units !=[] and self._allowed_units is not None:
self.chekc_units(units,self._allowed_units,self.name)
self._units=units
def set_from_form(self,form,verbose=False):
par_name = self.name
units_name = self.units_name
v = None
u = None
in_dictionary=False
if units_name is not None:
if units_name in form.keys():
u = form[units_name]
if par_name in form.keys():
v=form[par_name]
in_dictionary=True
if in_dictionary is True:
self.set_par(value=v,units=u)
#print('setting par:', par_name, 'to val=', self.value, 'and units', units_name, 'to', self.units )
else:
if verbose is True:
print('setting par:', par_name, 'not in dictionary')
def set_par(self,value,units=None):
if units is not None:
self.units=units
self.value=value
def get_form(self,wtform_cls,key,validators,defaults):
return wtform_cls('key', validators=validators, default=defaults)
@staticmethod
def chekc_units(units,allowed,name):
if units not in allowed:
raise RuntimeError('wrong units for par: %s'%name, ' found: ',units,' allowed:', allowed)
@staticmethod
def check_value(val,units,par_name):
pass
# def get_form_field(self,key=None,default=None,validators=None,wtform_dict=None,wtform=None):
# if key is None:
# key=self.name
#
# if wtform is None and wtform_dict is None:
#
# wtform_dict=self._wtform_dict
#
# if default is not None:
# self.check_value(default,self.units)
# else:
# default=self.value
#
#
# if wtform is not None and wtform_dict is not None:
# raise RuntimeError('either you provide wtform or wtform_dict or you pass a wtform_dict to the constructor')
#
# elif wtform_dict is not None:
# wtform=wtform_dict[self.units]
#
# else:
# raise RuntimeError('yuo must provide wtform or wtform_dict')
#
# return wtform(label=key, validators=validators, default=default)
def reprJSON(self):
return dict(name=self.name, units=self.units, value=self.value)
#class Instrument(Parameter):
# def __init__(self,T_format,name,value=None):
#wtform_dict = {'iso': SelectField}
class Name(Parameter):
def __init__(self,value=None, name_format='str', name=None):
_allowed_units = ['str']
super(Name,self).__init__(value=value,
units=name_format,
check_value=self.check_name_value,
name=name,
allowed_units=_allowed_units)
@staticmethod
def check_name_value(value, units=None, name=None):
pass
class Float(Parameter):
def __init__(self,value=None,units=None,name=None):
_allowed_units = None
#wtform_dict = {'keV': FloatField}
super(Float, self).__init__(value=value,
units=units,
check_value=self.check_float_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
self.value=value
@property
def value(self):
return self._v
@value.setter
def value(self, v):
if v is not None and v!='':
self.check_float_value(v,name=self.name)
self._v = np.float(v)
else:
self._v=None
@staticmethod
def check_float_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
if value is None or value=='':
pass
else:
try:
value=ast.literal_eval(value)
except:
pass
value=np.float(value)
if type(value) == int or type(value) == np.int:
pass
elif type(value) == float or type(value) == np.float:
pass
else:
raise RuntimeError('type of ', name, 'not valid', type(value))
class Integer(Parameter):
def __init__(self,value=None,units=None,name=None):
_allowed_units = None
#wtform_dict = {'keV': FloatField}
super(Integer, self).__init__(value=value,
units=units,
check_value=self.check_int_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
self.value=value
@property
def value(self):
return self._v
@value.setter
def value(self, v):
if v is not None and v!='':
self.check_int_value(v,name=self.name)
self._v = np.int(v)
else:
self._v=None
@staticmethod
def check_int_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
if value is None or value=='':
pass
else:
try:
value=ast.literal_eval(value)
except:
pass
value=np.int(value)
if type(value) == int or type(value) == np.int:
pass
elif type(value) == float or type(value) == np.float:
pass
else:
raise RuntimeError('type of ', name, 'not valid', type(value))
class Time(Parameter):
def __init__(self,value=None,T_format=None,name=None,Time_format_name=None):
#_allowed_units = astropyTime.FORMATS
#wtform_dict = {'iso': StringField}
#wtform_dict['mjd'] = FloatField
#wtform_dict['prod_list'] = TextAreaField
super(Time,self).__init__(value=value,
units=T_format,
units_name=Time_format_name,
name=name,
allowed_units=None)
#wtform_dict=wtform_dict)
self._set_time(value,format=T_format)
@property
def value(self):
return self._astropy_time.value
@value.setter
def value(self, v):
units=self.units
self._set_time(v, format=units)
def _set_time(self,value,format):
try:
value=ast.literal_eval(value)
except:
pass
self._astropy_time = astropyTime(value, format=format)
self._value =value
class TimeDelta(Parameter):
def __init__(self, value=None, delta_T_format='sec', name=None, delta_T_format_name=None):
# _allowed_units = astropyTime.FORMATS
# wtform_dict = {'iso': StringField}
# wtform_dict['mjd'] = FloatField
# wtform_dict['prod_list'] = TextAreaField
super(TimeDelta, self).__init__(value=value,
units=delta_T_format,
units_name=delta_T_format_name,
name=name,
allowed_units=None)
# wtform_dict=wtform_dict)
self._set_time(value, format=delta_T_format)
@property
def value(self):
return self._astropy_time_delta.value
@value.setter
def value(self, v):
units = self.units
self._set_time(v, format=units)
def _set_time(self, value, format):
try:
value = ast.literal_eval(value)
except:
pass
#print ('value',value)
self._astropy_time_delta = astropyTimeDelta(value, format=format)
self._value = value
class InputProdList(Parameter):
def __init__(self,value=None,_format='names_list',name=None):
_allowed_units = ['names_list']
if value is None:
value=[]
super(InputProdList, self).__init__(value=value,
units=_format,
check_value=self.check_list_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
self._split(value)
def _split(self,str_list):
if type(str_list)==list:
pass
        elif type(str_list) == str or type(str(str_list)) == str:
if ',' in str_list:
str_list= str_list.split(',')
else:
str_list = str_list.split(' ')
else:
raise RuntimeError('parameter format is not correct')
if str_list == ['']:
str_list = []
return str_list
@property
def value(self):
if self._value==[''] or self._value is None:
return []
else:
return self._value
@value.setter
def value(self, v):
#print('set', self.name, v, self._allowed_values)
if v is not None:
if self.check_value is not None:
self.check_value(v, units=self.units, name=self.name)
if self._allowed_values is not None:
if v not in self._allowed_values:
raise RuntimeError('value', v, 'not allowed, allowed=', self._allowed_values)
if v == [''] or v is None or str(v) == '':
self._value=['']
else:
self._value = v
else:
self._value = ['']
self._value=self._split(self._value)
#print ('set to ',self._value)
@staticmethod
def check_list_value(value,units,name='par'):
if units=='names_list':
try:
#print(type(value))
assert (type(value) == list or type(value) == str or type(str(value))== str)
except:
raise RuntimeError('par:',name,', value is not product list format : list of strings','it is',type(value),value)
else:
raise RuntimeError(name,'units not valid',units)
class Angle(Parameter):
def __init__(self,value=None, units=None,name=None):
super(Angle, self).__init__(value=value,
units=units,
name=name,
allowed_units=None)
# wtform_dict=wtform_dict)
self._set_angle(value, units=units)
@property
def value(self):
return self._astropy_angle.value
@value.setter
def value(self, v, units=None):
if units is None:
units = self.units
self._set_angle(v, units=units)
def _set_angle(self, value, units):
if value=='' or value is None:
pass
else:
self._astropy_angle = astropyAngle(value, unit=units)
self._value = self._astropy_angle.value
# class AngularDistance(Parameter):
# def __init__(self, angular_units,name, value=None):
# _allowed_units = ['deg']
# super(AngularDistance, self).__init__(value=value,
# units=angular_units,
# check_value=self.check_angle_value,
# name=name,
# allowed_units=_allowed_units)
#
#
#
# @staticmethod
# def check_angle_value(value, units=None, name=None):
# print('check type of ', name, 'value', value, 'type', type(value))
# pass
#
class SpectralBoundary(Parameter):
def __init__(self,value=None,E_units='keV',name=None):
_allowed_units = ['keV','eV','MeV','GeV','TeV','Hz','MHz','GHz']
#wtform_dict = {'keV': FloatField}
super(SpectralBoundary, self).__init__(value=value,
units=E_units,
check_value=self.check_energy_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
@staticmethod
def check_energy_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
try:
value=ast.literal_eval(value)
except:
pass
if type(value)==int or type(value)==np.int:
pass
elif type(value)==float or type(value)==np.float:
pass
else:
raise RuntimeError('type of ',name,'not valid',type(value))
class Energy(Parameter):
def __init__(self,value=None,E_units=None,name=None):
_allowed_units = ['keV','eV','MeV','GeV','TeV']
#wtform_dict = {'keV': FloatField}
super(Energy, self).__init__(value=value,
units=E_units,
check_value=self.check_energy_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
@staticmethod
def check_energy_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
try:
value=ast.literal_eval(value)
except:
pass
if type(value)==int or type(value)==np.int:
pass
elif type(value)==float or type(value)==np.float:
pass
else:
raise RuntimeError('type of ',name,'not valid',type(value))
class DetectionThreshold(Parameter):
def __init__(self,value=None,units='sigma',name=None):
_allowed_units = ['sigma']
#wtform_dict = {'keV': FloatField}
super(DetectionThreshold, self).__init__(value=value,
units=units,
check_value=self.check_value,
name=name,
allowed_units=_allowed_units)
#wtform_dict=wtform_dict)
@staticmethod
def check_value(value, units=None,name=None):
#print('check type of ',name,'value', value, 'type',type(value))
try:
value=ast.literal_eval(value)
except:
pass
if type(value)==int or type(value)==np.int:
pass
elif type(value)==float or type(value)==np.float:
pass
else:
raise RuntimeError('type of ',name,'not valid',type(value))
class UserCatalog(Parameter):
def __init__(self, value=None,name_format='str', name=None):
_allowed_units = ['str']
super(UserCatalog,self).__init__(value=value,
units=name_format,
check_value=self.check_name_value,
name=name,
allowed_units=_allowed_units)
@staticmethod
def check_name_value(value, units=None, name=None):
pass
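# A minimal, illustrative sketch of how the Parameter subclasses above can be
# used (kept in a helper that is never called, so nothing runs on import).
# The parameter names, values and the 'isot' time format are example
# assumptions, not values mandated by this module.
def _example_parameter_usage():
    e_min = Energy(value=20., E_units='keV', name='E1_keV')
    e_min.set_par(value='35', units='keV')   # string values are parsed via ast.literal_eval
    t_start = Time(value='2003-03-15T23:27:40.0', T_format='isot', name='T1')
    prod_list = InputProdList(value='spectrum,lightcurve', _format='names_list', name='product_list')
    return [p.reprJSON() for p in (e_min, t_start, prod_list)]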
|
[
"numpy.float",
"astropy.coordinates.Angle",
"astropy.time.TimeDelta",
"builtins.super",
"builtins.str",
"numpy.append",
"astropy.time.Time",
"ast.literal_eval",
"numpy.int"
] |
[((2855, 2881), 'numpy.append', 'np.append', (['self.msk', '(False)'], {}), '(self.msk, False)\n', (2864, 2881), True, 'import numpy as np\n'), ((12208, 12241), 'astropy.time.Time', 'astropyTime', (['value'], {'format': 'format'}), '(value, format=format)\n', (12219, 12241), True, 'from astropy.time import Time as astropyTime\n'), ((13351, 13389), 'astropy.time.TimeDelta', 'astropyTimeDelta', (['value'], {'format': 'format'}), '(value, format=format)\n', (13367, 13389), True, 'from astropy.time import TimeDelta as astropyTimeDelta\n'), ((9105, 9116), 'numpy.float', 'np.float', (['v'], {}), '(v)\n', (9113, 9116), True, 'import numpy as np\n'), ((9496, 9511), 'numpy.float', 'np.float', (['value'], {}), '(value)\n', (9504, 9511), True, 'import numpy as np\n'), ((10517, 10526), 'numpy.int', 'np.int', (['v'], {}), '(v)\n', (10523, 10526), True, 'import numpy as np\n'), ((10904, 10917), 'numpy.int', 'np.int', (['value'], {}), '(value)\n', (10910, 10917), True, 'import numpy as np\n'), ((12113, 12136), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (12129, 12136), False, 'import ast\n'), ((13227, 13250), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (13243, 13250), False, 'import ast\n'), ((16626, 16657), 'astropy.coordinates.Angle', 'astropyAngle', (['value'], {'unit': 'units'}), '(value, unit=units)\n', (16638, 16657), True, 'from astropy.coordinates import Angle as astropyAngle\n'), ((18093, 18116), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (18109, 18116), False, 'import ast\n'), ((19097, 19120), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (19113, 19120), False, 'import ast\n'), ((20092, 20115), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (20108, 20115), False, 'import ast\n'), ((8010, 8027), 'builtins.super', 'super', (['Name', 'self'], {}), '(Name, self)\n', (8015, 8027), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((8534, 8552), 'builtins.super', 'super', (['Float', 'self'], {}), '(Float, self)\n', (8539, 8552), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((9413, 9436), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (9429, 9436), False, 'import ast\n'), ((9948, 9968), 'builtins.super', 'super', (['Integer', 'self'], {}), '(Integer, self)\n', (9953, 9968), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((10821, 10844), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (10837, 10844), False, 'import ast\n'), ((11484, 11501), 'builtins.super', 'super', (['Time', 'self'], {}), '(Time, self)\n', (11489, 11501), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((12599, 12621), 'builtins.super', 'super', (['TimeDelta', 'self'], {}), '(TimeDelta, self)\n', (12604, 12621), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((13616, 13642), 'builtins.super', 'super', (['InputProdList', 'self'], {}), '(InputProdList, self)\n', (13621, 13642), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((15906, 15924), 'builtins.super', 'super', (['Angle', 'self'], {}), '(Angle, self)\n', (15911, 15924), False, 'from builtins import bytes, str, open, super, range, 
zip, round, input, int, pow, object, map, zip\n'), ((17562, 17591), 'builtins.super', 'super', (['SpectralBoundary', 'self'], {}), '(SpectralBoundary, self)\n', (17567, 17591), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((18576, 18595), 'builtins.super', 'super', (['Energy', 'self'], {}), '(Energy, self)\n', (18581, 18595), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((19575, 19606), 'builtins.super', 'super', (['DetectionThreshold', 'self'], {}), '(DetectionThreshold, self)\n', (19580, 19606), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((20519, 20543), 'builtins.super', 'super', (['UserCatalog', 'self'], {}), '(UserCatalog, self)\n', (20524, 20543), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((14147, 14160), 'builtins.str', 'str', (['str_list'], {}), '(str_list)\n', (14150, 14160), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((15102, 15108), 'builtins.str', 'str', (['v'], {}), '(v)\n', (15105, 15108), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((15558, 15568), 'builtins.str', 'str', (['value'], {}), '(value)\n', (15561, 15568), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
from scipy.signal import fftconvolve
def energy(traces, duration, dt=1):
"""
    Compute a mean-squared energy measurement for each point of a
seismic section.
:param traces: The data array to use for calculating MS energy.
Must be 1D or 2D numpy array.
:param duration: the time duration of the window (in seconds), or
samples if dt=1.
:param dt: the sample interval of the data (in seconds). Defaults
to 1 so duration can be in samples.
:returns: An array the same dimensions as the input array.
"""
energy_data = np.zeros(traces.shape)
signal = traces * traces
n_samples = int(duration / dt)
window = np.ones(n_samples)
if np.ndim(signal) == 1:
# Compute the sliding average using a convolution
energy_data = fftconvolve(signal, window, mode='same') \
/ n_samples
elif np.ndim(signal) == 2:
for trace in range(signal.shape[1]):
            # divide by the window length so the 2D branch, like the 1D branch,
            # returns a mean-squared (not summed) energy
            energy_data[:, trace] = fftconvolve(signal[:, trace],
                                                window,
                                                mode='same') / n_samples
else:
raise ValueError('Array must be 1D or 2D')
return energy_data
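# A small usage sketch of energy() on synthetic data, wrapped in a helper so
# nothing is executed on import; the trace values, sample interval and window
# length below are arbitrary example choices.
def _example_energy_usage():
    traces = np.random.randn(500, 12)   # 500 samples, 12 traces
    dt = 0.004                          # 4 ms sample interval
    return energy(traces, duration=0.1, dt=dt)   # 100 ms sliding window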
|
[
"numpy.zeros",
"scipy.signal.fftconvolve",
"numpy.ones",
"numpy.ndim"
] |
[((651, 673), 'numpy.zeros', 'np.zeros', (['traces.shape'], {}), '(traces.shape)\n', (659, 673), True, 'import numpy as np\n'), ((752, 770), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (759, 770), True, 'import numpy as np\n'), ((779, 794), 'numpy.ndim', 'np.ndim', (['signal'], {}), '(signal)\n', (786, 794), True, 'import numpy as np\n'), ((881, 921), 'scipy.signal.fftconvolve', 'fftconvolve', (['signal', 'window'], {'mode': '"""same"""'}), "(signal, window, mode='same')\n", (892, 921), False, 'from scipy.signal import fftconvolve\n'), ((967, 982), 'numpy.ndim', 'np.ndim', (['signal'], {}), '(signal)\n', (974, 982), True, 'import numpy as np\n'), ((1071, 1121), 'scipy.signal.fftconvolve', 'fftconvolve', (['signal[:, trace]', 'window'], {'mode': '"""same"""'}), "(signal[:, trace], window, mode='same')\n", (1082, 1121), False, 'from scipy.signal import fftconvolve\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2003-2018 European Synchrotron Radiation Facility, Grenoble,
# France
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utilities, mainly for image treatment
"""
__authors__ = ["<NAME>", "<NAME>"]
__contact__ = "<EMAIL>"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "19/02/2019"
__status__ = "production"
import logging
import numpy
import fabio
import weakref
from scipy import ndimage
from scipy.interpolate import interp1d
from scipy.optimize.optimize import fmin
from scipy.optimize.optimize import fminbound
from .third_party import six
from .utils import stringutil
from .utils import header_utils
from ._version import calc_hexversion
if ("hexversion" not in dir(fabio)) or (fabio.hexversion < calc_hexversion(0, 4, 0, "dev", 5)):
# Short cut fabio.factory do not exists on older versions
fabio.factory = fabio.fabioimage.FabioImage.factory
logger = logging.getLogger(__name__)
class ImageReductionFilter(object):
"""
    Generic filter applied to a set of images.
"""
def init(self, max_images=None):
"""
Initialize the filter before using it.
:param int max_images: Max images supported by the filter
"""
pass
def add_image(self, image):
"""
Add an image to the filter.
:param numpy.ndarray image: image to add
"""
raise NotImplementedError()
def get_parameters(self):
"""Return a dictionary containing filter parameters
:rtype: dict
"""
return {"cutoff": None, "quantiles": None}
def get_result(self):
"""
Get the result of the filter.
:return: result filter
"""
raise NotImplementedError()
class ImageAccumulatorFilter(ImageReductionFilter):
"""
    Filter applied to a set of images, reducing the data step by step
    into a single merged image.
"""
def init(self, max_images=None):
self._count = 0
self._accumulated_image = None
def add_image(self, image):
"""
Add an image to the filter.
:param numpy.ndarray image: image to add
"""
self._accumulated_image = self._accumulate(self._accumulated_image, image)
self._count += 1
def _accumulate(self, accumulated_image, added_image):
"""
Add an image to the filter.
:param numpy.ndarray accumulated_image: image use to accumulate
information
:param numpy.ndarray added_image: image to add
"""
raise NotImplementedError()
def get_result(self):
"""
Get the result of the filter.
:return: result filter
:rtype: numpy.ndarray
"""
result = self._accumulated_image
# release the allocated memory
self._accumulated_image = None
return result
class MaxAveraging(ImageAccumulatorFilter):
name = "max"
def _accumulate(self, accumulated_image, added_image):
if accumulated_image is None:
return added_image
return numpy.maximum(accumulated_image, added_image)
class MinAveraging(ImageAccumulatorFilter):
name = "min"
def _accumulate(self, accumulated_image, added_image):
if accumulated_image is None:
return added_image
return numpy.minimum(accumulated_image, added_image)
class SumAveraging(ImageAccumulatorFilter):
name = "sum"
def _accumulate(self, accumulated_image, added_image):
if accumulated_image is None:
return added_image
return accumulated_image + added_image
class MeanAveraging(SumAveraging):
name = "mean"
def get_result(self):
result = super(MeanAveraging, self).get_result()
return result / numpy.float32(self._count)
class ImageStackFilter(ImageReductionFilter):
"""
Filter creating a stack from all images and computing everything at the
end.
"""
def init(self, max_images=None):
self._stack = None
self._max_stack_size = max_images
self._count = 0
def add_image(self, image):
"""
Add an image to the filter.
:param numpy.ndarray image: image to add
"""
if self._stack is None:
shape = self._max_stack_size, image.shape[0], image.shape[1]
self._stack = numpy.zeros(shape, dtype=numpy.float32)
self._stack[self._count] = image
self._count += 1
def _compute_stack_reduction(self, stack):
"""Called after initialization of the stack and return the reduction
result."""
raise NotImplementedError()
def get_result(self):
if self._stack is None:
raise Exception("No data to reduce")
shape = self._count, self._stack.shape[1], self._stack.shape[2]
self._stack.resize(shape)
result = self._compute_stack_reduction(self._stack)
# release the allocated memory
self._stack = None
return result
class AverageDarkFilter(ImageStackFilter):
"""
Filter based on the algorithm of average_dark
TODO: Must be split according to each filter_name, and removed
"""
def __init__(self, filter_name, cut_off, quantiles):
super(AverageDarkFilter, self).__init__()
self._filter_name = filter_name
self._cut_off = cut_off
self._quantiles = quantiles
@property
def name(self):
return self._filter_name
def get_parameters(self):
"""Return a dictionary containing filter parameters"""
return {"cutoff": self._cut_off, "quantiles": self._quantiles}
def _compute_stack_reduction(self, stack):
"""
Compute the stack reduction.
:param numpy.ndarray stack: stack to reduce
:return: result filter
:rtype: numpy.ndarray
"""
return average_dark(stack,
self._filter_name,
self._cut_off,
self._quantiles)
_FILTERS = [
MaxAveraging,
MinAveraging,
MeanAveraging,
SumAveraging,
]
_FILTER_NAME_MAPPING = {}
for _f in _FILTERS:
_FILTER_NAME_MAPPING[_f.name] = _f
_AVERAGE_DARK_FILTERS = set(["min", "max", "sum", "mean", "std", "quantiles", "median"])
def is_algorithm_name_exists(filter_name):
"""Return true if the name is a name of a filter algorithm"""
if filter_name in _FILTER_NAME_MAPPING:
return True
elif filter_name in _AVERAGE_DARK_FILTERS:
return True
return False
class AlgorithmCreationError(RuntimeError):
"""Exception returned if creation of an ImageReductionFilter is not
possible"""
pass
def create_algorithm(filter_name, cut_off=None, quantiles=None):
"""Factory to create algorithm according to parameters
:param cutoff: keep all data where (I-center)/std < cutoff
:type cutoff: float or None
:param quantiles: 2-tuple of floats average out data between the two
quantiles
:type quantiles: tuple(float, float) or None
:return: An algorithm
:rtype: ImageReductionFilter
:raise AlgorithmCreationError: If it is not possible to create the
algorithm
"""
if filter_name in _FILTER_NAME_MAPPING and cut_off is None:
# use less memory
filter_class = _FILTER_NAME_MAPPING[filter_name]
algorithm = filter_class()
elif filter_name in _AVERAGE_DARK_FILTERS:
# must create a big array with all the data
if filter_name == "quantiles" and quantiles is None:
raise AlgorithmCreationError("Quantiles algorithm expect quantiles parameters")
algorithm = AverageDarkFilter(filter_name, cut_off, quantiles)
else:
raise AlgorithmCreationError("No algorithm available for the expected parameters")
return algorithm
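# A brief sketch of the factory above (never called, so nothing runs on
# import); the quantile bounds are arbitrary example values.
def _example_create_algorithm():
    max_filter = create_algorithm("max")   # accumulator filter, low memory use
    quantile_filter = create_algorithm("quantiles", quantiles=(0.2, 0.8))   # needs the full stack
    return max_filter, quantile_filter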
def bounding_box(img):
"""
Tries to guess the bounding box around a valid massif
:param img: 2D array like
    :return: 4-tuple (d0_min, d1_min, d0_max, d1_max)
"""
img = img.astype(numpy.int)
img0 = (img.sum(axis=1) > 0).astype(numpy.int)
img1 = (img.sum(axis=0) > 0).astype(numpy.int)
dimg0 = img0[1:] - img0[:-1]
min0 = dimg0.argmax()
max0 = dimg0.argmin() + 1
dimg1 = img1[1:] - img1[:-1]
min1 = dimg1.argmax()
max1 = dimg1.argmin() + 1
if max0 == 1:
max0 = img0.size
if max1 == 1:
max1 = img1.size
return (min0, min1, max0, max1)
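# A small illustration of bounding_box() on a synthetic mask (kept in a helper
# so it is not executed on import); the mask content is arbitrary.
def _example_bounding_box():
    mask = numpy.zeros((10, 10), dtype=numpy.int8)
    mask[3:6, 2:5] = 1   # a rectangular blob of non-zero pixels
    return bounding_box(mask)   # 4-tuple enclosing the blob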
def remove_saturated_pixel(ds, threshold=0.1, minimum=None, maximum=None):
"""
    Remove saturated pixels from an array in place.
:param ds: a dataset as ndarray
:param float threshold: what is the upper limit?
        all pixels > max*(1-threshold) are discarded.
    :param float minimum: minimum valid value (or True for auto-guess)
:param float maximum: maximum valid value
:return: the input dataset
"""
shape = ds.shape
if ds.dtype == numpy.uint16:
maxt = (1.0 - threshold) * 65535.0
elif ds.dtype == numpy.int16:
maxt = (1.0 - threshold) * 32767.0
elif ds.dtype == numpy.uint8:
maxt = (1.0 - threshold) * 255.0
elif ds.dtype == numpy.int8:
maxt = (1.0 - threshold) * 127.0
else:
if maximum is None:
maxt = (1.0 - threshold) * ds.max()
else:
maxt = maximum
if maximum is not None:
maxt = min(maxt, maximum)
invalid = (ds > maxt)
if minimum:
if minimum is True:
            # automatic guess of the best minimum TODO: use the HWHM to guess the minimum...
data_min = ds.min()
x, y = numpy.histogram(numpy.log(ds - data_min + 1.0), bins=100)
f = interp1d((y[1:] + y[:-1]) / 2.0, -x, bounds_error=False, fill_value=-x.min())
max_low = fmin(f, y[1], disp=0)
max_hi = fmin(f, y[-1], disp=0)
if max_hi > max_low:
f = interp1d((y[1:] + y[:-1]) / 2.0, x, bounds_error=False)
min_center = fminbound(f, max_low, max_hi)
else:
min_center = max_hi
minimum = float(numpy.exp(y[((min_center / y) > 1).sum() - 1])) - 1.0 + data_min
logger.debug("removeSaturatedPixel: best minimum guessed is %s", minimum)
ds[ds < minimum] = minimum
ds -= minimum # - 1.0
if invalid.sum(dtype=int) == 0:
logger.debug("No saturated area where found")
return ds
gi = ndimage.morphology.binary_dilation(invalid)
lgi, nc = ndimage.label(gi)
if nc > 100:
logger.warning("More than 100 saturated zones were found on this image !!!!")
for zone in range(nc + 1):
dzone = (lgi == zone)
if dzone.sum(dtype=int) > ds.size // 2:
continue
min0, min1, max0, max1 = bounding_box(dzone)
ksize = min(max0 - min0, max1 - min1)
subset = ds[max(0, min0 - 4 * ksize):min(shape[0], max0 + 4 * ksize), max(0, min1 - 4 * ksize):min(shape[1], max1 + 4 * ksize)]
while subset.max() > maxt:
subset = ndimage.median_filter(subset, ksize)
ds[max(0, min0 - 4 * ksize):min(shape[0], max0 + 4 * ksize), max(0, min1 - 4 * ksize):min(shape[1], max1 + 4 * ksize)] = subset
return ds
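# A hedged usage sketch of remove_saturated_pixel() (never called, so nothing
# runs on import); the frame content and threshold are illustrative only.
def _example_remove_saturated_pixel():
    frame = (numpy.ones((64, 64)) * 1000.0).astype(numpy.uint16)
    frame[10:12, 10:12] = 65000   # a small saturated patch
    return remove_saturated_pixel(frame, threshold=0.1)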
def average_dark(lstimg, center_method="mean", cutoff=None, quantiles=(0.5, 0.5)):
"""
    Averages a series of dark (or flat) images.
Centers the result on the mean or the median ...
but averages all frames within cutoff*std
:param lstimg: list of 2D images or a 3D stack
    :param str center_method: method used to compute the center: "mean",
        "median", "quantiles" or "std"
:param cutoff: keep all data where (I-center)/std < cutoff
:type cutoff: float or None
:param quantiles: 2-tuple of floats average out data between the two
quantiles
:type quantiles: tuple(float, float) or None
:return: 2D image averaged
"""
if "ndim" in dir(lstimg) and lstimg.ndim == 3:
stack = lstimg.astype(numpy.float32)
shape = stack.shape[1:]
length = stack.shape[0]
else:
shape = lstimg[0].shape
length = len(lstimg)
if length == 1:
return lstimg[0].astype(numpy.float32)
stack = numpy.zeros((length, shape[0], shape[1]), dtype=numpy.float32)
for i, img in enumerate(lstimg):
stack[i] = img
if center_method in dir(stack):
center = stack.__getattribute__(center_method)(axis=0)
elif center_method == "median":
logger.info("Filtering data (median)")
center = numpy.median(stack, axis=0)
elif center_method.startswith("quantil"):
logger.info("Filtering data (quantiles: %s)", quantiles)
sorted_ = numpy.sort(stack, axis=0)
lower = max(0, int(numpy.floor(min(quantiles) * length)))
upper = min(length, int(numpy.ceil(max(quantiles) * length)))
if (upper == lower):
if upper < length:
upper += 1
elif lower > 0:
lower -= 1
else:
logger.warning("Empty selection for quantil %s, would keep points from %s to %s", quantiles, lower, upper)
center = sorted_[lower:upper].mean(axis=0)
else:
raise RuntimeError("Cannot understand method: %s in average_dark" % center_method)
if cutoff is None or cutoff <= 0:
output = center
else:
std = stack.std(axis=0)
strides = 0, std.strides[0], std.strides[1]
std.shape = 1, shape[0], shape[1]
std.strides = strides
center.shape = 1, shape[0], shape[1]
center.strides = strides
mask = ((abs(stack - center) / std) > cutoff)
stack[numpy.where(mask)] = 0.0
summed = stack.sum(axis=0)
output = summed / numpy.float32(numpy.maximum(1, (length - mask.sum(axis=0))))
return output
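# A minimal sketch of average_dark() on a synthetic 3D stack (wrapped in a
# helper so it is not executed on import); the noise model and cutoff are
# arbitrary example choices.
def _example_average_dark():
    stack = numpy.random.normal(100.0, 5.0, size=(5, 32, 32)).astype(numpy.float32)
    return average_dark(stack, center_method="mean", cutoff=4)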
def _normalize_image_stack(image_stack):
"""
Convert input data to a list of 2D numpy arrays or a stack
of numpy array (3D array).
:param image_stack: slice of images
:type image_stack: list or numpy.ndarray
:return: A stack of image (list of 2D array or a single 3D array)
:rtype: list or numpy.ndarray
"""
if image_stack is None:
return None
if isinstance(image_stack, numpy.ndarray) and image_stack.ndim == 3:
# numpy image stack (single 3D image)
return image_stack
if isinstance(image_stack, list):
# list of numpy images (multi 2D images)
result = []
for image in image_stack:
if isinstance(image, six.string_types):
data = fabio.open(image).data
elif isinstance(image, numpy.ndarray) and image.ndim == 2:
data = image
else:
raise Exception("Unsupported image type '%s' in image_stack" % type(image))
result.append(data)
return result
raise Exception("Unsupported type '%s' for image_stack" % type(image_stack))
class AverageWriter():
"""Interface for using writer in `Average` process."""
def write_header(self, merged_files, nb_frames, monitor_name):
"""Write the header of the average
:param list merged_files: List of files used to generate this output
:param int nb_frames: Number of frames used
:param str monitor_name: Name of the monitor used. Can be None.
"""
raise NotImplementedError()
def write_reduction(self, algorithm, data):
"""Write one reduction
:param ImageReductionFilter algorithm: Algorithm used
:param object data: Data of this reduction
"""
raise NotImplementedError()
def close(self):
"""Close the writer. Must not be used anymore."""
raise NotImplementedError()
class MultiFilesAverageWriter(AverageWriter):
"""Write reductions into multi files. File headers are duplicated."""
def __init__(self, file_name_pattern, file_format, dry_run=False):
"""
:param str file_name_pattern: File name pattern for the output files.
If it contains "{method_name}", it is updated for each
reduction writing with the name of the reduction.
:param str file_format: File format used. It is the default
extension file.
:param bool dry_run: If dry_run, the file is created on memory but not
saved on the file system at the end
"""
self._file_name_pattern = file_name_pattern
self._global_header = {}
self._fabio_images = weakref.WeakKeyDictionary()
self._dry_run = dry_run
# in case "edf.gz"
if "." in file_format:
file_format = file_format.split(".")[0]
self._fabio_class = fabio.factory(file_format + "image")
def write_header(self, merged_files, nb_frames, monitor_name):
self._global_header["nfiles"] = len(merged_files)
self._global_header["nframes"] = nb_frames
if monitor_name is not None:
self._global_header["monitor_name"] = monitor_name
pattern = "merged_file_%%0%ii" % len(str(len(merged_files)))
for i, f in enumerate(merged_files):
name = pattern % i
self._global_header[name] = f.filename
def _get_file_name(self, reduction_name):
keys = {"method_name": reduction_name}
return stringutil.safe_format(self._file_name_pattern, keys)
def write_reduction(self, algorithm, data):
file_name = self._get_file_name(algorithm.name)
# overwrite the method
header = fabio.fabioimage.OrderedDict()
header["method"] = algorithm.name
for name, value in self._global_header.items():
header[name] = str(value)
filter_parameters = algorithm.get_parameters()
for name, value in filter_parameters.items():
header[name] = str(value)
image = self._fabio_class.__class__(data=data, header=header)
if not self._dry_run:
image.write(file_name)
logger.info("Wrote %s", file_name)
self._fabio_images[algorithm] = image
def get_fabio_image(self, algorithm):
"""Get the constructed fabio image
:rtype: fabio.fabioimage.FabioImage
"""
return self._fabio_images[algorithm]
def close(self):
"""Close the writer. Must not be used anymore."""
self._header = None
def common_prefix(string_list):
"""Return the common prefix of a list of strings
TODO: move it into utils package
:param list(str) string_list: List of strings
:rtype: str
"""
prefix = ""
    for ch in zip(*string_list):
c = ch[0]
good = True
for i in ch:
if i != c:
good = False
break
if good:
prefix += c
else:
break
return prefix
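# A tiny illustration of common_prefix() (kept in a helper so nothing runs on
# import); the file names are made up.
def _example_common_prefix():
    return common_prefix(["dark_0001.edf", "dark_0002.edf", "dark_0010.edf"])   # -> "dark_00"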
class AverageObserver(object):
def image_loaded(self, fabio_image, image_index, images_count):
"""Called when an input image is loaded"""
pass
def process_started(self):
"""Called when the full processing is started"""
pass
def algorithm_started(self, algorithm):
"""Called when an algorithm is started"""
pass
def frame_processed(self, algorithm, frame_index, frames_count):
"""Called after providing a frame to an algorithm"""
pass
def result_processing(self, algorithm):
"""Called before the result of an algorithm is computed"""
pass
def algorithm_finished(self, algorithm):
"""Called when an algorithm is finished"""
pass
def process_finished(self):
"""Called when the full process is finished"""
pass
class Average(object):
"""Process images to generate an average using different algorithms."""
def __init__(self):
"""Constructor"""
self._dark = None
self._raw_flat = None
self._flat = None
self._monitor_key = None
self._threshold = None
self._minimum = None
self._maximum = None
self._fabio_images = []
self._writer = None
self._algorithms = []
self._nb_frames = 0
self._correct_flat_from_dark = False
self._results = weakref.WeakKeyDictionary()
self._observer = None
def set_observer(self, observer):
"""Set an observer to the average process.
:param AverageObserver observer: An observer
"""
self._observer = observer
def set_dark(self, dark_list):
"""Defines images used as dark.
:param list dark_list: List of dark used
"""
if dark_list is None:
self._dark = None
return
darks = _normalize_image_stack(dark_list)
self._dark = average_dark(darks, center_method="mean", cutoff=4)
def set_flat(self, flat_list):
"""Defines images used as flat.
:param list flat_list: List of dark used
"""
if flat_list is None:
self._raw_flat = None
return
flats = _normalize_image_stack(flat_list)
self._raw_flat = average_dark(flats, center_method="mean", cutoff=4)
def set_correct_flat_from_dark(self, correct_flat_from_dark):
"""Defines if the dark must be applied on the flat.
:param bool correct_flat_from_dark: If true, the dark is applied.
"""
self._correct_flat_from_dark = correct_flat_from_dark
def get_counter_frames(self):
"""Returns the number of frames used for the process.
:rtype: int
"""
return self._nb_frames
def get_fabio_images(self):
"""Returns source images as fabio images.
:rtype: list(fabio.fabioimage.FabioImage)"""
return self._fabio_images
def set_images(self, image_list):
"""Defines the set set of source images to used to process an average.
:param list image_list: List of filename, numpy arrays, fabio images
used as source for the computation.
"""
self._fabio_images = []
self._nb_frames = 0
if len(image_list) > 100:
            # if too many files are opened, it may crash. The hard limit is 1024
copy_data = True
else:
copy_data = False
for image_index, image in enumerate(image_list):
if isinstance(image, six.string_types):
logger.info("Reading %s", image)
fabio_image = fabio.open(image)
if copy_data and fabio_image.nframes == 1:
# copy the data so that we can close the file right now.
fimg = fabio_image.convert(fabio_image.__class__)
fimg.filename = image
fabio_image.close()
fabio_image = fimg
elif isinstance(image, fabio.fabioimage.fabioimage):
fabio_image = image
else:
if fabio.hexversion < 262148:
logger.error("Old version of fabio detected, upgrade to 0.4 or newer")
# Assume this is a numpy array like
if not isinstance(image, numpy.ndarray):
raise RuntimeError("Not good type for input, got %s, expected numpy array" % type(image))
fabio_image = fabio.numpyimage.NumpyImage(data=image)
if self._observer:
self._observer.image_loaded(fabio_image, image_index, len(image_list))
self._fabio_images.append(fabio_image)
self._nb_frames += fabio_image.nframes
def set_monitor_name(self, monitor_name):
"""Defines the monitor name used to correct images before processing
the average. This monitor must be part of the file header, else the
image is skipped.
:param str monitor_name: Name of the monitor available on the header
file
"""
self._monitor_key = monitor_name
def set_pixel_filter(self, threshold, minimum, maximum):
"""Defines the filter applied on each pixels of the images before
processing the average.
:param threshold: what is the upper limit?
            all pixels > max*(1-threshold) are discarded.
:param minimum: minimum valid value or True
:param maximum: maximum valid value
"""
self._threshold = threshold
self._minimum = minimum
self._maximum = maximum
def set_writer(self, writer):
"""Defines the object write which will be used to store the result.
:param AverageWriter writer: The writer to use."""
self._writer = writer
def add_algorithm(self, algorithm):
"""Defines another algorithm which will be computed on the source.
:param ImageReductionFilter algorithm: An averaging algorithm.
"""
self._algorithms.append(algorithm)
def _get_corrected_image(self, fabio_image, image):
"""Returns an image corrected by pixel filter, saturation, flat, dark,
and monitor correction. The internal computation is done in float
64bits. The result is provided as float 32 bits.
:param fabio.fabioimage.FabioImage fabio_image: Object containing the
header of the data to process
:param numpy.ndarray image: Data to process
:rtype: numpy.ndarray
"""
corrected_image = numpy.ascontiguousarray(image, numpy.float64)
if self._threshold or self._minimum or self._maximum:
corrected_image = remove_saturated_pixel(corrected_image, self._threshold, self._minimum, self._maximum)
if self._dark is not None:
corrected_image -= self._dark
if self._flat is not None:
corrected_image /= self._flat
if self._monitor_key is not None:
try:
monitor = header_utils.get_monitor_value(fabio_image, self._monitor_key)
corrected_image /= monitor
except header_utils.MonitorNotFound as e:
logger.warning("Monitor not found in filename '%s', data skipped. Cause: %s", fabio_image.filename, str(e))
return None
return numpy.ascontiguousarray(corrected_image, numpy.float32)
def _get_image_reduction(self, algorithm):
"""Returns the result of an averaging algorithm using all over
parameters defined in this object.
:param ImageReductionFilter algorithm: Averaging algorithm
:rtype: numpy.ndarray
"""
algorithm.init(max_images=self._nb_frames)
frame_index = 0
for fabio_image in self._fabio_images:
for frame in range(fabio_image.nframes):
if fabio_image.nframes == 1:
data = fabio_image.data
else:
data = fabio_image.getframe(frame).data
logger.debug("Intensity range for %s#%i is %s --> %s", fabio_image.filename, frame, data.min(), data.max())
corrected_image = self._get_corrected_image(fabio_image, data)
if corrected_image is not None:
algorithm.add_image(corrected_image)
if self._observer:
self._observer.frame_processed(algorithm, frame_index, self._nb_frames)
frame_index += 1
if self._observer:
self._observer.result_processing(algorithm)
return algorithm.get_result()
def _update_flat(self):
"""
Update the flat according to the last process parameters
:rtype: numpy.ndarray
"""
if self._raw_flat is not None:
flat = numpy.array(self._raw_flat)
if self._correct_flat_from_dark:
if self._dark is not None:
flat -= self._dark
else:
logger.debug("No dark. Flat correction using dark skipped")
flat[numpy.where(flat <= 0)] = 1.0
else:
flat = None
self._flat = flat
def process(self):
"""Process source images to all defined averaging algorithms defined
using defined parameters. To access to the results you have to define
a writer (`AverageWriter`). To follow the process forward you have to
define an observer (`AverageObserver`).
"""
self._update_flat()
writer = self._writer
if self._observer:
self._observer.process_started()
if writer is not None:
writer.write_header(self._fabio_images, self._nb_frames, self._monitor_key)
for algorithm in self._algorithms:
if self._observer:
self._observer.algorithm_started(algorithm)
image_reduction = self._get_image_reduction(algorithm)
logger.debug("Intensity range in merged dataset : %s --> %s", image_reduction.min(), image_reduction.max())
if writer is not None:
writer.write_reduction(algorithm, image_reduction)
self._results[algorithm] = image_reduction
if self._observer:
self._observer.algorithm_finished(algorithm)
if self._observer:
self._observer.process_finished()
if writer is not None:
writer.close()
def get_image_reduction(self, algorithm):
"""Returns the result of an algorithm. The `process` must be already
done.
:param ImageReductionFilter algorithm: An averaging algorithm
:rtype: numpy.ndarray
"""
return self._results[algorithm]
def average_images(listImages, output=None, threshold=0.1, minimum=None,
maximum=None, darks=None, flats=None, filter_="mean",
correct_flat_from_dark=False, cutoff=None, quantiles=None,
fformat="edf", monitor_key=None):
"""
    Takes a list of filenames and creates an average frame discarding all
saturated pixels.
:param listImages: list of string representing the filenames
:param output: name of the optional output file
    :param threshold: what is the upper limit? all pixels > max*(1-threshold)
        are discarded.
:param minimum: minimum valid value or True
:param maximum: maximum valid value
:param darks: list of dark current images for subtraction
:param flats: list of flat field images for division
:param filter_: can be "min", "max", "median", "mean", "sum", "quantiles"
(default='mean')
:param correct_flat_from_dark: shall the flat be re-corrected ?
:param cutoff: keep all data where (I-center)/std < cutoff
:param quantiles: 2-tuple containing the lower and upper quantile (0<q<1)
to average out.
:param fformat: file format of the output image, default: edf
    :param str monitor_key: Key containing the monitor. Can be None.
:return: filename with the data or the data ndarray in case format=None
"""
# input sanitization
if not is_algorithm_name_exists(filter_):
logger.warning("Filter %s not understood. switch to mean filter", filter_)
filter_ = "mean"
if quantiles is not None and filter_ != "quantiles":
logger.warning("Set method to quantiles as quantiles parameters is defined.")
filter_ = "quantiles"
average = Average()
average.set_images(listImages)
average.set_dark(darks)
average.set_flat(flats)
average.set_correct_flat_from_dark(correct_flat_from_dark)
average.set_monitor_name(monitor_key)
average.set_pixel_filter(threshold, minimum, maximum)
algorithm = create_algorithm(filter_, cutoff, quantiles)
average.add_algorithm(algorithm)
# define writer
if fformat is not None:
if fformat.startswith("."):
fformat = fformat.lstrip(".")
if output is None:
prefix = common_prefix([i.filename for i in average.get_fabio_images()])
output = "filt%02i-%s.%s" % (average.get_counter_frames(), prefix, fformat)
output = "{method_name}" + output
if output is not None:
writer = MultiFilesAverageWriter(output, fformat)
average.set_writer(writer)
else:
writer = None
average.process()
if writer is not None:
fabio_image = writer.get_fabio_image(algorithm)
return fabio_image.filename
else:
return average.get_image_reduction(algorithm)
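# A hedged end-to-end sketch of the convenience function above (never called,
# so nothing runs on import); the file names are placeholders, not real data.
def _example_average_images():
    return average_images(["frame_0001.edf", "frame_0002.edf", "frame_0003.edf"],
                          filter_="median", fformat="edf",
                          darks=None, flats=None)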
|
[
"logging.getLogger",
"scipy.optimize.optimize.fmin",
"numpy.log",
"numpy.ascontiguousarray",
"scipy.interpolate.interp1d",
"numpy.array",
"scipy.optimize.optimize.fminbound",
"fabio.open",
"fabio.factory",
"numpy.where",
"numpy.sort",
"scipy.ndimage.label",
"numpy.maximum",
"weakref.WeakKeyDictionary",
"fabio.fabioimage.OrderedDict",
"scipy.ndimage.morphology.binary_dilation",
"fabio.numpyimage.NumpyImage",
"numpy.median",
"numpy.minimum",
"numpy.zeros",
"scipy.ndimage.median_filter",
"numpy.float32"
] |
[((2111, 2138), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2128, 2138), False, 'import logging\n'), ((11662, 11705), 'scipy.ndimage.morphology.binary_dilation', 'ndimage.morphology.binary_dilation', (['invalid'], {}), '(invalid)\n', (11696, 11705), False, 'from scipy import ndimage\n'), ((11720, 11737), 'scipy.ndimage.label', 'ndimage.label', (['gi'], {}), '(gi)\n', (11733, 11737), False, 'from scipy import ndimage\n'), ((4292, 4337), 'numpy.maximum', 'numpy.maximum', (['accumulated_image', 'added_image'], {}), '(accumulated_image, added_image)\n', (4305, 4337), False, 'import numpy\n'), ((4545, 4590), 'numpy.minimum', 'numpy.minimum', (['accumulated_image', 'added_image'], {}), '(accumulated_image, added_image)\n', (4558, 4590), False, 'import numpy\n'), ((13443, 13505), 'numpy.zeros', 'numpy.zeros', (['(length, shape[0], shape[1])'], {'dtype': 'numpy.float32'}), '((length, shape[0], shape[1]), dtype=numpy.float32)\n', (13454, 13505), False, 'import numpy\n'), ((17755, 17782), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (17780, 17782), False, 'import weakref\n'), ((17955, 17991), 'fabio.factory', 'fabio.factory', (["(file_format + 'image')"], {}), "(file_format + 'image')\n", (17968, 17991), False, 'import fabio\n'), ((18782, 18812), 'fabio.fabioimage.OrderedDict', 'fabio.fabioimage.OrderedDict', ([], {}), '()\n', (18810, 18812), False, 'import fabio\n'), ((21491, 21518), 'weakref.WeakKeyDictionary', 'weakref.WeakKeyDictionary', ([], {}), '()\n', (21516, 21518), False, 'import weakref\n'), ((26651, 26696), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['image', 'numpy.float64'], {}), '(image, numpy.float64)\n', (26674, 26696), False, 'import numpy\n'), ((27442, 27497), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['corrected_image', 'numpy.float32'], {}), '(corrected_image, numpy.float32)\n', (27465, 27497), False, 'import numpy\n'), ((4993, 5019), 'numpy.float32', 'numpy.float32', (['self._count'], {}), '(self._count)\n', (5006, 5019), False, 'import numpy\n'), ((5573, 5612), 'numpy.zeros', 'numpy.zeros', (['shape'], {'dtype': 'numpy.float32'}), '(shape, dtype=numpy.float32)\n', (5584, 5612), False, 'import numpy\n'), ((11011, 11032), 'scipy.optimize.optimize.fmin', 'fmin', (['f', 'y[1]'], {'disp': '(0)'}), '(f, y[1], disp=0)\n', (11015, 11032), False, 'from scipy.optimize.optimize import fmin\n'), ((11054, 11076), 'scipy.optimize.optimize.fmin', 'fmin', (['f', 'y[-1]'], {'disp': '(0)'}), '(f, y[-1], disp=0)\n', (11058, 11076), False, 'from scipy.optimize.optimize import fmin\n'), ((12262, 12298), 'scipy.ndimage.median_filter', 'ndimage.median_filter', (['subset', 'ksize'], {}), '(subset, ksize)\n', (12283, 12298), False, 'from scipy import ndimage\n'), ((13773, 13800), 'numpy.median', 'numpy.median', (['stack'], {'axis': '(0)'}), '(stack, axis=0)\n', (13785, 13800), False, 'import numpy\n'), ((14901, 14918), 'numpy.where', 'numpy.where', (['mask'], {}), '(mask)\n', (14912, 14918), False, 'import numpy\n'), ((28913, 28940), 'numpy.array', 'numpy.array', (['self._raw_flat'], {}), '(self._raw_flat)\n', (28924, 28940), False, 'import numpy\n'), ((10853, 10883), 'numpy.log', 'numpy.log', (['(ds - data_min + 1.0)'], {}), '(ds - data_min + 1.0)\n', (10862, 10883), False, 'import numpy\n'), ((11130, 11185), 'scipy.interpolate.interp1d', 'interp1d', (['((y[1:] + y[:-1]) / 2.0)', 'x'], {'bounds_error': '(False)'}), '((y[1:] + y[:-1]) / 2.0, x, bounds_error=False)\n', (11138, 11185), False, 'from 
scipy.interpolate import interp1d\n'), ((11215, 11244), 'scipy.optimize.optimize.fminbound', 'fminbound', (['f', 'max_low', 'max_hi'], {}), '(f, max_low, max_hi)\n', (11224, 11244), False, 'from scipy.optimize.optimize import fminbound\n'), ((13930, 13955), 'numpy.sort', 'numpy.sort', (['stack'], {'axis': '(0)'}), '(stack, axis=0)\n', (13940, 13955), False, 'import numpy\n'), ((23726, 23743), 'fabio.open', 'fabio.open', (['image'], {}), '(image)\n', (23736, 23743), False, 'import fabio\n'), ((29187, 29209), 'numpy.where', 'numpy.where', (['(flat <= 0)'], {}), '(flat <= 0)\n', (29198, 29209), False, 'import numpy\n'), ((15821, 15838), 'fabio.open', 'fabio.open', (['image'], {}), '(image)\n', (15831, 15838), False, 'import fabio\n'), ((24577, 24616), 'fabio.numpyimage.NumpyImage', 'fabio.numpyimage.NumpyImage', ([], {'data': 'image'}), '(data=image)\n', (24604, 24616), False, 'import fabio\n')]
|
import numpy as np
from scipy.misc import toimage
from scipy.ndimage.filters import gaussian_filter
from os import mkdir
from os.path import dirname, join
from time import time
from keras.models import Model
from keras.layers import Dense
from keras import backend as K
from keras.applications.vgg16 import VGG16
# define output path and make folder
output_path = join(dirname(__file__), 'output')
try:
mkdir(output_path)
except FileExistsError:
# folder exists, which is what we wanted
pass
# set channel dimension based on image data format from Keras backend
if K.image_data_format() == 'channels_last':
ch_dim = 3
else:
ch_dim = 1
# for VGG16 specific testing
is_VGG16 = True
VGG16_MEAN_VALUES = np.array([103.939, 116.779, 123.68])
# set learning rate
learning_rate = 2500.0
# how many times we update image
no_of_iterations = 500
# specify L2-decay
# used to prevent a small number of extreme pixel values from dominating the output image
l2_decay = 0.0001
# specify frequency of blurring and standard deviation for kernel for Gaussian blur
# used to penalize high frequency information in the output image
blur_interval = 4
# standard deviation values between 0.0 and 0.3 work poorly, according to Yosinski
blur_std = 1.0
# specify value percentile limit
# used to induce sparsity by setting pixels with small absolute value to zero
value_percentile = 0
# specify norm percentile limit
# used to induce sparsity by setting pixels with small norm to zero
norm_percentile = 0
# specify contribution percentile limit
# used to induce sparsity by setting pixels with small contribution to zero
contribution_percentile = 0
# specify absolute contribution percentile limit
# used to induce sparsity by setting pixels with small absolute contribution to zero
abs_contribution_percentile = 0
# choose whether to include regularization
regularize = True
# utility function used to convert an array into a savable image array
def deprocess(vis_array):
# remove batch dimension, and alter color dimension accordingly
img_array = vis_array[0]
if K.image_data_format() == 'channels_first':
# alter dimensions from (color, height, width) to (height, width, color)
img_array = img_array.transpose((1, 2, 0))
if is_VGG16:
# add mean values
img_array += VGG16_MEAN_VALUES.reshape((1, 1, 3))
# change back to RGB
img_array = img_array[:, :, ::-1]
# clip in [0, 255], and convert to uint8
img_array = np.clip(img_array, 0, 255).astype('uint8')
return img_array
# creates a model to generate gradients from
def create_model():
base_model = VGG16(include_top=True, weights='imagenet')
# save weights from last layer (softmax)
softmax_weights = base_model.layers[-1].get_weights()
# create new last layer for model with linear activation and connect to same layer as old layer
out = Dense(1000, activation='linear', weights=softmax_weights)(base_model.layers[-1].input)
return Model(base_model.input, out)
# saves the visualization and a text file describing its creation environment
def save_visualization(img, layer_no, unit_index, loss_value):
# create appropriate name to identify image
if regularize:
img_name = 'regularized'
else:
img_name = 'vanilla'
img_name += '_{}_{}_{}'.format(layer_no, unit_index, time())
# save the resulting image to disk
# avoid scipy.misc.imsave because it will normalize the image pixel value between 0 and 255
toimage(img).save(join(output_path, img_name + '.png'))
# also save a txt-file containing information about creation environment and obtained loss
img_info = 'Image "{}.png" was created from unit {} in layer {}, using the following hyperparameters:\n\n' \
'Learning rate: {}\n' \
'Number of iterations: {}\n' \
'----------\n' \
''.format(img_name, unit_index, layer_no, learning_rate, no_of_iterations)
if regularize:
img_info += 'Regularization enabled\n\n' \
'L2-decay: {}\n' \
'Blur interval and std: {} & {}\n' \
'Value percentile: {}\n' \
'Norm percentile: {}\n' \
'Contribution percentile: {}\n' \
'Abs. contribution percentile: {}\n' \
''.format(l2_decay, blur_interval, blur_std, value_percentile, norm_percentile,
contribution_percentile, abs_contribution_percentile)
else:
img_info += 'Regularization disabled\n'
img_info += '----------\n' \
'Obtained loss value: {}\n' \
''.format(loss_value)
with open(join(output_path, img_name + '_info.txt'), 'w') as f:
f.write(img_info)
print('\nImage of unit {} from layer {} have been saved as {}.png\n'.format(unit_index, layer_no, img_name))
# returns a function for computing loss and gradients w.r.t. the activations for the chosen unit in the output tensor
def get_loss_and_gradient_function(input_tensor, output_tensor, unit_index):
# if unit index is specified as integer, convert to tuple
if isinstance(unit_index, int):
unit_index = (unit_index,)
if len(output_tensor.shape[1:]) != len(unit_index):
raise ValueError('Index mismatch: Unit indices should be of length {}, not {}'
.format(len(output_tensor.shape[1:]), len(unit_index)))
else:
tensor_min = np.array([0 for _ in output_tensor.shape[1:]])
tensor_max = np.array([int(dim) - 1 for dim in output_tensor.shape[1:]])
if np.any(np.array(unit_index) < tensor_min) or np.any(np.array(unit_index) > tensor_max):
raise ValueError('Invalid unit index {}: Unit indices should have values between {} and {}'
.format(np.array(unit_index), tensor_min, tensor_max))
# pad with batch index
unit_index = (0,) + unit_index
# loss is the activation of the unit in the chosen output tensor (chosen layer output)
loss = output_tensor[unit_index]
# compute gradients of the loss of the chosen unit w.r.t. the input image
gradients = K.gradients(loss, input_tensor)[0]
# return function returning the loss and gradients given a visualization image
# add a flag to disable the learning phase
return K.function([input_tensor, K.learning_phase()], [loss, gradients])
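# A minimal sketch (defined but never called) of one gradient-ascent update
# using the function returned above; the layer and unit indices are arbitrary
# example choices, not values required by this script.
def _example_single_ascent_step(model):
    compute = get_loss_and_gradient_function(model.input, model.layers[-1].output, 130)
    image = create_initial_image(model.input_shape)
    loss_value, pixel_gradients = compute([image, 0])   # 0 disables the learning phase
    return image + pixel_gradients * learning_rate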
# creates an random, initial image to manipulate into a visualization
def create_initial_image(model_input_shape):
# add (1,) for batch dimension
return np.random.normal(0, 10, (1,) + model_input_shape[1:])
# regularizes visualization with various techniques
# each technique is activated by non-zero values for their respective global variables
def apply_ensemble_regularization(visualization, pixel_gradients, iteration_no):
# regularizer #1
# apply L2-decay
if l2_decay > 0:
visualization *= (1 - l2_decay)
# regularizer #2
# apply Gaussian blur
if blur_interval > 0 and blur_std > 0:
# only blur at certain iterations, as blurring is expensive
if not iteration_no % blur_interval:
# define standard deviations for blur kernel
blur_kernel_std = [0, blur_std, blur_std, blur_std]
# blur along height and width, but not along channel (color) dimension
blur_kernel_std[ch_dim] = 0
# perform blurring
visualization = gaussian_filter(visualization, sigma=blur_kernel_std)
# regularizer #3
# apply value limit
if value_percentile > 0:
# find absolute values
abs_visualization = abs(visualization)
# find mask of high values (values above chosen value percentile)
high_value_mask = abs_visualization >= np.percentile(abs_visualization, value_percentile)
# apply to image to set pixels with small values to zero
visualization *= high_value_mask
# regularizer #4
# apply norm limit
if norm_percentile > 0:
# compute pixel norms along channel (color) dimension
pixel_norms = np.linalg.norm(visualization, axis=ch_dim)
# find initial mask of high norms (norms above chosen norm percentile)
high_norm_mask = pixel_norms >= np.percentile(pixel_norms, norm_percentile)
# expand mask to account for color dimension
high_norm_mask = expand_for_color(high_norm_mask)
# apply to image to set pixels with small norms to zero
visualization *= high_norm_mask
# regularizer #5
# apply contribution limit
if contribution_percentile > 0:
# predict the contribution of each pixel
predicted_contribution = -visualization * pixel_gradients
# sum over channel (color) dimension
contribution = predicted_contribution.sum(ch_dim)
# find initial mask of high contributions (contr. above chosen contr. percentile)
high_contribution_mask = contribution >= np.percentile(contribution, contribution_percentile)
# expand mask to account for color dimension
high_contribution_mask = expand_for_color(high_contribution_mask)
# apply to image to set pixels with small contributions to zero
visualization *= high_contribution_mask
# regularizer #6
# apply absolute contribution limit
if abs_contribution_percentile > 0:
# alternative approach
# predict the contribution of each pixel
predicted_contribution = -visualization * pixel_gradients
# sum over channel (color) dimension, and find absolute value
abs_contribution = abs(predicted_contribution.sum(ch_dim))
# find initial mask of high absolute contributions (abs. contr. above chosen abs. contr. percentile)
high_abs_contribution_mask = abs_contribution >= np.percentile(abs_contribution, abs_contribution_percentile)
# expand mask to account for color dimension
high_abs_contribution_mask = expand_for_color(high_abs_contribution_mask)
# apply to image to set pixels with small absolute contributions to zero
visualization *= high_abs_contribution_mask
return visualization
# use to expand a (batch, height, width)-numpy array with a channel (color) dimension
def expand_for_color(np_array):
# expand at channel (color) dimension
np_array = np.expand_dims(np_array, axis=ch_dim)
# create tile repetition list, repeating thrice in channel (color) dimension
tile_reps = [1, 1, 1, 1]
tile_reps[ch_dim] = 3
# apply tile repetition
np_array = np.tile(np_array, tile_reps)
return np_array
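# Illustrative note (editor's sketch): with channels-last data (ch_dim == 3), a boolean
# mask of shape (1, 224, 224) becomes (1, 224, 224, 1) after np.expand_dims and
# (1, 224, 224, 3) after np.tile, so it can multiply an RGB visualization element-wise.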
def main():
# create model to generate gradients from
model = create_model()
    # select units to visualize by adding (layer number, unit index), where the unit index is a tuple for layers with
# 3D structured output, like convolutional and pooling layers
# units_to_visualize = [(22, 130), (2, 351), (22, 736), (22, 850)]
# units_to_visualize = [(22, 402), (22, 587), (22, 950)]
units_to_visualize = [(1, (112, 112, ch)) for ch in range(1)]
# unit indices in last layer represent the following classes:
# 130 flamingo, 351 hartebeest, 736 pool table, 850 teddy bear
# for the chosen layer number and unit index
for layer_no, unit_index in units_to_visualize:
print('\nProcessing unit {} in layer {}'.format(unit_index, layer_no))
# used to time generation of each image
start_time = time()
if layer_no < 0 or layer_no >= len(model.layers):
raise ValueError('Invalid layer number {}: Layer numbers should be between {} and {}'.format(layer_no, 0, len(model.layers) - 1))
# create and save loss and gradient function for current unit
compute_loss_and_gradients = get_loss_and_gradient_function(model.input, model.layers[layer_no].output, unit_index)
# create an initial visualization image
visualization = create_initial_image(model.input_shape)
# perform gradient ascent update with or without regularization for n steps
for i in range(1, no_of_iterations + 1):
# compute loss and gradient values (input 0 as arg. #2 to deactivate training layers, like dropout)
loss_value, pixel_gradients = compute_loss_and_gradients([visualization, 0])
# update visualization image
visualization += pixel_gradients * learning_rate
# if regularization has been activated, regularize image
if regularize:
visualization = apply_ensemble_regularization(visualization, pixel_gradients, i)
# print('Current loss value:', loss_value)
print('Round {} finished.'.format(i))
# process visualization to match with standard image dimensions
visualization_image = deprocess(visualization)
# save visualization image, complete with info about creation environment
save_visualization(visualization_image, layer_no, unit_index, loss_value)
print('Visualization for unit {} from layer {} completed in {:.4f} seconds'.format(unit_index, layer_no, time() - start_time))
main()
|
[
"numpy.clip",
"keras.applications.vgg16.VGG16",
"scipy.ndimage.filters.gaussian_filter",
"keras.backend.learning_phase",
"keras.backend.gradients",
"numpy.array",
"numpy.linalg.norm",
"keras.layers.Dense",
"keras.backend.image_data_format",
"os.mkdir",
"keras.models.Model",
"numpy.random.normal",
"numpy.tile",
"os.path.dirname",
"time.time",
"os.path.join",
"scipy.misc.toimage",
"numpy.expand_dims",
"numpy.percentile"
] |
[((710, 746), 'numpy.array', 'np.array', (['[103.939, 116.779, 123.68]'], {}), '([103.939, 116.779, 123.68])\n', (718, 746), True, 'import numpy as np\n'), ((372, 389), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (379, 389), False, 'from os.path import dirname, join\n'), ((407, 425), 'os.mkdir', 'mkdir', (['output_path'], {}), '(output_path)\n', (412, 425), False, 'from os import mkdir\n'), ((572, 593), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (591, 593), True, 'from keras import backend as K\n'), ((2579, 2622), 'keras.applications.vgg16.VGG16', 'VGG16', ([], {'include_top': '(True)', 'weights': '"""imagenet"""'}), "(include_top=True, weights='imagenet')\n", (2584, 2622), False, 'from keras.applications.vgg16 import VGG16\n'), ((2922, 2950), 'keras.models.Model', 'Model', (['base_model.input', 'out'], {}), '(base_model.input, out)\n', (2927, 2950), False, 'from keras.models import Model\n'), ((6174, 6227), 'numpy.random.normal', 'np.random.normal', (['(0)', '(10)', '((1,) + model_input_shape[1:])'], {}), '(0, 10, (1,) + model_input_shape[1:])\n', (6190, 6227), True, 'import numpy as np\n'), ((9646, 9683), 'numpy.expand_dims', 'np.expand_dims', (['np_array'], {'axis': 'ch_dim'}), '(np_array, axis=ch_dim)\n', (9660, 9683), True, 'import numpy as np\n'), ((9851, 9879), 'numpy.tile', 'np.tile', (['np_array', 'tile_reps'], {}), '(np_array, tile_reps)\n', (9858, 9879), True, 'import numpy as np\n'), ((2067, 2088), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (2086, 2088), True, 'from keras import backend as K\n'), ((2826, 2883), 'keras.layers.Dense', 'Dense', (['(1000)'], {'activation': '"""linear"""', 'weights': 'softmax_weights'}), "(1000, activation='linear', weights=softmax_weights)\n", (2831, 2883), False, 'from keras.layers import Dense\n'), ((3268, 3274), 'time.time', 'time', ([], {}), '()\n', (3272, 3274), False, 'from time import time\n'), ((3425, 3461), 'os.path.join', 'join', (['output_path', "(img_name + '.png')"], {}), "(output_path, img_name + '.png')\n", (3429, 3461), False, 'from os.path import dirname, join\n'), ((5137, 5185), 'numpy.array', 'np.array', (['[(0) for _ in output_tensor.shape[1:]]'], {}), '([(0) for _ in output_tensor.shape[1:]])\n', (5145, 5185), True, 'import numpy as np\n'), ((5780, 5811), 'keras.backend.gradients', 'K.gradients', (['loss', 'input_tensor'], {}), '(loss, input_tensor)\n', (5791, 5811), True, 'from keras import backend as K\n'), ((7562, 7604), 'numpy.linalg.norm', 'np.linalg.norm', (['visualization'], {'axis': 'ch_dim'}), '(visualization, axis=ch_dim)\n', (7576, 7604), True, 'import numpy as np\n'), ((10702, 10708), 'time.time', 'time', ([], {}), '()\n', (10706, 10708), False, 'from time import time\n'), ((2435, 2461), 'numpy.clip', 'np.clip', (['img_array', '(0)', '(255)'], {}), '(img_array, 0, 255)\n', (2442, 2461), True, 'import numpy as np\n'), ((3407, 3419), 'scipy.misc.toimage', 'toimage', (['img'], {}), '(img)\n', (3414, 3419), False, 'from scipy.misc import toimage\n'), ((4408, 4449), 'os.path.join', 'join', (['output_path', "(img_name + '_info.txt')"], {}), "(output_path, img_name + '_info.txt')\n", (4412, 4449), False, 'from os.path import dirname, join\n'), ((5975, 5993), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (5991, 5993), True, 'from keras import backend as K\n'), ((6980, 7033), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['visualization'], {'sigma': 'blur_kernel_std'}), '(visualization, 
sigma=blur_kernel_std)\n', (6995, 7033), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((7278, 7328), 'numpy.percentile', 'np.percentile', (['abs_visualization', 'value_percentile'], {}), '(abs_visualization, value_percentile)\n', (7291, 7328), True, 'import numpy as np\n'), ((7715, 7758), 'numpy.percentile', 'np.percentile', (['pixel_norms', 'norm_percentile'], {}), '(pixel_norms, norm_percentile)\n', (7728, 7758), True, 'import numpy as np\n'), ((8358, 8410), 'numpy.percentile', 'np.percentile', (['contribution', 'contribution_percentile'], {}), '(contribution, contribution_percentile)\n', (8371, 8410), True, 'import numpy as np\n'), ((9143, 9203), 'numpy.percentile', 'np.percentile', (['abs_contribution', 'abs_contribution_percentile'], {}), '(abs_contribution, abs_contribution_percentile)\n', (9156, 9203), True, 'import numpy as np\n'), ((5271, 5291), 'numpy.array', 'np.array', (['unit_index'], {}), '(unit_index)\n', (5279, 5291), True, 'import numpy as np\n'), ((5316, 5336), 'numpy.array', 'np.array', (['unit_index'], {}), '(unit_index)\n', (5324, 5336), True, 'import numpy as np\n'), ((5463, 5483), 'numpy.array', 'np.array', (['unit_index'], {}), '(unit_index)\n', (5471, 5483), True, 'import numpy as np\n'), ((12215, 12221), 'time.time', 'time', ([], {}), '()\n', (12219, 12221), False, 'from time import time\n')]
|
import json
import logging
import os
import sys
from argparse import ArgumentParser
import re
import numpy as np
import pandas as pd
import torch
from transformers import GPT2Tokenizer
from src.data.cleaning import mask_not_na, inds_unique, mask_long_enough
from src.data.nli import TransformersSeqPairDataset
from src.models.pg_trainer import AutoregressivePGTrainer
parser = ArgumentParser()
parser.add_argument("--experiment_dir", type=str, default="debug")
parser.add_argument("--paraphrase_path", type=str,
default="/home/matej/Documents/paraphrase-nli/experiments/SciTail_NLI/PARAPHRASE_IDENTIFICATION/id-scitail-roberta-base-argmax/all_para_id.csv")
parser.add_argument("--pretrained_name_or_path", type=str, default="gpt2")
parser.add_argument("--model_type", type=str, default="gpt2",
choices=["gpt2"])
parser.add_argument("--num_epochs", type=int, default=10)
parser.add_argument("--max_seq_len", type=int, default=79)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--learning_rate", type=float, default=2e-5)
parser.add_argument("--early_stopping_rounds", type=int, default=5)
parser.add_argument("--validate_every_n_examples", type=int, default=5000)
parser.add_argument("--random_seed", type=int, default=17)
parser.add_argument("--use_cpu", action="store_true")
if __name__ == "__main__":
args = parser.parse_args()
DEVICE = torch.device("cpu") if args.use_cpu else torch.device("cuda")
if not os.path.exists(args.experiment_dir):
os.makedirs(args.experiment_dir)
if args.random_seed is not None:
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
with open(os.path.join(args.experiment_dir, "experiment_config.json"), "w") as f:
json.dump(vars(args), fp=f, indent=4)
# Set up logging to file and stdout
logger = logging.getLogger()
logger.setLevel(logging.INFO)
for curr_handler in [logging.StreamHandler(sys.stdout),
logging.FileHandler(os.path.join(args.experiment_dir, "experiment.log"))]:
curr_handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
logger.addHandler(curr_handler)
for k, v in vars(args).items():
v_str = str(v)
v_str = f"...{v_str[-(50 - 3):]}" if len(v_str) > 50 else v_str
logging.info(f"|{k:30s}|{v_str:50s}|")
# No AutoTokenizerFast at the moment?
if args.model_type == "gpt2":
tokenizer_cls = GPT2Tokenizer
else:
raise NotImplementedError(f"Model_type '{args.model_type}' is not supported")
tokenizer = tokenizer_cls.from_pretrained(args.pretrained_name_or_path)
tokenizer.add_special_tokens({
"eos_token": "<EOS>",
"pad_token": "<PAD>",
"additional_special_tokens": ["<PARA>"]
})
tokenizer.save_pretrained(args.experiment_dir)
SEPARATOR_ID = int(tokenizer.encode("<PARA>", add_special_tokens=False)[0])
df = pd.read_csv(args.paraphrase_path)
# Basic data cleaning - remove NAs (?), duplicate pairs, pairs with one sequence very short
df = df.loc[mask_not_na(df["sequence1"], df["sequence2"])]
df = df.iloc[inds_unique(df["sequence1"], df["sequence2"])]
df = df.loc[mask_long_enough(df["sequence1"], df["sequence2"])]
df = df.loc[df["label"] == 1].reset_index(drop=True)
df["formatted"] = list(map(
lambda pair: f"{pair[0]} <PARA> {pair[1]} {tokenizer.eos_token}",
zip(df["sequence1"].tolist(), df["sequence2"].tolist())
))
num_ex = df.shape[0]
indices = np.random.permutation(num_ex)
train_df = df.iloc[indices[:int(0.7 * num_ex)]]
dev_df = df.iloc[indices[int(0.7 * num_ex): int(0.85 * num_ex)]]
test_df = df.iloc[indices[int(0.85 * num_ex):]]
train_df.drop("formatted", axis=1).to_csv(os.path.join(args.experiment_dir, "train.csv"), sep=",", index=False)
dev_df.drop("formatted", axis=1).to_csv(os.path.join(args.experiment_dir, "dev.csv"), sep=",", index=False)
test_df.drop("formatted", axis=1).to_csv(os.path.join(args.experiment_dir, "test.csv"), sep=",", index=False)
_encoded_train = tokenizer.batch_encode_plus(
train_df["formatted"].tolist(),
max_length=args.max_seq_len, padding="max_length", truncation="longest_first", return_tensors="pt"
)
_train_labels = _encoded_train["input_ids"].clone()
for idx_ex in range(_train_labels.shape[0]):
for idx_token in range(args.max_seq_len):
_train_labels[idx_ex, idx_token] = -100
if _encoded_train["input_ids"][idx_ex, idx_token] == SEPARATOR_ID:
break
_encoded_train["labels"] = _train_labels
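    # Note on the masking above: label positions up to and including the <PARA> separator
    # are set to -100 (the ignore index of the LM loss), so the objective is only computed
    # on the paraphrase part of each "sequence1 <PARA> sequence2 <EOS>" example.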
_encoded_dev = tokenizer.batch_encode_plus(
dev_df["formatted"].tolist(),
max_length=args.max_seq_len, padding="max_length", truncation="longest_first", return_tensors="pt"
)
_dev_labels = _encoded_dev["input_ids"].clone()
for idx_ex in range(_dev_labels.shape[0]):
for idx_token in range(args.max_seq_len):
_dev_labels[idx_ex, idx_token] = -100
if _encoded_dev["input_ids"][idx_ex, idx_token] == SEPARATOR_ID:
break
_encoded_dev["labels"] = _dev_labels
_encoded_test = tokenizer.batch_encode_plus(
test_df["formatted"].tolist(),
max_length=args.max_seq_len, padding="max_length", truncation="longest_first", return_tensors="pt"
)
_test_labels = _encoded_test["input_ids"].clone()
for idx_ex in range(_test_labels.shape[0]):
for idx_token in range(args.max_seq_len):
_test_labels[idx_ex, idx_token] = -100
if _encoded_test["input_ids"][idx_ex, idx_token] == SEPARATOR_ID:
break
_encoded_test["labels"] = _test_labels
train_set = TransformersSeqPairDataset(**_encoded_train)
dev_set = TransformersSeqPairDataset(**_encoded_dev)
test_set = TransformersSeqPairDataset(**_encoded_test)
logging.info(f"Loaded {len(train_set)} training examples, {len(dev_set)} dev examples and "
f"{len(test_set)} test examples")
pg_trainer = AutoregressivePGTrainer(args.experiment_dir,
pretrained_model_name_or_path=args.pretrained_name_or_path,
tokenizer_path=args.experiment_dir,
batch_size=args.batch_size,
learning_rate=args.learning_rate,
validate_every_n_steps=args.validate_every_n_examples,
early_stopping_tol=args.early_stopping_rounds,
device=("cuda" if not args.use_cpu else "cpu"))
pg_trainer.run(train_dataset=train_set, val_dataset=dev_set, num_epochs=args.num_epochs)
# Reload best model
pg_trainer = AutoregressivePGTrainer.from_pretrained(args.experiment_dir)
dev_prompts = dev_df["sequence1"].apply(lambda s: f"{s} <PARA>")
test_prompts = test_df["sequence1"].apply(lambda s: f"{s} <PARA>")
dev_df["sequence2"].to_csv(os.path.join(args.experiment_dir, "dev_ref.txt"), sep=",", index=False, header=False)
test_df["sequence2"].to_csv(os.path.join(args.experiment_dir, "test_ref.txt"), sep=",", index=False, header=False)
dev_df["sequence1"].to_csv(os.path.join(args.experiment_dir, "dev_input_copy.txt"), sep=",", index=False, header=False)
test_df["sequence1"].to_csv(os.path.join(args.experiment_dir, "test_input_copy.txt"), sep=",", index=False, header=False)
strategies = {
"greedy": {},
"beam": {"num_beams": 5, "early_stopping": True},
"top_p": {"do_sample": True, "top_p": 0.9, "top_k": 0},
"top_k": {"do_sample": True, "top_k": 10}
}
for curr_strat, strat_kwargs in strategies.items():
dev_pred_para = pg_trainer.generate(dev_prompts.tolist(), max_seq_len=args.max_seq_len, strategy=strat_kwargs)
with open(os.path.join(args.experiment_dir, f"dev_{curr_strat}_hyp.txt"), "w", encoding="utf-8") as f:
for _txt in dev_pred_para:
print(re.sub(r"(\n)+", " ", _txt.strip()), file=f)
test_pred_para = pg_trainer.generate(test_prompts.tolist(), max_seq_len=args.max_seq_len, strategy=strat_kwargs)
with open(os.path.join(args.experiment_dir, f"test_{curr_strat}_hyp.txt"), "w", encoding="utf-8") as f:
for _txt in test_pred_para:
print(re.sub(r"(\n)+", " ", _txt.strip()), file=f)
|
[
"logging.getLogger",
"src.data.nli.TransformersSeqPairDataset",
"logging.StreamHandler",
"pandas.read_csv",
"src.data.cleaning.inds_unique",
"logging.info",
"os.path.exists",
"argparse.ArgumentParser",
"src.data.cleaning.mask_not_na",
"numpy.random.seed",
"numpy.random.permutation",
"src.data.cleaning.mask_long_enough",
"src.models.pg_trainer.AutoregressivePGTrainer.from_pretrained",
"torch.device",
"torch.manual_seed",
"os.makedirs",
"logging.Formatter",
"os.path.join",
"src.models.pg_trainer.AutoregressivePGTrainer"
] |
[((380, 396), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (394, 396), False, 'from argparse import ArgumentParser\n'), ((1810, 1829), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1827, 1829), False, 'import logging\n'), ((2809, 2842), 'pandas.read_csv', 'pd.read_csv', (['args.paraphrase_path'], {}), '(args.paraphrase_path)\n', (2820, 2842), True, 'import pandas as pd\n'), ((3370, 3399), 'numpy.random.permutation', 'np.random.permutation', (['num_ex'], {}), '(num_ex)\n', (3391, 3399), True, 'import numpy as np\n'), ((5367, 5411), 'src.data.nli.TransformersSeqPairDataset', 'TransformersSeqPairDataset', ([], {}), '(**_encoded_train)\n', (5393, 5411), False, 'from src.data.nli import TransformersSeqPairDataset\n'), ((5423, 5465), 'src.data.nli.TransformersSeqPairDataset', 'TransformersSeqPairDataset', ([], {}), '(**_encoded_dev)\n', (5449, 5465), False, 'from src.data.nli import TransformersSeqPairDataset\n'), ((5478, 5521), 'src.data.nli.TransformersSeqPairDataset', 'TransformersSeqPairDataset', ([], {}), '(**_encoded_test)\n', (5504, 5521), False, 'from src.data.nli import TransformersSeqPairDataset\n'), ((5669, 6040), 'src.models.pg_trainer.AutoregressivePGTrainer', 'AutoregressivePGTrainer', (['args.experiment_dir'], {'pretrained_model_name_or_path': 'args.pretrained_name_or_path', 'tokenizer_path': 'args.experiment_dir', 'batch_size': 'args.batch_size', 'learning_rate': 'args.learning_rate', 'validate_every_n_steps': 'args.validate_every_n_examples', 'early_stopping_tol': 'args.early_stopping_rounds', 'device': "('cuda' if not args.use_cpu else 'cpu')"}), "(args.experiment_dir, pretrained_model_name_or_path=\n args.pretrained_name_or_path, tokenizer_path=args.experiment_dir,\n batch_size=args.batch_size, learning_rate=args.learning_rate,\n validate_every_n_steps=args.validate_every_n_examples,\n early_stopping_tol=args.early_stopping_rounds, device='cuda' if not\n args.use_cpu else 'cpu')\n", (5692, 6040), False, 'from src.models.pg_trainer import AutoregressivePGTrainer\n'), ((6225, 6285), 'src.models.pg_trainer.AutoregressivePGTrainer.from_pretrained', 'AutoregressivePGTrainer.from_pretrained', (['args.experiment_dir'], {}), '(args.experiment_dir)\n', (6264, 6285), False, 'from src.models.pg_trainer import AutoregressivePGTrainer\n'), ((1388, 1407), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1400, 1407), False, 'import torch\n'), ((1429, 1449), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1441, 1449), False, 'import torch\n'), ((1458, 1493), 'os.path.exists', 'os.path.exists', (['args.experiment_dir'], {}), '(args.experiment_dir)\n', (1472, 1493), False, 'import os\n'), ((1497, 1529), 'os.makedirs', 'os.makedirs', (['args.experiment_dir'], {}), '(args.experiment_dir)\n', (1508, 1529), False, 'import os\n'), ((1567, 1599), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (1581, 1599), True, 'import numpy as np\n'), ((1602, 1637), 'torch.manual_seed', 'torch.manual_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (1619, 1637), False, 'import torch\n'), ((1883, 1916), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1904, 1916), False, 'import logging\n'), ((2248, 2286), 'logging.info', 'logging.info', (['f"""|{k:30s}|{v_str:50s}|"""'], {}), "(f'|{k:30s}|{v_str:50s}|')\n", (2260, 2286), False, 'import logging\n'), ((2949, 2994), 'src.data.cleaning.mask_not_na', 'mask_not_na', (["df['sequence1']", "df['sequence2']"], 
{}), "(df['sequence1'], df['sequence2'])\n", (2960, 2994), False, 'from src.data.cleaning import mask_not_na, inds_unique, mask_long_enough\n'), ((3010, 3055), 'src.data.cleaning.inds_unique', 'inds_unique', (["df['sequence1']", "df['sequence2']"], {}), "(df['sequence1'], df['sequence2'])\n", (3021, 3055), False, 'from src.data.cleaning import mask_not_na, inds_unique, mask_long_enough\n'), ((3070, 3120), 'src.data.cleaning.mask_long_enough', 'mask_long_enough', (["df['sequence1']", "df['sequence2']"], {}), "(df['sequence1'], df['sequence2'])\n", (3086, 3120), False, 'from src.data.cleaning import mask_not_na, inds_unique, mask_long_enough\n'), ((3608, 3654), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""train.csv"""'], {}), "(args.experiment_dir, 'train.csv')\n", (3620, 3654), False, 'import os\n'), ((3719, 3763), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""dev.csv"""'], {}), "(args.experiment_dir, 'dev.csv')\n", (3731, 3763), False, 'import os\n'), ((3829, 3874), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""test.csv"""'], {}), "(args.experiment_dir, 'test.csv')\n", (3841, 3874), False, 'import os\n'), ((6450, 6498), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""dev_ref.txt"""'], {}), "(args.experiment_dir, 'dev_ref.txt')\n", (6462, 6498), False, 'import os\n'), ((6565, 6614), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""test_ref.txt"""'], {}), "(args.experiment_dir, 'test_ref.txt')\n", (6577, 6614), False, 'import os\n'), ((6681, 6736), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""dev_input_copy.txt"""'], {}), "(args.experiment_dir, 'dev_input_copy.txt')\n", (6693, 6736), False, 'import os\n'), ((6803, 6859), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""test_input_copy.txt"""'], {}), "(args.experiment_dir, 'test_input_copy.txt')\n", (6815, 6859), False, 'import os\n'), ((1650, 1709), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""experiment_config.json"""'], {}), "(args.experiment_dir, 'experiment_config.json')\n", (1662, 1709), False, 'import os\n'), ((1945, 1996), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""experiment.log"""'], {}), "(args.experiment_dir, 'experiment.log')\n", (1957, 1996), False, 'import os\n'), ((2028, 2093), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(levelname)-5.5s] %(message)s"""'], {}), "('%(asctime)s [%(levelname)-5.5s] %(message)s')\n", (2045, 2093), False, 'import logging\n'), ((7266, 7328), 'os.path.join', 'os.path.join', (['args.experiment_dir', 'f"""dev_{curr_strat}_hyp.txt"""'], {}), "(args.experiment_dir, f'dev_{curr_strat}_hyp.txt')\n", (7278, 7328), False, 'import os\n'), ((7572, 7635), 'os.path.join', 'os.path.join', (['args.experiment_dir', 'f"""test_{curr_strat}_hyp.txt"""'], {}), "(args.experiment_dir, f'test_{curr_strat}_hyp.txt')\n", (7584, 7635), False, 'import os\n')]
|
import numpy as np
from pyspark.sql import SparkSession
from pyspark import SparkContext, SparkConf
from TransEmodule import utils
def check_entities(x, map):
if x in map:
return map[x]
else:
return None
def calculate_rankings(rank_list):
flat = rank_list.map(lambda x: x[0]).persist()
prepare_mean = flat.map(lambda x: (x, 1))
prepare_hits = flat.map(lambda x: (1 if x <= 10 else 0, 1))
x = prepare_mean.reduce(lambda x, y: (x[0] + y[0], x[1] + y[1]))
mean = x[0]/x[1]
x = prepare_hits.reduce(lambda x, y: (x[0] + y[0], x[1] + y[1]))
hits = x[0]/x[1]
return mean, hits
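# Worked example (editor's illustration): for zero-based ranks [0, 3, 15] the reduce steps
# above give mean = (0 + 3 + 15) / 3 = 6.0 and hits@10 = 2 / 3 ~ 0.67, since only ranks
# <= 10 count as hits.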
def testing(partition, test_entities_to_id, test_labels_to_id,
entities_to_id_map, label_to_id_map, entity_embedding,
label_embedding):
rank_list = []
i = 0
for (h, l, t) in partition:
# get train ids from testset ids
h_train = check_entities(utils.get_id_by_value(test_entities_to_id.value, h)[0],
entities_to_id_map.value)
l_train = check_entities(utils.get_id_by_value(test_labels_to_id.value, l)[0],
label_to_id_map.value)
t_train = check_entities(utils.get_id_by_value(test_entities_to_id.value, t)[0],
entities_to_id_map.value)
if h_train is None or l_train is None or t_train is None:
continue
# head
corrupted_entities = entity_embedding.value.vector + label_embedding.value.vector[l_train] - entity_embedding.value.vector[t_train]
distances = np.apply_along_axis(lambda x: np.sum(np.square(x)), 1, corrupted_entities)
indices = np.argsort(distances)
rank = np.where(indices == h_train)
rank_list.append(rank[0])
# tail
corrupted_entities = entity_embedding.value.vector[h_train] + label_embedding.value.vector[l_train]
distances = np.apply_along_axis(lambda x: np.sum(np.square(corrupted_entities - x)), 1, entity_embedding.value.vector)
indices = np.argsort(distances)
rank = np.where(indices == t_train)
rank_list.append(rank[0])
if i % 50 == 0:
rank_list_baby = np.concatenate(rank_list, axis=0)
print("Mean: " + str(np.mean(rank_list_baby)))
print("Hit: " + str(np.mean(rank_list_baby <= 10)*100))
print(i)
i += 1
return rank_list
def test(testset, test_entities_to_id, test_labels_to_id,
entities_to_id_map, label_to_id_map, entity_embedding,
label_embedding):
testset_rdd = sc.parallelize(testset).persist()
test_entities_BC = sc.broadcast(test_entities_to_id)
test_labels_BC = sc.broadcast(test_labels_to_id)
entities_embedding_BC = sc.broadcast(entity_embedding)
labels_embedding_BC = sc.broadcast(label_embedding)
entities_map_BC = sc.broadcast(entities_to_id_map)
labels_map_BC = sc.broadcast(label_to_id_map)
rank_list = testset_rdd.mapPartitions(lambda x: testing(x,
test_entities_BC,
test_labels_BC,
entities_map_BC,
labels_map_BC,
entities_embedding_BC,
labels_embedding_BC)
)
mean, hits = calculate_rankings(rank_list)
return mean, hits
if __name__ == "__main__":
# change the paths if you are not using
# our terraform project!
# create the session
conf = SparkConf().setAll([("spark.worker.cleanup.enabled", True),
("spark.serializer",
"org.apache.spark.serializer.KryoSerializer"),
("spark.kryo.registrationRequired", "false"),
("spark.master", "spark://s01:7077")])
sc = SparkContext(conf=conf).getOrCreate()
sc.addPyFile('TransEmodule.zip')
entity_embedding, label_embedding = utils.restore('/home/ubuntu/entity_embedding_999.pkl',
'/home/ubuntu/label_embedding_999.pkl')
ds_to_id, entities_to_id_map, label_to_id_map = utils.load_dataset(sc, "hdfs://s01:9000/train2.tsv")
testset, test_entities_to_id, test_labels_to_id = utils.load_dataset(sc, "hdfs://s01:9000/test2.tsv")
mean, hits = test(testset, test_entities_to_id, test_labels_to_id,
entities_to_id_map, label_to_id_map,
entity_embedding, label_embedding)
print("Mean: " + str(mean) + "\nHits@10: " + str(hits))
|
[
"numpy.mean",
"TransEmodule.utils.get_id_by_value",
"numpy.where",
"TransEmodule.utils.load_dataset",
"pyspark.SparkConf",
"numpy.square",
"numpy.argsort",
"TransEmodule.utils.restore",
"numpy.concatenate",
"pyspark.SparkContext"
] |
[((4227, 4325), 'TransEmodule.utils.restore', 'utils.restore', (['"""/home/ubuntu/entity_embedding_999.pkl"""', '"""/home/ubuntu/label_embedding_999.pkl"""'], {}), "('/home/ubuntu/entity_embedding_999.pkl',\n '/home/ubuntu/label_embedding_999.pkl')\n", (4240, 4325), False, 'from TransEmodule import utils\n'), ((4429, 4481), 'TransEmodule.utils.load_dataset', 'utils.load_dataset', (['sc', '"""hdfs://s01:9000/train2.tsv"""'], {}), "(sc, 'hdfs://s01:9000/train2.tsv')\n", (4447, 4481), False, 'from TransEmodule import utils\n'), ((4537, 4588), 'TransEmodule.utils.load_dataset', 'utils.load_dataset', (['sc', '"""hdfs://s01:9000/test2.tsv"""'], {}), "(sc, 'hdfs://s01:9000/test2.tsv')\n", (4555, 4588), False, 'from TransEmodule import utils\n'), ((1696, 1717), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (1706, 1717), True, 'import numpy as np\n'), ((1733, 1761), 'numpy.where', 'np.where', (['(indices == h_train)'], {}), '(indices == h_train)\n', (1741, 1761), True, 'import numpy as np\n'), ((2066, 2087), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (2076, 2087), True, 'import numpy as np\n'), ((2103, 2131), 'numpy.where', 'np.where', (['(indices == t_train)'], {}), '(indices == t_train)\n', (2111, 2131), True, 'import numpy as np\n'), ((2221, 2254), 'numpy.concatenate', 'np.concatenate', (['rank_list'], {'axis': '(0)'}), '(rank_list, axis=0)\n', (2235, 2254), True, 'import numpy as np\n'), ((3762, 3773), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (3771, 3773), False, 'from pyspark import SparkContext, SparkConf\n'), ((4110, 4133), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (4122, 4133), False, 'from pyspark import SparkContext, SparkConf\n'), ((931, 982), 'TransEmodule.utils.get_id_by_value', 'utils.get_id_by_value', (['test_entities_to_id.value', 'h'], {}), '(test_entities_to_id.value, h)\n', (952, 982), False, 'from TransEmodule import utils\n'), ((1080, 1129), 'TransEmodule.utils.get_id_by_value', 'utils.get_id_by_value', (['test_labels_to_id.value', 'l'], {}), '(test_labels_to_id.value, l)\n', (1101, 1129), False, 'from TransEmodule import utils\n'), ((1224, 1275), 'TransEmodule.utils.get_id_by_value', 'utils.get_id_by_value', (['test_entities_to_id.value', 't'], {}), '(test_entities_to_id.value, t)\n', (1245, 1275), False, 'from TransEmodule import utils\n'), ((1640, 1652), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (1649, 1652), True, 'import numpy as np\n'), ((1978, 2011), 'numpy.square', 'np.square', (['(corrupted_entities - x)'], {}), '(corrupted_entities - x)\n', (1987, 2011), True, 'import numpy as np\n'), ((2288, 2311), 'numpy.mean', 'np.mean', (['rank_list_baby'], {}), '(rank_list_baby)\n', (2295, 2311), True, 'import numpy as np\n'), ((2346, 2375), 'numpy.mean', 'np.mean', (['(rank_list_baby <= 10)'], {}), '(rank_list_baby <= 10)\n', (2353, 2375), True, 'import numpy as np\n')]
|
from unittest import TestCase
import numpy as np
import dianna
import dianna.visualization
from dianna.methods import LIME
from tests.test_onnx_runner import generate_data
from tests.utils import ModelRunner
from tests.utils import run_model
class LimeOnImages(TestCase):
def test_lime_function(self):
np.random.seed(42)
input_data = np.random.random((1, 224, 224, 3))
labels = ('batch', 'y', 'x', 'channels')
explainer = LIME(random_state=42, axes_labels=labels)
heatmap = explainer.explain_image(run_model, input_data, num_samples=100)
heatmap_expected = np.load('tests/test_data/heatmap_lime_function.npy')
assert heatmap.shape == input_data[0].shape[:2]
assert np.allclose(heatmap, heatmap_expected, atol=.01)
def test_lime_filename(self):
np.random.seed(42)
model_filename = 'tests/test_data/mnist_model.onnx'
black_and_white = generate_data(batch_size=1)
# Make data 3-channel instead of 1-channel
input_data = np.zeros([1, 3] + list(black_and_white.shape[2:])) + black_and_white
input_data = input_data.astype(np.float32)
labels = ('batch', 'channels', 'y', 'x')
def preprocess(data):
# select single channel out of 3, but keep the channel axis
return data[:, [0], ...]
heatmap = dianna.explain_image(model_filename, input_data, method="LIME", preprocess_function=preprocess, random_state=42,
axes_labels=labels)
heatmap_expected = np.load('tests/test_data/heatmap_lime_filename.npy')
assert heatmap.shape == input_data[0, 0].shape
assert np.allclose(heatmap, heatmap_expected, atol=.01)
def test_lime_text():
model_path = 'tests/test_data/movie_review_model.onnx'
word_vector_file = 'tests/test_data/word_vectors.txt'
runner = ModelRunner(model_path, word_vector_file, max_filter_size=5)
review = 'such a bad movie'
explanation = dianna.explain_text(runner, review, labels=[0], method='LIME', random_state=42)[0]
words = [element[0] for element in explanation]
word_indices = [element[1] for element in explanation]
scores = [element[2] for element in explanation]
expected_words = ['bad', 'such', 'movie', 'a']
expected_word_indices = [7, 0, 11, 5]
expected_scores = [.492, -.046, .036, -.008]
assert words == expected_words
assert word_indices == expected_word_indices
assert np.allclose(scores, expected_scores, atol=.01)
|
[
"tests.test_onnx_runner.generate_data",
"numpy.allclose",
"dianna.explain_text",
"numpy.random.random",
"dianna.methods.LIME",
"tests.utils.ModelRunner",
"numpy.random.seed",
"dianna.explain_image",
"numpy.load"
] |
[((1889, 1949), 'tests.utils.ModelRunner', 'ModelRunner', (['model_path', 'word_vector_file'], {'max_filter_size': '(5)'}), '(model_path, word_vector_file, max_filter_size=5)\n', (1900, 1949), False, 'from tests.utils import ModelRunner\n'), ((2487, 2534), 'numpy.allclose', 'np.allclose', (['scores', 'expected_scores'], {'atol': '(0.01)'}), '(scores, expected_scores, atol=0.01)\n', (2498, 2534), True, 'import numpy as np\n'), ((317, 335), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (331, 335), True, 'import numpy as np\n'), ((357, 391), 'numpy.random.random', 'np.random.random', (['(1, 224, 224, 3)'], {}), '((1, 224, 224, 3))\n', (373, 391), True, 'import numpy as np\n'), ((462, 503), 'dianna.methods.LIME', 'LIME', ([], {'random_state': '(42)', 'axes_labels': 'labels'}), '(random_state=42, axes_labels=labels)\n', (466, 503), False, 'from dianna.methods import LIME\n'), ((613, 665), 'numpy.load', 'np.load', (['"""tests/test_data/heatmap_lime_function.npy"""'], {}), "('tests/test_data/heatmap_lime_function.npy')\n", (620, 665), True, 'import numpy as np\n'), ((737, 786), 'numpy.allclose', 'np.allclose', (['heatmap', 'heatmap_expected'], {'atol': '(0.01)'}), '(heatmap, heatmap_expected, atol=0.01)\n', (748, 786), True, 'import numpy as np\n'), ((829, 847), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (843, 847), True, 'import numpy as np\n'), ((935, 962), 'tests.test_onnx_runner.generate_data', 'generate_data', ([], {'batch_size': '(1)'}), '(batch_size=1)\n', (948, 962), False, 'from tests.test_onnx_runner import generate_data\n'), ((1363, 1499), 'dianna.explain_image', 'dianna.explain_image', (['model_filename', 'input_data'], {'method': '"""LIME"""', 'preprocess_function': 'preprocess', 'random_state': '(42)', 'axes_labels': 'labels'}), "(model_filename, input_data, method='LIME',\n preprocess_function=preprocess, random_state=42, axes_labels=labels)\n", (1383, 1499), False, 'import dianna\n'), ((1563, 1615), 'numpy.load', 'np.load', (['"""tests/test_data/heatmap_lime_filename.npy"""'], {}), "('tests/test_data/heatmap_lime_filename.npy')\n", (1570, 1615), True, 'import numpy as np\n'), ((1686, 1735), 'numpy.allclose', 'np.allclose', (['heatmap', 'heatmap_expected'], {'atol': '(0.01)'}), '(heatmap, heatmap_expected, atol=0.01)\n', (1697, 1735), True, 'import numpy as np\n'), ((2002, 2081), 'dianna.explain_text', 'dianna.explain_text', (['runner', 'review'], {'labels': '[0]', 'method': '"""LIME"""', 'random_state': '(42)'}), "(runner, review, labels=[0], method='LIME', random_state=42)\n", (2021, 2081), False, 'import dianna\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 16:52:10 2017
@author: margauxmouchene
"""
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from landlab import RasterModelGrid
from landlab.components import (
FlowAccumulator,
FlowDirectorSteepest,
TransportLengthHillslopeDiffuser,
)
def test_route_to_multiple_error_raised():
mg = RasterModelGrid((10, 10))
z = mg.add_zeros("node", "topographic__elevation")
z += mg.x_of_node + mg.y_of_node
fa = FlowAccumulator(mg, flow_director="MFD")
fa.run_one_step()
with pytest.raises(NotImplementedError):
TransportLengthHillslopeDiffuser(mg, erodibility=1.0, slope_crit=0.5)
def test_tl_hill_diff():
"""Test cases where S>Sc, S=Sc and S<Sc"""
# Test cases where S>Sc, S=Sc and S<Sc
# Set up a 3x16 grid with closed boundaries and initial elevations.
mg = RasterModelGrid((3, 12))
z = np.array(
[
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
5.,
1.9,
1.9,
1.9,
1.9,
1.3,
1.3,
1.3,
1.3,
1.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
]
)
mg.add_field("node", "topographic__elevation", z)
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Parameter values for test
k = 0.001
Sc = 0.6
# Instantiate flow director and tl hillslope diffuser
fdir = FlowDirectorSteepest(mg)
tl_diff = TransportLengthHillslopeDiffuser(mg, erodibility=k, slope_crit=Sc)
# Run flow director
fdir.run_one_step()
# test slopes
s_out = mg.at_node["topographic__steepest_slope"]
s_test = np.array(
[
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
3.1,
0.,
0.,
0.,
0.6,
0.,
0.,
0.,
0.3,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
]
)
assert_almost_equal(s_out, s_test, decimal=10)
# Run tl hillslope diffusion component
tl_diff.run_one_step(1.)
# Test results
# flux_out
fo_test = np.array(
[
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.025,
0.,
0.,
0.,
0.0006,
0.,
0.,
0.,
0.0003,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
]
)
fo_out = mg.at_node["sediment__flux_out"]
assert_almost_equal(fo_out, fo_test, decimal=10)
# updated elevation
elev_test = np.array(
[
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
4.975,
1.9,
1.9,
1.9,
1.8994,
1.3,
1.3,
1.3,
1.2997,
1.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
]
)
elev_out = mg.at_node["topographic__elevation"]
assert_almost_equal(elev_out, elev_test, decimal=10)
# Run another time step because deposition and transfer were null
# the first time
fdir.run_one_step()
tl_diff.run_one_step(1.)
# Test results
# flux_out
fo_test = np.array(
[
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
2.47500000e-02,
0.00000000e+00,
0.00000000e+00,
6.00000000e-07,
5.99400000e-04,
0.00000000e+00,
0.00000000e+00,
3.00000000e-07,
2.99700000e-04,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
0.00000000e+00,
]
)
fo_out = mg.at_node["sediment__flux_out"]
assert_almost_equal(fo_out, fo_test, decimal=10)
# updated elevation
elev_test = np.array(
[
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
4.95025,
1.925,
1.9,
1.8999994,
1.8988006,
1.3006,
1.3,
1.2999997,
1.2994003,
1.0003,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
0.,
]
)
elev_out = mg.at_node["topographic__elevation"]
assert_almost_equal(elev_out, elev_test, decimal=10)
|
[
"landlab.components.FlowAccumulator",
"landlab.RasterModelGrid",
"landlab.components.TransportLengthHillslopeDiffuser",
"numpy.array",
"numpy.testing.assert_almost_equal",
"pytest.raises",
"landlab.components.FlowDirectorSteepest"
] |
[((408, 433), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(10, 10)'], {}), '((10, 10))\n', (423, 433), False, 'from landlab import RasterModelGrid\n'), ((535, 575), 'landlab.components.FlowAccumulator', 'FlowAccumulator', (['mg'], {'flow_director': '"""MFD"""'}), "(mg, flow_director='MFD')\n", (550, 575), False, 'from landlab.components import FlowAccumulator, FlowDirectorSteepest, TransportLengthHillslopeDiffuser\n'), ((921, 945), 'landlab.RasterModelGrid', 'RasterModelGrid', (['(3, 12)'], {}), '((3, 12))\n', (936, 945), False, 'from landlab import RasterModelGrid\n'), ((954, 1154), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 1.9,\n 1.9, 1.9, 1.9, 1.3, 1.3, 1.3, 1.3, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 5.0, 1.9, 1.9, 1.9, 1.9, 1.3, 1.3, 1.3, 1.3, 1.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (962, 1154), True, 'import numpy as np\n'), ((1825, 1849), 'landlab.components.FlowDirectorSteepest', 'FlowDirectorSteepest', (['mg'], {}), '(mg)\n', (1845, 1849), False, 'from landlab.components import FlowAccumulator, FlowDirectorSteepest, TransportLengthHillslopeDiffuser\n'), ((1864, 1930), 'landlab.components.TransportLengthHillslopeDiffuser', 'TransportLengthHillslopeDiffuser', (['mg'], {'erodibility': 'k', 'slope_crit': 'Sc'}), '(mg, erodibility=k, slope_crit=Sc)\n', (1896, 1930), False, 'from landlab.components import FlowAccumulator, FlowDirectorSteepest, TransportLengthHillslopeDiffuser\n'), ((2066, 2266), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.1, 0.0,\n 0.0, 0.0, 0.6, 0.0, 0.0, 0.0, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 3.1, 0.0, 0.0, 0.0, 0.6, 0.0, 0.0, 0.0, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (2074, 2266), True, 'import numpy as np\n'), ((2685, 2731), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['s_out', 's_test'], {'decimal': '(10)'}), '(s_out, s_test, decimal=10)\n', (2704, 2731), False, 'from numpy.testing import assert_almost_equal\n'), ((2854, 3061), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.025, \n 0.0, 0.0, 0.0, 0.0006, 0.0, 0.0, 0.0, 0.0003, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.025, 0.0, 0.0, 0.0, 0.0006, 0.0, 0.0, 0.0, 0.0003, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (2862, 3061), True, 'import numpy as np\n'), ((3527, 3575), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['fo_out', 'fo_test'], {'decimal': '(10)'}), '(fo_out, fo_test, decimal=10)\n', (3546, 3575), False, 'from numpy.testing import assert_almost_equal\n'), ((3617, 3824), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.975, \n 1.9, 1.9, 1.9, 1.8994, 1.3, 1.3, 1.3, 1.2997, 1.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 4.975, 1.9, 1.9, 1.9, 1.8994, 1.3, 1.3, 1.3, 1.2997, 1.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (3625, 3824), True, 'import numpy as np\n'), ((4302, 
4354), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['elev_out', 'elev_test'], {'decimal': '(10)'}), '(elev_out, elev_test, decimal=10)\n', (4321, 4354), False, 'from numpy.testing import assert_almost_equal\n'), ((4549, 4769), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.02475, \n 0.0, 0.0, 6e-07, 0.0005994, 0.0, 0.0, 3e-07, 0.0002997, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.02475, 0.0, 0.0, 6e-07, 0.0005994, 0.0, 0.0, 3e-07, 0.0002997, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (4557, 4769), True, 'import numpy as np\n'), ((5643, 5691), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['fo_out', 'fo_test'], {'decimal': '(10)'}), '(fo_out, fo_test, decimal=10)\n', (5662, 5691), False, 'from numpy.testing import assert_almost_equal\n'), ((5733, 5974), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.95025, \n 1.925, 1.9, 1.8999994, 1.8988006, 1.3006, 1.3, 1.2999997, 1.2994003, \n 1.0003, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 4.95025, 1.925, 1.9, 1.8999994, 1.8988006, 1.3006, 1.3, 1.2999997, \n 1.2994003, 1.0003, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0])\n', (5741, 5974), True, 'import numpy as np\n'), ((6447, 6499), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['elev_out', 'elev_test'], {'decimal': '(10)'}), '(elev_out, elev_test, decimal=10)\n', (6466, 6499), False, 'from numpy.testing import assert_almost_equal\n'), ((608, 642), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (621, 642), False, 'import pytest\n'), ((652, 721), 'landlab.components.TransportLengthHillslopeDiffuser', 'TransportLengthHillslopeDiffuser', (['mg'], {'erodibility': '(1.0)', 'slope_crit': '(0.5)'}), '(mg, erodibility=1.0, slope_crit=0.5)\n', (684, 721), False, 'from landlab.components import FlowAccumulator, FlowDirectorSteepest, TransportLengthHillslopeDiffuser\n')]
|
#!/usr/bin/env python3
import logging
import numpy as np
import time
import torch
import cv2
logger = logging.getLogger(__name__)
def retry_load_images(image_paths, retry=10, backend="pytorch"):
"""
    This function loads images, retrying failed loads up to `retry` times.
    Args:
        image_paths (list): paths of images needed to be loaded.
        retry (int, optional): maximum number of retries for a failed load. Defaults to 10.
backend (str): `pytorch` or `cv2`.
Returns:
imgs (list): list of loaded images.
"""
for i in range(retry):
imgs = [cv2.imread(image_path) for image_path in image_paths]
if all(img is not None for img in imgs):
if backend == "pytorch":
imgs = torch.as_tensor(np.stack(imgs))
return imgs
else:
logger.warn("Reading failed. Will retry.")
time.sleep(1.0)
if i == retry - 1:
raise Exception("Failed to load images {}".format(image_paths))
def get_sequence(center_idx, half_len, sample_rate, num_frames):
"""
Sample frames among the corresponding clip.
Args:
center_idx (int): center frame idx for current clip
half_len (int): half of the clip length
sample_rate (int): sampling rate for sampling frames inside of the clip
num_frames (int): number of expected sampled frames
Returns:
seq (list): list of indexes of sampled frames in this clip.
"""
seq = list(range(center_idx - half_len, center_idx + half_len, sample_rate))
for seq_idx in range(len(seq)):
if seq[seq_idx] < 0:
seq[seq_idx] = 0
elif seq[seq_idx] >= num_frames:
seq[seq_idx] = num_frames - 1
return seq
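# Worked example (editor's illustration, hypothetical values):
# get_sequence(center_idx=1, half_len=4, sample_rate=2, num_frames=8) first builds
# range(-3, 5, 2) = [-3, -1, 1, 3]; the clamping loop then maps the negative indices
# to 0, yielding [0, 0, 1, 3].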
def pack_pathway_output(cfg, frames):
"""
Prepare output as a list of tensors. Each tensor corresponding to a
unique pathway.
Args:
frames (tensor): frames of images sampled from the video. The
dimension is `channel` x `num frames` x `height` x `width`.
Returns:
frame_list (list): list of tensors with the dimension of
`channel` x `num frames` x `height` x `width`.
"""
# if cfg.MODEL.ARCH in cfg.MODEL.SINGLE_PATHWAY_ARCH:
# frame_list = [frames]
if cfg.MODEL.ARCH in cfg.MODEL.MULTI_PATHWAY_ARCH:
fast_pathway = frames
# Perform temporal sampling from the fast pathway.
slow_pathway = torch.index_select(
frames,
1,
torch.linspace(
0, frames.shape[1] - 1, frames.shape[1] // cfg.SLOWFAST.ALPHA
).long(),
)
frame_list = [slow_pathway, fast_pathway]
else:
frame_list = [frames]
# raise NotImplementedError(
# "Model arch {} is not in {}".format(
# cfg.MODEL.ARCH,
# cfg.MODEL.SINGLE_PATHWAY_ARCH + cfg.MODEL.MULTI_PATHWAY_ARCH,
# )
# )
return frame_list
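# Illustrative note (editor's sketch, assuming a SlowFast config with cfg.SLOWFAST.ALPHA = 4):
# for an input of shape (channel, 32, height, width) the fast pathway keeps all 32 frames,
# while torch.index_select with torch.linspace(0, 31, 32 // 4).long() keeps 8 evenly spaced
# frames for the slow pathway.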
|
[
"logging.getLogger",
"time.sleep",
"numpy.stack",
"cv2.imread",
"torch.linspace"
] |
[((105, 132), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (122, 132), False, 'import logging\n'), ((595, 617), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (605, 617), False, 'import cv2\n'), ((896, 911), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (906, 911), False, 'import time\n'), ((775, 789), 'numpy.stack', 'np.stack', (['imgs'], {}), '(imgs)\n', (783, 789), True, 'import numpy as np\n'), ((2522, 2599), 'torch.linspace', 'torch.linspace', (['(0)', '(frames.shape[1] - 1)', '(frames.shape[1] // cfg.SLOWFAST.ALPHA)'], {}), '(0, frames.shape[1] - 1, frames.shape[1] // cfg.SLOWFAST.ALPHA)\n', (2536, 2599), False, 'import torch\n')]
|
"""Shelving Filter Cascade with Adjustable Transition Slope and Bandwidth
<NAME>, <NAME>, <NAME>
In: Proc. of 148th AES Convention, Virtual Vienna, May 2020, Paper 10339
http://www.aes.org/e-lib/browse.cfm?elib=20756
"""
import numpy as np
from scipy.signal import tf2sos, freqs
from matplotlib import rcParams
def halfpadloss_shelving_filter_num_den_coeff(G):
"""Half-pad-loss polynomial coefficients for 1st/2nd order shelving filter.
- see type III in
long-url: https://github.com/spatialaudio/digital-signal-processing-lecture/blob/master/filter_desig/audiofilter.ipynb # noqa
- see Sec. 3.2 in https://doi.org/10.3390/app6050129
"""
sign = np.sign(G) # amplify/boost (1) or attenuate/cut (-1)
g = 10**(np.abs(G) / 20) # linear gain
n1, n2 = g**(sign / 4), g**(sign / 2) # numerator coeff
d1, d2 = 1 / n1, 1 / n2 # denominator coeff
return n1, n2, d1, d2
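# Worked example (editor's illustration): for a boost of G = 12 dB, sign = +1 and
# g = 10**(12 / 20) ~ 3.98, so n1 = g**0.25 ~ 1.41, n2 = g**0.5 ~ 2.00, and the
# denominator coefficients are their reciprocals, d1 ~ 0.71 and d2 ~ 0.50.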
def normalized_low_shelving_1st_coeff(G=-10*np.log10(2)):
"""See low_shelving_1st_coeff() for omega=1."""
n1, n2, d1, d2 = halfpadloss_shelving_filter_num_den_coeff(G)
b, a = np.array([0, 1, n2]), np.array([0, 1, d2])
return b, a
def low_shelving_1st_coeff(omega=1, G=-10*np.log10(2)):
"""Half-pad-loss/mid-level low shelving filter 1st order.
Parameters
----------
omega : angular frequency in rad/s at half-pad-loss/mid-level
G : level in dB (G/2 at omega)
Returns
-------
b[0] s^2 + b[1] s^1 + b[2] s^0
b,a : coefficients for Laplace H(s) = ------------------------------
a[0] s^2 + a[1] s^1 + a[2] s^0
with s = j omega, note: b[0]=a[0]=0 here for 1st order filter
see halfpadloss_shelving_filter_num_den_coeff() for references
"""
b, a = normalized_low_shelving_1st_coeff(G=G)
scale = omega**np.arange(-2., 1.) # powers in the Laplace domain
return b * scale, a * scale
def normalized_high_shelving_1st_coeff(G=-10*np.log10(2)):
"""See high_shelving_1st_coeff() for omega=1."""
n1, n2, d1, d2 = halfpadloss_shelving_filter_num_den_coeff(G)
b, a = np.array([0, n2, 1]), np.array([0, d2, 1])
return b, a
def high_shelving_1st_coeff(omega=1, G=-10*np.log10(2)):
"""Half-pad-loss/mid-level high shelving filter 1st order.
Parameters
----------
omega : angular frequency in rad/s at half-pad-loss/mid-level
G : level in dB (G/2 at omega)
Returns
-------
b[0] s^2 + b[1] s^1 + b[2] s^0
b,a : coefficients for Laplace H(s) = ------------------------------
a[0] s^2 + a[1] s^1 + a[2] s^0
with s = j omega, note: b[0]=a[0]=0 here for 1st order filter
see halfpadloss_shelving_filter_num_den_coeff() for references
"""
b, a = normalized_high_shelving_1st_coeff(G=G)
scale = omega**np.arange(-2., 1.) # powers in the Laplace domain
return b * scale, a * scale
def normalized_low_shelving_2nd_coeff(G=-10*np.log10(2), Q=1/np.sqrt(2)):
"""See low_shelving_2nd_coeff() for omega=1."""
n1, n2, d1, d2 = halfpadloss_shelving_filter_num_den_coeff(G)
b, a = np.array([1, n1 / Q, n2]), np.array([1, d1 / Q, d2])
return b, a
def low_shelving_2nd_coeff(omega=1, G=-10*np.log10(2), Q=1/np.sqrt(2)):
"""Half-pad-loss/mid-level low shelving filter 2nd order.
Parameters
----------
omega : angular frequency in rad/s at half-pad-loss/mid-level
G : level in dB (G/2 at omega)
Q : pole/zero quality, Q>0.5
Returns
-------
b[0] s^2 + b[1] s^1 + b[2] s^0
b,a : coefficients for Laplace H(s) = ------------------------------
a[0] s^2 + a[1] s^1 + a[2] s^0
with s = j omega
see halfpadloss_shelving_filter_num_den_coeff() for references
"""
b, a = normalized_low_shelving_2nd_coeff(G=G, Q=Q)
scale = omega**np.arange(-2., 1.) # powers in the Laplace domain
return b * scale, a * scale
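# Verification sketch (editor's addition, assumed values): the half-pad-loss property can be
# checked numerically, e.g.
#     b, a = low_shelving_2nd_coeff(omega=2 * np.pi * 100, G=-6)
#     _, h = freqs(b, a, worN=[2 * np.pi * 100])
# where db(h) is expected to be G/2 = -3 dB at the half-pad-loss frequency.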
def normalized_high_shelving_2nd_coeff(G=-10*np.log10(2), Q=1/np.sqrt(2)):
"""See high_shelving_2nd_coeff() for omega=1."""
n1, n2, d1, d2 = halfpadloss_shelving_filter_num_den_coeff(G)
b, a = np.array([n2, n1 / Q, 1]), np.array([d2, d1 / Q, 1])
return b, a
def high_shelving_2nd_coeff(omega=1, G=-10*np.log10(2), Q=1/np.sqrt(2)):
"""Half-pad-loss/mid-level high shelving filter 2nd order.
Parameters
----------
omega : angular frequency in rad/s at half-pad-loss/mid-level
G : level in dB (G/2 at omega)
Q : pole/zero quality, Q>0.5
Returns
-------
b[0] s^2 + b[1] s^1 + b[2] s^0
b,a : coefficients for Laplace H(s) = ------------------------------
a[0] s^2 + a[1] s^1 + a[2] s^0
with s = j omega
see halfpadloss_shelving_filter_num_den_coeff() for references
"""
b, a = normalized_high_shelving_2nd_coeff(G=G, Q=Q)
scale = omega**np.arange(-2., 1.) # powers in the Laplace domain
return b * scale, a * scale
def db(x, *, power=False):
"""Convert *x* to decibel.
Parameters
----------
x : array_like
Input data. Values of 0 lead to negative infinity.
power : bool, optional
If ``power=False`` (the default), *x* is squared before
conversion.
"""
with np.errstate(divide='ignore'):
return (10 if power else 20) * np.log10(np.abs(x))
def db2lin(x):
return 10**(x / 20)
def shelving_slope_parameters(slope=None, BWd=None, Gd=None):
"""Compute the third parameter from the given two.
Parameters
----------
slope : float, optional
Desired shelving slope in decibel per octave.
BW : float, optional
Desired bandwidth of the slope in octave.
G : float, optional
Desired gain of the stop band in decibel.
"""
if slope == 0:
raise ValueError("`slope` should be nonzero.")
if slope and BWd is not None:
Gd = -BWd * slope
elif BWd and Gd is not None:
slope = -Gd / BWd
elif Gd and slope is not None:
        if Gd * slope > 0:
raise ValueError("`Gd` and `slope` cannot have the same sign.")
else:
BWd = np.abs(Gd / slope)
else:
        print('At least two parameters need to be specified.')
return slope, BWd, Gd
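# Worked example (editor's illustration): shelving_slope_parameters(BWd=2, Gd=-12) yields
# slope = -Gd / BWd = 6 dB per octave, i.e. a 12 dB cut spread over a 2 octave wide slope.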
def shelving_filter_parameters(biquad_per_octave, **kwargs):
"""Parameters for shelving filter design.
Parameters
----------
biquad_per_octave : float
Number of biquad filters per octave.
Returns
-------
num_biquad : int
Number of biquad filters.
Gb : float
Gain of each biquad filter in decibel.
G : float
Gain of overall (concatenated) filters in decibel. This might differ
from what is returned by `shelving_parameters`.
"""
slope, BWd, Gd = shelving_slope_parameters(**kwargs)
num_biquad = int(np.ceil(BWd * biquad_per_octave))
Gb = -slope / biquad_per_octave
G = Gb * num_biquad
return num_biquad, Gb, G
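# Worked example (editor's illustration): shelving_filter_parameters(biquad_per_octave=3,
# slope=10, BWd=3) implies Gd = -30 dB and returns num_biquad = 9, Gb = -10/3 dB per biquad,
# and an overall gain of G = 9 * (-10/3) = -30 dB.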
def check_shelving_filter_validity(biquad_per_octave, **kwargs):
"""Level, slope, bandwidth validity for shelving filter cascade.
Parameters
----------
biquad_per_octave : float
Number of biquad filters per octave.
see shelving_slope_parameters(), shelving_filter_parameters()
Returns
-------
flag = [Boolean, Boolean, Boolean]
if all True then intended parameter triplet holds, if not all True
deviations from desired response occur
"""
flag = [True, True, True]
slope, BWd, Gd = shelving_slope_parameters(**kwargs)
num_biquad, Gb, G = shelving_filter_parameters(biquad_per_octave, **kwargs)
# BWd < 1 octave generally fails
if BWd <= 1:
flag[0] = False
# BWd * biquad_per_octave needs to be integer
flag[1] = float(BWd * biquad_per_octave).is_integer()
# biquad_per_octave must be large enough
    # for slope < 12.04 dB at least one biquad per octave is required
tmp = slope / (20*np.log10(4))
if tmp > 1.:
if biquad_per_octave < tmp:
flag[2] = False
else:
if biquad_per_octave < 1:
flag[2] = False
return flag
def low_shelving_1st_cascade(w0, Gb, num_biquad, biquad_per_octave):
"""Low shelving filter design using cascaded biquad filters.
- see low_shelving_2nd_cascade()
- under construction for code improvement
"""
sos = np.zeros((num_biquad, 6))
for m in range(num_biquad):
wm = w0 * 2**(-(m + 0.5) / biquad_per_octave)
b, a = low_shelving_1st_coeff(omega=wm, G=Gb)
sos[m] = tf2sos(b, a)
return sos
def high_shelving_1st_cascade(w0, Gb, num_biquad, biquad_per_octave):
"""High shelving filter design using cascaded biquad filters.
- see low_shelving_2nd_cascade()
- under construction for code improvement
"""
sos = np.zeros((num_biquad, 6))
for m in range(num_biquad):
wm = w0 * 2**(-(m + 0.5) / biquad_per_octave)
b, a = high_shelving_1st_coeff(omega=wm, G=Gb)
sos[m] = tf2sos(b, a)
return sos
def low_shelving_2nd_cascade(w0, Gb, num_biquad, biquad_per_octave,
Q=1/np.sqrt(2)):
"""Low shelving filter design using cascaded biquad filters.
Parameters
----------
w0 : float
Cut-off frequency in radian per second.
Gb : float
Gain of each biquad filter in decibel.
num_biquad : int
Number of biquad filters.
Q : float, optional
Quality factor of each biquad filter.
Returns
-------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
"""
sos = np.zeros((num_biquad, 6))
for m in range(num_biquad):
wm = w0 * 2**(-(m + 0.5) / biquad_per_octave)
b, a = low_shelving_2nd_coeff(omega=wm, G=Gb, Q=Q)
sos[m] = tf2sos(b, a)
return sos
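# Usage sketch (editor's addition, hypothetical parameter values):
#     num_biquad, Gb, G = shelving_filter_parameters(biquad_per_octave=3, slope=10, BWd=3)
#     sos = low_shelving_2nd_cascade(w0=2 * np.pi * 1000, Gb=Gb, num_biquad=num_biquad,
#                                    biquad_per_octave=3)
#     w, h = sosfreqs(sos, worN=np.logspace(2, 6, 1000))  # magnitude in dB: db(h)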
def high_shelving_2nd_cascade(w0, Gb, num_biquad, biquad_per_octave,
Q=1/np.sqrt(2)):
"""High shelving filter design using cascaded biquad filters.
- see low_shelving_2nd_cascade()
- under construction for code improvement
"""
sos = np.zeros((num_biquad, 6))
for m in range(num_biquad):
wm = w0 * 2**(-(m + 0.5) / biquad_per_octave)
b, a = high_shelving_2nd_coeff(omega=wm, G=Gb, Q=Q)
sos[m] = tf2sos(b, a)
return sos
def sosfreqs(sos, worN=200, plot=None):
"""Compute the frequency response of an analog filter in SOS format.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
worN : {None, int, array_like}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
"""
h = 1.
for row in sos:
w, rowh = freqs(row[:3], row[3:], worN=worN, plot=plot)
h *= rowh
return w, h
def matchedz_zpk(s_zeros, s_poles, s_gain, fs):
"""Matched-z transform of poles and zeros.
Parameters
----------
s_zeros : array_like
Zeros in the Laplace domain.
s_poles : array_like
Poles in the Laplace domain.
s_gain : float
System gain in the Laplace domain.
fs : int
Sampling frequency in Hertz.
Returns
-------
z_zeros : numpy.ndarray
Zeros in the z-domain.
z_poles : numpy.ndarray
Poles in the z-domain.
z_gain : float
System gain in the z-domain.
See Also
--------
:func:`scipy.signal.bilinear_zpk`
"""
z_zeros = np.exp(s_zeros / fs)
z_poles = np.exp(s_poles / fs)
omega = 1j * np.pi * fs
s_gain *= np.prod((omega - s_zeros) / (omega - s_poles)
* (-1 - z_poles) / (-1 - z_zeros))
return z_zeros, z_poles, np.abs(s_gain)
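# Worked sketch (illustrative values): a single analog zero at s = -100 rad/s and
# a single pole at s = -1000 rad/s, sampled at fs = 48000 Hz, map to
# z_zero = exp(-100 / 48000) ≈ 0.9979 and z_pole = exp(-1000 / 48000) ≈ 0.9794;
# the gain is then rescaled (via omega = 1j * pi * fs above) so that the analog
# and digital responses agree at the Nyquist frequency.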
def nearest_value(x0, x, f):
"""Plot helping."""
return f[np.abs(x - x0).argmin()]
def set_rcparams():
"""Plot helping."""
rcParams['axes.linewidth'] = 0.5
rcParams['axes.edgecolor'] = 'black'
rcParams['axes.facecolor'] = 'None'
rcParams['axes.labelcolor'] = 'black'
rcParams['xtick.color'] = 'black'
rcParams['ytick.color'] = 'black'
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 13
rcParams['text.usetex'] = True
    rcParams['text.latex.preamble'] = r'\usepackage{amsmath} \usepackage{gensymb}'
rcParams['legend.title_fontsize'] = 10
def set_outdir():
"""Plot helping."""
return '../graphics/'
def interaction_matrix_sge(G_proto, gain_factor, w_command, w_control,
bandwidth):
"""
Parameters
----------
G_proto: array_like
Prototype gain in decibel.
gain_factor: float
Gain factor.
w_command: array_like
Normalized command frequencies.
w_control: array_like
Normalized control frequencies.
bandwidth: array_like
        Bandwidths.
    Returns
    -------
    leak: array_like (num_command, num_control)
        Normalized gain leakage of each command band at the control frequencies.
    """
num_command = len(w_command)
num_control = len(w_control)
leak = np.zeros((num_command, num_control))
G_bandwidth = gain_factor * G_proto
g_proto = db2lin(G_proto)
g_bandwidth = db2lin(G_bandwidth)
z1 = np.exp(-1j * w_control)
z2 = z1**2
poly = np.zeros((num_command, 3))
poly[6] = 0.000321, 0.00474, 0.00544
poly[7] = 0.00108, 0.0221, 0.0169
poly[8] = 0.00184, 0.125, 0.0212
poly[9] = -0.00751, 0.730, -0.0672
for m, (Gp, gp, p, gb, wc, bw) in enumerate(
zip(G_proto, g_proto, poly, g_bandwidth, w_command, bandwidth)):
G_nyquist = np.sign(Gp) * np.polyval(p, np.abs(Gp))
gn = db2lin(G_nyquist)
gp2 = gp**2
gb2 = gb**2
gn2 = gn**2
F = np.abs(gp2 - gb2)
G00 = np.abs(gp2 - 1)
F00 = np.abs(gb2 - 1)
G01 = np.abs(gp2 - gn)
G11 = np.abs(gp2 - gn2)
F01 = np.abs(gb2 - gn)
F11 = np.abs(gb2 - gn2)
W2 = np.sqrt(G11 / G00) * np.tan(wc / 2)**2
DW = (1 + np.sqrt(F00 / F11) * W2) * np.tan(bw / 2)
C = F11 * DW**2 - 2 * W2 * (F01 - np.sqrt(F00 * F11))
D = 2 * W2 * (G01 - np.sqrt(G00 * G11))
A = np.sqrt((C + D) / F)
B = np.sqrt((gp2 * C + gb2 * D) / F)
num = np.array([gn+W2+B, -2*(gn-W2), (gn-B+W2)]) / (1+W2+A)
den = np.array([1, -2*(1-W2)/(1+W2+A), (1+W2-A)/(1+W2+A)])
H = (num[0] + num[1]*z1 + num[2]*z2)\
/ (den[0] + den[1]*z1 + den[2]*z2)
G = db(H) / Gp
leak[m] = np.abs(G)
return leak
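# Note: leak[m, n] measures how strongly command band m "leaks" into control
# frequency n (|db(H)| normalized by the prototype gain); optimized_peq_seg()
# below uses this matrix as B in a weighted least-squares fit of the command
# gains. The hard-coded correction polynomials poly[6] .. poly[9] imply at
# least ten command bands (num_command >= 10).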
def peq_seg(g_ref, g_nyquist, g, g_bandwidth, w_command, bandwidth):
"""
Parameters
----------
g_ref: float
Reference linear gain.
g_nyquist: float
Nyquist linear gain.
    g: float
        Linear peak gain at the command frequency.
    g_bandwidth: float
        Linear gain at the band edges (bandwidth gain).
    w_command: float
        Normalized command frequency.
    bandwidth: float
        Bandwidth.
    Returns
    -------
    b, a: numpy.ndarray
        Numerator and denominator coefficients of the biquad section.
    """
g2 = g**2
gb2 = g_bandwidth**2
gr2 = g_ref**2
gn2 = g_nyquist**2
grn = g_ref * g_nyquist
F = np.abs(g2 - gb2)
G00 = np.abs(g2 - gr2)
F00 = np.abs(gb2 - gr2)
G01 = np.abs(g2 - grn)
G11 = np.abs(g2 - gn2)
F01 = np.abs(gb2 - grn)
F11 = np.abs(gb2 - gn2)
W2 = np.sqrt(G11 / G00) * np.tan(w_command / 2)**2
DW = (1 + np.sqrt(F00 / F11) * W2) * np.tan(bandwidth / 2)
C = F11 * DW**2 - 2 * W2 * (F01 - np.sqrt(F00 * F11))
D = 2 * W2 * (G01 - np.sqrt(G00 * G11))
A = np.sqrt((C + D) / F)
B = np.sqrt((g**2 * C + g_bandwidth**2 * D) / F)
b = np.array([(g_nyquist + g_ref * W2 + B),
-2*(g_nyquist - g_ref * W2),
(g_nyquist - B + g_ref * W2)]) / (1 + W2 + A)
a = np.array([1, -2*(1 - W2) / (1 + W2 + A), (1 + W2 - A) / (1 + W2 + A)])
return b, a
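# Minimal usage sketch (gains and frequencies are assumptions for illustration):
# one digital parametric-EQ section with a +6 dB peak at the normalized command
# frequency pi/4, a +3 dB bandwidth gain and a bandwidth of pi/8, with unit
# reference and Nyquist gains.
def _demo_peq_seg():
    g = db2lin(6.0)                      # linear peak gain
    g_bandwidth = db2lin(3.0)            # linear gain at the band edges
    return peq_seg(1.0, 1.0, g, g_bandwidth, np.pi / 4, np.pi / 8)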
def optimized_peq_seg(gain_command, gain_proto, gain_factor, w_command,
w_control, bandwidth):
"""
Parameters
----------
gain_command: array_like
Command gain in decibel.
gain_proto: array_like
Prototype gain in decibel.
gain_factor: float
Gain factor.
w_command: array_like
Normalized command frequencies.
w_control: array_like
Normalized control frequencies.
bandwidth: array_like
Bandwidths.
Returns
-------
b_opt: array_like (N, 3)
Moving average coefficients.
a_opt: array_like (N, 3)
Autoregressive (recursive) coefficients.
"""
num_command = len(gain_command)
# symmetric GEG design
gain_control = np.zeros(2 * num_command - 1)
gain_control[::2] = gain_command
gain_control[1::2] = 0.5 * (gain_command[:-1] + gain_command[1:])
# interaction matrix "B"
B = interaction_matrix_sge(gain_proto, gain_factor,
w_command, w_control, bandwidth)
gain2 = np.zeros((2 * num_command - 1, 1))
gain2[::2, 0] = gain_command
gain2[1::2, 0] = 0.5 * (gain_command[:-1] + gain_command[1:])
# band weights
weights = np.ones(2 * num_command - 1)
weights[1::2] *= 0.5
W = np.diag(weights)
gain_opt =\
np.matmul(np.linalg.inv(np.linalg.multi_dot([B, W, np.transpose(B)])),
np.linalg.multi_dot([B, W, gain2]))
gain_opt_bandwidth = gain_factor * gain_opt
gain_opt = np.squeeze(gain_opt)
gain_opt_bandwidth = np.squeeze(gain_opt_bandwidth)
g_opt = db2lin(gain_opt)
g_opt_bandwidth = db2lin(gain_opt_bandwidth)
poly = np.zeros((num_command, 3))
poly[6] = 0.000321, 0.00474, 0.00544
poly[7] = 0.00108, 0.0221, 0.0169
poly[8] = 0.00184, 0.125, 0.0212
poly[9] = -0.00751, 0.730, -0.0672
b_opt = np.zeros((3, num_command))
a_opt = np.zeros((3, num_command))
for m, (Go, go, gob, wc, bw, p) in enumerate(
zip(gain_opt, g_opt, g_opt_bandwidth, w_command, bandwidth, poly)):
gain_nyquist = np.sign(Go) * np.polyval(p, np.abs(Go))
b, a = peq_seg(1, db2lin(gain_nyquist), go, gob, wc, bw)
b_opt[:, m] = b
a_opt[:, m] = a
return b_opt, a_opt
def fracorder_lowshelving_eastty(w1, w2, G1, G2, rB=None):
"""
Parameters
----------
w1: float
Lower corner frequency.
w2: float
Upper corner frequency.
G1: float
Target level at lower corner frequency in dB.
G2: float
Target level at upper corner frequency in dB.
rB: float
Gain per octave.
Returns
-------
z: array_like
Complex zeros in the Laplace domain.
p: array_like
Complex poles in the Laplace domain.
k: float
Gain.
"""
Gd = G1 - G2
n_eff = effective_order(w1, w2, Gd, rB)
n_int, n_frac = np.divmod(n_eff, 1)
n_int = int(n_int)
z = np.array([])
p = np.array([])
# Second-order sections (complex conjugate pole/zero pairs)
if n_int > 0:
alpha = complex_zp_angles(n_int, n_frac)
alpha = np.concatenate((alpha, -alpha))
z = w1 * np.exp(1j * alpha)
p = w2 * np.exp(1j * alpha)
# First-order section (real pole/zero)
if n_eff % 2 != 0:
s_lower, s_upper = real_zp(n_int, n_frac, w1, w2)
if n_int % 2 == 0:
z_real = s_lower
p_real = s_upper
elif n_int % 2 == 1:
z_real = s_upper
p_real = s_lower
z = np.append(z, z_real)
p = np.append(p, p_real)
return z, p, 1
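# Usage sketch (corner frequencies and levels are illustrative assumptions):
# design a fractional-order low shelf between 2*pi*100 and 2*pi*1000 rad/s with
# a 6 dB level difference and map the analog zeros/poles to the z-domain with
# matchedz_zpk(), defined above.
def _demo_fracorder_lowshelf(fs=48000):
    w1, w2 = 2 * np.pi * 100, 2 * np.pi * 1000
    z, p, k = fracorder_lowshelving_eastty(w1, w2, G1=6.0, G2=0.0)
    return matchedz_zpk(z, p, k, fs)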
def effective_order(w1, w2, Gd, rB=None):
"""Effective order of shelving filter.
Parameters
----------
w1: float
Lower corner frequency.
w2: float
Upper corner frequency.
Gd: float
Target level difference in dB.
rB: float
Gain per octave.
"""
if rB is None:
rB = db(2) * np.sign(Gd) # Butterworth
return Gd / rB / np.log2(w2/w1)
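# Worked example (illustrative numbers): with the Butterworth default
# rB = db(2) * sign(Gd) ≈ ±6.02 dB per octave, a level difference Gd = 12.04 dB
# across two octaves (w2 / w1 = 4) gives n_eff = 12.04 / 6.02 / log2(4) = 1.0,
# i.e. a first-order shelf realizes the target exactly.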
def complex_zp_angles(n_int, n_frac):
"""Polar angles of the complex conjugate zeros/poles.
These correspond to the second-order section filters.
Parameters
----------
n_int: int
        Integer order.
n_frac: float
Fractional order [0, 1).
"""
# linear interpolation of angles
num_zp_pair = int(n_int+1) // 2
return np.pi/2 * np.stack([
(1-n_frac) * (1 + (2*m+1)/n_int)
+ n_frac * (1 + (2*m+1)/(n_int+1))
for m in range(num_zp_pair)])
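# Worked example (illustrative): for n_int = 2, n_frac = 0 the single conjugate
# pair sits at pi/2 * (1 + 1/2) = 3*pi/4, the usual second-order Butterworth
# angle; a nonzero n_frac linearly blends towards the corresponding third-order
# angle pi/2 * (1 + 1/3) = 2*pi/3.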
def real_zp(n_int, n_frac, w_lower, w_upper):
"""Real-valued zero and pole.
These correspond to the first-order section filters.
Parameters
----------
n_int: int
Integer order
n_frac: float
Fractional order [0, 1).
w_lower: float
Lower corner frequency.
w_upper: float
Upper corner frequency.
Returns
-------
s_lower: float
Smaller real-valued zero or pole.
s_upper: float
Larger real-valued zero or pole.
"""
w_mean = np.sqrt(w_lower * w_upper)
ratio = (w_upper / w_lower)
# logarithmic interpolation of zero/pole radius
if n_int % 2 == 0: # even
s_lower = -w_mean * ratio**(-n_frac/2)
s_upper = -w_mean * ratio**(n_frac/2)
elif n_int % 2 == 1: # odd
s_lower = -w_lower * ratio**(n_frac/2)
s_upper = -w_upper * ratio**(-n_frac/2)
return s_lower, s_upper
|
[
"numpy.prod",
"numpy.log10",
"numpy.sqrt",
"numpy.linalg.multi_dot",
"numpy.array",
"scipy.signal.freqs",
"numpy.arange",
"numpy.divmod",
"numpy.exp",
"numpy.concatenate",
"numpy.abs",
"numpy.ceil",
"scipy.signal.tf2sos",
"numpy.ones",
"numpy.squeeze",
"numpy.sign",
"numpy.log2",
"numpy.transpose",
"numpy.tan",
"numpy.diag",
"numpy.append",
"numpy.errstate",
"numpy.zeros"
] |
[((676, 686), 'numpy.sign', 'np.sign', (['G'], {}), '(G)\n', (683, 686), True, 'import numpy as np\n'), ((8566, 8591), 'numpy.zeros', 'np.zeros', (['(num_biquad, 6)'], {}), '((num_biquad, 6))\n', (8574, 8591), True, 'import numpy as np\n'), ((9018, 9043), 'numpy.zeros', 'np.zeros', (['(num_biquad, 6)'], {}), '((num_biquad, 6))\n', (9026, 9043), True, 'import numpy as np\n'), ((10050, 10075), 'numpy.zeros', 'np.zeros', (['(num_biquad, 6)'], {}), '((num_biquad, 6))\n', (10058, 10075), True, 'import numpy as np\n'), ((10553, 10578), 'numpy.zeros', 'np.zeros', (['(num_biquad, 6)'], {}), '((num_biquad, 6))\n', (10561, 10578), True, 'import numpy as np\n'), ((12738, 12758), 'numpy.exp', 'np.exp', (['(s_zeros / fs)'], {}), '(s_zeros / fs)\n', (12744, 12758), True, 'import numpy as np\n'), ((12773, 12793), 'numpy.exp', 'np.exp', (['(s_poles / fs)'], {}), '(s_poles / fs)\n', (12779, 12793), True, 'import numpy as np\n'), ((12836, 12921), 'numpy.prod', 'np.prod', (['((omega - s_zeros) / (omega - s_poles) * (-1 - z_poles) / (-1 - z_zeros))'], {}), '((omega - s_zeros) / (omega - s_poles) * (-1 - z_poles) / (-1 - z_zeros)\n )\n', (12843, 12921), True, 'import numpy as np\n'), ((14214, 14250), 'numpy.zeros', 'np.zeros', (['(num_command, num_control)'], {}), '((num_command, num_control))\n', (14222, 14250), True, 'import numpy as np\n'), ((14369, 14394), 'numpy.exp', 'np.exp', (['(-1.0j * w_control)'], {}), '(-1.0j * w_control)\n', (14375, 14394), True, 'import numpy as np\n'), ((14420, 14446), 'numpy.zeros', 'np.zeros', (['(num_command, 3)'], {}), '((num_command, 3))\n', (14428, 14446), True, 'import numpy as np\n'), ((16185, 16201), 'numpy.abs', 'np.abs', (['(g2 - gb2)'], {}), '(g2 - gb2)\n', (16191, 16201), True, 'import numpy as np\n'), ((16212, 16228), 'numpy.abs', 'np.abs', (['(g2 - gr2)'], {}), '(g2 - gr2)\n', (16218, 16228), True, 'import numpy as np\n'), ((16239, 16256), 'numpy.abs', 'np.abs', (['(gb2 - gr2)'], {}), '(gb2 - gr2)\n', (16245, 16256), True, 'import numpy as np\n'), ((16268, 16284), 'numpy.abs', 'np.abs', (['(g2 - grn)'], {}), '(g2 - grn)\n', (16274, 16284), True, 'import numpy as np\n'), ((16295, 16311), 'numpy.abs', 'np.abs', (['(g2 - gn2)'], {}), '(g2 - gn2)\n', (16301, 16311), True, 'import numpy as np\n'), ((16322, 16339), 'numpy.abs', 'np.abs', (['(gb2 - grn)'], {}), '(gb2 - grn)\n', (16328, 16339), True, 'import numpy as np\n'), ((16350, 16367), 'numpy.abs', 'np.abs', (['(gb2 - gn2)'], {}), '(gb2 - gn2)\n', (16356, 16367), True, 'import numpy as np\n'), ((16599, 16619), 'numpy.sqrt', 'np.sqrt', (['((C + D) / F)'], {}), '((C + D) / F)\n', (16606, 16619), True, 'import numpy as np\n'), ((16628, 16676), 'numpy.sqrt', 'np.sqrt', (['((g ** 2 * C + g_bandwidth ** 2 * D) / F)'], {}), '((g ** 2 * C + g_bandwidth ** 2 * D) / F)\n', (16635, 16676), True, 'import numpy as np\n'), ((16841, 16913), 'numpy.array', 'np.array', (['[1, -2 * (1 - W2) / (1 + W2 + A), (1 + W2 - A) / (1 + W2 + A)]'], {}), '([1, -2 * (1 - W2) / (1 + W2 + A), (1 + W2 - A) / (1 + W2 + A)])\n', (16849, 16913), True, 'import numpy as np\n'), ((17691, 17720), 'numpy.zeros', 'np.zeros', (['(2 * num_command - 1)'], {}), '(2 * num_command - 1)\n', (17699, 17720), True, 'import numpy as np\n'), ((17991, 18025), 'numpy.zeros', 'np.zeros', (['(2 * num_command - 1, 1)'], {}), '((2 * num_command - 1, 1))\n', (17999, 18025), True, 'import numpy as np\n'), ((18159, 18187), 'numpy.ones', 'np.ones', (['(2 * num_command - 1)'], {}), '(2 * num_command - 1)\n', (18166, 18187), True, 'import numpy as np\n'), ((18221, 18237), 
'numpy.diag', 'np.diag', (['weights'], {}), '(weights)\n', (18228, 18237), True, 'import numpy as np\n'), ((18452, 18472), 'numpy.squeeze', 'np.squeeze', (['gain_opt'], {}), '(gain_opt)\n', (18462, 18472), True, 'import numpy as np\n'), ((18498, 18528), 'numpy.squeeze', 'np.squeeze', (['gain_opt_bandwidth'], {}), '(gain_opt_bandwidth)\n', (18508, 18528), True, 'import numpy as np\n'), ((18620, 18646), 'numpy.zeros', 'np.zeros', (['(num_command, 3)'], {}), '((num_command, 3))\n', (18628, 18646), True, 'import numpy as np\n'), ((18815, 18841), 'numpy.zeros', 'np.zeros', (['(3, num_command)'], {}), '((3, num_command))\n', (18823, 18841), True, 'import numpy as np\n'), ((18854, 18880), 'numpy.zeros', 'np.zeros', (['(3, num_command)'], {}), '((3, num_command))\n', (18862, 18880), True, 'import numpy as np\n'), ((19844, 19863), 'numpy.divmod', 'np.divmod', (['n_eff', '(1)'], {}), '(n_eff, 1)\n', (19853, 19863), True, 'import numpy as np\n'), ((19895, 19907), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (19903, 19907), True, 'import numpy as np\n'), ((19916, 19928), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (19924, 19928), True, 'import numpy as np\n'), ((22027, 22053), 'numpy.sqrt', 'np.sqrt', (['(w_lower * w_upper)'], {}), '(w_lower * w_upper)\n', (22034, 22053), True, 'import numpy as np\n'), ((956, 967), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (964, 967), True, 'import numpy as np\n'), ((1099, 1119), 'numpy.array', 'np.array', (['[0, 1, n2]'], {}), '([0, 1, n2])\n', (1107, 1119), True, 'import numpy as np\n'), ((1121, 1141), 'numpy.array', 'np.array', (['[0, 1, d2]'], {}), '([0, 1, d2])\n', (1129, 1141), True, 'import numpy as np\n'), ((1202, 1213), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (1210, 1213), True, 'import numpy as np\n'), ((1865, 1885), 'numpy.arange', 'np.arange', (['(-2.0)', '(1.0)'], {}), '(-2.0, 1.0)\n', (1874, 1885), True, 'import numpy as np\n'), ((1995, 2006), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (2003, 2006), True, 'import numpy as np\n'), ((2139, 2159), 'numpy.array', 'np.array', (['[0, n2, 1]'], {}), '([0, n2, 1])\n', (2147, 2159), True, 'import numpy as np\n'), ((2161, 2181), 'numpy.array', 'np.array', (['[0, d2, 1]'], {}), '([0, d2, 1])\n', (2169, 2181), True, 'import numpy as np\n'), ((2243, 2254), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (2251, 2254), True, 'import numpy as np\n'), ((2908, 2928), 'numpy.arange', 'np.arange', (['(-2.0)', '(1.0)'], {}), '(-2.0, 1.0)\n', (2917, 2928), True, 'import numpy as np\n'), ((3037, 3048), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (3045, 3048), True, 'import numpy as np\n'), ((3054, 3064), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3061, 3064), True, 'import numpy as np\n'), ((3196, 3221), 'numpy.array', 'np.array', (['[1, n1 / Q, n2]'], {}), '([1, n1 / Q, n2])\n', (3204, 3221), True, 'import numpy as np\n'), ((3223, 3248), 'numpy.array', 'np.array', (['[1, d1 / Q, d2]'], {}), '([1, d1 / Q, d2])\n', (3231, 3248), True, 'import numpy as np\n'), ((3309, 3320), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (3317, 3320), True, 'import numpy as np\n'), ((3326, 3336), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3333, 3336), True, 'import numpy as np\n'), ((3981, 4001), 'numpy.arange', 'np.arange', (['(-2.0)', '(1.0)'], {}), '(-2.0, 1.0)\n', (3990, 4001), True, 'import numpy as np\n'), ((4111, 4122), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (4119, 4122), True, 'import numpy as np\n'), ((4128, 4138), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), 
'(2)\n', (4135, 4138), True, 'import numpy as np\n'), ((4271, 4296), 'numpy.array', 'np.array', (['[n2, n1 / Q, 1]'], {}), '([n2, n1 / Q, 1])\n', (4279, 4296), True, 'import numpy as np\n'), ((4298, 4323), 'numpy.array', 'np.array', (['[d2, d1 / Q, 1]'], {}), '([d2, d1 / Q, 1])\n', (4306, 4323), True, 'import numpy as np\n'), ((4385, 4396), 'numpy.log10', 'np.log10', (['(2)'], {}), '(2)\n', (4393, 4396), True, 'import numpy as np\n'), ((4402, 4412), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4409, 4412), True, 'import numpy as np\n'), ((5059, 5079), 'numpy.arange', 'np.arange', (['(-2.0)', '(1.0)'], {}), '(-2.0, 1.0)\n', (5068, 5079), True, 'import numpy as np\n'), ((5441, 5469), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (5452, 5469), True, 'import numpy as np\n'), ((7032, 7064), 'numpy.ceil', 'np.ceil', (['(BWd * biquad_per_octave)'], {}), '(BWd * biquad_per_octave)\n', (7039, 7064), True, 'import numpy as np\n'), ((8749, 8761), 'scipy.signal.tf2sos', 'tf2sos', (['b', 'a'], {}), '(b, a)\n', (8755, 8761), False, 'from scipy.signal import tf2sos, freqs\n'), ((9202, 9214), 'scipy.signal.tf2sos', 'tf2sos', (['b', 'a'], {}), '(b, a)\n', (9208, 9214), False, 'from scipy.signal import tf2sos, freqs\n'), ((9333, 9343), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9340, 9343), True, 'import numpy as np\n'), ((10238, 10250), 'scipy.signal.tf2sos', 'tf2sos', (['b', 'a'], {}), '(b, a)\n', (10244, 10250), False, 'from scipy.signal import tf2sos, freqs\n'), ((10371, 10381), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10378, 10381), True, 'import numpy as np\n'), ((10742, 10754), 'scipy.signal.tf2sos', 'tf2sos', (['b', 'a'], {}), '(b, a)\n', (10748, 10754), False, 'from scipy.signal import tf2sos, freqs\n'), ((12007, 12052), 'scipy.signal.freqs', 'freqs', (['row[:3]', 'row[3:]'], {'worN': 'worN', 'plot': 'plot'}), '(row[:3], row[3:], worN=worN, plot=plot)\n', (12012, 12052), False, 'from scipy.signal import tf2sos, freqs\n'), ((12968, 12982), 'numpy.abs', 'np.abs', (['s_gain'], {}), '(s_gain)\n', (12974, 12982), True, 'import numpy as np\n'), ((14893, 14910), 'numpy.abs', 'np.abs', (['(gp2 - gb2)'], {}), '(gp2 - gb2)\n', (14899, 14910), True, 'import numpy as np\n'), ((14926, 14941), 'numpy.abs', 'np.abs', (['(gp2 - 1)'], {}), '(gp2 - 1)\n', (14932, 14941), True, 'import numpy as np\n'), ((14956, 14971), 'numpy.abs', 'np.abs', (['(gb2 - 1)'], {}), '(gb2 - 1)\n', (14962, 14971), True, 'import numpy as np\n'), ((14987, 15003), 'numpy.abs', 'np.abs', (['(gp2 - gn)'], {}), '(gp2 - gn)\n', (14993, 15003), True, 'import numpy as np\n'), ((15018, 15035), 'numpy.abs', 'np.abs', (['(gp2 - gn2)'], {}), '(gp2 - gn2)\n', (15024, 15035), True, 'import numpy as np\n'), ((15050, 15066), 'numpy.abs', 'np.abs', (['(gb2 - gn)'], {}), '(gb2 - gn)\n', (15056, 15066), True, 'import numpy as np\n'), ((15081, 15098), 'numpy.abs', 'np.abs', (['(gb2 - gn2)'], {}), '(gb2 - gn2)\n', (15087, 15098), True, 'import numpy as np\n'), ((15334, 15354), 'numpy.sqrt', 'np.sqrt', (['((C + D) / F)'], {}), '((C + D) / F)\n', (15341, 15354), True, 'import numpy as np\n'), ((15367, 15399), 'numpy.sqrt', 'np.sqrt', (['((gp2 * C + gb2 * D) / F)'], {}), '((gp2 * C + gb2 * D) / F)\n', (15374, 15399), True, 'import numpy as np\n'), ((15482, 15554), 'numpy.array', 'np.array', (['[1, -2 * (1 - W2) / (1 + W2 + A), (1 + W2 - A) / (1 + W2 + A)]'], {}), '([1, -2 * (1 - W2) / (1 + W2 + A), (1 + W2 - A) / (1 + W2 + A)])\n', (15490, 15554), True, 'import numpy as np\n'), ((15669, 15678), 
'numpy.abs', 'np.abs', (['G'], {}), '(G)\n', (15675, 15678), True, 'import numpy as np\n'), ((16378, 16396), 'numpy.sqrt', 'np.sqrt', (['(G11 / G00)'], {}), '(G11 / G00)\n', (16385, 16396), True, 'import numpy as np\n'), ((16465, 16486), 'numpy.tan', 'np.tan', (['(bandwidth / 2)'], {}), '(bandwidth / 2)\n', (16471, 16486), True, 'import numpy as np\n'), ((16682, 16784), 'numpy.array', 'np.array', (['[g_nyquist + g_ref * W2 + B, -2 * (g_nyquist - g_ref * W2), g_nyquist - B +\n g_ref * W2]'], {}), '([g_nyquist + g_ref * W2 + B, -2 * (g_nyquist - g_ref * W2), \n g_nyquist - B + g_ref * W2])\n', (16690, 16784), True, 'import numpy as np\n'), ((18352, 18386), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['[B, W, gain2]'], {}), '([B, W, gain2])\n', (18371, 18386), True, 'import numpy as np\n'), ((20077, 20108), 'numpy.concatenate', 'np.concatenate', (['(alpha, -alpha)'], {}), '((alpha, -alpha))\n', (20091, 20108), True, 'import numpy as np\n'), ((20490, 20510), 'numpy.append', 'np.append', (['z', 'z_real'], {}), '(z, z_real)\n', (20499, 20510), True, 'import numpy as np\n'), ((20523, 20543), 'numpy.append', 'np.append', (['p', 'p_real'], {}), '(p, p_real)\n', (20532, 20543), True, 'import numpy as np\n'), ((20961, 20977), 'numpy.log2', 'np.log2', (['(w2 / w1)'], {}), '(w2 / w1)\n', (20968, 20977), True, 'import numpy as np\n'), ((743, 752), 'numpy.abs', 'np.abs', (['G'], {}), '(G)\n', (749, 752), True, 'import numpy as np\n'), ((8145, 8156), 'numpy.log10', 'np.log10', (['(4)'], {}), '(4)\n', (8153, 8156), True, 'import numpy as np\n'), ((14749, 14760), 'numpy.sign', 'np.sign', (['Gp'], {}), '(Gp)\n', (14756, 14760), True, 'import numpy as np\n'), ((15113, 15131), 'numpy.sqrt', 'np.sqrt', (['(G11 / G00)'], {}), '(G11 / G00)\n', (15120, 15131), True, 'import numpy as np\n'), ((15197, 15211), 'numpy.tan', 'np.tan', (['(bw / 2)'], {}), '(bw / 2)\n', (15203, 15211), True, 'import numpy as np\n'), ((15414, 15466), 'numpy.array', 'np.array', (['[gn + W2 + B, -2 * (gn - W2), gn - B + W2]'], {}), '([gn + W2 + B, -2 * (gn - W2), gn - B + W2])\n', (15422, 15466), True, 'import numpy as np\n'), ((16399, 16420), 'numpy.tan', 'np.tan', (['(w_command / 2)'], {}), '(w_command / 2)\n', (16405, 16420), True, 'import numpy as np\n'), ((16570, 16588), 'numpy.sqrt', 'np.sqrt', (['(G00 * G11)'], {}), '(G00 * G11)\n', (16577, 16588), True, 'import numpy as np\n'), ((19034, 19045), 'numpy.sign', 'np.sign', (['Go'], {}), '(Go)\n', (19041, 19045), True, 'import numpy as np\n'), ((20126, 20146), 'numpy.exp', 'np.exp', (['(1.0j * alpha)'], {}), '(1.0j * alpha)\n', (20132, 20146), True, 'import numpy as np\n'), ((20162, 20182), 'numpy.exp', 'np.exp', (['(1.0j * alpha)'], {}), '(1.0j * alpha)\n', (20168, 20182), True, 'import numpy as np\n'), ((20913, 20924), 'numpy.sign', 'np.sign', (['Gd'], {}), '(Gd)\n', (20920, 20924), True, 'import numpy as np\n'), ((5519, 5528), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (5525, 5528), True, 'import numpy as np\n'), ((13051, 13065), 'numpy.abs', 'np.abs', (['(x - x0)'], {}), '(x - x0)\n', (13057, 13065), True, 'import numpy as np\n'), ((14777, 14787), 'numpy.abs', 'np.abs', (['Gp'], {}), '(Gp)\n', (14783, 14787), True, 'import numpy as np\n'), ((15134, 15148), 'numpy.tan', 'np.tan', (['(wc / 2)'], {}), '(wc / 2)\n', (15140, 15148), True, 'import numpy as np\n'), ((15302, 15320), 'numpy.sqrt', 'np.sqrt', (['(G00 * G11)'], {}), '(G00 * G11)\n', (15309, 15320), True, 'import numpy as np\n'), ((16438, 16456), 'numpy.sqrt', 'np.sqrt', (['(F00 / F11)'], {}), '(F00 / F11)\n', (16445, 
16456), True, 'import numpy as np\n'), ((16526, 16544), 'numpy.sqrt', 'np.sqrt', (['(F00 * F11)'], {}), '(F00 * F11)\n', (16533, 16544), True, 'import numpy as np\n'), ((19062, 19072), 'numpy.abs', 'np.abs', (['Go'], {}), '(Go)\n', (19068, 19072), True, 'import numpy as np\n'), ((6324, 6342), 'numpy.abs', 'np.abs', (['(Gd / slope)'], {}), '(Gd / slope)\n', (6330, 6342), True, 'import numpy as np\n'), ((15170, 15188), 'numpy.sqrt', 'np.sqrt', (['(F00 / F11)'], {}), '(F00 / F11)\n', (15177, 15188), True, 'import numpy as np\n'), ((15254, 15272), 'numpy.sqrt', 'np.sqrt', (['(F00 * F11)'], {}), '(F00 * F11)\n', (15261, 15272), True, 'import numpy as np\n'), ((18314, 18329), 'numpy.transpose', 'np.transpose', (['B'], {}), '(B)\n', (18326, 18329), True, 'import numpy as np\n')]
|
import inspect
import sys
import numpy as np
import attrdict
from mtwaffle import graphs
from mtwaffle import mt
class Site(attrdict.AttrDict):
index_map = {
'xx': [0, 0],
'xy': [0, 1],
'yx': [1, 0],
'yy': [1, 1]
}
EXCLUDED_CALLABLES = ('between_freqs', )
def __init__(self, freqs, zs, name='', phase_func=None, **kwargs):
super(attrdict.AttrDict, self).__init__()
self.freqs = np.asarray(freqs)
self.zs = np.asarray(zs)
self.name = name
if phase_func is None:
phase_func = mt.phase
self.phase_func = phase_func
for key, value in kwargs.items():
setattr(self, key, value)
@property
def periods(self):
return 1. / self.freqs
@property
def phases(self):
return self.phase_func(self.zs)
def inspect_mt_callable(self, name):
f = mt.callables[name]
argnames = [ # Find arguments of callable from mtwaffle.mt
p.name for p in inspect.signature(f).parameters.values()
if p.kind == p.POSITIONAL_OR_KEYWORD and p.default is p.empty
]
return f, argnames
def help(self, output=sys.stdout):
'''Print a list of the attributes which are available.'''
output.write('''
Attributes of mtwaffle.mtsite.Site are calculated using functions from the mtwaffle.mt module:
mtsite.Site mtwaffle.mt function
attribute (args are Site attributes) Function description
-------------- ------------------------------ ----------------------------------------------
''')
label = lambda f: f.__doc__.splitlines()[0] if f.__doc__ else 'MISSING DOC'
fnames = []
for fname, f in mt.callables.items():
try:
getattr(self, fname)
except:
pass
else:
fnames.append(fname)
for fname in fnames:
f, argnames = self.inspect_mt_callable(fname)
cname = self.__class__.__name__
argsig = ', '.join(['{}'.format(arg) for arg in argnames])
source = '{}({})'.format(fname, argsig)
label_attr = '{}'.format(fname.ljust(14))
label_source = source.ljust(30)
label_help = label(f)
output.write('{} {} {}\n'.format(label_attr, label_source, label_help))
# print('{fname}({sig})'.format(
# fname=fname, sig=', '.join([
# '{c}.{a}'.format(c=self.__class__.__name__, a=arg) for arg in f_arg_names])))
# output.write('{}.{} -- {}\n'.format(
# self.__class__.__name__,
# fname.ljust(max([len(fi) for fi in fnames])),
# doc(mt.callables[fname])
# )
# )
def get_property(self, key):
# Is the key ending with xx, xy, yx, or yy?
if key[-2:] in self.index_map:
indices = self.index_map[key[-2:]]
if key.startswith('res_'):
return self.appres[[Ellipsis] + indices]
elif key.startswith('phase_'):
return self.phases[[Ellipsis] + indices]
elif key.startswith('zr_'):
return self.zs.real[[Ellipsis] + indices]
elif key.startswith('zi_'):
return self.zs.imag[[Ellipsis] + indices]
# See if we can complete a function from mtwaffle.mt using the
# existing attributes in this Site:
elif key in mt.callables and not key in self.EXCLUDED_CALLABLES:
f, argnames = self.inspect_mt_callable(key)
return f(*[getattr(self, arg) for arg in argnames])
return False
def __getattr__(self, key):
value = self.get_property(key)
if value is False:
return super(attrdict.AttrDict, self).__getattr__(key)
else:
return value
def __getitem__(self, key):
value = self.get_property(key)
if value is False:
return super(attrdict.AttrDict, self).__getitem__(key)
else:
return value
def plot_res_phase(self, **kwargs):
args = (
(self.freqs, self.freqs),
(self.res_xy, self.res_yx),
(self.phase_xy, self.phase_yx),
)
if not 'res_indiv_kws' in kwargs:
kwargs['res_indiv_kws'] = (
{'label': 'xy', 'color': 'b'},
{'label': 'yx', 'color': 'g'},
)
return graphs.plot_res_phase(*args, **kwargs)
def plot_impedance_tensors(self, *args, **kwargs):
return graphs.plot_impedance_tensors(
self.zs, self.freqs, **kwargs)
def plot_ptensell(self, *args, **kwargs):
return graphs.plot_ptensell(
self.ptensors, self.freqs, *args, **kwargs
)
def plot_ptensell_filled(self, *args, **kwargs):
return graphs.plot_ptensell_filled(
self.ptensors, self.freqs, *args, **kwargs
)
def plot_mohr_imp(self, *args, **kwargs):
kwargs['title'] = kwargs.get('title', self.name)
return graphs.plot_mohr_imp(
self.zs, self.freqs, *args, **kwargs
)
def plot_mohr_ptensor(self, *args, **kwargs):
return graphs.plot_mohr_ptensor(
self.ptensors, self.freqs, *args, **kwargs
)
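# Usage sketch (frequencies and impedances below are made-up placeholders, and
# the derived attributes assume mtwaffle.mt exposes the corresponding callables,
# e.g. appres for apparent resistivity):
# freqs = np.logspace(-3, 3, 25)                               # Hz
# zs = np.random.randn(25, 2, 2) + 1j * np.random.randn(25, 2, 2)
# site = Site(freqs, zs, name='example')
# site.res_xy      # apparent resistivity of the xy component
# site.phase_yx    # impedance phase of the yx component
# site.help()      # list every mt.* quantity computable from this Site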
|
[
"mtwaffle.graphs.plot_impedance_tensors",
"mtwaffle.mt.callables.items",
"mtwaffle.graphs.plot_mohr_imp",
"numpy.asarray",
"mtwaffle.graphs.plot_ptensell",
"inspect.signature",
"mtwaffle.graphs.plot_mohr_ptensor",
"mtwaffle.graphs.plot_ptensell_filled",
"mtwaffle.graphs.plot_res_phase"
] |
[((450, 467), 'numpy.asarray', 'np.asarray', (['freqs'], {}), '(freqs)\n', (460, 467), True, 'import numpy as np\n'), ((486, 500), 'numpy.asarray', 'np.asarray', (['zs'], {}), '(zs)\n', (496, 500), True, 'import numpy as np\n'), ((1758, 1778), 'mtwaffle.mt.callables.items', 'mt.callables.items', ([], {}), '()\n', (1776, 1778), False, 'from mtwaffle import mt\n'), ((4560, 4598), 'mtwaffle.graphs.plot_res_phase', 'graphs.plot_res_phase', (['*args'], {}), '(*args, **kwargs)\n', (4581, 4598), False, 'from mtwaffle import graphs\n'), ((4670, 4730), 'mtwaffle.graphs.plot_impedance_tensors', 'graphs.plot_impedance_tensors', (['self.zs', 'self.freqs'], {}), '(self.zs, self.freqs, **kwargs)\n', (4699, 4730), False, 'from mtwaffle import graphs\n'), ((4806, 4870), 'mtwaffle.graphs.plot_ptensell', 'graphs.plot_ptensell', (['self.ptensors', 'self.freqs', '*args'], {}), '(self.ptensors, self.freqs, *args, **kwargs)\n', (4826, 4870), False, 'from mtwaffle import graphs\n'), ((4962, 5033), 'mtwaffle.graphs.plot_ptensell_filled', 'graphs.plot_ptensell_filled', (['self.ptensors', 'self.freqs', '*args'], {}), '(self.ptensors, self.freqs, *args, **kwargs)\n', (4989, 5033), False, 'from mtwaffle import graphs\n'), ((5175, 5233), 'mtwaffle.graphs.plot_mohr_imp', 'graphs.plot_mohr_imp', (['self.zs', 'self.freqs', '*args'], {}), '(self.zs, self.freqs, *args, **kwargs)\n', (5195, 5233), False, 'from mtwaffle import graphs\n'), ((5322, 5390), 'mtwaffle.graphs.plot_mohr_ptensor', 'graphs.plot_mohr_ptensor', (['self.ptensors', 'self.freqs', '*args'], {}), '(self.ptensors, self.freqs, *args, **kwargs)\n', (5346, 5390), False, 'from mtwaffle import graphs\n'), ((1026, 1046), 'inspect.signature', 'inspect.signature', (['f'], {}), '(f)\n', (1043, 1046), False, 'import inspect\n')]
|
# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import PIL.BmpImagePlugin
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
__all__ = [
"opencv2pil", "opencv2tensor", "pil2opencv", "process_image"
]
def opencv2pil(image: np.ndarray) -> PIL.BmpImagePlugin.BmpImageFile:
""" OpenCV Convert to PIL.Image format.
Returns:
PIL.Image.
"""
image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
return image
def opencv2tensor(image: np.ndarray, gpu: int) -> torch.Tensor:
""" OpenCV Convert to torch.Tensor format.
Returns:
torch.Tensor.
"""
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
nhwc_image = torch.from_numpy(rgb_image).div(255.0).unsqueeze(0)
input_tensor = nhwc_image.permute(0, 3, 1, 2)
if gpu is not None:
input_tensor = input_tensor.cuda(gpu, non_blocking=True)
return input_tensor
def pil2opencv(image: PIL.BmpImagePlugin.BmpImageFile) -> np.ndarray:
""" PIL.Image Convert to OpenCV format.
Returns:
np.ndarray.
"""
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
return image
def process_image(image: PIL.BmpImagePlugin.BmpImageFile, gpu: int = None) -> torch.Tensor:
""" PIL.Image Convert to PyTorch format.
Args:
image (PIL.BmpImagePlugin.BmpImageFile): File read by PIL.Image.
        gpu (int): Graphics card index; None keeps the tensor on the CPU.
Returns:
torch.Tensor.
"""
tensor = transforms.ToTensor()(image)
input_tensor = tensor.unsqueeze(0)
if gpu is not None:
input_tensor = input_tensor.cuda(gpu, non_blocking=True)
return input_tensor
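# Usage sketch (the file name is hypothetical):
# image = Image.open("example.bmp")
# tensor = process_image(image)        # shape (1, C, H, W), float32 in [0, 1]
# bgr = pil2opencv(image)              # BGR np.ndarray for OpenCV routines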
|
[
"torchvision.transforms.ToTensor",
"torch.from_numpy",
"numpy.asarray",
"cv2.cvtColor"
] |
[((1328, 1366), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1340, 1366), False, 'import cv2\n'), ((1098, 1136), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (1110, 1136), False, 'import cv2\n'), ((1783, 1800), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1793, 1800), True, 'import numpy as np\n'), ((2158, 2179), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2177, 2179), True, 'import torchvision.transforms as transforms\n'), ((1384, 1411), 'torch.from_numpy', 'torch.from_numpy', (['rgb_image'], {}), '(rgb_image)\n', (1400, 1411), False, 'import torch\n')]
|
import fmtrack
import os
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pickle
import pyvista
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel)
from sklearn.neighbors import KernelDensity
from sklearn import preprocessing
##########################################################################################
# get filepath for matplotlib style
##########################################################################################
stylepath = os.path.dirname(os.path.abspath(fmtrack.__file__)) + '/el_papers.mplstyle'
##########################################################################################
# import data
##########################################################################################
def import_cell_info(file_prefix_1,file_prefix_2,root_directory):
cell_mesh_1 = np.loadtxt(root_directory+'/Gel_cell_coords/' + file_prefix_1 + '_cell_mesh.txt')
cell_normal_1 = np.loadtxt(root_directory+'/Gel_cell_coords/' + file_prefix_1 + '_cell_normals.txt')
cell_center_1 = np.loadtxt(root_directory+'/Gel_cell_coords/' + file_prefix_1 + '_cell_center.txt')
cell_vol_1 = np.loadtxt(root_directory+'/Gel_cell_coords/' + file_prefix_1 + '_cell_volume.txt')
cell_mesh_2 = np.loadtxt(root_directory+'/Gel_cell_coords/' + file_prefix_2 + '_cell_mesh.txt')
cell_normal_2 = np.loadtxt(root_directory+'/Gel_cell_coords/' + file_prefix_2 + '_cell_normals.txt')
cell_center_2 = np.loadtxt(root_directory+'/Gel_cell_coords/' + file_prefix_2 + '_cell_center.txt')
cell_vol_2 = np.loadtxt(root_directory+'/Gel_cell_coords/' + file_prefix_2 + '_cell_volume.txt')
return cell_mesh_1, cell_normal_1, cell_center_1, cell_vol_1, cell_mesh_2, cell_normal_2, cell_center_2, cell_vol_2
def import_bead_disps(folder):
X = np.loadtxt(folder + '/X.txt')
Y = np.loadtxt(folder + '/Y.txt')
Z = np.loadtxt(folder + '/Z.txt')
U = np.loadtxt(folder + '/U.txt')
V = np.loadtxt(folder + '/V.txt')
W = np.loadtxt(folder + '/W.txt')
return X, Y, Z, U, V, W
##########################################################################################
# additional computations based on the data
##########################################################################################
# can be implemented as needed based on some rule for excluding outliers
def remove_outliers(X, Y, Z, U, V, W):
	# possible rules, to be chosen on a case by case basis:
	#   - maximum plausible displacement
	#   - z-score based displacement threshold
	#   - more complex strategy for determining outliers (hot-spot analysis)
	raise NotImplementedError('implement an outlier rule appropriate to the dataset')
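# One possible implementation sketch (the 3-sigma threshold below is an
# illustrative assumption, not a project default): keep beads whose displacement
# magnitude lies within three standard deviations of the mean magnitude.
def remove_outliers_zscore(X, Y, Z, U, V, W, z_thresh=3.0):
	mag = np.sqrt(U**2 + V**2 + W**2)
	keep = np.abs(mag - np.mean(mag)) < z_thresh * np.std(mag)
	return X[keep], Y[keep], Z[keep], U[keep], V[keep], W[keep]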
# compare bead displacement to it's neighbors
def color_point_neighbor_similarity(X, Y, Z, U, V, W, num_neigh):
num_beads = X.shape[0]
neigh_score = []
num_pts = X.shape[0]
for kk in range(0,num_pts):
x = X[kk]; y = Y[kk]; z = Z[kk]
u = U[kk]; v = V[kk]; w = W[kk]
dist_all = ((x - X)**2.0 + (y - Y)**2.0 + (z - Z)**2.0)**(1.0/2.0)
dist_all_sorted = np.argsort(dist_all)
score_dist = np.zeros((num_neigh))
for jj in range(0,num_neigh):
idx = dist_all_sorted[jj]
u2 = U[idx]; v2 = V[idx]; w2 = W[idx]
score_dist[jj] = ((u - u2)**2.0 + (v - v2)**2.0 + (w - w2)**2.0)**(1.0/2.0)
neigh_score.append(np.mean(score_dist))
return neigh_score
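# Note: neigh_score[kk] is the mean Euclidean distance between bead kk's
# displacement vector and those of its num_neigh nearest beads (the bead itself
# is included at distance zero), so low scores indicate locally consistent
# displacements and high scores flag potential tracking mismatches.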
# compare bead displacement direction to the initial cell configuration
def color_point_direction(X, Y, Z, U, V, W, cell_mesh, cell_normal):
num_beads = X.shape[0]
dir_score = []
dist_from_cell = []
mag_list = []
# --> down sample the cell mesh (computational efficiency)
num_pts = X.shape[0]
samp = np.random.randint(cell_mesh.shape[0]-1,size=np.min([num_pts,10000]))
reduced_cell_mesh = cell_mesh[samp,:]
reduced_cell_normal = cell_normal[samp,:]
for kk in range(0,num_pts):
x = X[kk]; y = Y[kk]; z = Z[kk]
u = U[kk]; v = V[kk]; w = W[kk]
mag = (u**2.0 + v**2.0 + w**2.0)**(1.0/2.0)
du = u/mag; dv = v/mag; dw = w/mag
dist_all = ((x - reduced_cell_mesh[:,0])**2.0 + (y - reduced_cell_mesh[:,1])**2.0\
+ (z - reduced_cell_mesh[:,2])**2.0)**(1.0/2.0)
arg = np.argmin(dist_all)
val = du*reduced_cell_normal[arg,0] + dv*reduced_cell_normal[arg,1] + dw*reduced_cell_normal[arg,2]
dir_score.append(val)
dist_from_cell.append(dist_all[arg])
mag_list.append(mag)
return dir_score, dist_from_cell, mag_list
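# Note: dir_score[kk] is the dot product between bead kk's unit displacement and
# the surface normal of the nearest (down-sampled) cell-mesh point, so values
# near +1 indicate motion along the normal and values near -1 motion against it;
# dist_from_cell and mag_list record the distance to that point and the raw
# displacement magnitude.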
# compute bead displacement to the domain edge
def compute_dist_from_edge(X, Y, Z, X_DIM, Y_DIM, Z_DIM):
num_pts = X.shape[0]
dist_from_edge = []
for kk in range(0,num_pts):
x = X[kk]; y = Y[kk]; z = Z[kk]
x_edge = np.min([np.abs(x),np.abs(X_DIM-x)])
y_edge = np.min([np.abs(y),np.abs(Y_DIM-y)])
z_edge = np.min([np.abs(z),np.abs(Z_DIM-z)])
dist = np.min([x_edge,y_edge,z_edge])
dist_from_edge.append(dist)
return dist_from_edge
# bin data to assist with plotting
def mean_bins(data1, data2):
cent_mark = [10,30,50,70]
less_than = [20,40,60,80]
mean_val = []
arg = np.argsort(data1)
data1 = np.sort(data1)
data2 = data2[arg]
idx_d = 0
for idx_l in range(0,len(less_than)):
arr = []; arr.append(0)
while idx_d < data1.shape[0] and data1[idx_d] < less_than[idx_l]:
arr.append(data2[idx_d])
idx_d += 1
mean_val.append(np.mean(arr))
return cent_mark, mean_val
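# Note: mean_bins() averages data2 over four bins of data1 of width 20
# ([0, 20), [20, 40), [40, 60), [60, 80)) and returns the bin centers
# (10, 30, 50, 70); each bin is seeded with a zero so that empty bins average to
# 0 rather than raising a warning.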
##########################################################################################
# plot raw data (position)
##########################################################################################
# --> y axis is bead displacement magnitude, x axis is distance from cell surface
def plot_surface_disp(axi,cell_mesh,dist_from_edge,dist_from_cell, mag_list):
#--> remove points
keep = []
for kk in range(0,len(dist_from_edge)):
if dist_from_edge[kk] > 5:
keep.append(kk)
keep = np.asarray(keep)
dist_from_cell = np.asarray(dist_from_cell)
mag_list = np.asarray(mag_list)
cent_mark,mean_val = mean_bins(dist_from_cell[keep],mag_list[keep])
axi.plot(dist_from_cell[keep],mag_list[keep],'k.',markersize=0.75)
axi.plot(cent_mark, mean_val,'ro',markersize=10)
axi.set_ylim((0,10))
axi.set_xlabel('distance to cell surface')
axi.set_ylabel(r'displacement magnitude $\mu m$')
# --> 3D plot of the cell, configuration number influences title and color
def plot_cell_3D(ax,cell_num,cell_mesh, cell_center, cell_vol, X_DIM, Y_DIM, Z_DIM):
if cell_num == 1:
col = (0.75,0.75,0.75)
elif cell_num == 2:
col = (0,0,0)
verts = cell_mesh; cent = cell_center; vol = cell_vol
ax.set_aspect('auto')
ax.plot(verts[:,0],verts[:,1],verts[:,2],'.',color=col)
ax.set_xlim((-1,X_DIM))
ax.set_ylim((-1,Y_DIM))
ax.set_zlim((-1,Z_DIM))
	if cell_num == 1:
		ax.set_title(r'cell config 1, %.1f $\mu m^3$' % vol)
	elif cell_num == 2:
		ax.set_title(r'cell config 2, %.1f $\mu m^3$' % vol)
# --> plot of scores (type 1 is similarity to neighbors, type 2 is direction relative to cell)
def plot_scores_subplot(data,title,axi,color_type):
num_pts = 250
X_plot = np.linspace(np.min(data),np.max(data),num_pts).reshape(-1,1)
X = data.reshape(-1,1)
kde = KernelDensity(kernel='gaussian', bandwidth=0.1).fit(X)
log_dens = kde.score_samples(X_plot)
axi.set_xlabel('score')
axi.set_ylabel('probability density function')
axi.set_title(title)
ci_max = np.max(data); ci_min = np.min(data)
axi.plot(X_plot[:,0],np.exp(log_dens),'k-',linewidth=0.5)
for kk in range(0,num_pts):
ci = X_plot[kk,0]
if color_type == 1: #--> all positive numbers, blue is 0, red is high
col = ((ci-ci_min)/(ci_max-ci_min), 0, 1.0 - (ci-ci_min)/(ci_max-ci_min))
elif color_type == 2:
if ci < 0.0:
col = (np.abs(ci),0,0.5*np.abs(ci))
else:
col = (0, np.abs(ci), np.abs(ci))
axi.plot(X_plot[kk, 0], np.exp(log_dens[kk]),'.',color=col)
return
# --> helper function plots slice of cell
def plot_cell(cent,project_1,project_2,project_out,col,axi):
buffer = 0.5
buffer_up = cent + buffer
buffer_low = cent - buffer
plot_1 = []
plot_2 = []
num_pts = project_1.shape[0]
for kk in range(0,num_pts):
if project_out[kk] < buffer_up and project_out[kk] > buffer_low:
plot_1.append(project_1[kk])
plot_2.append(project_2[kk])
axi.plot(plot_1,plot_2,'.',color=col)
return
# --> helper function plots slice of vectors
def plot_vectors(color_type, color_info, project_1, project_2, project_1d, project_2d, cent, project_out, axi):
ci_min = np.min(color_info); ci_max = np.max(color_info)
num_pts = project_1.shape[0]
for kk in range(0,num_pts):
# --> the vectors themselves
scale = 1
proj1_a = project_1[kk]; proj1_d = project_1d[kk]*scale
proj2_a = project_2[kk]; proj2_d = project_2d[kk]*scale
pout = project_out[kk];
buffer = 10
# --> color of the vectors
ci = color_info[kk]
if pout > cent - buffer and pout < cent + buffer:
# --> colortype
if color_type == 1: #--> all positive numbers, blue is 0, red is high
col = ((ci-ci_min)/(ci_max-ci_min), 0, 1.0 - (ci-ci_min)/(ci_max-ci_min))
elif color_type == 2:
if ci < 0.0:
col = (np.abs(ci),0,0.5*np.abs(ci))
else:
col = (0, np.abs(ci), np.abs(ci))
# --> plot the vectors
axi.arrow(proj1_a,proj2_a,proj1_d,proj2_d,color = col,linewidth=1.0,head_width=1.5)
return
# --> plot a slice plot, each has beads and a cell
def plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,\
cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM):
num_beads = X.shape[0]
XYZ = np.zeros((num_beads,3)); XYZ[:,0] = X; XYZ[:,1] = Y; XYZ[:,2] = Z
UVW = np.zeros((num_beads,3)); UVW[:,0] = U; UVW[:,1] = V; UVW[:,2] = W
cell_center_avg = 0.5*cell_center_1 + 0.5*cell_center_2
if plane_type == 1: #XZ-plane
idx_1 = 0
idx_2 = 2
idx_out = 1
elif plane_type == 2: #YZ-plane
idx_1 = 1
idx_2 = 2
idx_out = 0
elif plane_type == 3: #XY-plane
idx_1 = 0
idx_2 = 1
idx_out = 2
cent = cell_center_avg[idx_out]
project_1_cell_A = cell_mesh_1[:,idx_1]
project_2_cell_A = cell_mesh_1[:,idx_2]
project_out_cell_A = cell_mesh_1[:,idx_out]
cell_color_A = (0.75,0.75,0.75)
project_1_cell_B = cell_mesh_2[:,idx_1]
project_2_cell_B = cell_mesh_2[:,idx_2]
project_out_cell_B = cell_mesh_2[:,idx_out]
cell_color_B = (0.0,0.0,0.0)
project_1_bead = XYZ[:,idx_1]
project_2_bead = XYZ[:,idx_2]
project_1d_bead = UVW[:,idx_1]
project_2d_bead = UVW[:,idx_2]
project_out_bead = XYZ[:,idx_out]
# call cell plot for cell 1
plot_cell(cent,project_1_cell_A,project_2_cell_A,project_out_cell_A,cell_color_A,axi)
# call cell plot for cell 2
plot_cell(cent,project_1_cell_B,project_2_cell_B,project_out_cell_B,cell_color_B,axi)
# call vector plot
plot_vectors(color_type, color_info, project_1_bead, project_2_bead, project_1d_bead, project_2d_bead, cent, project_out_bead, axi)
center = cell_center_avg
if plane_type == 1: #XZ-plane
axi.plot([-1,X_DIM],[center[2],center[2]],'k:',linewidth=1.0)
axi.plot([center[0],center[0]],[-1,Z_DIM],'k:',linewidth=1.0)
axi.set_xlim((-1,X_DIM))
axi.set_ylim((-1,Z_DIM))
axi.set_xlabel(r'x-position $\mu m$')
axi.set_ylabel(r'z-position $\mu m$')
elif plane_type == 2: #YZ-plane
axi.plot([-1,Y_DIM],[center[2],center[2]],'k:',linewidth=1.0)
axi.plot([center[1],center[1]],[-1,Z_DIM],'k:',linewidth=1.0)
axi.set_xlim((-1,Y_DIM))
axi.set_ylim((-1,Z_DIM))
axi.set_xlabel(r'y-position $\mu m$')
axi.set_ylabel(r'z-position $\mu m$')
elif plane_type == 3: #XY-plane
axi.plot([-1,X_DIM],[center[1],center[1]],'k:',linewidth=1.0)
axi.plot([center[0],center[0]],[-1,Y_DIM],'k:',linewidth=1.0)
axi.set_xlim((-1,X_DIM))
axi.set_ylim((-1,Y_DIM))
axi.set_xlabel(r'x-position $\mu m$')
axi.set_ylabel(r'y-position $\mu m$')
return
def plot_vector_field(X,Y,Z,U,V,W,cell_init,cell_final,dir_score,should_show,should_save,foldername):
XYZ = np.vstack((X,Y,Z)).transpose()
UVW = np.vstack((U,V,W)).transpose()
point_cloud = pyvista.PolyData(XYZ)
point_cloud["dot(cell normal, displacement)"] = dir_score
point_cloud['vectors'] = UVW
geom = pyvista.Arrow()
arrows = point_cloud.glyph(orient='vectors', scale=False, factor=5.0,geom=geom)
mesh_init = pyvista.PolyData(cell_init)
mesh_final = pyvista.PolyData(cell_final)
if should_show:
plotter = pyvista.Plotter()
plotter.add_mesh(cell_final, color='maroon')
cmap = plt.cm.get_cmap("viridis_r")
plotter.add_mesh(arrows, cmap=cmap)
plotter.remove_scalar_bar()
plotter.add_scalar_bar('Dot(Cell Normal, Vector)', title_font_size=20, label_font_size=15, position_y=0.05)
plotter.show_grid()
plotter.show(title='Bead Deformation around Cell')
if should_save:
mesh_init.save(os.path.join(foldername,'cell_init.vtk'))
mesh_final.save(os.path.join(foldername,'cell_final.vtk'))
arrows.save(os.path.join(foldername,'arrows.vtk'))
# --> plot a cell-vector row
def plot_cell_vector_slice_row(ax_list,color_type,color_info,X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_center_2,cell_mesh_2,X_DIM,Y_DIM,Z_DIM):
axi = ax_list[0]
plane_type = 1
plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
axi = ax_list[1]
plane_type = 2
plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
axi = ax_list[2]
plane_type = 3
plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
return
# --> plot cells
def plot_only_cells(cell_mesh_1,cell_center_1,cell_vol_1,cell_mesh_2,cell_center_2,cell_vol_2,X_DIM,Y_DIM,Z_DIM,folder,figtype_list):
fig = plt.figure()
plt.style.use(stylepath)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig.set_figheight(5)
fig.set_figwidth(10)
axi = fig.add_subplot(1, 2, 1, projection='3d')
plot_cell_3D(axi,1,cell_mesh_1, cell_center_1, cell_vol_1, X_DIM, Y_DIM, Z_DIM)
axi = fig.add_subplot(1, 2, 2, projection='3d')
plot_cell_3D(axi,2,cell_mesh_2, cell_center_2, cell_vol_2, X_DIM, Y_DIM, Z_DIM)
plt.tight_layout()
for end in figtype_list:
fname = folder + '/Cell_plots_3D' + end
plt.savefig(fname)
return
# --> plot scores
def plot_only_scores(neigh_score,dir_score,folder,figtype_list):
fig = plt.figure()
plt.style.use(stylepath)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig.set_figheight(5)
fig.set_figwidth(10)
axi = fig.add_subplot(1,2,1)
data = np.asarray(neigh_score)
color_type = 1
title = 'neighbor distance score'
plot_scores_subplot(data,title,axi,color_type)
axi = fig.add_subplot(1,2,2)
data = np.asarray(dir_score)
color_type = 2
title = r'$n_{cell} \cdot n_{vector}$'
plot_scores_subplot(data,title,axi,color_type)
plt.tight_layout()
for end in figtype_list:
fname = folder + '/Score_plots' + end
plt.savefig(fname)
return
# --> plot slice
def plot_only_slice(dir_score,X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_center_2,cell_mesh_2,X_DIM,Y_DIM,Z_DIM,folder,figtype_list):
fig = plt.figure()
plt.style.use(stylepath)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig.set_figheight(5)
fig.set_figwidth(15)
color_type = 2
color_info = dir_score
axi = fig.add_subplot(1,3,1)
plane_type = 1
plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,\
cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
axi = fig.add_subplot(1,3,2)
plane_type = 2
plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,\
cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
axi = fig.add_subplot(1,3,3)
plane_type = 3
plot_cell_vector_slice(color_type, color_info, X, Y, Z, U, V, W, cell_center_1,\
cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
plt.tight_layout()
for end in figtype_list:
fname = folder + '/Bead_disp_slice' + end
plt.savefig(fname)
return
# --> plot distance
def plot_only_distance(cell_mesh,dist_from_edge,dist_from_cell,mag_list,folder,figtype_list):
fig = plt.figure()
plt.style.use(stylepath)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig.set_figheight(5)
fig.set_figwidth(5)
axi = fig.add_subplot(1,1,1)
plot_surface_disp(axi,cell_mesh,dist_from_edge,dist_from_cell, mag_list)
plt.tight_layout()
for end in figtype_list:
fname = folder + '/Disp_wrt_dist' + end
plt.savefig(fname)
return
# --> plot all
def plot_all(folder, root_directory, file_prefix_1,file_prefix_2,dir_score,neigh_score,dist_from_edge,dist_from_cell,mag_list,\
X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_vol_1,cell_center_2,cell_mesh_2,cell_vol_2,X_DIM,Y_DIM,Z_DIM,figtype_list):
fig = plt.figure()
plt.style.use(stylepath)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig.set_figheight(10)
fig.set_figwidth(20)
axi = fig.add_subplot(2,4,1)
data = np.asarray(dir_score)
color_type = 2
title = r'$n_{cell} \cdot n_{vector}$'
plot_scores_subplot(data,title,axi,color_type)
axi = fig.add_subplot(2,4,2)
plane_type = 1
plot_cell_vector_slice(color_type, dir_score, X, Y, Z, U, V, W, cell_center_1,\
cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
axi = fig.add_subplot(2,4,3)
plane_type = 2
plot_cell_vector_slice(color_type, dir_score, X, Y, Z, U, V, W, cell_center_1,\
cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
axi = fig.add_subplot(2,4,4)
plane_type = 3
plot_cell_vector_slice(color_type, dir_score, X, Y, Z, U, V, W, cell_center_1,\
cell_mesh_1, cell_center_2, cell_mesh_2, plane_type, axi, X_DIM, Y_DIM, Z_DIM)
axi = fig.add_subplot(2,4,5)
data = np.asarray(neigh_score)
color_type = 1
title = 'neighbor distance score'
plot_scores_subplot(data,title,axi,color_type)
axi = fig.add_subplot(2,4,6)
plot_surface_disp(axi,cell_mesh_1,dist_from_edge,dist_from_cell, mag_list)
axi = fig.add_subplot(2,4,7, projection='3d')
plot_cell_3D(axi,1,cell_mesh_1, cell_center_1, cell_vol_1, X_DIM, Y_DIM, Z_DIM)
axi = fig.add_subplot(2,4,8, projection='3d')
plot_cell_3D(axi,2,cell_mesh_2, cell_center_2, cell_vol_2, X_DIM, Y_DIM, Z_DIM)
plt.tight_layout()
for end in figtype_list:
fname = folder + '/Summary_plot' + end
plt.savefig(fname)
for end in figtype_list:
fname = root_directory + '/Post_proc_summary' + '/' + 'Summary_' + file_prefix_1 + '_to_' + file_prefix_2 + end
plt.savefig(fname)
return
# call individual plots, plus call multiple subplots
def call_plot_main(plot_type,file_prefix_1,file_prefix_2,num_feat,X_DIM,Y_DIM,Z_DIM,figtype_list,use_corrected_cell,root_directory,should_plot):
folder = root_directory + '/Track_' + file_prefix_1 + '_to_' + file_prefix_2
	X, Y, Z, U, V, W = import_bead_disps(folder)
	cell_mesh_1, cell_normal_1, cell_center_1, cell_vol_1, cell_mesh_2, cell_normal_2, cell_center_2, cell_vol_2 = import_cell_info(file_prefix_1,file_prefix_2,root_directory)
	if use_corrected_cell:
		# load the corrected mesh after import_cell_info so it is not immediately overwritten
		cell_mesh_2 = np.loadtxt(folder + '/cell_mesh_2_corrected.txt')
neigh_score = color_point_neighbor_similarity(X, Y, Z, U, V, W, num_feat)
dir_score, dist_from_cell, mag_list = color_point_direction(X, Y, Z, U, V, W, cell_mesh_1, cell_normal_1)
dist_from_edge = compute_dist_from_edge(X, Y, Z, X_DIM, Y_DIM, Z_DIM)
#type 6 will create all plots
# --> arrange data
if plot_type == 1 or plot_type == 6: # big plot with everything, saves it in two directories
plot_all(folder, root_directory, file_prefix_1,file_prefix_2,dir_score,neigh_score,dist_from_edge,dist_from_cell,mag_list,\
X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_vol_1,cell_center_2,cell_mesh_2,cell_vol_2,X_DIM,Y_DIM,Z_DIM,figtype_list)
if plot_type == 2 or plot_type == 6: # plots cells in both configurations
plot_only_cells(cell_mesh_1,cell_center_1,cell_vol_1,cell_mesh_2,cell_center_2,cell_vol_2,X_DIM,Y_DIM,Z_DIM,folder,figtype_list)
if plot_type == 3 or plot_type == 6: # plots scores only
plot_only_scores(neigh_score,dir_score,folder,figtype_list)
if plot_type == 4 or plot_type == 6: # plots slice only
plot_only_slice(dir_score,X,Y,Z,U,V,W,cell_center_1,cell_mesh_1,cell_center_2,cell_mesh_2,X_DIM,Y_DIM,Z_DIM,folder,figtype_list)
if plot_type == 5 or plot_type == 6: # plots magnitude wrt distance from surface
plot_only_distance(cell_mesh_1,dist_from_edge,dist_from_cell,mag_list,folder,figtype_list)
if should_plot:
plot_vector_field(X,Y,Z,U,V,W, cell_mesh_1, cell_mesh_2, dir_score,should_plot,True,folder)
return
##########################################################################################
# displacement interpolation -- use GPR
##########################################################################################
# --> create GP model
def create_gp_model(X,Y,Z,QoI):
num_pts = X.shape[0]
X_train_unscale = np.zeros((num_pts,3))
X_train_unscale[:,0] = X
X_train_unscale[:,1] = Y
X_train_unscale[:,2] = Z
scaler = preprocessing.StandardScaler().fit(X_train_unscale)
X_train = scaler.transform(X_train_unscale)
kernel = RationalQuadratic()
gp = GaussianProcessRegressor(kernel=kernel)
gp.fit(X_train, QoI)
return gp , scaler
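# Prediction sketch (the query coordinate is an illustrative assumption): after
# fitting, interpolate the displacement component at an arbitrary gel position
# by passing the query through the same StandardScaler used for training.
def _demo_gp_prediction(X, Y, Z, U):
	gp_U, scaler = create_gp_model(X, Y, Z, U)
	query = np.array([[50.0, 50.0, 25.0]])          # one (x, y, z) position
	return gp_U.predict(scaler.transform(query))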
# --> create GP models
def create_GP_model(file_prefix_1,file_prefix_2,root_directory):
folder = root_directory + '/Track_' + file_prefix_1 + '_to_' + file_prefix_2
X, Y, Z, U, V, W = import_bead_disps(folder)
gp_U, scaler = create_gp_model(X,Y,Z,U)
gp_V, scaler = create_gp_model(X,Y,Z,V)
gp_W, scaler = create_gp_model(X,Y,Z,W)
pickle.dump(gp_U, open(folder + '/gp_U.sav','wb'))
pickle.dump(gp_V, open(folder + '/gp_V.sav','wb'))
pickle.dump(gp_W, open(folder + '/gp_W.sav','wb'))
pickle.dump(scaler,open(folder + '/scaler.sav','wb'))
return
# --> interpolate GP model
def interpolate_gp_model(plane_case, center, gp, scaler, X_DIM, Y_DIM, Z_DIM ):
x_min = -1; x_max = X_DIM
y_min = -1; y_max = Y_DIM
z_min = -1; z_max = Z_DIM
grid_pts = 100
# --> construct artificial grid for plotting
if plane_case == 1: #x plane
		x = center[0]
y = np.linspace(y_min,y_max,grid_pts)
z = np.linspace(z_min,z_max,grid_pts)
Y, Z = np.meshgrid(y,z)
X = x * np.ones((grid_pts,grid_pts))
RES = np.zeros((grid_pts,grid_pts))
elif plane_case == 2: #y plane
x = np.linspace(x_min,x_max,grid_pts)
		y = center[1]
z = np.linspace(z_min,z_max,grid_pts)
X, Z = np.meshgrid(x, z)
Y = y * np.ones((grid_pts,grid_pts))
RES = np.zeros((grid_pts,grid_pts))
elif plane_case == 3: #z plane
x = np.linspace(x_min,x_max,grid_pts)
y = np.linspace(y_min,y_max,grid_pts)
z = center[2]
X, Y = np.meshgrid(x, y)
Z = z * np.ones((grid_pts,grid_pts))
RES = np.zeros((grid_pts,grid_pts))
# --> fit model grid
for j in range(0,grid_pts):
input = []
for k in range(0,grid_pts):
li = [X[j,k],Y[j,k],Z[j,k]]
input.append(li)
input = np.asarray(input)
input = scaler.transform(input)
pred = gp.predict(input)
RES[j,:] = pred[:]
if plane_case == 1:
return Y, Z, RES
elif plane_case == 2:
return X, Z, RES
elif plane_case == 3:
return X, Y, RES
# --> create a single GP plot
def plot_gp_model_single_plot(axi,is_mag,data_1,data_2,result,title):
# --> plot interpolated field
vmin = -5; vmax = 5
if is_mag:
vmin = 0; vmax = 10
CS1 = axi.pcolor(data_1, data_2, result, cmap=plt.cm.coolwarm,vmin=vmin,vmax=vmax)
cbar = plt.colorbar(CS1, ax=axi)
cbar.set_label(title,labelpad=-95,y=1.13,rotation=0)
return
# --> plot GPR model, one row
def plot_gp_model_one_row(ax_list,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,gp_model,scaler,cell_mesh_1,cell_mesh_2):
axi = ax_list[0]
plane_case = 2
if is_mag == False:
data_1, data_2, result = interpolate_gp_model(plane_case, center, gp_model, scaler, X_DIM, Y_DIM, Z_DIM)
else:
data_1, data_2, result_0 = interpolate_gp_model(plane_case, center, gp_model[0], scaler, X_DIM, Y_DIM, Z_DIM)
data_1, data_2, result_1 = interpolate_gp_model(plane_case, center, gp_model[1], scaler, X_DIM, Y_DIM, Z_DIM)
data_1, data_2, result_2 = interpolate_gp_model(plane_case, center, gp_model[2], scaler, X_DIM, Y_DIM, Z_DIM)
result = (result_0**2.0 + result_1**2.0 + result_2**2.0)**(1.0/2.0)
plot_gp_model_single_plot(axi,is_mag,data_1,data_2,result,title)
idx0 = 0; idx1 = 2; idx2 = 1
plot_cell(center[idx2],cell_mesh_1[:,idx0],cell_mesh_1[:,idx1],cell_mesh_1[:,idx2],(0.75,0.75,0.75),axi)
plot_cell(center[idx2],cell_mesh_2[:,idx0],cell_mesh_2[:,idx1],cell_mesh_2[:,idx2],(0,0,0),axi)
axi.plot([-1,X_DIM],[center[2],center[2]],'k:',linewidth=1.0)
axi.plot([center[0],center[0]],[-1,Z_DIM],'k:',linewidth=1.0)
axi.set_xlabel(r'x-position $\mu m$')
axi.set_ylabel(r'z-position $\mu m$')
axi.set_xlim((-1,X_DIM))
axi.set_ylim((-1,Z_DIM))
axi = ax_list[1]
    plane_case = 1
if is_mag == False:
data_1, data_2, result = interpolate_gp_model(plane_case, center, gp_model, scaler, X_DIM, Y_DIM, Z_DIM)
else:
data_1, data_2, result_0 = interpolate_gp_model(plane_case, center, gp_model[0], scaler, X_DIM, Y_DIM, Z_DIM)
data_1, data_2, result_1 = interpolate_gp_model(plane_case, center, gp_model[1], scaler, X_DIM, Y_DIM, Z_DIM)
data_1, data_2, result_2 = interpolate_gp_model(plane_case, center, gp_model[2], scaler, X_DIM, Y_DIM, Z_DIM)
result = (result_0**2.0 + result_1**2.0 + result_2**2.0)**(1.0/2.0)
plot_gp_model_single_plot(axi,is_mag,data_1,data_2,result,title)
idx0 = 1; idx1 = 2; idx2 = 0
plot_cell(center[idx2],cell_mesh_1[:,idx0],cell_mesh_1[:,idx1],cell_mesh_1[:,idx2],(0.75,0.75,0.75),axi)
plot_cell(center[idx2],cell_mesh_2[:,idx0],cell_mesh_2[:,idx1],cell_mesh_2[:,idx2],(0,0,0),axi)
axi.plot([-1,Y_DIM],[center[2],center[2]],'k:',linewidth=1.0)
axi.plot([center[1],center[1]],[-1,Z_DIM],'k:',linewidth=1.0)
axi.set_xlabel(r'y-position $\mu m$')
axi.set_ylabel(r'z-position $\mu m$')
axi.set_xlim((-1,Y_DIM))
axi.set_ylim((-1,Z_DIM))
axi = ax_list[2]
plane_case = 3
if is_mag == False:
data_1, data_2, result = interpolate_gp_model(plane_case, center, gp_model, scaler, X_DIM, Y_DIM, Z_DIM)
else:
data_1, data_2, result_0 = interpolate_gp_model(plane_case, center, gp_model[0], scaler, X_DIM, Y_DIM, Z_DIM)
data_1, data_2, result_1 = interpolate_gp_model(plane_case, center, gp_model[1], scaler, X_DIM, Y_DIM, Z_DIM)
data_1, data_2, result_2 = interpolate_gp_model(plane_case, center, gp_model[2], scaler, X_DIM, Y_DIM, Z_DIM)
result = (result_0**2.0 + result_1**2.0 + result_2**2.0)**(1.0/2.0)
plot_gp_model_single_plot(axi,is_mag,data_1,data_2,result,title)
idx0 = 0; idx1 = 1; idx2 = 2
plot_cell(center[idx2],cell_mesh_1[:,idx0],cell_mesh_1[:,idx1],cell_mesh_1[:,idx2],(0.75,0.75,0.75),axi)
plot_cell(center[idx2],cell_mesh_2[:,idx0],cell_mesh_2[:,idx1],cell_mesh_2[:,idx2],(0,0,0),axi)
axi.plot([-1,X_DIM],[center[1],center[1]],'k:',linewidth=1.0)
    axi.plot([center[0],center[0]],[-1,Y_DIM],'k:',linewidth=1.0)
axi.set_xlabel(r'x-position $\mu m$')
axi.set_ylabel(r'y-position $\mu m$')
axi.set_xlim((-1,X_DIM))
    axi.set_ylim((-1,Y_DIM))
# --> plot GPR model
def plot_gp_model(file_prefix_1,file_prefix_2,X_DIM,Y_DIM,Z_DIM,figtype_list,use_corrected_cell, root_directory):
cell_mesh_1, cell_normal_1, cell_center_1, cell_vol_1, cell_mesh_2, cell_normal_2, cell_center_2, cell_vol_2 = import_cell_info(file_prefix_1,file_prefix_2,root_directory)
center = 0.5*cell_center_1 + 0.5*cell_center_2
folder = root_directory + '/Track_' + file_prefix_1 + '_to_' + file_prefix_2
if use_corrected_cell:
cell_mesh_2 = np.loadtxt(folder + '/cell_mesh_2_corrected.txt')
gp_U = pickle.load(open(folder + '/gp_U.sav', 'rb'))
gp_V = pickle.load(open(folder + '/gp_V.sav', 'rb'))
gp_W = pickle.load(open(folder + '/gp_W.sav', 'rb'))
scaler = pickle.load(open(folder + '/scaler.sav','rb'))
fig = plt.figure()
plt.style.use(stylepath)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
fig.set_figheight(20)
fig.set_figwidth(15)
ax1 = fig.add_subplot(4, 3, 1); ax2 = fig.add_subplot(4, 3, 2);ax3 = fig.add_subplot(4, 3, 3)
ax_list1 = [ax1,ax2,ax3]
title = r'x-displacement $\mu m$'
is_mag = False
plot_gp_model_one_row(ax_list1,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,gp_U,scaler,cell_mesh_1,cell_mesh_2)
ax4 = fig.add_subplot(4, 3, 4); ax5 = fig.add_subplot(4, 3, 5); ax6 = fig.add_subplot(4, 3, 6)
ax_list2 = [ax4,ax5,ax6]
title = r'y-displacement $\mu m$'
is_mag = False
plot_gp_model_one_row(ax_list2,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,gp_V,scaler,cell_mesh_1,cell_mesh_2)
ax7 = fig.add_subplot(4, 3, 7); ax8 = fig.add_subplot(4, 3, 8); ax9 = fig.add_subplot(4, 3, 9)
ax_list3 = [ax7,ax8,ax9]
title = r'z-displacement $\mu m$'
is_mag = False
plot_gp_model_one_row(ax_list3,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,gp_W,scaler,cell_mesh_1,cell_mesh_2)
ax10 = fig.add_subplot(4, 3, 10); ax11 = fig.add_subplot(4, 3, 11); ax12 = fig.add_subplot(4, 3, 12)
ax_list4 = [ax10,ax11,ax12]
title = r'mag-displacement $\mu m$'
is_mag = True
plot_gp_model_one_row(ax_list4,is_mag,X_DIM,Y_DIM,Z_DIM,title,center,[gp_U, gp_V, gp_W],scaler,cell_mesh_1,cell_mesh_2)
plt.tight_layout()
for end in figtype_list:
fname = folder + '/Interpolate_plot' + end
plt.savefig(fname)
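# --> illustrative driver (sketch only; the file prefixes, gel dimensions, and figure
# types below are hypothetical placeholders, not values from the original code)
if __name__ == '__main__':
    root_directory = '.'                          # assumes ./Gel_cell_coords and ./Track_* folders exist
    file_prefix_1, file_prefix_2 = 'CellA', 'CellB'
    X_DIM, Y_DIM, Z_DIM = 150.0, 150.0, 140.0     # gel dimensions in microns (assumed)
    figtype_list = ['.png']
    # fit one GP per displacement component and cache them to disk
    create_GP_model(file_prefix_1, file_prefix_2, root_directory)
    # reload the cached GPs and plot the interpolated displacement slices
    plot_gp_model(file_prefix_1, file_prefix_2, X_DIM, Y_DIM, Z_DIM,
                  figtype_list, use_corrected_cell=False, root_directory=root_directory)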
|
[
"numpy.argsort",
"pyvista.Arrow",
"sklearn.gaussian_process.GaussianProcessRegressor",
"numpy.mean",
"pyvista.PolyData",
"numpy.sort",
"numpy.asarray",
"matplotlib.pyplot.style.use",
"sklearn.neighbors.KernelDensity",
"numpy.max",
"numpy.exp",
"numpy.linspace",
"numpy.vstack",
"numpy.min",
"numpy.argmin",
"numpy.meshgrid",
"numpy.abs",
"matplotlib.pyplot.savefig",
"numpy.ones",
"sklearn.gaussian_process.kernels.RationalQuadratic",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.colorbar",
"os.path.join",
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"os.path.abspath",
"numpy.loadtxt",
"pyvista.Plotter"
] |
[((1131, 1218), 'numpy.loadtxt', 'np.loadtxt', (["(root_directory + '/Gel_cell_coords/' + file_prefix_1 + '_cell_mesh.txt')"], {}), "(root_directory + '/Gel_cell_coords/' + file_prefix_1 +\n '_cell_mesh.txt')\n", (1141, 1218), True, 'import numpy as np\n'), ((1230, 1320), 'numpy.loadtxt', 'np.loadtxt', (["(root_directory + '/Gel_cell_coords/' + file_prefix_1 + '_cell_normals.txt')"], {}), "(root_directory + '/Gel_cell_coords/' + file_prefix_1 +\n '_cell_normals.txt')\n", (1240, 1320), True, 'import numpy as np\n'), ((1332, 1421), 'numpy.loadtxt', 'np.loadtxt', (["(root_directory + '/Gel_cell_coords/' + file_prefix_1 + '_cell_center.txt')"], {}), "(root_directory + '/Gel_cell_coords/' + file_prefix_1 +\n '_cell_center.txt')\n", (1342, 1421), True, 'import numpy as np\n'), ((1430, 1519), 'numpy.loadtxt', 'np.loadtxt', (["(root_directory + '/Gel_cell_coords/' + file_prefix_1 + '_cell_volume.txt')"], {}), "(root_directory + '/Gel_cell_coords/' + file_prefix_1 +\n '_cell_volume.txt')\n", (1440, 1519), True, 'import numpy as np\n'), ((1529, 1616), 'numpy.loadtxt', 'np.loadtxt', (["(root_directory + '/Gel_cell_coords/' + file_prefix_2 + '_cell_mesh.txt')"], {}), "(root_directory + '/Gel_cell_coords/' + file_prefix_2 +\n '_cell_mesh.txt')\n", (1539, 1616), True, 'import numpy as np\n'), ((1628, 1718), 'numpy.loadtxt', 'np.loadtxt', (["(root_directory + '/Gel_cell_coords/' + file_prefix_2 + '_cell_normals.txt')"], {}), "(root_directory + '/Gel_cell_coords/' + file_prefix_2 +\n '_cell_normals.txt')\n", (1638, 1718), True, 'import numpy as np\n'), ((1730, 1819), 'numpy.loadtxt', 'np.loadtxt', (["(root_directory + '/Gel_cell_coords/' + file_prefix_2 + '_cell_center.txt')"], {}), "(root_directory + '/Gel_cell_coords/' + file_prefix_2 +\n '_cell_center.txt')\n", (1740, 1819), True, 'import numpy as np\n'), ((1828, 1917), 'numpy.loadtxt', 'np.loadtxt', (["(root_directory + '/Gel_cell_coords/' + file_prefix_2 + '_cell_volume.txt')"], {}), "(root_directory + '/Gel_cell_coords/' + file_prefix_2 +\n '_cell_volume.txt')\n", (1838, 1917), True, 'import numpy as np\n'), ((2067, 2096), 'numpy.loadtxt', 'np.loadtxt', (["(folder + '/X.txt')"], {}), "(folder + '/X.txt')\n", (2077, 2096), True, 'import numpy as np\n'), ((2102, 2131), 'numpy.loadtxt', 'np.loadtxt', (["(folder + '/Y.txt')"], {}), "(folder + '/Y.txt')\n", (2112, 2131), True, 'import numpy as np\n'), ((2137, 2166), 'numpy.loadtxt', 'np.loadtxt', (["(folder + '/Z.txt')"], {}), "(folder + '/Z.txt')\n", (2147, 2166), True, 'import numpy as np\n'), ((2172, 2201), 'numpy.loadtxt', 'np.loadtxt', (["(folder + '/U.txt')"], {}), "(folder + '/U.txt')\n", (2182, 2201), True, 'import numpy as np\n'), ((2207, 2236), 'numpy.loadtxt', 'np.loadtxt', (["(folder + '/V.txt')"], {}), "(folder + '/V.txt')\n", (2217, 2236), True, 'import numpy as np\n'), ((2242, 2271), 'numpy.loadtxt', 'np.loadtxt', (["(folder + '/W.txt')"], {}), "(folder + '/W.txt')\n", (2252, 2271), True, 'import numpy as np\n'), ((5180, 5197), 'numpy.argsort', 'np.argsort', (['data1'], {}), '(data1)\n', (5190, 5197), True, 'import numpy as np\n'), ((5207, 5221), 'numpy.sort', 'np.sort', (['data1'], {}), '(data1)\n', (5214, 5221), True, 'import numpy as np\n'), ((5992, 6008), 'numpy.asarray', 'np.asarray', (['keep'], {}), '(keep)\n', (6002, 6008), True, 'import numpy as np\n'), ((6027, 6053), 'numpy.asarray', 'np.asarray', (['dist_from_cell'], {}), '(dist_from_cell)\n', (6037, 6053), True, 'import numpy as np\n'), ((6066, 6086), 'numpy.asarray', 'np.asarray', (['mag_list'], {}), '(mag_list)\n', (6076, 
6086), True, 'import numpy as np\n'), ((7463, 7475), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (7469, 7475), True, 'import numpy as np\n'), ((7486, 7498), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (7492, 7498), True, 'import numpy as np\n'), ((8561, 8579), 'numpy.min', 'np.min', (['color_info'], {}), '(color_info)\n', (8567, 8579), True, 'import numpy as np\n'), ((8590, 8608), 'numpy.max', 'np.max', (['color_info'], {}), '(color_info)\n', (8596, 8608), True, 'import numpy as np\n'), ((9657, 9681), 'numpy.zeros', 'np.zeros', (['(num_beads, 3)'], {}), '((num_beads, 3))\n', (9665, 9681), True, 'import numpy as np\n'), ((9731, 9755), 'numpy.zeros', 'np.zeros', (['(num_beads, 3)'], {}), '((num_beads, 3))\n', (9739, 9755), True, 'import numpy as np\n'), ((12100, 12121), 'pyvista.PolyData', 'pyvista.PolyData', (['XYZ'], {}), '(XYZ)\n', (12116, 12121), False, 'import pyvista\n'), ((12219, 12234), 'pyvista.Arrow', 'pyvista.Arrow', ([], {}), '()\n', (12232, 12234), False, 'import pyvista\n'), ((12330, 12357), 'pyvista.PolyData', 'pyvista.PolyData', (['cell_init'], {}), '(cell_init)\n', (12346, 12357), False, 'import pyvista\n'), ((12372, 12400), 'pyvista.PolyData', 'pyvista.PolyData', (['cell_final'], {}), '(cell_final)\n', (12388, 12400), False, 'import pyvista\n'), ((13904, 13916), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13914, 13916), True, 'import matplotlib.pyplot as plt\n'), ((13918, 13942), 'matplotlib.pyplot.style.use', 'plt.style.use', (['stylepath'], {}), '(stylepath)\n', (13931, 13942), True, 'import matplotlib.pyplot as plt\n'), ((13944, 13971), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (13950, 13971), True, 'import matplotlib.pyplot as plt\n'), ((13973, 14003), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (13979, 14003), True, 'import matplotlib.pyplot as plt\n'), ((14309, 14327), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14325, 14327), True, 'import matplotlib.pyplot as plt\n'), ((14518, 14530), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (14528, 14530), True, 'import matplotlib.pyplot as plt\n'), ((14532, 14556), 'matplotlib.pyplot.style.use', 'plt.style.use', (['stylepath'], {}), '(stylepath)\n', (14545, 14556), True, 'import matplotlib.pyplot as plt\n'), ((14558, 14585), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (14564, 14585), True, 'import matplotlib.pyplot as plt\n'), ((14587, 14617), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (14593, 14617), True, 'import matplotlib.pyplot as plt\n'), ((14700, 14723), 'numpy.asarray', 'np.asarray', (['neigh_score'], {}), '(neigh_score)\n', (14710, 14723), True, 'import numpy as np\n'), ((14862, 14883), 'numpy.asarray', 'np.asarray', (['dir_score'], {}), '(dir_score)\n', (14872, 14883), True, 'import numpy as np\n'), ((14989, 15007), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15005, 15007), True, 'import matplotlib.pyplot as plt\n'), ((15262, 15274), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15272, 15274), True, 'import matplotlib.pyplot as plt\n'), ((15276, 15300), 'matplotlib.pyplot.style.use', 'plt.style.use', (['stylepath'], {}), '(stylepath)\n', (15289, 15300), True, 'import matplotlib.pyplot as plt\n'), ((15302, 15329), 'matplotlib.pyplot.rc', 'plt.rc', 
(['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (15308, 15329), True, 'import matplotlib.pyplot as plt\n'), ((15331, 15361), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (15337, 15361), True, 'import matplotlib.pyplot as plt\n'), ((16074, 16092), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16090, 16092), True, 'import matplotlib.pyplot as plt\n'), ((16314, 16326), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16324, 16326), True, 'import matplotlib.pyplot as plt\n'), ((16328, 16352), 'matplotlib.pyplot.style.use', 'plt.style.use', (['stylepath'], {}), '(stylepath)\n', (16341, 16352), True, 'import matplotlib.pyplot as plt\n'), ((16354, 16381), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (16360, 16381), True, 'import matplotlib.pyplot as plt\n'), ((16383, 16413), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (16389, 16413), True, 'import matplotlib.pyplot as plt\n'), ((16562, 16580), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16578, 16580), True, 'import matplotlib.pyplot as plt\n'), ((16951, 16963), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16961, 16963), True, 'import matplotlib.pyplot as plt\n'), ((16965, 16989), 'matplotlib.pyplot.style.use', 'plt.style.use', (['stylepath'], {}), '(stylepath)\n', (16978, 16989), True, 'import matplotlib.pyplot as plt\n'), ((16991, 17018), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (16997, 17018), True, 'import matplotlib.pyplot as plt\n'), ((17020, 17050), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (17026, 17050), True, 'import matplotlib.pyplot as plt\n'), ((17134, 17155), 'numpy.asarray', 'np.asarray', (['dir_score'], {}), '(dir_score)\n', (17144, 17155), True, 'import numpy as np\n'), ((17922, 17945), 'numpy.asarray', 'np.asarray', (['neigh_score'], {}), '(neigh_score)\n', (17932, 17945), True, 'import numpy as np\n'), ((18411, 18429), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18427, 18429), True, 'import matplotlib.pyplot as plt\n'), ((21057, 21079), 'numpy.zeros', 'np.zeros', (['(num_pts, 3)'], {}), '((num_pts, 3))\n', (21065, 21079), True, 'import numpy as np\n'), ((21275, 21294), 'sklearn.gaussian_process.kernels.RationalQuadratic', 'RationalQuadratic', ([], {}), '()\n', (21292, 21294), False, 'from sklearn.gaussian_process.kernels import RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct, ConstantKernel, WhiteKernel\n'), ((21301, 21340), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'kernel': 'kernel'}), '(kernel=kernel)\n', (21325, 21340), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((23552, 23577), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['CS1'], {'ax': 'axi'}), '(CS1, ax=axi)\n', (23564, 23577), True, 'import matplotlib.pyplot as plt\n'), ((27950, 27962), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (27960, 27962), True, 'import matplotlib.pyplot as plt\n'), ((27964, 27988), 'matplotlib.pyplot.style.use', 'plt.style.use', (['stylepath'], {}), '(stylepath)\n', (27977, 27988), True, 'import matplotlib.pyplot as plt\n'), ((27990, 28017), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': 
'(True)'}), "('text', usetex=True)\n", (27996, 28017), True, 'import matplotlib.pyplot as plt\n'), ((28019, 28049), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (28025, 28049), True, 'import matplotlib.pyplot as plt\n'), ((29253, 29271), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (29269, 29271), True, 'import matplotlib.pyplot as plt\n'), ((793, 826), 'os.path.abspath', 'os.path.abspath', (['fmtrack.__file__'], {}), '(fmtrack.__file__)\n', (808, 826), False, 'import os\n'), ((3247, 3267), 'numpy.argsort', 'np.argsort', (['dist_all'], {}), '(dist_all)\n', (3257, 3267), True, 'import numpy as np\n'), ((3283, 3302), 'numpy.zeros', 'np.zeros', (['num_neigh'], {}), '(num_neigh)\n', (3291, 3302), True, 'import numpy as np\n'), ((4336, 4355), 'numpy.argmin', 'np.argmin', (['dist_all'], {}), '(dist_all)\n', (4345, 4355), True, 'import numpy as np\n'), ((4953, 4985), 'numpy.min', 'np.min', (['[x_edge, y_edge, z_edge]'], {}), '([x_edge, y_edge, z_edge])\n', (4959, 4985), True, 'import numpy as np\n'), ((7522, 7538), 'numpy.exp', 'np.exp', (['log_dens'], {}), '(log_dens)\n', (7528, 7538), True, 'import numpy as np\n'), ((12431, 12448), 'pyvista.Plotter', 'pyvista.Plotter', ([], {}), '()\n', (12446, 12448), False, 'import pyvista\n'), ((12505, 12533), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""viridis_r"""'], {}), "('viridis_r')\n", (12520, 12533), True, 'import matplotlib.pyplot as plt\n'), ((14398, 14416), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (14409, 14416), True, 'import matplotlib.pyplot as plt\n'), ((15076, 15094), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (15087, 15094), True, 'import matplotlib.pyplot as plt\n'), ((16165, 16183), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (16176, 16183), True, 'import matplotlib.pyplot as plt\n'), ((16651, 16669), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (16662, 16669), True, 'import matplotlib.pyplot as plt\n'), ((18499, 18517), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (18510, 18517), True, 'import matplotlib.pyplot as plt\n'), ((18660, 18678), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (18671, 18678), True, 'import matplotlib.pyplot as plt\n'), ((19007, 19056), 'numpy.loadtxt', 'np.loadtxt', (["(folder + '/cell_mesh_2_corrected.txt')"], {}), "(folder + '/cell_mesh_2_corrected.txt')\n", (19017, 19056), True, 'import numpy as np\n'), ((22246, 22281), 'numpy.linspace', 'np.linspace', (['y_min', 'y_max', 'grid_pts'], {}), '(y_min, y_max, grid_pts)\n', (22257, 22281), True, 'import numpy as np\n'), ((22286, 22321), 'numpy.linspace', 'np.linspace', (['z_min', 'z_max', 'grid_pts'], {}), '(z_min, z_max, grid_pts)\n', (22297, 22321), True, 'import numpy as np\n'), ((22329, 22346), 'numpy.meshgrid', 'np.meshgrid', (['y', 'z'], {}), '(y, z)\n', (22340, 22346), True, 'import numpy as np\n'), ((22393, 22423), 'numpy.zeros', 'np.zeros', (['(grid_pts, grid_pts)'], {}), '((grid_pts, grid_pts))\n', (22401, 22423), True, 'import numpy as np\n'), ((23045, 23062), 'numpy.asarray', 'np.asarray', (['input'], {}), '(input)\n', (23055, 23062), True, 'import numpy as np\n'), ((27669, 27718), 'numpy.loadtxt', 'np.loadtxt', (["(folder + '/cell_mesh_2_corrected.txt')"], {}), "(folder + '/cell_mesh_2_corrected.txt')\n", (27679, 27718), True, 'import numpy as np\n'), ((29345, 29363), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (29356, 29363), True, 'import matplotlib.pyplot as plt\n'), ((3507, 3526), 'numpy.mean', 'np.mean', (['score_dist'], {}), '(score_dist)\n', (3514, 3526), True, 'import numpy as np\n'), ((3905, 3929), 'numpy.min', 'np.min', (['[num_pts, 10000]'], {}), '([num_pts, 10000])\n', (3911, 3929), True, 'import numpy as np\n'), ((5449, 5461), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (5456, 5461), True, 'import numpy as np\n'), ((7265, 7312), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': '(0.1)'}), "(kernel='gaussian', bandwidth=0.1)\n", (7278, 7312), False, 'from sklearn.neighbors import KernelDensity\n'), ((7911, 7931), 'numpy.exp', 'np.exp', (['log_dens[kk]'], {}), '(log_dens[kk])\n', (7917, 7931), True, 'import numpy as np\n'), ((12015, 12035), 'numpy.vstack', 'np.vstack', (['(X, Y, Z)'], {}), '((X, Y, Z))\n', (12024, 12035), True, 'import numpy as np\n'), ((12053, 12073), 'numpy.vstack', 'np.vstack', (['(U, V, W)'], {}), '((U, V, W))\n', (12062, 12073), True, 'import numpy as np\n'), ((12822, 12863), 'os.path.join', 'os.path.join', (['foldername', '"""cell_init.vtk"""'], {}), "(foldername, 'cell_init.vtk')\n", (12834, 12863), False, 'import os\n'), ((12882, 12924), 'os.path.join', 'os.path.join', (['foldername', '"""cell_final.vtk"""'], {}), "(foldername, 'cell_final.vtk')\n", (12894, 12924), False, 'import os\n'), ((12939, 12977), 'os.path.join', 'os.path.join', (['foldername', '"""arrows.vtk"""'], {}), "(foldername, 'arrows.vtk')\n", (12951, 12977), False, 'import os\n'), ((21168, 21198), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (21196, 21198), False, 'from sklearn import preprocessing\n'), ((22356, 22385), 'numpy.ones', 'np.ones', (['(grid_pts, grid_pts)'], {}), '((grid_pts, grid_pts))\n', (22363, 22385), True, 'import numpy as np\n'), ((22461, 22496), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'grid_pts'], {}), '(x_min, x_max, grid_pts)\n', (22472, 22496), True, 'import numpy as np\n'), ((22517, 22552), 'numpy.linspace', 'np.linspace', (['z_min', 'z_max', 'grid_pts'], {}), '(z_min, z_max, grid_pts)\n', (22528, 22552), True, 'import numpy as np\n'), ((22560, 22577), 'numpy.meshgrid', 'np.meshgrid', (['x', 'z'], {}), '(x, z)\n', (22571, 22577), True, 'import numpy as np\n'), ((22625, 22655), 'numpy.zeros', 'np.zeros', (['(grid_pts, grid_pts)'], {}), '((grid_pts, grid_pts))\n', (22633, 22655), True, 'import numpy as np\n'), ((4822, 4831), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (4828, 4831), True, 'import numpy as np\n'), ((4832, 4849), 'numpy.abs', 'np.abs', (['(X_DIM - x)'], {}), '(X_DIM - x)\n', (4838, 4849), True, 'import numpy as np\n'), ((4869, 4878), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (4875, 4878), True, 'import numpy as np\n'), ((4879, 4896), 'numpy.abs', 'np.abs', (['(Y_DIM - y)'], {}), '(Y_DIM - y)\n', (4885, 4896), True, 'import numpy as np\n'), ((4916, 4925), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (4922, 4925), True, 'import numpy as np\n'), ((4926, 4943), 'numpy.abs', 'np.abs', (['(Z_DIM - z)'], {}), '(Z_DIM - z)\n', (4932, 4943), True, 'import numpy as np\n'), ((7184, 7196), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (7190, 7196), True, 'import numpy as np\n'), ((7197, 7209), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (7203, 7209), True, 'import numpy as np\n'), ((22588, 22617), 'numpy.ones', 'np.ones', (['(grid_pts, grid_pts)'], {}), '((grid_pts, 
grid_pts))\n', (22595, 22617), True, 'import numpy as np\n'), ((22693, 22728), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'grid_pts'], {}), '(x_min, x_max, grid_pts)\n', (22704, 22728), True, 'import numpy as np\n'), ((22733, 22768), 'numpy.linspace', 'np.linspace', (['y_min', 'y_max', 'grid_pts'], {}), '(y_min, y_max, grid_pts)\n', (22744, 22768), True, 'import numpy as np\n'), ((22792, 22809), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (22803, 22809), True, 'import numpy as np\n'), ((22857, 22887), 'numpy.zeros', 'np.zeros', (['(grid_pts, grid_pts)'], {}), '((grid_pts, grid_pts))\n', (22865, 22887), True, 'import numpy as np\n'), ((22820, 22849), 'numpy.ones', 'np.ones', (['(grid_pts, grid_pts)'], {}), '((grid_pts, grid_pts))\n', (22827, 22849), True, 'import numpy as np\n'), ((7809, 7819), 'numpy.abs', 'np.abs', (['ci'], {}), '(ci)\n', (7815, 7819), True, 'import numpy as np\n'), ((7861, 7871), 'numpy.abs', 'np.abs', (['ci'], {}), '(ci)\n', (7867, 7871), True, 'import numpy as np\n'), ((7873, 7883), 'numpy.abs', 'np.abs', (['ci'], {}), '(ci)\n', (7879, 7883), True, 'import numpy as np\n'), ((7826, 7836), 'numpy.abs', 'np.abs', (['ci'], {}), '(ci)\n', (7832, 7836), True, 'import numpy as np\n'), ((9205, 9215), 'numpy.abs', 'np.abs', (['ci'], {}), '(ci)\n', (9211, 9215), True, 'import numpy as np\n'), ((9259, 9269), 'numpy.abs', 'np.abs', (['ci'], {}), '(ci)\n', (9265, 9269), True, 'import numpy as np\n'), ((9271, 9281), 'numpy.abs', 'np.abs', (['ci'], {}), '(ci)\n', (9277, 9281), True, 'import numpy as np\n'), ((9222, 9232), 'numpy.abs', 'np.abs', (['ci'], {}), '(ci)\n', (9228, 9232), True, 'import numpy as np\n')]
|
import numpy as np
train_ratings_path = "./../Data/netflix/TrainingRatings.txt"
test_ratings_path = "./../Data/netflix/TestingRatings.txt"
map_users={}
map_titles={}
data_matrix = np.empty((28978,1821),dtype=np.float32)
data_matrix[:] = np.nan
with open(train_ratings_path,'r') as reader:
counter_titles=0
counter_users = 0
for line in reader:
title,user_id,rating = line.split(',')
if not title in map_titles:
map_titles[title] = counter_titles
counter_titles +=1
if not user_id in map_users:
map_users[user_id]=counter_users
counter_users +=1
        data_matrix[map_users[user_id], map_titles[title]] = float(rating)
del reader
mean_rating = np.nanmean(data_matrix,axis=1)
data_matrix[np.isnan(data_matrix)]=0
deviation = data_matrix - mean_rating[:,np.newaxis]
weights = {}
ratings={}
predicted = {}
squared_dev = (deviation**2).sum(axis=1)
act_ratings=[]
pred_ratings=[]
error_rating=[]
with open(test_ratings_path,'r') as reader:
c=0
for line in reader:
title,user_id,rating = line.split(',')
mapped_user = map_users[user_id]
mapped_title = map_titles[title]
if user_id not in weights:
n_correlation = np.abs((deviation[mapped_user] * deviation).sum(axis=1))
d_correlation = np.sqrt(squared_dev[mapped_user] * squared_dev)
weights[user_id]=n_correlation/d_correlation
normalising_constant = weights[user_id].sum()
weighted_sum = (weights[user_id]*(data_matrix[:,mapped_title] - mean_rating)).sum()
predicted[(mapped_title,user_id)] = mean_rating[mapped_user] + weighted_sum/normalising_constant
        actual = float(rating.strip())
        act_ratings.append(actual)
        pred_ratings.append(predicted[(mapped_title, user_id)])
        error_rating.append(actual - predicted[(mapped_title, user_id)])
        print(c, " Actual : ", actual, " Pred : ", predicted[(mapped_title, user_id)])
        c += 1
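# Summary metrics (illustrative sketch, not part of the original script):
# error_rating holds (actual - predicted) for every test pair, so MAE and RMSE
# follow directly with numpy.
errors = np.array(error_rating)
print("MAE :", np.mean(np.abs(errors)))
print("RMSE:", np.sqrt(np.mean(errors ** 2)))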
|
[
"numpy.nanmean",
"numpy.sqrt",
"numpy.empty",
"numpy.isnan"
] |
[((184, 225), 'numpy.empty', 'np.empty', (['(28978, 1821)'], {'dtype': 'np.float32'}), '((28978, 1821), dtype=np.float32)\n', (192, 225), True, 'import numpy as np\n'), ((730, 761), 'numpy.nanmean', 'np.nanmean', (['data_matrix'], {'axis': '(1)'}), '(data_matrix, axis=1)\n', (740, 761), True, 'import numpy as np\n'), ((773, 794), 'numpy.isnan', 'np.isnan', (['data_matrix'], {}), '(data_matrix)\n', (781, 794), True, 'import numpy as np\n'), ((1337, 1384), 'numpy.sqrt', 'np.sqrt', (['(squared_dev[mapped_user] * squared_dev)'], {}), '(squared_dev[mapped_user] * squared_dev)\n', (1344, 1384), True, 'import numpy as np\n')]
|
import random
from precise.skaters.managerutil.managertesting import manager_test_run
from precise.skaters.managers.equalmanagers import equal_daily_long_manager, equal_long_manager
from precise.skaters.managers.equalmanagers import equal_weekly_long_manager, equal_weekly_buy_and_hold_long_manager
from precise.skatertools.data.equityhistorical import random_cached_equity_dense
from numpy.testing import assert_array_almost_equal
def test_random_manager():
from precise.skaters.managers.allmanagers import LONG_MANAGERS
mgr = random.choice(LONG_MANAGERS)
manager_test_run(mgr=mgr)
def test_daily_equal():
assert_equal_managing(equal_long_manager, equal_daily_long_manager)
def test_weekly_equal():
assert_equal_managing(equal_weekly_long_manager, equal_weekly_buy_and_hold_long_manager)
def assert_equal_managing(mgr1,mgr2):
ys = random_cached_equity_dense(k=1, n_obs=50, n_dim=3, as_frame=False)
s1 = {}
s2 = {}
for y in ys:
w1, s1 = mgr1(y=y, s=s1)
w2, s2 = mgr2(y=y, s=s2)
assert_array_almost_equal(w1,w2, err_msg='managers are not the same')
if __name__=='__main__':
test_daily_equal()
test_weekly_equal()
|
[
"random.choice",
"precise.skatertools.data.equityhistorical.random_cached_equity_dense",
"numpy.testing.assert_array_almost_equal",
"precise.skaters.managerutil.managertesting.manager_test_run"
] |
[((540, 568), 'random.choice', 'random.choice', (['LONG_MANAGERS'], {}), '(LONG_MANAGERS)\n', (553, 568), False, 'import random\n'), ((573, 598), 'precise.skaters.managerutil.managertesting.manager_test_run', 'manager_test_run', ([], {'mgr': 'mgr'}), '(mgr=mgr)\n', (589, 598), False, 'from precise.skaters.managerutil.managertesting import manager_test_run\n'), ((867, 933), 'precise.skatertools.data.equityhistorical.random_cached_equity_dense', 'random_cached_equity_dense', ([], {'k': '(1)', 'n_obs': '(50)', 'n_dim': '(3)', 'as_frame': '(False)'}), '(k=1, n_obs=50, n_dim=3, as_frame=False)\n', (893, 933), False, 'from precise.skatertools.data.equityhistorical import random_cached_equity_dense\n'), ((1049, 1119), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['w1', 'w2'], {'err_msg': '"""managers are not the same"""'}), "(w1, w2, err_msg='managers are not the same')\n", (1074, 1119), False, 'from numpy.testing import assert_array_almost_equal\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from Text_B_ocr_crnn_model_file.crnn.util import resizeNormalize, strLabelConverter
class CRNN:
def __init__(self, alphabet=None):
self.alphabet = alphabet
def load_weights(self, path):
ocrPath = path
ocrPathtxt = path.replace('.pb', '.pbtxt')
self.model = cv2.dnn.readNetFromTensorflow(ocrPath, ocrPathtxt)
    def predict(self, image):
        # resize to a fixed height of 32 px and normalise pixel values
        image = resizeNormalize(image, 32)
        image = image.astype(np.float32)
        # add batch and channel dimensions before feeding the OpenCV DNN
        image = np.array([[image]])
        self.model.setInput(image)
        preds = self.model.forward()
        # move the class scores to the last axis and drop the batch dimension
        preds = preds.transpose(0, 2, 3, 1)
        preds = preds[0]
        # greedy decoding: most likely class index at each position
        preds = np.argmax(preds, axis=2).reshape((-1,))
        # map the index sequence back to characters (CTC-style label conversion)
        raw = strLabelConverter(preds, self.alphabet)
        return raw
def predict_job(self, boxes):
n = len(boxes)
for i in range(n):
boxes[i]['text'] = self.predict(boxes[i]['img'])
return boxes
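# Illustrative usage sketch (the weight path, alphabet, and image file below are
# hypothetical placeholders, not part of the original module).
if __name__ == '__main__':
    recognizer = CRNN(alphabet='0123456789abcdefghijklmnopqrstuvwxyz')
    recognizer.load_weights('models/ocr.pb')   # expects ocr.pb and ocr.pbtxt side by side
    img = cv2.imread('sample_line.png', cv2.IMREAD_GRAYSCALE)
    print(recognizer.predict(img))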
|
[
"cv2.dnn.readNetFromTensorflow",
"numpy.argmax",
"numpy.array",
"Text_B_ocr_crnn_model_file.crnn.util.strLabelConverter",
"Text_B_ocr_crnn_model_file.crnn.util.resizeNormalize"
] |
[((378, 428), 'cv2.dnn.readNetFromTensorflow', 'cv2.dnn.readNetFromTensorflow', (['ocrPath', 'ocrPathtxt'], {}), '(ocrPath, ocrPathtxt)\n', (407, 428), False, 'import cv2\n'), ((476, 502), 'Text_B_ocr_crnn_model_file.crnn.util.resizeNormalize', 'resizeNormalize', (['image', '(32)'], {}), '(image, 32)\n', (491, 502), False, 'from Text_B_ocr_crnn_model_file.crnn.util import resizeNormalize, strLabelConverter\n'), ((560, 579), 'numpy.array', 'np.array', (['[[image]]'], {}), '([[image]])\n', (568, 579), True, 'import numpy as np\n'), ((791, 830), 'Text_B_ocr_crnn_model_file.crnn.util.strLabelConverter', 'strLabelConverter', (['preds', 'self.alphabet'], {}), '(preds, self.alphabet)\n', (808, 830), False, 'from Text_B_ocr_crnn_model_file.crnn.util import resizeNormalize, strLabelConverter\n'), ((737, 761), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(2)'}), '(preds, axis=2)\n', (746, 761), True, 'import numpy as np\n')]
|
## @package teetool
# This module contains the Visual_2d class
#
# See Visual_2d class for more details
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import teetool as tt
## Visual_2d class generates the 2d output using Matplotlib
#
# Even 3-dimensional trajectories can be output in 2d (sliced)
class Visual_2d(object):
## Constructor for Visual_2d
# @param self object pointer
# @param thisWorld World object, filled with trajectory data and models
# @param kwargs additional parameters for plt.figure()
def __init__(self, thisWorld, **kwargs):
"""
<description>
"""
## figure object
self._fig = plt.figure(facecolor="white", **kwargs)
## axis object
self._ax = self._fig.gca()
# set colour of axis
#self._ax.set_axis_bgcolor('white')
#self._ax.set_facecolor('white')
## World object
self._world = thisWorld
## Labels of plots
self._labels = []
## Plot mean of trajectories
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotMean(self, list_icluster=None, colour=None, **kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract data
clusters = self._world.getCluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, this_cluster) in enumerate(clusters):
# pass clusters
Y = this_cluster["model"].getMean()
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[list_icluster[i]],
**kwargs)
## Plot trajectories of cluster
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param ntraj maximum number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotTrajectories(self,
list_icluster=None,
ntraj=50,
colour=None,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract data
clusters = self._world.getCluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, this_cluster) in enumerate(clusters):
# pass clusters
for itraj, (x, Y) in enumerate(this_cluster["data"]):
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[i],
**kwargs)
# limit number of trajectories
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot trajectories of cluster
# @param self object pointer
# @param x1 point from [0,1] to visualise
# @param list_icluster list of clusters to plot
# @param ntraj maximum number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotTrajectoriesPoints(self,
x1,
list_icluster=None,
ntraj=50,
colour=None,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# obtain points
clustersP = self._world.getClusterPoints(x1, list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, A) in enumerate(clustersP):
# pass clusters
for itraj, a in enumerate(A):
a_line, = self._ax.plot(a[0],
a[1],
color=colours[i],
**kwargs)
# limit number of trajectories
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot time-series of trajectories
# @param self object pointer
# @param icluster select cluster to plot
# @param idim select dimension to plot
# @param ntraj maximum number of trajectories
# @param colour specificy colour of trajectories
# @param kwargs additional parameters for plotting
def plotTimeSeries(self, icluster=0, idim=0, ntraj=50,
colour='k', **kwargs):
# number of subplots, 2 or 3
ndim = self._world._ndim
# subplot
#f, axarr = plt.subplots(ndim, sharex=True)
# check validity
[icluster] = self._world._check_list_icluster([icluster])
# extract data
clusters = self._world.getCluster([icluster])
for (i, this_cluster) in enumerate(clusters):
# pass clusters
for itraj, (x, Y) in enumerate(this_cluster["data"]):
#for d in range(ndim):
x_norm = (x - x.min()) / (x.max() - x.min())
a_line, = self._ax.plot(x_norm,
Y[:,idim],
color=colour, **kwargs)
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot a box based on two coordinates
# @param self object pointer
# @param coord_lowerleft lower-left coordinate (x,y)
# @param coord_upperright upper-right coordinate (x,y)
# @param kwargs additional parameters for plotting
def plotBox(self, coord_lowerleft, coord_upperright, **kwargs):
x_lo = coord_lowerleft[0]
x_hi = coord_upperright[0]
y_lo = coord_lowerleft[1]
y_hi = coord_upperright[1]
coords = np.array([[x_lo, y_lo],
[x_hi, y_lo],
[x_hi, y_hi],
[x_lo, y_hi],
[x_lo, y_lo]])
coords_x = coords[:,0]
coords_y = coords[:,1]
self._ax.plot(coords_x, coords_y, **kwargs)
## standard plotting function for Matplotlib
# @param self object pointer
# @param args additional arguments for plotting
# @param kwargs additional labeled parameters for plotting
def plot(self, *args, **kwargs):
# plot
self._ax.plot(*args, **kwargs)
## Plot samples of model
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param ntraj number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotSamples(self, list_icluster=None, ntraj=50, colour=None, **kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, icluster) in enumerate(list_icluster):
these_samples = self._world.getSamples(icluster,
nsamples=ntraj)
for (x, Y) in these_samples:
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[i],
linestyle=":",
**kwargs)
self._labels.append((a_line, "samples"))
## Add legend to plot
# @param self object pointer
def plotLegend(self):
list_lines = []
list_label = []
for (a_line, a_label) in self._labels:
list_lines.append(a_line)
list_label.append(a_label)
plt.legend(handles=list_lines, labels=list_label)
## Plots a confidence region of variance sigma
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param sdwidth variance to evaluate
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution sets resolution for which to calculate the tube, can be a single integer, or an actual measurement [dim1 dim2] (2d) [dim1 dim2 dim3] (3d)
# @param colour if specified, overwrites distinct colours
# @param alpha opacity for the confidence region
# @param kwargs additional parameters for plotting
def plotTube(self,
list_icluster=None,
sdwidth=1,
z=None,
resolution=None,
colour=None,
alpha=.1,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract
(ss_list, [xx, yy, zz]) = self._world.getTube(list_icluster,
sdwidth,
z=z,
resolution=resolution)
# unique colours
lcolours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for i, ss1 in enumerate(ss_list):
#plt.contourf(xx, yy, 1.*ss1, levels=[-np.inf, 1., np.inf], colors=(lcolours[i],), alpha=alpha, **kwargs)
# plot an iso surface line
plt.contour(xx,
yy,
ss1,
levels=[.5],
colors=(lcolours[list_icluster[i]], 'w'),
**kwargs)
## Plots the difference confidence region of variance sigma for two models
# @param self object pointer
# @param list_icluster list of 2 clusters to compare
# @param sdwidth variance to evaluate
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution specify resolution of region
# @param colour if specified, overwrites distinct colours
# @param alpha opacity for the confidence region
# @param kwargs additional parameters for plotting
def plotTubeDifference(self,
list_icluster=None,
sdwidth=1,
z=None,
resolution=None,
colour=None,
alpha=.1,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract first two only!
list_icluster = list_icluster[:2]
# extract
(ss_list, [xx, yy, zz]) = self._world.getTube(list_icluster,
sdwidth, z=z,
resolution=resolution)
# to plot
ss_plot = - np.inf * np.ones_like(ss_list[0])
# 1 :: blocks added
ss_added = ((ss_list[0] - ss_list[1])==-1)
# 2 :: blocks removed
ss_removed = ((ss_list[0] - ss_list[1])==1)
# 3 :: present in both
ss_neutral = ((ss_list[0] + ss_list[1])==2)
ss_plot[ss_added] = 1.
ss_plot[ss_removed] = -1.
ss_plot[ss_neutral] = 0.
#plt.contourf(xx, yy, ss_plot, levels=[-np.inf, -1., 0., 1., np.inf], colors='none', hatches=['//', '.', '/'], **kwargs)
plt.contourf(xx,
yy,
ss_plot,
levels=[-np.inf, -1., 0., 1., np.inf],
colors=('r','b','g'),
alpha=alpha,
**kwargs)
for i in [1, 2, 3]:
if i == 1:
ss1 = 1.*ss_removed
color = 'r'
elif i == 2:
ss1 = 1.*ss_added
color = 'g'
elif i == 3:
ss1 = 1.*ss_neutral
color = 'b'
# plot an iso surface
plt.contour(xx, yy, ss1, levels=[0.5], colors=color)
## Plot the log-likehood of confidence regions -- which can be related to traffic complexity in the future
# @param self object pointer
# @param list_icluster list of clusters to compare
# @param pmin minimum value on a normalised scale
# @param pmax maximum value on a normalised scale
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution specify resolution of region
def plotLogLikelihood(self,
list_icluster=None,
pmin=0, pmax=1,
z=None,
resolution=None):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
(ss_list, [xx, yy, zz]) = self._world.getLogLikelihood(list_icluster,
resolution,
z)
ss = ss_list[0] # initialise
for ss1 in ss_list:
# find those greater
mask = np.greater(ss1, ss)
# replace
ss[mask] = ss1[mask]
# normalise
ss_norm = (ss - np.min(ss)) / (np.max(ss) - np.min(ss))
# plot contours
self._ax.pcolor(xx,
yy,
ss_norm,
cmap="viridis",
vmin=pmin,
vmax=pmax)
def plotComplexityMap(self,
list_icluster=None,
complexity=1,
pmin=0, pmax=1,
z=None,
resolution=None, cmap1="Reds"):
ss, xx, yy, zz = self._world.getComplexityMap(list_icluster,
complexity,
resolution,
z)
# normalise
ss_norm = (ss - np.min(ss)) / (np.max(ss) - np.min(ss))
# plot contours
cax = self._ax.pcolor(xx,
yy,
ss_norm,
cmap=cmap1,
vmin=pmin,
vmax=pmax)
return cax
## add colorbar
def plotColourBar(self, *args, **kwargs):
cbar = self._fig.colorbar(*args, **kwargs)
# horizontal colorbar
# cbar.ax.set_xticklabels(['Low', 'Medium', 'High'])
return cbar
## Plots the title or worldname
# @param self object pointer
def _plotTitle(self):
# add title
world_name = self._world.getName()
        if world_name is not None:
plt.title(world_name)
## saves the figure to a file in the output folder
# @param self object pointer
# @param add additional identifier for file
def save(self, add=None):
        if add is None:
saveas = self._world.getName()
else:
saveas = "{0}_{1}".format(self._world.getName(), add)
plt.savefig("output/2d_{0}.png".format(saveas))
## shows the figure (pop-up or inside notebook)
# @param self object pointer
def show(self):
plt.show()
## closes all figures
# @param self object pointer
def close(self):
plt.close("all")
|
[
"matplotlib.pyplot.contourf",
"numpy.ones_like",
"numpy.greater",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contour",
"numpy.min",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((710, 749), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white', **kwargs)\n", (720, 749), True, 'import matplotlib.pyplot as plt\n'), ((6461, 6546), 'numpy.array', 'np.array', (['[[x_lo, y_lo], [x_hi, y_lo], [x_hi, y_hi], [x_lo, y_hi], [x_lo, y_lo]]'], {}), '([[x_lo, y_lo], [x_hi, y_lo], [x_hi, y_hi], [x_lo, y_hi], [x_lo, y_lo]]\n )\n', (6469, 6546), True, 'import numpy as np\n'), ((8466, 8515), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'list_lines', 'labels': 'list_label'}), '(handles=list_lines, labels=list_label)\n', (8476, 8515), True, 'import matplotlib.pyplot as plt\n'), ((12151, 12273), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'ss_plot'], {'levels': '[-np.inf, -1.0, 0.0, 1.0, np.inf]', 'colors': "('r', 'b', 'g')", 'alpha': 'alpha'}), "(xx, yy, ss_plot, levels=[-np.inf, -1.0, 0.0, 1.0, np.inf],\n colors=('r', 'b', 'g'), alpha=alpha, **kwargs)\n", (12163, 12273), True, 'import matplotlib.pyplot as plt\n'), ((16092, 16102), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16100, 16102), True, 'import matplotlib.pyplot as plt\n'), ((16192, 16208), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16201, 16208), True, 'import matplotlib.pyplot as plt\n'), ((10122, 10216), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'ss1'], {'levels': '[0.5]', 'colors': "(lcolours[list_icluster[i]], 'w')"}), "(xx, yy, ss1, levels=[0.5], colors=(lcolours[list_icluster[i]],\n 'w'), **kwargs)\n", (10133, 10216), True, 'import matplotlib.pyplot as plt\n'), ((11641, 11665), 'numpy.ones_like', 'np.ones_like', (['ss_list[0]'], {}), '(ss_list[0])\n', (11653, 11665), True, 'import numpy as np\n'), ((12729, 12781), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'ss1'], {'levels': '[0.5]', 'colors': 'color'}), '(xx, yy, ss1, levels=[0.5], colors=color)\n', (12740, 12781), True, 'import matplotlib.pyplot as plt\n'), ((13887, 13906), 'numpy.greater', 'np.greater', (['ss1', 'ss'], {}), '(ss1, ss)\n', (13897, 13906), True, 'import numpy as np\n'), ((15585, 15606), 'matplotlib.pyplot.title', 'plt.title', (['world_name'], {}), '(world_name)\n', (15594, 15606), True, 'import matplotlib.pyplot as plt\n'), ((14007, 14017), 'numpy.min', 'np.min', (['ss'], {}), '(ss)\n', (14013, 14017), True, 'import numpy as np\n'), ((14022, 14032), 'numpy.max', 'np.max', (['ss'], {}), '(ss)\n', (14028, 14032), True, 'import numpy as np\n'), ((14035, 14045), 'numpy.min', 'np.min', (['ss'], {}), '(ss)\n', (14041, 14045), True, 'import numpy as np\n'), ((14828, 14838), 'numpy.min', 'np.min', (['ss'], {}), '(ss)\n', (14834, 14838), True, 'import numpy as np\n'), ((14843, 14853), 'numpy.max', 'np.max', (['ss'], {}), '(ss)\n', (14849, 14853), True, 'import numpy as np\n'), ((14856, 14866), 'numpy.min', 'np.min', (['ss'], {}), '(ss)\n', (14862, 14866), True, 'import numpy as np\n')]
|
import kmeans
import json
import numpy as np
NUM_GAUSSIANS = 32
DO_KMEANS = False
DEBUG = True
mixture_weights = [1.0/NUM_GAUSSIANS] * NUM_GAUSSIANS
if DEBUG:
print ("mixture_weights: ", mixture_weights)
print("Loading parsed data...")
traindata_processed_file = open("parsed_data/data1.universalenrollparsed", "r")
data = json.loads(traindata_processed_file.read())
traindata_processed_file.close()
print("Done loading parsed data!")
means = []
if DO_KMEANS:
    means = kmeans.do_kmeans(data, NUM_GAUSSIANS)
else:
print("Loading centroids...")
traindata_processed_file = open("parsed_data/data1.kmeanspartialcentroids",
"r")
means = json.loads(traindata_processed_file.read())
traindata_processed_file.close()
print("Done loading centroids!")
data_np = np.array(data)
variances_np = np.var(data_np, axis=0)
if DEBUG:
print ("variances_np: ", variances_np)
variances = [variances_np.tolist()] * NUM_GAUSSIANS
initial_params = {
'mixture_weights': mixture_weights,
'means': means,
'variances': variances
}
print("writing inital parameters to file...")
traindata_processed_file = open("parsed_data/data1.initialparameters", "w")
traindata_processed_file.write(json.dumps(initial_params))
traindata_processed_file.close()
print("Done writing inital parameters to file")
|
[
"numpy.array",
"json.dumps",
"kmeans.do_kmeans",
"numpy.var"
] |
[((811, 825), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (819, 825), True, 'import numpy as np\n'), ((842, 865), 'numpy.var', 'np.var', (['data_np'], {'axis': '(0)'}), '(data_np, axis=0)\n', (848, 865), True, 'import numpy as np\n'), ((481, 507), 'kmeans.do_kmeans', 'kmeans.do_kmeans', (['data', '(32)'], {}), '(data, 32)\n', (497, 507), False, 'import kmeans\n'), ((1308, 1334), 'json.dumps', 'json.dumps', (['initial_params'], {}), '(initial_params)\n', (1318, 1334), False, 'import json\n')]
|
import tensorflow as tf
import numpy as np
from gpflow.params import DataHolder, Minibatch
from gpflow import autoflow, params_as_tensors, ParamList
from gpflow.models.model import Model
from gpflow.mean_functions import Identity, Linear
from gpflow.mean_functions import Zero
from gpflow.quadrature import mvhermgauss
from gpflow import settings
float_type = settings.float_type
from doubly_stochastic_dgp.layers import SVGP_Layer
def init_layers_linear(X, Y, Z, kernels,
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.shape[1]
layers = []
X_running, Z_running = X.copy(), Z.copy()
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]):
dim_in = kern_in.input_dim
dim_out = kern_out.input_dim
        print("layer dims:", dim_in, "->", dim_out)
if dim_in == dim_out:
mf = Identity()
else:
if dim_in > dim_out: # stepping down, use the pca projection
_, _, V = np.linalg.svd(X_running, full_matrices=False)
W = V[:dim_out, :].T
else: # stepping up, use identity + padding
W = np.concatenate([np.eye(dim_in), np.zeros((dim_in, dim_out - dim_in))], 1)
mf = Linear(W)
mf.set_trainable(False)
layers.append(Layer(kern_in, Z_running, dim_out, mf, white=white))
if dim_in != dim_out:
Z_running = Z_running.dot(W)
X_running = X_running.dot(W)
# final layer
layers.append(Layer(kernels[-1], Z_running, num_outputs, mean_function, white=white))
return layers
def init_layers_input_prop(X, Y, Z, kernels,
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.shape[1]
D = X.shape[1]
M = Z.shape[0]
layers = []
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]):
dim_in = kern_in.input_dim
dim_out = kern_out.input_dim - D
std_in = kern_in.variance.read_value()**0.5
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
layers.append(Layer(kern_in, Z_padded, dim_out, Zero(), white=white, input_prop_dim=D))
dim_in = kernels[-1].input_dim
std_in = kernels[-2].variance.read_value()**0.5 if dim_in > D else 1.
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
layers.append(Layer(kernels[-1], Z_padded, num_outputs, mean_function, white=white))
return layers
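# Illustrative usage sketch (the shapes and the RBF kernel choices below are
# assumptions for demonstration, not part of the original module).
def _example_layers():
    import gpflow
    X = np.random.randn(100, 4)   # inputs
    Y = np.random.randn(100, 1)   # outputs
    Z = X[:20].copy()             # inducing inputs
    # one hidden layer stepping 4 -> 2 dimensions, then the output layer
    kernels = [gpflow.kernels.RBF(4), gpflow.kernels.RBF(2)]
    return init_layers_linear(X, Y, Z, kernels)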
|
[
"numpy.eye",
"gpflow.mean_functions.Zero",
"gpflow.mean_functions.Linear",
"numpy.zeros",
"numpy.concatenate",
"numpy.linalg.svd",
"numpy.random.randn",
"gpflow.mean_functions.Identity"
] |
[((555, 561), 'gpflow.mean_functions.Zero', 'Zero', ([], {}), '()\n', (559, 561), False, 'from gpflow.mean_functions import Zero\n'), ((1833, 1839), 'gpflow.mean_functions.Zero', 'Zero', ([], {}), '()\n', (1837, 1839), False, 'from gpflow.mean_functions import Zero\n'), ((2598, 2625), 'numpy.concatenate', 'np.concatenate', (['[Z, pad]', '(1)'], {}), '([Z, pad], 1)\n', (2612, 2625), True, 'import numpy as np\n'), ((2294, 2321), 'numpy.concatenate', 'np.concatenate', (['[Z, pad]', '(1)'], {}), '([Z, pad], 1)\n', (2308, 2321), True, 'import numpy as np\n'), ((960, 970), 'gpflow.mean_functions.Identity', 'Identity', ([], {}), '()\n', (968, 970), False, 'from gpflow.mean_functions import Identity, Linear\n'), ((1338, 1347), 'gpflow.mean_functions.Linear', 'Linear', (['W'], {}), '(W)\n', (1344, 1347), False, 'from gpflow.mean_functions import Identity, Linear\n'), ((2538, 2568), 'numpy.random.randn', 'np.random.randn', (['M', '(dim_in - D)'], {}), '(M, dim_in - D)\n', (2553, 2568), True, 'import numpy as np\n'), ((1086, 1131), 'numpy.linalg.svd', 'np.linalg.svd', (['X_running'], {'full_matrices': '(False)'}), '(X_running, full_matrices=False)\n', (1099, 1131), True, 'import numpy as np\n'), ((2230, 2260), 'numpy.random.randn', 'np.random.randn', (['M', '(dim_in - D)'], {}), '(M, dim_in - D)\n', (2245, 2260), True, 'import numpy as np\n'), ((2378, 2384), 'gpflow.mean_functions.Zero', 'Zero', ([], {}), '()\n', (2382, 2384), False, 'from gpflow.mean_functions import Zero\n'), ((1262, 1276), 'numpy.eye', 'np.eye', (['dim_in'], {}), '(dim_in)\n', (1268, 1276), True, 'import numpy as np\n'), ((1278, 1314), 'numpy.zeros', 'np.zeros', (['(dim_in, dim_out - dim_in)'], {}), '((dim_in, dim_out - dim_in))\n', (1286, 1314), True, 'import numpy as np\n')]
|
import sys
sys.path.append('../')
import config
import pymysql.cursors
import pandas as pd
import numpy as np
from scipy import io as scipyio
from tempfile import SpooledTemporaryFile
from scipy.sparse import vstack as vstack_sparse_matrices
# Function to reassemble the p matrix from the vectors
def reconstitute_vector(bytesblob):
f = SpooledTemporaryFile(max_size=1000000000)
f.write(bytesblob)
f.seek(0)
return scipyio.mmread(f)
def youtubelink(vidid):
return ('https://www.youtube.com/watch?v=' + vidid)
connection = pymysql.connect(host='localhost',
user='root',
password=config.MYSQL_SERVER_PASSWORD,
db='youtubeProjectDB',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor:
# https://stackoverflow.com/questions/612231/how-can-i-select-rows-with-maxcolumn-value-distinct-by-another-column-in-sql?rq=1
# Note - this is a very interesting query! never seen it before..
sql = """SELECT * FROM
(SELECT DISTINCT(videoId) AS v, videoTitle FROM search_api) A
INNER JOIN
(SELECT * FROM captions c
INNER JOIN(SELECT videoId AS InnerVideoId,
MAX(wordCount) AS MaxWordCount,
MAX(id) AS MaxId
FROM captions
WHERE tfidfVector IS NOT NULL
GROUP BY videoId) grouped_c
ON c.videoId = grouped_c.InnerVideoId
AND c.wordCount = grouped_c.MaxWordCount
AND c.id = grouped_c.MaxId) B
ON A.v = B.videoId;"""
cursor.execute(sql)
manyCaptions = cursor.fetchall()
videos_df = pd.read_sql(sql, connection)
connection.close()
# note that the other program which put the vectors there only did it on captions WHERE language like '%en%'
# for that reason this query does not contain language. It has instead WHERE tfidfVector IS NOT NULL
videos_df = videos_df.drop('v', axis=1)
videos_df['tfidfVector_NP'] = videos_df['tfidfVector'].apply(reconstitute_vector)
listOfSparseVectors = list(videos_df['tfidfVector_NP'].values.flatten())
p = vstack_sparse_matrices(listOfSparseVectors)
video_titles = list(videos_df['videoTitle'].values.flatten())
video_ids = list(videos_df['videoId'].values.flatten())
# Apply the transformation to the term document matrix to compute similarity between all pairs
pairwise_similarity = (p * p.T).A # In Scipy, .A transforms a sparse matrix to a dense one
# df9 = pd.DataFrame(pairwise_similarity, columns=video_ids, index=video_ids)
# s = pd.Series(video_titles, index=df9.index)
# df9 = pd.concat((s.rename('videoTitles'), df9), axis=1)
def nth_similar_tuple(n, ps):
title = (np.array(video_titles))[((-ps).argsort()[n])]
vid_id = (np.array(video_ids))[((-ps).argsort()[n])]
return (title, vid_id)
d = []
for a,b,c in zip(video_titles, video_ids, pairwise_similarity):
d.append({'a':(a,b),
'b': nth_similar_tuple(1,c),
'c': nth_similar_tuple(2,c),
'd': nth_similar_tuple(3,c)})
# takes about a minute to run through the 7000 unique rows.
similarity_df = pd.DataFrame(d)
similarity_df.columns = ['original', 'first_similar', 'second_similar', 'third_similar']
# split the tuples into two-level columns.
similarity_df = pd.concat(
[pd.DataFrame(x, columns=['video_title','youtube_id']) for x in similarity_df.values.T.tolist()],
axis=1,
keys=similarity_df.columns)
print ("Finished running, the Pandas DataFrame variable similarity_df should now be in scope.")
|
[
"scipy.io.mmread",
"tempfile.SpooledTemporaryFile",
"numpy.array",
"pandas.DataFrame",
"scipy.sparse.vstack",
"pandas.read_sql",
"sys.path.append"
] |
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((2329, 2372), 'scipy.sparse.vstack', 'vstack_sparse_matrices', (['listOfSparseVectors'], {}), '(listOfSparseVectors)\n', (2351, 2372), True, 'from scipy.sparse import vstack as vstack_sparse_matrices\n'), ((3355, 3370), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (3367, 3370), True, 'import pandas as pd\n'), ((343, 384), 'tempfile.SpooledTemporaryFile', 'SpooledTemporaryFile', ([], {'max_size': '(1000000000)'}), '(max_size=1000000000)\n', (363, 384), False, 'from tempfile import SpooledTemporaryFile\n'), ((433, 450), 'scipy.io.mmread', 'scipyio.mmread', (['f'], {}), '(f)\n', (447, 450), True, 'from scipy import io as scipyio\n'), ((1848, 1876), 'pandas.read_sql', 'pd.read_sql', (['sql', 'connection'], {}), '(sql, connection)\n', (1859, 1876), True, 'import pandas as pd\n'), ((2914, 2936), 'numpy.array', 'np.array', (['video_titles'], {}), '(video_titles)\n', (2922, 2936), True, 'import numpy as np\n'), ((2974, 2993), 'numpy.array', 'np.array', (['video_ids'], {}), '(video_ids)\n', (2982, 2993), True, 'import numpy as np\n'), ((3535, 3589), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': "['video_title', 'youtube_id']"}), "(x, columns=['video_title', 'youtube_id'])\n", (3547, 3589), True, 'import pandas as pd\n')]
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
print("Python Version:", torch.__version__)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4*4*50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4*4*50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
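# Shape walk-through for a 1x28x28 MNIST/FashionMNIST image (why fc1 expects 4*4*50):
# conv1 (5x5, no padding) -> 20x24x24, max_pool2d(2) -> 20x12x12,
# conv2 (5x5)             -> 50x8x8,   max_pool2d(2) -> 50x4x4 = 800 flattened features.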
def train(model, device, train_dataloader, optimizer, epoch):
model.train()
for idx, (data, target) in enumerate(train_dataloader):
data, target = data.to(device), target.to(device)
pred = model(data)
loss = F.nll_loss(pred, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx % 100 == 0:
print("Train Epoch: {}, iteration: {}, Loss: {}".format(
epoch, idx, loss.item()))
def test(model, device, test_dataloader):
model.eval()
total_loss = 0.
correct = 0.
with torch.no_grad():
for idx, (data, target) in enumerate(test_dataloader):
data, target = data.to(device), target.to(device)
output = model(data)
total_loss += F.nll_loss(output, target, reduction="sum").item()
pred = output.argmax(dim=1)
correct += pred.eq(target.view_as(pred)).sum().item()
total_loss /= len(test_dataloader.dataset)
acc = correct / len(test_dataloader.dataset) * 100
print("Test loss: {}, Accuracy: {}".format(total_loss, acc))
mnist_data = datasets.MNIST("./mnist_data", train=True, download=True,
transform = transforms.Compose([
transforms.ToTensor(),
]))
# print(mnist_data)
# print(mnist_data[233][0].shape)
data = [d[0].data.cpu().numpy() for d in mnist_data]
np.mean(data)
np.std(data)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 32
train_dataloader = torch.utils.data.DataLoader(
datasets.FashionMNIST("./fashion_mnist_data", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True
)
test_dataloader = torch.utils.data.DataLoader(
datasets.FashionMNIST("./fashion_mnist_data", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True
)
lr = 0.01
momentum = 0.5
model = Net().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
num_epochs = 2
for epoch in range(num_epochs):
train(model, device, train_dataloader, optimizer, epoch)
test(model, device, test_dataloader)
torch.save(model.state_dict(), "fashion_mnist_cnn.pt")
|
[
"numpy.mean",
"torch.nn.functional.nll_loss",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.Linear",
"numpy.std",
"torch.nn.functional.log_softmax",
"torch.no_grad",
"torch.nn.functional.max_pool2d",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] |
[((2190, 2203), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2197, 2203), True, 'import numpy as np\n'), ((2204, 2216), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (2210, 2216), True, 'import numpy as np\n'), ((308, 330), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(20)', '(5)', '(1)'], {}), '(1, 20, 5, 1)\n', (317, 330), True, 'import torch.nn as nn\n'), ((352, 375), 'torch.nn.Conv2d', 'nn.Conv2d', (['(20)', '(50)', '(5)', '(1)'], {}), '(20, 50, 5, 1)\n', (361, 375), True, 'import torch.nn as nn\n'), ((395, 421), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 50)', '(500)'], {}), '(4 * 4 * 50, 500)\n', (404, 421), True, 'import torch.nn as nn\n'), ((437, 455), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(10)'], {}), '(500, 10)\n', (446, 455), True, 'import torch.nn as nn\n'), ((529, 550), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (541, 550), True, 'import torch.nn.functional as F\n'), ((597, 618), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (609, 618), True, 'import torch.nn.functional as F\n'), ((721, 744), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (734, 744), True, 'import torch.nn.functional as F\n'), ((987, 1011), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['pred', 'target'], {}), '(pred, target)\n', (997, 1011), True, 'import torch.nn.functional as F\n'), ((1336, 1351), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1349, 1351), False, 'import torch\n'), ((2250, 2275), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2273, 2275), False, 'import torch\n'), ((2027, 2048), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2046, 2048), False, 'from torchvision import datasets, transforms\n'), ((1537, 1580), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (1547, 1580), True, 'import torch.nn.functional as F\n'), ((2477, 2498), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2496, 2498), False, 'from torchvision import datasets, transforms\n'), ((2508, 2550), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (2528, 2550), False, 'from torchvision import datasets, transforms\n'), ((2810, 2831), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2829, 2831), False, 'from torchvision import datasets, transforms\n'), ((2841, 2883), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (2861, 2883), False, 'from torchvision import datasets, transforms\n')]
|
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
This is Allie's modeling API to help build classification or regression models.
All you need to do is run the model.py script and you will be guided through the
modeling process.
Usage: python3 model.py
Alternative CLI Usage: python3 model.py audio 2 c gender males females
- audio = audio file type
- 2 = 2 classes
- c = classification (r for regression)
- gender = common name of model
- males = first class
- females = second class (list all N class names in order)
For additional documentation, check out
https://github.com/jim-schwoebel/allie/tree/master/training
'''
###############################################################
## IMPORT STATEMENTS ##
###############################################################
import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform
from pyfiglet import Figlet
f=Figlet(font='doh')
print(f.renderText('Allie'))
f=Figlet(font='doom')
import pandas as pd
import matplotlib.pyplot as plt
###############################################################
## CREATE HELPER FUNCTIONS ##
###############################################################
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
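# e.g. most_common(['audio', 'audio', 'text']) -> 'audio'; ties are broken arbitrarily
# because the candidates are drawn from a set.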
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
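# e.g. prev_dir('/home/user/allie/training') -> '/home/user/allie' (the parent directory).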
def get_folders(listdir):
folders=list()
for i in range(len(listdir)):
if listdir[i].find('.') < 0:
folders.append(listdir[i])
return folders
def classifyfolder(listdir):
filetypes=list()
for i in range(len(listdir)):
if listdir[i].endswith(('.mp3', '.wav')):
filetypes.append('audio')
elif listdir[i].endswith(('.png', '.jpg')):
filetypes.append('image')
elif listdir[i].endswith(('.txt')):
filetypes.append('text')
elif listdir[i].endswith(('.mp4', '.avi')):
filetypes.append('video')
elif listdir[i].endswith(('.csv')):
filetypes.append('csv')
counts={'audio': filetypes.count('audio'),
'image': filetypes.count('image'),
'text': filetypes.count('text'),
'video': filetypes.count('video'),
'csv': filetypes.count('csv')}
# get back the type of folder (main file type)
countlist=list(counts)
countvalues=list(counts.values())
maxvalue=max(countvalues)
maxind=countvalues.index(maxvalue)
return countlist[maxind]
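# e.g. a hypothetical folder listing with 8 .wav files and 2 .txt files returns 'audio',
# the majority file type.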
def pull_element(mylist, element):
pull_=list()
for i in range(len(mylist)):
pull_.append(mylist[i][element])
return pull_
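# e.g. pull_element([[1, 2], [3, 4], [5, 6]], 0) -> [1, 3, 5] (the first column of each row).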
def convert_csv(X_train, y_train, labels, mtype, classes):
'''
Take in an array of features and labels and output a
pandas DataFrame for easy .CSV export and model training.
This is important to make sure all machine learning training sessions
use the same dataset (so they can be benchmarked appropriately).
'''
# from pandas merging guide https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
feature_list=labels
data=list()
for i in tqdm(range(len(X_train)), desc='converting csv...'):
newlist=list()
for j in range(len(X_train[i])):
newlist.append([X_train[i][j]])
temp=pd.DataFrame(dict(zip(feature_list,newlist)), index=[i])
# print(temp)
data.append(temp)
data = pd.concat(data)
if mtype == 'c':
data['class_']=y_train
elif mtype == 'r':
if len(classes) == 1:
data[classes[0]]=y_train
else:
for j in range(len(classes)):
newy=pull_element(y_train, j)
data[classes[j]]=newy
data=pd.DataFrame(data, columns = list(data))
# print this because in pretty much every case you will write the .CSV file afterwards
print('writing csv file...')
return data
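# Illustrative output (hypothetical feature labels f1/f2, classification case):
#       f1    f2  class_
# 0   0.10  2.30       0
# 1   0.40  1.70       1
# Each feature value is wrapped in a one-element list above only so that a single-row
# DataFrame can be built per sample; the per-sample frames are then concatenated.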
def device_info():
cpu_data={'memory':psutil.virtual_memory(),
'cpu percent':psutil.cpu_percent(),
'cpu times':psutil.cpu_times(),
'cpu count':psutil.cpu_count(),
'cpu stats':psutil.cpu_stats(),
'cpu swap':psutil.swap_memory(),
'partitions':psutil.disk_partitions(),
'disk usage':psutil.disk_usage('/'),
'disk io counters':psutil.disk_io_counters(),
'battery':psutil.sensors_battery(),
'boot time':psutil.boot_time(),
}
data={'time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
'timezone':time.tzname,
'operating system': platform.system(),
'os release':platform.release(),
'os version':platform.version(),
'cpu data':cpu_data,
'space left': list(psutil.disk_usage('/'))[2]/1000000000}
return data
def get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time):
'''
get the metrics associated with a classification or regression problem
and output a .JSON file with the training session.
'''
metrics_=dict()
y_true=y_test
if default_training_script not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(X_test)
elif default_training_script=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for k in range(len(listdir)):
if listdir[k].startswith('predictions'):
csvfile=listdir[k]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif default_training_script == 'autogluon':
from autogluon import TabularPrediction as task
test_data=test_data.drop(labels=['class'],axis=1)
y_pred=clf.predict(test_data)
elif default_training_script == 'autokeras':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'autopytorch':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
y_pred = clf.predict(data)
os.chdir(curdir)
elif default_training_script == 'ludwig':
data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
pred=clf.predict(data)['class__predictions']
y_pred=np.array(list(pred), dtype=np.int64)
elif default_training_script == 'devol':
X_test=X_test.reshape(X_test.shape+ (1,)+ (1,))
y_pred=clf.predict_classes(X_test).flatten()
elif default_training_script=='keras':
if mtype == 'c':
y_pred=clf.predict_classes(X_test).flatten()
elif mtype == 'r':
y_pred=clf.predict(X_test).flatten()
elif default_training_script=='neuraxle':
y_pred=clf.transform(X_test)
elif default_training_script=='safe':
# have to make into a pandas dataframe
test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
y_pred=clf.predict(test_data)
print(y_pred)
# get classification or regression metrics
if mtype in ['c', 'classification']:
# now get all classification metrics
mtype='classification'
metrics_['accuracy']=metrics.accuracy_score(y_true, y_pred)
metrics_['balanced_accuracy']=metrics.balanced_accuracy_score(y_true, y_pred)
try:
metrics_['precision']=metrics.precision_score(y_true, y_pred)
except:
metrics_['precision']='n/a'
try:
metrics_['recall']=metrics.recall_score(y_true, y_pred)
except:
metrics_['recall']='n/a'
try:
metrics_['f1_score']=metrics.f1_score(y_true, y_pred, pos_label=1)
except:
metrics_['f1_score']='n/a'
try:
metrics_['f1_micro']=metrics.f1_score(y_true, y_pred, average='micro')
except:
metrics_['f1_micro']='n/a'
try:
metrics_['f1_macro']=metrics.f1_score(y_true, y_pred, average='macro')
except:
metrics_['f1_macro']='n/a'
try:
metrics_['roc_auc']=metrics.roc_auc_score(y_true, y_pred)
except:
metrics_['roc_auc']='n/a'
try:
metrics_['roc_auc_micro']=metrics.roc_auc_score(y_true, y_pred, average='micro')
except:
metrics_['roc_auc_micro']='n/a'
try:
metrics_['roc_auc_macro']=metrics.roc_auc_score(y_true, y_pred, average='macro')
except:
metrics_['roc_auc_macro']='n/a'
metrics_['confusion_matrix']=metrics.confusion_matrix(y_true, y_pred).tolist()
metrics_['classification_report']=metrics.classification_report(y_true, y_pred, target_names=classes)
plot_confusion_matrix(np.array(metrics_['confusion_matrix']), classes)
try:
# predict_proba only works for log loss and modified Huber loss.
# https://stackoverflow.com/questions/47788981/sgdclassifier-with-predict-proba
try:
y_probas = clf.predict_proba(X_test)[:, 1]
except:
try:
y_probas = clf.decision_function(X_test)[:, 1]
except:
print('error making y_probas')
plot_roc_curve(y_test, [y_probas], [default_training_script])
except:
print('error plotting ROC curve')
print('predict_proba only works for log loss and modified Huber loss.')
elif mtype in ['r', 'regression']:
# now get all regression metrics
mtype='regression'
metrics_['mean_absolute_error'] = metrics.mean_absolute_error(y_true, y_pred)
metrics_['mean_squared_error'] = metrics.mean_squared_error(y_true, y_pred)
metrics_['median_absolute_error'] = metrics.median_absolute_error(y_true, y_pred)
metrics_['r2_score'] = metrics.r2_score(y_true, y_pred)
plot_regressor(clf, classes, X_test, y_test)
data={'sample type': problemtype,
'training time': time.time()-model_start_time,
'created date': str(datetime.datetime.now()),
'device info': device_info(),
'session id': model_session,
'classes': classes,
'problem type': mtype,
'model name': modelname,
'model type': default_training_script,
'metrics': metrics_,
'settings': settings,
'transformer name': transformer_name,
'training data': created_csv_files,
'sample X_test': X_test[0].tolist(),
'sample y_test': y_test[0].tolist()}
if modelname.endswith('.pickle'):
jsonfilename=modelname[0:-7]+'.json'
elif modelname.endswith('.h5'):
jsonfilename=modelname[0:-3]+'.json'
else:
jsonfilename=modelname+'.json'
jsonfile=open(jsonfilename,'w')
json.dump(data,jsonfile)
jsonfile.close()
# also output requirements.txt for reproducibility purposes
curdir=os.getcwd()
basedir=prev_dir(curdir)
os.chdir(basedir)
os.system('pip3 freeze -> requirements.txt')
# FUTURE - add in optional copy of cleaning, augmentation, and feature libraries contextually
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/features', basedir+'/features')
# except:
# print('error copying features')
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/cleaning', basedir+'/cleaning')
# except:
# print('error copying cleaning techniques')
# shutil.copytree(prev_dir(prev_dir(basedir))+'/augmentation', basedir+'/augmentation')
# except:
# print('error copying augmentation techniques')
os.chdir(curdir)
def plot_roc_curve(y_test, probs, clf_names):
'''
This function plots an ROC curve with the appropriate
list of classifiers.
'''
cycol = itertools.cycle('bgrcmyk')
for i in range(len(probs)):
print(y_test)
print(probs[i])
try:
fper, tper, thresholds = roc_curve(y_test, probs[i])
plt.plot(fper, tper, color=next(cycol), label=clf_names[i]+' = %s'%(str(round(metrics.auc(fper, tper), 3))))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
except:
print('passing %s'%(clf_names[i]))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.tight_layout()
plt.savefig('roc_curve.png')
plt.close()
def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("\nNormalized confusion matrix")
else:
print('\nConfusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig('confusion_matrix.png')
plt.close()
def plot_regressor(regressor, classes, X_test, y_test):
'''
plot regression models with a bar chart.
'''
try:
y_pred = regressor.predict(X_test)
# plot the first 25 records
if len(classes) == 2:
df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
# plot a straight line on the data
plt.scatter(X_test, y_test, color='gray')
plt.plot(X_test, y_pred, color='red', linewidth=2)
plt.tight_layout()
plt.savefig('straight_line_predictions.png')
plt.close()
else:
# multi-dimensional generalization
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(10,8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
except:
print('error plotting regressor')
def pursue_modeling(mtype, model_dir, problemtype, default_training_script,common_name_model):
'''
simple script to decide whether or not to continue modeling the data.
'''
try:
model_listdir=os.listdir(model_dir+'/'+problemtype+'_models')
except:
model_listdir=list()
# note that these are tpot definitions
model_exists=False
if default_training_script == 'tpot':
if common_name_model + '_classifier' in model_listdir and mtype == 'c':
model_exists=True
elif common_name_model +'_regression' in model_listdir and mtype == 'r':
model_exists=True
else:
# only look for naming conflicts with TPOT for now, can expand into the future.
model_exists=False
return model_exists, model_listdir
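# e.g. (hypothetical) with default_training_script 'tpot' and common_name_model
# 'gender_tpot', an existing 'gender_tpot_classifier' entry in the models directory
# makes this return model_exists=True, so the training loop below skips retraining.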
def get_csvfiles(listdir):
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
return csvfiles
###############################################################
## LOADING SETTINGS ##
###############################################################
# load the default feature set
cur_dir = os.getcwd()
prevdir= prev_dir(cur_dir)
sys.path.append(prevdir+'/train_dir')
settings=json.load(open(prevdir+'/settings.json'))
# get all the default feature arrays
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']
create_csv=settings['create_csv']
# prepare training and testing data (should have been already featurized) - # of classes/folders
os.chdir(prevdir+'/train_dir')
data_dir=os.getcwd()
listdir=os.listdir()
folders=get_folders(listdir)
csvfiles=get_csvfiles(listdir)
# now assess folders by content type
data=dict()
for i in range(len(folders)):
os.chdir(folders[i])
listdir=os.listdir()
filetype=classifyfolder(listdir)
data[folders[i]]=filetype
os.chdir(data_dir)
###############################################################
## INITIALIZE CLASSES ##
###############################################################
# get all information from sys.argv, and if not,
# go through asking user for the proper parameters
try:
problemtype=sys.argv[1]
mtype=sys.argv[3]
if mtype == 'c':
classnum=sys.argv[2]
common_name=sys.argv[4]
classes=list()
for i in range(int(classnum)):
classes.append(sys.argv[i+5])
else:
classnum=1
problemtype='csv'
mtype=sys.argv[1]
csvfile=sys.argv[2]
classes=[sys.argv[3]]
common_name=csvfile[0:-4]
except:
# now ask user what type of problem they are trying to solve
mtype=input('is this a classification (c) or regression (r) problem? \n')
while mtype not in ['c','r']:
print('input not recognized...')
mtype=input('is this a classification (c) or regression (r) problem? \n')
if mtype == 'c':
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
while problemtype not in ['1','2','3','4','5']:
print('answer not recognized...')
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
if problemtype=='1':
problemtype='audio'
elif problemtype=='2':
problemtype='text'
elif problemtype=='3':
problemtype='image'
elif problemtype=='4':
problemtype='video'
elif problemtype=='5':
problemtype='csv'
if problemtype != 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
count=0
availableclasses=list()
for i in range(len(folders)):
if data[folders[i]]==problemtype:
availableclasses.append(folders[i])
count=count+1
classnum=input('how many classes would you like to model? (%s available) \n'%(str(count)))
print('these are the available classes: ')
print(availableclasses)
# get all if all (good for many classes)
classes=list()
if classnum=='all':
for i in range(len(availableclasses)):
classes.append(availableclasses[i])
else:
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
elif problemtype == 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
print('csv file options are: %s \n'%(csvfiles))
csvfile=input('which csvfile would you like to use for classification? \n')
g=pd.read_csv(csvfile)
columns=list(g)
print('potential targets include: %s'%(columns))
target=input('what target would you like to use? \n')
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
elif mtype =='r':
# for regression problems we need a target column to predict / classes from a .CSV
problemtype='csv'
# assumes the .CSV file is in the train dir
os.chdir(prevdir+'/train_dir')
listdir=os.listdir()
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
while csvfile not in csvfiles:
print('answer not recognized...')
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
# the available classes are only the numeric columns from the spreadsheet
data = pd.read_csv(csvfile)
columns = list(data)
availableclasses=list()
for i in range(len(columns)):
# look at filetype extension in each column
coldata=data[columns[i]]
sampletypes=list()
for j in range(len(coldata)):
try:
values=float(coldata[j])
sampletypes.append('numerical')
except:
if coldata[j].endswith('.wav'):
sampletypes.append('audio')
elif coldata[j].endswith('.txt'):
sampletypes.append('text')
elif coldata[j].endswith('.png'):
sampletypes.append('image')
elif coldata[j].endswith('.mp4'):
sampletypes.append('video')
else:
sampletypes.append('other')
coltype=most_common(sampletypes)
# correct the other category if needed
if coltype == 'other':
# if coltype.endswith('.csv'):
# coltype='csv'
if len(set(list(coldata))) < 10:
coltype='categorical'
else:
# 10 or more unique answers, so interpret this column as typed text input
coltype='typedtext'
if coltype == 'numerical':
availableclasses.append(columns[i])
if len(availableclasses) > 0:
classnum=input('how many classes would you like to model? (%s available) \n'%(str(len(availableclasses))))
print('these are the available classes: %s'%(str(availableclasses)))
classes=list()
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
else:
print('no classes available... ending session')
sys.exit()
common_name=input('what is the 1-word common name for the problem you are working on? (e.g. gender for male/female classification) \n')
###############################################################
## UPGRADE MODULES / LOAD MODULES ##
###############################################################
print('-----------------------------------')
print(' LOADING MODULES ')
print('-----------------------------------')
# upgrade to have the proper scikit-learn version later
os.chdir(cur_dir)
os.system('python3 upgrade.py')
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
from sklearn import metrics
from sklearn.metrics import roc_curve
###############################################################
## CLEAN THE DATA ##
###############################################################
clean_data=settings['clean_data']
clean_dir=prevdir+'/cleaning'
if clean_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('CLEANING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# clean audio via default_audio_cleaners
os.chdir(clean_dir+'/audio_cleaning')
elif problemtype == 'text':
# clean text via default_text_cleaners
os.chdir(clean_dir+'/text_cleaning')
elif problemtype == 'image':
# clean images via default_image_cleaners
os.chdir(clean_dir+'/image_cleaning')
elif problemtype == 'video':
# clean video via default_video_cleaners
os.chdir(clean_dir+'/video_cleaning')
elif problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
elif clean_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## AUGMENT THE DATA ##
###############################################################
augment_data=settings['augment_data']
augment_dir=prevdir+'/augmentation'
if augment_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('AUGMENTING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# augment audio via default_audio_augmenters
os.chdir(augment_dir+'/audio_augmentation')
elif problemtype == 'text':
# augment text via default_text_augmenters
os.chdir(augment_dir+'/text_augmentation')
elif problemtype == 'image':
# augment images via default_image_augmenters
os.chdir(augment_dir+'/image_augmentation')
elif problemtype == 'video':
# augment video via default_video_augmenters
os.chdir(augment_dir+'/video_augmentation')
elif problemtype == 'csv':
# augment .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
elif augment_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# featurize .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## FEATURIZE FILES ##
###############################################################
# now featurize each class (in proper folder)
if mtype == 'c':
data={}
print('-----------------------------------')
print(f.renderText('FEATURIZING DATA'))
print('-----------------------------------')
if problemtype == 'csv':
# csv features should have already been defined
# need to separate into number of unique classes
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
csv_feature_labels=list(csv_features)
classes=list(set(list(csv_labels)))
for i in range(len(classes)):
class_type = classes[i]
feature_list=list()
label_list=list()
for i in range(len(csv_features)):
if csv_labels[i] == class_type:
feature_list.append(list(csv_features.iloc[i,:]))
label_list.append(csv_feature_labels)
data[class_type]=feature_list
else:
#
for i in range(len(classes)):
class_type=classes[i]
if problemtype == 'audio':
# featurize audio
os.chdir(prevdir+'/features/audio_features')
default_features=default_audio_features
elif problemtype == 'text':
# featurize text
os.chdir(prevdir+'/features/text_features')
default_features=default_text_features
elif problemtype == 'image':
# featurize images
os.chdir(prevdir+'/features/image_features')
default_features=default_image_features
elif problemtype == 'video':
# featurize video
os.chdir(prevdir+'/features/video_features')
default_features=default_video_features
print('-----------------------------------')
print(' FEATURIZING %s'%(classes[i].upper()))
print('-----------------------------------')
os.system('python3 featurize.py "%s"'%(data_dir+'/'+classes[i]))
os.chdir(data_dir+'/'+classes[i])
# load audio features
listdir=os.listdir()
feature_list=list()
label_list=list()
for j in range(len(listdir)):
if listdir[j][-5:]=='.json':
try:
g=json.load(open(listdir[j]))
# consolidate all features into one array (if featurizing with multiple featurizers)
default_feature=list()
default_label=list()
for k in range(len(default_features)):
default_feature=default_feature+g['features'][problemtype][default_features[k]]['features']
default_label=default_label+g['features'][problemtype][default_features[k]]['labels']
feature_list.append(default_feature)
label_list.append(default_label)
except:
print('ERROR - skipping ' + listdir[j])
data[class_type]=feature_list
elif mtype == 'r':
# featurize .CSV
os.chdir(prevdir+'/features/csv_features')
output_file=str(uuid.uuid1())+'.csv'
os.system('python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"'%(prevdir+'/train_dir/'+csvfile, prevdir+'/train_dir/'+output_file, classes[0]))
csvfile=output_file
default_features=['csv_regression']
###############################################################
## GENERATE TRAINING DATA ##
###############################################################
print('-----------------------------------')
print(f.renderText('CREATING TRAINING DATA'))
print('-----------------------------------')
# perform class balance such that both classes have the same number
# of members (true by default, but can also be false)
os.chdir(prevdir+'/training/')
model_dir=prevdir+'/models'
balance=settings['balance_data']
remove_outliers=settings['remove_outliers']
outlier_types=settings['default_outlier_detector']
if mtype == 'c':
if problemtype != 'csv':
jsonfile=''
for i in range(len(classes)):
if i==0:
jsonfile=classes[i]
else:
jsonfile=jsonfile+'_'+classes[i]
jsonfile=jsonfile+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_dir=prevdir+'/train_dir/'+classes[0]
os.chdir(labels_dir)
listdir=os.listdir()
features_file=''
for i in range(len(listdir)):
if listdir[i].endswith('.json'):
features_file=listdir[i]
labels_=list()
for i in range(len(default_features)):
tlabel=json.load(open(features_file))['features'][problemtype][default_features[i]]['labels']
labels_=labels_+tlabel
elif problemtype == 'csv':
# format data appropriately
jsonfile=target+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_=csv_feature_labels
elif mtype == 'r':
regression_data=pd.read_csv(prevdir+'/train_dir/'+csvfile)
print(csvfile)
# get features and labels
features_=regression_data.drop(columns=classes, axis=1)
labels_=list(features_)
labels_csv=regression_data.drop(columns=list(features_), axis=1)
# iterate through each column and make into proper features and labels
features=list()
labels=list()
# testing
# print(len(features_))
# print(len(labels_))
# print(features_)
# print(labels_)
# print(features_.iloc[0,:])
# print(labels_.iloc[0,:])
# get features and labels
for i in range(len(features_)):
features.append(list(features_.iloc[i,:]))
labels.append(list(labels_csv.iloc[i,:]))
# convert to name alldata just to be consistent
alldata=features
# print(alldata[0])
# print(labels[0])
# print(labels_)
os.chdir(model_dir)
# get the split from the settings.json
try:
test_size=settings['test_size']
except:
test_size=0.25
# error checking around lengths of arrays and deleting as necessary
lengths=list()
for i in range(len(alldata)):
lengths.append(len(alldata[i]))
# CLEAN IF DIMENSIONS DO NOT MATCH!!
maxval=max(lengths)
minval=min(lengths)
delete_ind=list()
inds=list()
alldata=np.array(alldata)
labels=np.array(labels)
if maxval != minval:
if lengths.count(maxval) > lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been removed
if lengths[i] == minval:
delete_ind.append(i)
elif lengths.count(maxval) < lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been added
if lengths[i] == maxval:
delete_ind.append(i)
print('DELETING THESE INDICES: %s'%(str(delete_ind)))
print(alldata.shape)
print(labels.shape)
alldata=np.delete(alldata, tuple(delete_ind), axis=0)
labels=np.delete(labels, tuple(delete_ind))
print(alldata.shape)
print(labels.shape)
# # now see if any element in the array is a NaN and do not include if so in alldata or labels
# for i in range(len(alldata)):
# try:
# array_has_nan = list(np.isnan(np.array(alldata[i]))).count(True)
# array_has_string=list(np.char.isnumeric(np.array(alldata[i]))).count(False)
# except:
# array_has_string=1
# if array_has_nan > 0 or array_has_string > 0:
# inds.append(i)
# print(alldata[i])
# if len(inds) > 0:
# print('DELETING THESE INDICES: %s'%(str(inds)))
# alldata=np.delete(alldata, tuple(inds))
# labels=np.delete(labels, tuple(inds))
# REMOVE OUTLIERS IF SETTING IS TRUE
alldata=np.array(alldata)
labels=np.array(labels)
if remove_outliers==True:
print('-----------------------------------')
print(' REMOVING OUTLIERS')
print('-----------------------------------')
for i in range(len(outlier_types)):
outlier_type=outlier_types[i]
if outlier_type =='isolationforest':
from sklearn.ensemble import IsolationForest
clf = IsolationForest(random_state=0).fit(alldata)
y_pred = clf.predict(alldata)
inlier_ind=list(np.where(y_pred==1))
outlier_ind=list(np.where(y_pred==-1))
y_pred = y_pred.tolist()
print(type(y_pred))
print(type(y_pred[0]))
n_inliers = y_pred.count(1)
n_outliers = y_pred.count(-1)
print(n_inliers)
print(n_outliers)
# shape before
print(alldata.shape)
print(labels.shape)
# delete outliers
alldata=np.delete(alldata, tuple(outlier_ind), axis=0)
labels=np.delete(labels, tuple(outlier_ind))
print(alldata.shape)
print(labels.shape)
elif outlier_type=='zscore':
os.system('pip3 install statsmodels==0.11.1')
from scipy import stats
from statsmodels.formula.api import ols
# https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba
z = np.abs(stats.zscore(alldata))
# print(z)
threshold = 3
inds=list(set(np.where(z>threshold)[0]))
print(len(inds))
print(tuple(inds))
print(alldata.shape)
print('-->')
alldata = np.delete(alldata, tuple(inds), axis=0)
print(alldata.shape)
labels = np.delete(labels, tuple(inds))
print(len(alldata))
print(len(labels))
# rebalance data to all be the same length
newlabels=list(labels)
outlier_class=list()
for i in range(len(classes)):
outlier_class.append(newlabels.count(i))
lengths=np.array(outlier_class)
minlength=np.amin(outlier_class)
# now load all the classes
for i in range(len(classes)):
# only balance if specified in settings
if balance==True:
count2=newlabels.count(i)
while count2 > minlength:
count2=newlabels.count(i)
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(count2-minlength)))
ind=list(labels).index(i)
alldata=np.delete(alldata, tuple([ind]), axis=0)
labels=np.delete(labels, tuple([ind]))
newlabels=list(labels)
alldata=list(alldata)
labels=list(labels)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert everything to numpy arrays (for testing later)
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# create list of created csv files
created_csv_files=list()
# create training and testing datasets and save to a .CSV file for archive purposes
# this ensures that all machine learning training methods use the same training data
basefile=common_name
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all.csv'.upper())
if basefile+'_all.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all.csv',index=False)
created_csv_files.append(basefile+'_all.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_all.csv'))
try:
print(basefile+'_train.csv'.upper())
if basefile+'_train.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train.csv',index=False)
created_csv_files.append(basefile+'_train.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_train.csv'))
try:
print(basefile+'_test.csv'.upper())
if basefile+'_test.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test.csv',index=False)
created_csv_files.append(basefile+'_test.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_test.csv'))
############################################################
## DATA TRANSFORMATION ##
############################################################
'''
Scale features via scalers, dimensionality reduction techniques,
and feature selection strategies per the settings.json document.
'''
preprocess_dir=prevdir+'/preprocessing'
os.chdir(preprocess_dir)
# get all the important settings for the transformations
scale_features=settings['scale_features']
reduce_dimensions=settings['reduce_dimensions']
select_features=settings['select_features']
default_scalers=settings['default_scaler']
default_reducers=settings['default_dimensionality_reducer']
default_selectors=settings['default_feature_selector']
# get command for terminal
transform_command=''
if problemtype == 'csv' and mtype == 'c':
transform_command=transform_command+' "'+'Class'+'"'
else:
for i in range(len(classes)):
transform_command=transform_command+' "'+classes[i]+'"'
# get filename / create a unique file name
if mtype=='r':
t_filename='r_'+common_name
elif mtype=='c':
t_filename='c_'+common_name
# only add names in if True
if scale_features == True:
for i in range(len(default_scalers)):
t_filename=t_filename+'_'+default_scalers[i]
if reduce_dimensions == True:
for i in range(len(default_reducers)):
t_filename=t_filename+'_'+default_reducers[i]
if select_features == True:
for i in range(len(default_selectors)):
t_filename=t_filename+'_'+default_selectors[i]
transform_file=t_filename+'.pickle'
if scale_features == True or reduce_dimensions == True or select_features == True:
print('----------------------------------')
print(f.renderText('TRANSFORMING DATA'))
print('----------------------------------')
# go to proper transformer directory
try:
os.chdir(problemtype+'_transformer')
except:
os.mkdir(problemtype+'_transformer')
os.chdir(problemtype+'_transformer')
# train transformer if it doesn't already exist
os.system('pip3 install scikit-learn==0.22.2.post1')
if transform_file in os.listdir():
# remove file if in listdir to avoid conflicts with naming
os.remove(transform_file)
print('making transformer...')
alldata=np.asarray(alldata)
labels=np.asarray(labels)
os.chdir(preprocess_dir)
if mtype == 'c':
print('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.system('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.chdir(problemtype+'_transformer')
print(transform_file)
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(np.array(alldata))
elif mtype == 'r':
command='python3 transform.py "%s" "%s" "%s" "%s" "%s" "%s"'%('csv', 'r', classes[0], csvfile, prevdir+'/train_dir/', common_name)
print(command)
os.system(command)
os.chdir(problemtype+'_transformer')
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(alldata)
os.chdir(preprocess_dir)
os.system('python3 load_transformer.py "%s" "%s"'%(problemtype, transform_file))
# now make new files as .CSV
os.chdir(model_dir)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert to numpy arrays
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# get new labels_ array
labels_=list()
for i in range(len(alldata[0].tolist())):
labels_.append('transformed_feature_%s'%(str(i)))
# now create transformed excel sheets
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all_transformed.csv'.upper())
if basefile+'_all_transformed.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all_transformed.csv',index=False)
created_csv_files.append(basefile+'_all_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_all_transformed.csv'))
try:
print(basefile+'_train_transformed.csv'.upper())
if basefile+'_train_transformed.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train_transformed.csv',index=False)
created_csv_files.append(basefile+'_train_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_train_transformed.csv'))
try:
print(basefile+'_test_transformed.csv'.upper())
if basefile+'_test_transformed.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test_transformed.csv',index=False)
created_csv_files.append(basefile+'_test_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_test_transformed.csv'))
else:
# make a transform model == '' so that later during model training this can be skipped
transform_model=''
############################################################
## VISUALIZE DATA ##
############################################################
visualize_data=settings['visualize_data']
visual_dir=prevdir+'/visualize'
model_session=str(uuid.uuid1())
os.chdir(visual_dir)
if visualize_data == True and mtype == 'c':
print('----------------------------------')
print(f.renderText('VISUALIZING DATA'))
print('----------------------------------')
command='python3 visualize.py %s'%(problemtype)
for i in range(len(classes)):
command=command+' "'+classes[i]+'"'
os.system(command)
# restructure the visualization directory
os.chdir(visual_dir+'/visualization_session')
os.mkdir('visualizations')
vizdir=os.getcwd()
# move directories so that visualization is separate from main model directory
shutil.move(vizdir+'/clustering', vizdir+'/visualizations/clustering')
shutil.move(vizdir+'/feature_ranking', vizdir+'/visualizations/feature_ranking')
shutil.move(vizdir+'/model_selection', vizdir+'/visualizations/model_selection')
# go back to main direcotry
os.chdir(visual_dir)
# now copy over the visualization directory to
try:
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
except:
shutil.rmtree(model_dir+'/'+model_session)
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json',model_dir+'/%s/settings.json'%(model_session))
else:
# make a model session for next section if it doesn't exist from visualization directory
os.chdir(model_dir)
try:
os.mkdir(model_session)
except:
shutil.rmtree(model_session)
os.mkdir(model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json', model_dir+'/%s/settings.json'%(model_session))
############################################################
## TRAIN THE MODEL ##
############################################################
'''
Now we can train the machine learning model via the default_training script.
Note you can specify multiple training scripts and it will consecutively model the
files appropriately.
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# Here is what all the variables below mean:
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# alldata = list of features in an array for model training
# [[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875...],
...
[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875,...]]
# labels = list of labels in an array for model training
# ['males','females',...,'males','females']
# mtype = classification or regression problem?
# 'c' --> classification
# 'r' --> regression
# jsonfile = filename of the .JSON document seprating classes
# males_females.json
# problemtype = type of problem selected
# 'audio' --> audio files
# 'image' --> images files
# 'text' --> text files
# 'video' --> video files
# 'csv' --> csv files
# default_featurenames = default feature array(s) to use for modeling
# ['librosa_features']
# settings = overall settings currenty used for model training
# output of the settings.json document
-----
# transform_model = transformer model if applicable
# useful for data transformation as part of the model initialization process (if pickle file)
# uses scikit-learn pipeline
# X_train, X_test, y_train, y_test
# training datasets used in the .CSV documents
# also can use pandas dataframe if applicable (loading in the model dir)
'''
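# Purely illustrative shapes for the variables described above (hypothetical 2-class
# audio problem with 100 samples and 180 features per sample):
#   alldata -> array of shape (100, 180); labels -> 100 integers in {0, 1}
#   X_train/X_test, y_train/y_test -> the train_test_split of those same arrays
#   transform_model -> '' when no scaler/reducer/selector was trained above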
print('----------------------------------')
print(f.renderText('MODELING DATA'))
print('----------------------------------')
# get defaults
default_training_scripts=settings['default_training_script']
model_compress=settings['model_compress']
default_featurenames=''
if problemtype != 'csv' and mtype == 'c':
for i in range(len(default_features)):
if i ==0:
default_featurenames=default_features[i]
else:
default_featurenames=default_featurenames+'_|_'+default_features[i]
else:
default_featurenames='csv_classification'
# just move all created .csv files into model_session directory
os.chdir(model_dir)
os.chdir(model_session)
os.mkdir('data')
for i in range(len(created_csv_files)):
shutil.move(model_dir+'/'+created_csv_files[i], os.getcwd()+'/data/'+created_csv_files[i])
# initialize i (for tqdm) and go through all model training scripts
i=0
for i in tqdm(range(len(default_training_scripts)), desc=default_training_scripts[i]):
try:
model_start_time=time.time()
# go to model directory
os.chdir(model_dir)
# get common name and default training script to select proper model trainer
default_training_script=default_training_scripts[i]
common_name_model=common_name+'_'+default_training_script
model_exists, model_listdir = pursue_modeling(mtype, model_dir, problemtype, default_training_script, common_name_model)
if model_exists == False:
print('----------------------------------')
print(' .... training %s '%(default_training_script.upper()))
print('----------------------------------')
if default_training_script=='adanet':
print('Adanet training is coming soon! Please use a different model setting for now.')
# import train_adanet as ta
# ta.train_adanet(mtype, classes, jsonfile, alldata, labels, feature_labels, problemtype, default_featurenames)
elif default_training_script=='alphapy':
import train_alphapy as talpy
modelname, modeldir, files=talpy.train_alphapy(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='atm':
import train_atm as tatm
modelname, modeldir, files=tatm.train_atm(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autobazaar':
import train_autobazaar as autobzr
modelname, modeldir, files=autobzr.train_autobazaar(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogbt':
import train_autogbt as tautogbt
modelname, modeldir, files=tautogbt.train_autogbt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogluon':
import train_autogluon as tautg
modelname, modeldir, files, test_data=tautg.train_autogluon(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokaggle':
import train_autokaggle as autokag
modelname, modeldir, files=autokag.train_autokaggle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokeras':
import train_autokeras as autokeras_
modelname, modeldir, files=autokeras_.train_autokeras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='automl':
import train_automl as auto_ml
modelname, modeldir, files=auto_ml.train_automl(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autosklearn':
print('Autosklearn training is unstable! Please use a different model setting for now.')
# import train_autosklearn as taskl
# taskl.train_autosklearn(alldata, labels, mtype, jsonfile, problemtype, default_featurenames)
elif default_training_script=='autopytorch':
import train_autopytorch as autotorch_
modelname, modeldir, files=autotorch_.train_autopytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='btb':
import train_btb as tbtb
modelname, modeldir, files=tbtb.train_btb(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='cvopt':
import train_cvopt as tcvopt
modelname, modeldir, files = tcvopt.train_cvopt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='devol':
import train_devol as td
modelname, modeldir, files=td.train_devol(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gama':
import train_gama as tgama
modelname, modeldir, files=tgama.train_gama(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gentun':
import train_gentun as tgentun
modelname, modeldir, files=tgentun.train_gentun(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hyperband':
import train_hyperband as thband
modelname, modeldir, files = thband.train_hyperband(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hypsklearn':
import train_hypsklearn as th
modelname, modeldir, files=th.train_hypsklearn(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hungabunga':
import train_hungabunga as thung
modelname, modeldir, files=thung.train_hungabunga(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='imbalance':
import train_imbalance as timb
modelname, modeldir, files=timb.train_imbalance(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='keras':
import train_keras as tk
modelname, modeldir, files=tk.train_keras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='ludwig':
import train_ludwig as tl
modelname, modeldir, files=tl.train_ludwig(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlblocks':
import train_mlblocks as mlb
modelname, modeldir, files=mlb.train_mlblocks(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlbox':
import train_mlbox as mlbox_
modelname, modeldir, files=mlbox_.train_mlbox(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='neuraxle':
if mtype=='c':
print('Neuraxle does not support classification at this time. Please use a different model training script')
break
else:
import train_neuraxle as tneuraxle
modelname, modeldir, files=tneuraxle.train_neuraxle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='plda':
print('PLDA training is unstable! Please use a different model setting for now.')
# import train_pLDA as tp
# tp.train_pLDA(alldata,labels)
elif default_training_script=='pytorch':
import train_pytorch as t_pytorch
modelname, modeldir, files = t_pytorch.train_pytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='safe':
import train_safe as tsafe
modelname, modeldir, files=tsafe.train_safe(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='scsr':
import train_scsr as scsr
if mtype == 'c':
modelname, modeldir, files=scsr.train_sc(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,minlength)
elif mtype == 'r':
modelname, modeldir, files=scsr.train_sr(X_train,X_test,y_train,y_test,common_name_model,problemtype,classes,default_featurenames,transform_model,model_dir,settings)
elif default_training_script=='tpot':
import train_TPOT as tt
modelname, modeldir, files=tt.train_TPOT(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
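# each training branch above returns the trained model's filename, its output directory, and any supplementary files (unsupported backends only print a warning)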
############################################################
## CALCULATE METRICS / PLOT ROC CURVE ##
############################################################
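# derive a session folder name from the trained model filename by stripping the .pickle / .h5 extension; otherwise fall back to the common model name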
if modelname.endswith('.pickle'):
foldername=modelname[0:-7]
elif modelname.endswith('.h5'):
foldername=modelname[0:-3]
else:
foldername=common_name_model
# copy the folder in case there are multiple models being trained
try:
shutil.copytree(model_session, foldername)
except:
shutil.rmtree(foldername)
shutil.copytree(model_session, foldername)
cur_dir2=os.getcwd()
os.chdir(foldername)
os.mkdir('model')
os.chdir('model')
model_dir_temp=os.getcwd()
# dump transform model to the models directory if necessary
if transform_model == '':
transformer_name=''
else:
# dump the transform model into the current working directory
transformer_name=modelname.split('.')[0]+'_transform.pickle'
tmodel=open(transformer_name,'wb')
pickle.dump(transform_model, tmodel)
tmodel.close()
# move all supplementary files into model folder
for j in range(len(files)):
shutil.move(modeldir+'/'+files[j], model_dir_temp+'/'+files[j])
# load model for getting metrics
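# models from most scripts are un-pickled directly; the frameworks excluded in the list below are loaded with their own APIs in the elif branches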
if default_training_script not in ['alphapy', 'atm', 'autokeras', 'autopytorch', 'ludwig', 'keras', 'devol']:
loadmodel=open(modelname, 'rb')
clf=pickle.load(loadmodel)
loadmodel.close()
elif default_training_script == 'atm':
from atm import Model
clf=Model.load(modelname)
elif default_training_script == 'autokeras':
import tensorflow as tf
import autokeras as ak
clf = pickle.load(open(modelname, 'rb'))
elif default_training_script=='autopytorch':
import torch
clf=torch.load(modelname)
elif default_training_script == 'ludwig':
from ludwig.api import LudwigModel
clf=LudwigModel.load('ludwig_files/experiment_run/model/')
elif default_training_script in ['devol', 'keras']:
from keras.models import load_model
clf = load_model(modelname)
else:
clf=''
# create an empty test_data placeholder for anything other than autogluon
if default_training_script != 'autogluon':
test_data=''
# now make main .JSON file for the session summary with metrics
get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time)
# now move to the proper models directory
os.chdir(model_dir)
os.system('python3 create_readme.py "%s"'%(os.getcwd()+'/'+foldername))
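# group models by problem type: create (or reuse) the <problemtype>_models folder and move the session folder into it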
try:
os.chdir(problemtype+'_models')
except:
os.mkdir(problemtype+'_models')
os.chdir(problemtype+'_models')
shutil.move(model_dir+'/'+foldername, os.getcwd()+'/'+foldername)
############################################################
## COMPRESS MODELS ##
############################################################
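# compression strategy depends on the training backend: joblib for pickled scikit-style models, keras_compressor for Keras / devol models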
if model_compress == True:
print(f.renderText('COMPRESSING MODEL'))
# now compress the model according to model type
if default_training_script in ['hypsklearn', 'scsr', 'tpot']:
# these are all .pickle files and can be compressed via scikit-small-ensemble
from sklearn.externals import joblib
# open up model
loadmodel=open(modelname, 'rb')
model = pickle.load(loadmodel)
loadmodel.close()
# compress - from 0 to 9. Higher value means more compression, but also slower read and write times.
# Using a value of 3 is often a good compromise.
joblib.dump(model, modelname[0:-7]+'_compressed.joblib',compress=3)
# can now load compressed models as such
# thenewmodel=joblib.load(modelname[0:-7]+'_compressed.joblib')
# leads to up to a 10x reduction in model size and roughly 0.72 sec -> 0.23 sec load times (3-4x faster model loading)
# note: this may not work if the sklearn and python versions differ between the saving and loading environments.
elif default_training_script in ['devol', 'keras']:
# can compress with keras_compressor
import logging
from keras.models import load_model
from keras_compressor.compressor import compress
logging.basicConfig(
level=logging.INFO,
)
try:
print('compressing model!!')
model = load_model(modelname)
model = compress(model, 7e-1)
model.save(modelname[0:-3]+'_compressed.h5')
except:
print('error compressing model!!')
else:
# for everything else, PocketFlow-based compression may be added in the future; no compression is applied for now.
print('We cannot currently compress %s models. We are working on this!! \n\n The model will remain uncompressed for now'%(default_training_script))
else:
if mtype == 'r':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_regression', problemtype+'_models', str(model_listdir)))
elif mtype == 'c':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_classifier', problemtype+'_models', str(model_listdir)))
############################################################
## PRODUCTIONIZING MODELS ##
############################################################
# TO BE COMPLETED IN THE FUTURE!
except:
print('ERROR - error in modeling session')
|
[
"pandas.read_csv",
"sklearn.metrics.auc",
"sys.exit",
"train_cvopt.train_cvopt",
"ludwig.api.LudwigModel.load",
"train_mlblocks.train_mlblocks",
"pyfiglet.Figlet",
"matplotlib.pyplot.xlabel",
"platform.system",
"os.mkdir",
"train_neuraxle.train_neuraxle",
"sklearn.metrics.mean_absolute_error",
"train_btb.train_btb",
"train_automl.train_automl",
"matplotlib.pyplot.savefig",
"random.shuffle",
"matplotlib.pyplot.xticks",
"train_gama.train_gama",
"sklearn.model_selection.train_test_split",
"train_gentun.train_gentun",
"sklearn.metrics.mean_squared_error",
"train_alphapy.train_alphapy",
"psutil.sensors_battery",
"train_hungabunga.train_hungabunga",
"matplotlib.pyplot.legend",
"sklearn.metrics.f1_score",
"pickle.dump",
"sklearn.ensemble.IsolationForest",
"os.getcwd",
"os.chdir",
"train_devol.train_devol",
"keras_compressor.compressor.compress",
"psutil.cpu_count",
"json.dump",
"train_safe.train_safe",
"sklearn.metrics.balanced_accuracy_score",
"sklearn.metrics.classification_report",
"train_autokaggle.train_autokaggle",
"train_scsr.train_sr",
"sklearn.metrics.precision_score",
"platform.release",
"sklearn.metrics.roc_auc_score",
"os.remove",
"shutil.move",
"numpy.where",
"matplotlib.pyplot.yticks",
"train_autopytorch.train_autopytorch",
"itertools.cycle",
"psutil.disk_io_counters",
"scipy.stats.zscore",
"psutil.boot_time",
"matplotlib.pyplot.title",
"logging.basicConfig",
"keras.models.load_model",
"matplotlib.pyplot.colorbar",
"train_scsr.train_sc",
"train_atm.train_atm",
"pandas.concat",
"matplotlib.pyplot.ylabel",
"train_autogbt.train_autogbt",
"matplotlib.pyplot.imshow",
"platform.version",
"os.listdir",
"train_hyperband.train_hyperband",
"matplotlib.pyplot.scatter",
"sklearn.metrics.confusion_matrix",
"psutil.cpu_percent",
"numpy.amin",
"atm.Model.load",
"train_TPOT.train_TPOT",
"time.time",
"psutil.disk_usage",
"sklearn.metrics.median_absolute_error",
"train_autobazaar.train_autobazaar",
"train_keras.train_keras",
"matplotlib.pyplot.tight_layout",
"shutil.rmtree",
"os.system",
"train_imbalance.train_imbalance",
"matplotlib.pyplot.grid",
"psutil.virtual_memory",
"sklearn.metrics.recall_score",
"numpy.array",
"sklearn.metrics.roc_curve",
"psutil.cpu_stats",
"sklearn.metrics.r2_score",
"sys.path.append",
"train_autogluon.train_autogluon",
"psutil.swap_memory",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.close",
"psutil.disk_partitions",
"pandas.DataFrame",
"os.rename",
"pickle.load",
"uuid.uuid1",
"train_ludwig.train_ludwig",
"shutil.copy",
"sklearn.externals.joblib.dump",
"train_mlbox.train_mlbox",
"train_autokeras.train_autokeras",
"train_hypsklearn.train_hypsklearn",
"sklearn.metrics.accuracy_score",
"torch.load",
"train_pytorch.train_pytorch",
"shutil.copytree",
"datetime.datetime.now",
"psutil.cpu_times"
] |
[((2550, 2568), 'pyfiglet.Figlet', 'Figlet', ([], {'font': '"""doh"""'}), "(font='doh')\n", (2556, 2568), False, 'from pyfiglet import Figlet\n'), ((2600, 2619), 'pyfiglet.Figlet', 'Figlet', ([], {'font': '"""doom"""'}), "(font='doom')\n", (2606, 2619), False, 'from pyfiglet import Figlet\n'), ((16731, 16742), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16740, 16742), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((16770, 16809), 'sys.path.append', 'sys.path.append', (["(prevdir + '/train_dir')"], {}), "(prevdir + '/train_dir')\n", (16785, 16809), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17314, 17346), 'os.chdir', 'os.chdir', (["(prevdir + '/train_dir')"], {}), "(prevdir + '/train_dir')\n", (17322, 17346), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17355, 17366), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17364, 17366), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17375, 17387), 'os.listdir', 'os.listdir', ([], {}), '()\n', (17385, 17387), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((24545, 24562), 'os.chdir', 'os.chdir', (['cur_dir'], {}), '(cur_dir)\n', (24553, 24562), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((24563, 24594), 'os.system', 'os.system', (['"""python3 upgrade.py"""'], {}), "('python3 upgrade.py')\n", (24572, 24594), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((31171, 31203), 'os.chdir', 'os.chdir', (["(prevdir + '/training/')"], {}), "(prevdir + '/training/')\n", (31179, 31203), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((34477, 34496), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (34485, 34496), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((34862, 34879), 'numpy.array', 'np.array', (['alldata'], {}), '(alldata)\n', (34870, 34879), True, 'import numpy as np\n'), ((34887, 34903), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (34895, 34903), True, 'import numpy as np\n'), ((36165, 36182), 'numpy.array', 'np.array', (['alldata'], {}), '(alldata)\n', (36173, 36182), True, 'import numpy as np\n'), ((36190, 36206), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (36198, 36206), True, 'import numpy as np\n'), ((38574, 38628), 'sklearn.model_selection.train_test_split', 'train_test_split', (['alldata', 'labels'], {'test_size': 'test_size'}), '(alldata, labels, test_size=test_size)\n', (38590, 38628), False, 'from sklearn.model_selection import train_test_split\n'), ((38695, 38712), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (38703, 38712), True, 'import numpy as np\n'), ((38720, 38736), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (38728, 38736), True, 'import numpy as np\n'), ((38745, 38762), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (38753, 38762), True, 'import numpy as np\n'), ((38770, 38786), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (38778, 38786), True, 'import numpy as 
np\n'), ((39052, 39064), 'os.listdir', 'os.listdir', ([], {}), '()\n', (39062, 39064), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((40476, 40500), 'os.chdir', 'os.chdir', (['preprocess_dir'], {}), '(preprocess_dir)\n', (40484, 40500), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45360, 45380), 'os.chdir', 'os.chdir', (['visual_dir'], {}), '(visual_dir)\n', (45368, 45380), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49270, 49289), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (49278, 49289), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49290, 49313), 'os.chdir', 'os.chdir', (['model_session'], {}), '(model_session)\n', (49298, 49313), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49314, 49330), 'os.mkdir', 'os.mkdir', (['"""data"""'], {}), "('data')\n", (49322, 49330), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((4994, 5009), 'pandas.concat', 'pd.concat', (['data'], {}), '(data)\n', (5003, 5009), True, 'import pandas as pd\n'), ((11861, 11886), 'json.dump', 'json.dump', (['data', 'jsonfile'], {}), '(data, jsonfile)\n', (11870, 11886), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((11973, 11984), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11982, 11984), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12012, 12029), 'os.chdir', 'os.chdir', (['basedir'], {}), '(basedir)\n', (12020, 12029), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12031, 12075), 'os.system', 'os.system', (['"""pip3 freeze -> requirements.txt"""'], {}), "('pip3 freeze -> requirements.txt')\n", (12040, 12075), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12611, 12627), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (12619, 12627), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12774, 12800), 'itertools.cycle', 'itertools.cycle', (['"""bgrcmyk"""'], {}), "('bgrcmyk')\n", (12789, 12800), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((13153, 13186), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (13163, 13186), True, 'import matplotlib.pyplot as plt\n'), ((13188, 13220), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (13198, 13220), True, 'import matplotlib.pyplot as plt\n'), ((13222, 13280), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver Operating Characteristic (ROC) Curve"""'], {}), "('Receiver Operating Characteristic (ROC) Curve')\n", (13231, 13280), True, 'import matplotlib.pyplot as plt\n'), ((13282, 13294), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13292, 13294), True, 'import matplotlib.pyplot as plt\n'), ((13296, 13314), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13312, 13314), True, 'import matplotlib.pyplot as plt\n'), ((13316, 13344), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""roc_curve.png"""'], {}), "('roc_curve.png')\n", (13327, 13344), True, 'import matplotlib.pyplot as plt\n'), ((13346, 13357), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13355, 13357), True, 'import matplotlib.pyplot as plt\n'), ((13759, 13809), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (13769, 13809), True, 'import matplotlib.pyplot as plt\n'), ((13811, 13827), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (13820, 13827), True, 'import matplotlib.pyplot as plt\n'), ((13829, 13843), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13841, 13843), True, 'import matplotlib.pyplot as plt\n'), ((13883, 13927), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (13893, 13927), True, 'import matplotlib.pyplot as plt\n'), ((13929, 13960), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (13939, 13960), True, 'import matplotlib.pyplot as plt\n'), ((14224, 14242), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14240, 14242), True, 'import matplotlib.pyplot as plt\n'), ((14244, 14268), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (14254, 14268), True, 'import matplotlib.pyplot as plt\n'), ((14270, 14299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (14280, 14299), True, 'import matplotlib.pyplot as plt\n'), ((14301, 14319), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14317, 14319), True, 'import matplotlib.pyplot as plt\n'), ((14321, 14356), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""confusion_matrix.png"""'], {}), "('confusion_matrix.png')\n", (14332, 14356), True, 'import matplotlib.pyplot as plt\n'), ((14358, 14369), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14367, 14369), True, 'import matplotlib.pyplot as plt\n'), ((17530, 17550), 'os.chdir', 'os.chdir', (['folders[i]'], {}), '(folders[i])\n', (17538, 17550), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17560, 17572), 'os.listdir', 'os.listdir', ([], {}), '()\n', (17570, 17572), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17636, 17654), 'os.chdir', 'os.chdir', (['data_dir'], {}), '(data_dir)\n', (17644, 17654), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((37940, 37963), 'numpy.array', 'np.array', (['outlier_class'], {}), '(outlier_class)\n', (37948, 37963), True, 'import numpy as np\n'), ((37975, 37997), 'numpy.amin', 'np.amin', (['outlier_class'], {}), '(outlier_class)\n', (37982, 37997), True, 'import numpy as np\n'), ((42078, 42130), 'os.system', 'os.system', (['"""pip3 install scikit-learn==0.22.2.post1"""'], {}), "('pip3 install scikit-learn==0.22.2.post1')\n", (42087, 42130), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42298, 42317), 'numpy.asarray', 'np.asarray', 
(['alldata'], {}), '(alldata)\n', (42308, 42317), True, 'import numpy as np\n'), ((42326, 42344), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (42336, 42344), True, 'import numpy as np\n'), ((42346, 42370), 'os.chdir', 'os.chdir', (['preprocess_dir'], {}), '(preprocess_dir)\n', (42354, 42370), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((43105, 43129), 'os.chdir', 'os.chdir', (['preprocess_dir'], {}), '(preprocess_dir)\n', (43113, 43129), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((43131, 43217), 'os.system', 'os.system', (['(\'python3 load_transformer.py "%s" "%s"\' % (problemtype, transform_file))'], {}), '(\'python3 load_transformer.py "%s" "%s"\' % (problemtype,\n transform_file))\n', (43140, 43217), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((43244, 43263), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (43252, 43263), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((43320, 43374), 'sklearn.model_selection.train_test_split', 'train_test_split', (['alldata', 'labels'], {'test_size': 'test_size'}), '(alldata, labels, test_size=test_size)\n', (43336, 43374), False, 'from sklearn.model_selection import train_test_split\n'), ((43413, 43430), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (43421, 43430), True, 'import numpy as np\n'), ((43439, 43455), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (43447, 43455), True, 'import numpy as np\n'), ((43465, 43482), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (43473, 43482), True, 'import numpy as np\n'), ((43491, 43507), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (43499, 43507), True, 'import numpy as np\n'), ((43701, 43713), 'os.listdir', 'os.listdir', ([], {}), '()\n', (43711, 43713), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45346, 45358), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (45356, 45358), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45677, 45695), 'os.system', 'os.system', (['command'], {}), '(command)\n', (45686, 45695), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45742, 45789), 'os.chdir', 'os.chdir', (["(visual_dir + '/visualization_session')"], {}), "(visual_dir + '/visualization_session')\n", (45750, 45789), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45789, 45815), 'os.mkdir', 'os.mkdir', (['"""visualizations"""'], {}), "('visualizations')\n", (45797, 45815), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45824, 45835), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (45833, 45835), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45919, 45993), 'shutil.move', 'shutil.move', (["(vizdir + '/clustering')", "(vizdir + '/visualizations/clustering')"], {}), "(vizdir + '/clustering', vizdir + '/visualizations/clustering')\n", (45930, 45993), False, 'import os, sys, pickle, json, 
random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45991, 46079), 'shutil.move', 'shutil.move', (["(vizdir + '/feature_ranking')", "(vizdir + '/visualizations/feature_ranking')"], {}), "(vizdir + '/feature_ranking', vizdir +\n '/visualizations/feature_ranking')\n", (46002, 46079), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46073, 46161), 'shutil.move', 'shutil.move', (["(vizdir + '/model_selection')", "(vizdir + '/visualizations/model_selection')"], {}), "(vizdir + '/model_selection', vizdir +\n '/visualizations/model_selection')\n", (46084, 46161), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46186, 46206), 'os.chdir', 'os.chdir', (['visual_dir'], {}), '(visual_dir)\n', (46194, 46206), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46514, 46606), 'shutil.copy', 'shutil.copy', (["(prevdir + '/settings.json')", "(model_dir + '/%s/settings.json' % model_session)"], {}), "(prevdir + '/settings.json', model_dir + '/%s/settings.json' %\n model_session)\n", (46525, 46606), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46696, 46715), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (46704, 46715), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46842, 46934), 'shutil.copy', 'shutil.copy', (["(prevdir + '/settings.json')", "(model_dir + '/%s/settings.json' % model_session)"], {}), "(prevdir + '/settings.json', model_dir + '/%s/settings.json' %\n model_session)\n", (46853, 46934), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5447, 5470), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (5468, 5470), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5492, 5512), 'psutil.cpu_percent', 'psutil.cpu_percent', ([], {}), '()\n', (5510, 5512), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5532, 5550), 'psutil.cpu_times', 'psutil.cpu_times', ([], {}), '()\n', (5548, 5550), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5570, 5588), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (5586, 5588), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5608, 5626), 'psutil.cpu_stats', 'psutil.cpu_stats', ([], {}), '()\n', (5624, 5626), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5645, 5665), 'psutil.swap_memory', 'psutil.swap_memory', ([], {}), '()\n', (5663, 5665), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5686, 5710), 'psutil.disk_partitions', 'psutil.disk_partitions', ([], {}), '()\n', (5708, 5710), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5731, 5753), 'psutil.disk_usage', 'psutil.disk_usage', (['"""/"""'], {}), "('/')\n", (5748, 5753), False, 'import os, 
sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5780, 5805), 'psutil.disk_io_counters', 'psutil.disk_io_counters', ([], {}), '()\n', (5803, 5805), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5823, 5847), 'psutil.sensors_battery', 'psutil.sensors_battery', ([], {}), '()\n', (5845, 5847), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5867, 5885), 'psutil.boot_time', 'psutil.boot_time', ([], {}), '()\n', (5883, 5885), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6013, 6030), 'platform.system', 'platform.system', ([], {}), '()\n', (6028, 6030), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6049, 6067), 'platform.release', 'platform.release', ([], {}), '()\n', (6065, 6067), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6086, 6104), 'platform.version', 'platform.version', ([], {}), '()\n', (6102, 6104), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((8761, 8799), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8783, 8799), False, 'from sklearn import metrics\n'), ((8832, 8879), 'sklearn.metrics.balanced_accuracy_score', 'metrics.balanced_accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8863, 8879), False, 'from sklearn import metrics\n'), ((9955, 10022), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['y_true', 'y_pred'], {'target_names': 'classes'}), '(y_true, y_pred, target_names=classes)\n', (9984, 10022), False, 'from sklearn import metrics\n'), ((15817, 15870), 'os.listdir', 'os.listdir', (["(model_dir + '/' + problemtype + '_models')"], {}), "(model_dir + '/' + problemtype + '_models')\n", (15827, 15870), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((25901, 25967), 'os.system', 'os.system', (['(\'python3 clean.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 clean.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (25910, 25967), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27392, 27460), 'os.system', 'os.system', (['(\'python3 augment.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 augment.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (27401, 27460), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((30426, 30470), 'os.chdir', 'os.chdir', (["(prevdir + '/features/csv_features')"], {}), "(prevdir + '/features/csv_features')\n", (30434, 30470), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((30508, 30678), 'os.system', 'os.system', (['(\'python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"\' % (prevdir +\n \'/train_dir/\' + csvfile, prevdir + \'/train_dir/\' + output_file, classes[0])\n )'], {}), '(\'python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"\' % (\n prevdir + \'/train_dir/\' + csvfile, prevdir + \'/train_dir/\' +\n 
output_file, classes[0]))\n', (30517, 30678), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((31797, 31814), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (31805, 31814), True, 'import numpy as np\n'), ((31827, 31843), 'numpy.amin', 'np.amin', (['lengths'], {}), '(lengths)\n', (31834, 31843), True, 'import numpy as np\n'), ((32409, 32429), 'os.chdir', 'os.chdir', (['labels_dir'], {}), '(labels_dir)\n', (32417, 32429), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((32440, 32452), 'os.listdir', 'os.listdir', ([], {}), '()\n', (32450, 32452), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((33705, 33751), 'pandas.read_csv', 'pd.read_csv', (["(prevdir + '/train_dir/' + csvfile)"], {}), "(prevdir + '/train_dir/' + csvfile)\n", (33716, 33751), True, 'import pandas as pd\n'), ((41904, 41942), 'os.chdir', 'os.chdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (41912, 41942), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42153, 42165), 'os.listdir', 'os.listdir', ([], {}), '()\n', (42163, 42165), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42230, 42255), 'os.remove', 'os.remove', (['transform_file'], {}), '(transform_file)\n', (42239, 42255), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42492, 42600), 'os.system', 'os.system', (['(\'python3 transform.py "%s" "%s" "%s" %s\' % (problemtype, \'c\', common_name,\n transform_command))'], {}), '(\'python3 transform.py "%s" "%s" "%s" %s\' % (problemtype, \'c\',\n common_name, transform_command))\n', (42501, 42600), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42597, 42635), 'os.chdir', 'os.chdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (42605, 42635), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46265, 46356), 'shutil.copytree', 'shutil.copytree', (["(visual_dir + '/visualization_session')", "(model_dir + '/' + model_session)"], {}), "(visual_dir + '/visualization_session', model_dir + '/' +\n model_session)\n", (46280, 46356), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46724, 46747), 'os.mkdir', 'os.mkdir', (['model_session'], {}), '(model_session)\n', (46732, 46747), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49651, 49662), 'time.time', 'time.time', ([], {}), '()\n', (49660, 49662), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49692, 49711), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (49700, 49711), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6835, 6846), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6844, 6846), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), 
((6871, 6913), 'os.chdir', 'os.chdir', (["(common_name + '_alphapy_session')"], {}), "(common_name + '_alphapy_session')\n", (6879, 6913), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6926, 6937), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6935, 6937), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6940, 6957), 'os.chdir', 'os.chdir', (['"""input"""'], {}), "('input')\n", (6948, 6957), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6960, 6996), 'os.rename', 'os.rename', (['"""test.csv"""', '"""predict.csv"""'], {}), "('test.csv', 'predict.csv')\n", (6969, 6996), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6999, 7020), 'os.chdir', 'os.chdir', (['alphapy_dir'], {}), '(alphapy_dir)\n', (7007, 7020), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7023, 7053), 'os.system', 'os.system', (['"""alphapy --predict"""'], {}), "('alphapy --predict')\n", (7032, 7053), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7056, 7074), 'os.chdir', 'os.chdir', (['"""output"""'], {}), "('output')\n", (7064, 7074), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7085, 7097), 'os.listdir', 'os.listdir', ([], {}), '()\n', (7095, 7097), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7243, 7259), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (7251, 7259), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((8912, 8951), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8935, 8951), False, 'from sklearn import metrics\n'), ((9022, 9058), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9042, 9058), False, 'from sklearn import metrics\n'), ((9128, 9173), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_true', 'y_pred'], {'pos_label': '(1)'}), '(y_true, y_pred, pos_label=1)\n', (9144, 9173), False, 'from sklearn import metrics\n'), ((9246, 9295), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_true', 'y_pred'], {'average': '"""micro"""'}), "(y_true, y_pred, average='micro')\n", (9262, 9295), False, 'from sklearn import metrics\n'), ((9367, 9416), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (9383, 9416), False, 'from sklearn import metrics\n'), ((9487, 9524), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9508, 9524), False, 'from sklearn import metrics\n'), ((9600, 9654), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_pred'], {'average': '"""micro"""'}), "(y_true, y_pred, average='micro')\n", (9621, 9654), False, 'from sklearn import metrics\n'), ((9736, 9790), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (9757, 9790), False, 'from 
sklearn import metrics\n'), ((10048, 10086), 'numpy.array', 'np.array', (["metrics_['confusion_matrix']"], {}), "(metrics_['confusion_matrix'])\n", (10056, 10086), True, 'import numpy as np\n'), ((10757, 10800), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (10784, 10800), False, 'from sklearn import metrics\n'), ((10836, 10878), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (10862, 10878), False, 'from sklearn import metrics\n'), ((10917, 10962), 'sklearn.metrics.median_absolute_error', 'metrics.median_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (10946, 10962), False, 'from sklearn import metrics\n'), ((10988, 11020), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (11004, 11020), False, 'from sklearn import metrics\n'), ((11126, 11137), 'time.time', 'time.time', ([], {}), '()\n', (11135, 11137), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((11180, 11203), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11201, 11203), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12900, 12927), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'probs[i]'], {}), '(y_test, probs[i])\n', (12909, 12927), False, 'from sklearn.metrics import roc_curve\n'), ((13044, 13102), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""darkblue"""', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='darkblue', linestyle='--')\n", (13052, 13102), True, 'import matplotlib.pyplot as plt\n'), ((14724, 14794), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'linestyle': '"""-"""', 'linewidth': '"""0.5"""', 'color': '"""green"""'}), "(which='major', linestyle='-', linewidth='0.5', color='green')\n", (14732, 14794), True, 'import matplotlib.pyplot as plt\n'), ((14798, 14868), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'linestyle': '""":"""', 'linewidth': '"""0.5"""', 'color': '"""black"""'}), "(which='minor', linestyle=':', linewidth='0.5', color='black')\n", (14806, 14868), True, 'import matplotlib.pyplot as plt\n'), ((14872, 14890), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14888, 14890), True, 'import matplotlib.pyplot as plt\n'), ((14894, 14934), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bar_graph_predictions.png"""'], {}), "('bar_graph_predictions.png')\n", (14905, 14934), True, 'import matplotlib.pyplot as plt\n'), ((14938, 14949), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14947, 14949), True, 'import matplotlib.pyplot as plt\n'), ((14992, 15033), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_test', 'y_test'], {'color': '"""gray"""'}), "(X_test, y_test, color='gray')\n", (15003, 15033), True, 'import matplotlib.pyplot as plt\n'), ((15038, 15088), 'matplotlib.pyplot.plot', 'plt.plot', (['X_test', 'y_pred'], {'color': '"""red"""', 'linewidth': '(2)'}), "(X_test, y_pred, color='red', linewidth=2)\n", (15046, 15088), True, 'import matplotlib.pyplot as plt\n'), ((15092, 15110), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15108, 15110), True, 'import matplotlib.pyplot as plt\n'), ((15114, 15158), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""straight_line_predictions.png"""'], {}), 
"('straight_line_predictions.png')\n", (15125, 15158), True, 'import matplotlib.pyplot as plt\n'), ((15162, 15173), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15171, 15173), True, 'import matplotlib.pyplot as plt\n'), ((15229, 15282), 'pandas.DataFrame', 'pd.DataFrame', (["{'Actual': y_test, 'Predicted': y_pred}"], {}), "({'Actual': y_test, 'Predicted': y_pred})\n", (15241, 15282), True, 'import pandas as pd\n'), ((15347, 15417), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'linestyle': '"""-"""', 'linewidth': '"""0.5"""', 'color': '"""green"""'}), "(which='major', linestyle='-', linewidth='0.5', color='green')\n", (15355, 15417), True, 'import matplotlib.pyplot as plt\n'), ((15421, 15491), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'linestyle': '""":"""', 'linewidth': '"""0.5"""', 'color': '"""black"""'}), "(which='minor', linestyle=':', linewidth='0.5', color='black')\n", (15429, 15491), True, 'import matplotlib.pyplot as plt\n'), ((15495, 15513), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15511, 15513), True, 'import matplotlib.pyplot as plt\n'), ((15517, 15557), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bar_graph_predictions.png"""'], {}), "('bar_graph_predictions.png')\n", (15528, 15557), True, 'import matplotlib.pyplot as plt\n'), ((15561, 15572), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15570, 15572), True, 'import matplotlib.pyplot as plt\n'), ((25403, 25442), 'os.chdir', 'os.chdir', (["(clean_dir + '/audio_cleaning')"], {}), "(clean_dir + '/audio_cleaning')\n", (25411, 25442), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((26146, 26212), 'os.system', 'os.system', (['(\'python3 clean.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 clean.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (26155, 26212), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((26852, 26897), 'os.chdir', 'os.chdir', (["(augment_dir + '/audio_augmentation')"], {}), "(augment_dir + '/audio_augmentation')\n", (26860, 26897), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27652, 27720), 'os.system', 'os.system', (['(\'python3 augment.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 augment.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (27661, 27720), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29514, 29584), 'os.system', 'os.system', (['(\'python3 featurize.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 featurize.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (29523, 29584), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29582, 29619), 'os.chdir', 'os.chdir', (["(data_dir + '/' + classes[i])"], {}), "(data_dir + '/' + classes[i])\n", (29590, 29619), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29653, 29665), 'os.listdir', 'os.listdir', ([], {}), '()\n', (29663, 29665), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((31933, 31955), 'random.shuffle', 'random.shuffle', (['class_'], {}), '(class_)\n', (31947, 31955), 
False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((33075, 33092), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (33083, 33092), True, 'import numpy as np\n'), ((33105, 33121), 'numpy.amin', 'np.amin', (['lengths'], {}), '(lengths)\n', (33112, 33121), True, 'import numpy as np\n'), ((41952, 41990), 'os.mkdir', 'os.mkdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (41960, 41990), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((41991, 42029), 'os.chdir', 'os.chdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (41999, 42029), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42751, 42768), 'numpy.array', 'np.array', (['alldata'], {}), '(alldata)\n', (42759, 42768), True, 'import numpy as np\n'), ((42943, 42961), 'os.system', 'os.system', (['command'], {}), '(command)\n', (42952, 42961), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42964, 43002), 'os.chdir', 'os.chdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (42972, 43002), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46358, 46404), 'shutil.rmtree', 'shutil.rmtree', (["(model_dir + '/' + model_session)"], {}), "(model_dir + '/' + model_session)\n", (46371, 46404), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46403, 46494), 'shutil.copytree', 'shutil.copytree', (["(visual_dir + '/visualization_session')", "(model_dir + '/' + model_session)"], {}), "(visual_dir + '/visualization_session', model_dir + '/' +\n model_session)\n", (46418, 46494), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46759, 46787), 'shutil.rmtree', 'shutil.rmtree', (['model_session'], {}), '(model_session)\n', (46772, 46787), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46790, 46813), 'os.mkdir', 'os.mkdir', (['model_session'], {}), '(model_session)\n', (46798, 46813), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59108, 59119), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (59117, 59119), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59123, 59143), 'os.chdir', 'os.chdir', (['foldername'], {}), '(foldername)\n', (59131, 59143), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59147, 59164), 'os.mkdir', 'os.mkdir', (['"""model"""'], {}), "('model')\n", (59155, 59164), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59168, 59185), 'os.chdir', 'os.chdir', (['"""model"""'], {}), "('model')\n", (59176, 59185), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59204, 59215), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (59213, 59215), False, 'import os, sys, pickle, json, random, shutil, time, 
itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((61056, 61075), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (61064, 61075), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5909, 5932), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5930, 5932), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6855, 6867), 'os.listdir', 'os.listdir', ([], {}), '()\n', (6865, 6867), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7206, 7226), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {}), '(csvfile)\n', (7217, 7226), True, 'import pandas as pd\n'), ((9869, 9909), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9893, 9909), False, 'from sklearn import metrics\n'), ((21219, 21251), 'os.chdir', 'os.chdir', (["(prevdir + '/train_dir')"], {}), "(prevdir + '/train_dir')\n", (21227, 21251), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((21260, 21272), 'os.listdir', 'os.listdir', ([], {}), '()\n', (21270, 21272), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((21812, 21832), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {}), '(csvfile)\n', (21823, 21832), True, 'import pandas as pd\n'), ((25517, 25555), 'os.chdir', 'os.chdir', (["(clean_dir + '/text_cleaning')"], {}), "(clean_dir + '/text_cleaning')\n", (25525, 25555), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((26108, 26145), 'os.chdir', 'os.chdir', (["(clean_dir + '/csv_cleaning')"], {}), "(clean_dir + '/csv_cleaning')\n", (26116, 26145), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((26975, 27019), 'os.chdir', 'os.chdir', (["(augment_dir + '/text_augmentation')"], {}), "(augment_dir + '/text_augmentation')\n", (26983, 27019), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27608, 27651), 'os.chdir', 'os.chdir', (["(augment_dir + '/csv_augmentation')"], {}), "(augment_dir + '/csv_augmentation')\n", (27616, 27651), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((28823, 28869), 'os.chdir', 'os.chdir', (["(prevdir + '/features/audio_features')"], {}), "(prevdir + '/features/audio_features')\n", (28831, 28869), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((30486, 30498), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (30496, 30498), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((33211, 33233), 'random.shuffle', 'random.shuffle', (['class_'], {}), '(class_)\n', (33225, 33233), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((36632, 36653), 'numpy.where', 'np.where', (['(y_pred == 1)'], {}), '(y_pred == 1)\n', (36640, 36653), True, 'import numpy as np\n'), ((36676, 36698), 'numpy.where', 'np.where', (['(y_pred == -1)'], {}), '(y_pred == -1)\n', (36684, 
36698), True, 'import numpy as np\n'), ((37202, 37247), 'os.system', 'os.system', (['"""pip3 install statsmodels==0.11.1"""'], {}), "('pip3 install statsmodels==0.11.1')\n", (37211, 37247), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49420, 49431), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (49429, 49431), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((58960, 59002), 'shutil.copytree', 'shutil.copytree', (['model_session', 'foldername'], {}), '(model_session, foldername)\n', (58975, 59002), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59515, 59551), 'pickle.dump', 'pickle.dump', (['transform_model', 'tmodel'], {}), '(transform_model, tmodel)\n', (59526, 59551), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59659, 59730), 'shutil.move', 'shutil.move', (["(modeldir + '/' + files[j])", "(model_dir_temp + '/' + files[j])"], {}), "(modeldir + '/' + files[j], model_dir_temp + '/' + files[j])\n", (59670, 59730), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59920, 59942), 'pickle.load', 'pickle.load', (['loadmodel'], {}), '(loadmodel)\n', (59931, 59942), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((61164, 61197), 'os.chdir', 'os.chdir', (["(problemtype + '_models')"], {}), "(problemtype + '_models')\n", (61172, 61197), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6154, 6176), 'psutil.disk_usage', 'psutil.disk_usage', (['"""/"""'], {}), "('/')\n", (6171, 6176), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((20828, 20848), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {}), '(csvfile)\n', (20839, 20848), True, 'import pandas as pd\n'), ((24014, 24024), 'sys.exit', 'sys.exit', ([], {}), '()\n', (24022, 24024), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((25634, 25673), 'os.chdir', 'os.chdir', (["(clean_dir + '/image_cleaning')"], {}), "(clean_dir + '/image_cleaning')\n", (25642, 25673), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27101, 27146), 'os.chdir', 'os.chdir', (["(augment_dir + '/image_augmentation')"], {}), "(augment_dir + '/image_augmentation')\n", (27109, 27146), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((28968, 29013), 'os.chdir', 'os.chdir', (["(prevdir + '/features/text_features')"], {}), "(prevdir + '/features/text_features')\n", (28976, 29013), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((36528, 36559), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'random_state': '(0)'}), '(random_state=0)\n', (36543, 36559), False, 'from sklearn.ensemble import IsolationForest\n'), ((37420, 37441), 'scipy.stats.zscore', 'stats.zscore', (['alldata'], {}), '(alldata)\n', (37432, 37441), False, 'from scipy import stats\n'), ((50626, 50799), 
'train_alphapy.train_alphapy', 'talpy.train_alphapy', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (50645, 50799), True, 'import train_alphapy as talpy\n'), ((59018, 59043), 'shutil.rmtree', 'shutil.rmtree', (['foldername'], {}), '(foldername)\n', (59031, 59043), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59048, 59090), 'shutil.copytree', 'shutil.copytree', (['model_session', 'foldername'], {}), '(model_session, foldername)\n', (59063, 59090), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((60041, 60062), 'atm.Model.load', 'Model.load', (['modelname'], {}), '(modelname)\n', (60051, 60062), False, 'from atm import Model\n'), ((61211, 61244), 'os.mkdir', 'os.mkdir', (["(problemtype + '_models')"], {}), "(problemtype + '_models')\n", (61219, 61244), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((61247, 61280), 'os.chdir', 'os.chdir', (["(problemtype + '_models')"], {}), "(problemtype + '_models')\n", (61255, 61280), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((61894, 61916), 'pickle.load', 'pickle.load', (['loadmodel'], {}), '(loadmodel)\n', (61905, 61916), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((62107, 62177), 'sklearn.externals.joblib.dump', 'joblib.dump', (['model', "(modelname[0:-7] + '_compressed.joblib')"], {'compress': '(3)'}), "(model, modelname[0:-7] + '_compressed.joblib', compress=3)\n", (62118, 62177), False, 'from sklearn.externals import joblib\n'), ((25751, 25790), 'os.chdir', 'os.chdir', (["(clean_dir + '/video_cleaning')"], {}), "(clean_dir + '/video_cleaning')\n", (25759, 25790), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27227, 27272), 'os.chdir', 'os.chdir', (["(augment_dir + '/video_augmentation')"], {}), "(augment_dir + '/video_augmentation')\n", (27235, 27272), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29114, 29160), 'os.chdir', 'os.chdir', (["(prevdir + '/features/image_features')"], {}), "(prevdir + '/features/image_features')\n", (29122, 29160), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((50881, 51049), 'train_atm.train_atm', 'tatm.train_atm', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (50895, 51049), True, 'import train_atm as tatm\n'), ((61321, 61332), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (61330, 61332), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((62727, 62766), 'logging.basicConfig', 
'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (62746, 62766), False, 'import logging\n'), ((7661, 7672), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7670, 7672), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7675, 7695), 'os.chdir', 'os.chdir', (['"""atm_temp"""'], {}), "('atm_temp')\n", (7683, 7695), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7792, 7808), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (7800, 7808), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((25863, 25900), 'os.chdir', 'os.chdir', (["(clean_dir + '/csv_cleaning')"], {}), "(clean_dir + '/csv_cleaning')\n", (25871, 25900), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27348, 27391), 'os.chdir', 'os.chdir', (["(augment_dir + '/csv_augmentation')"], {}), "(augment_dir + '/csv_augmentation')\n", (27356, 27391), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29262, 29308), 'os.chdir', 'os.chdir', (["(prevdir + '/features/video_features')"], {}), "(prevdir + '/features/video_features')\n", (29270, 29308), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((37491, 37514), 'numpy.where', 'np.where', (['(z > threshold)'], {}), '(z > threshold)\n', (37499, 37514), True, 'import numpy as np\n'), ((51148, 51326), 'train_autobazaar.train_autobazaar', 'autobzr.train_autobazaar', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (51172, 51326), True, 'import train_autobazaar as autobzr\n'), ((60284, 60305), 'torch.load', 'torch.load', (['modelname'], {}), '(modelname)\n', (60294, 60305), False, 'import torch\n'), ((61122, 61133), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (61131, 61133), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((62841, 62862), 'keras.models.load_model', 'load_model', (['modelname'], {}), '(modelname)\n', (62851, 62862), False, 'from keras.models import load_model\n'), ((62877, 62897), 'keras_compressor.compressor.compress', 'compress', (['model', '(0.7)'], {}), '(model, 0.7)\n', (62885, 62897), False, 'from keras_compressor.compressor import compress\n'), ((51420, 51596), 'train_autogbt.train_autogbt', 'tautogbt.train_autogbt', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (51442, 51596), True, 'import train_autogbt as tautogbt\n'), ((60398, 60452), 'ludwig.api.LudwigModel.load', 'LudwigModel.load', (['"""ludwig_files/experiment_run/model/"""'], {}), "('ludwig_files/experiment_run/model/')\n", (60414, 60452), False, 'from ludwig.api import LudwigModel\n'), ((7705, 7728), 
'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (7716, 7728), True, 'import pandas as pd\n'), ((13010, 13033), 'sklearn.metrics.auc', 'metrics.auc', (['fper', 'tper'], {}), '(fper, tper)\n', (13021, 13033), False, 'from sklearn import metrics\n'), ((51702, 51877), 'train_autogluon.train_autogluon', 'tautg.train_autogluon', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (51723, 51877), True, 'import train_autogluon as tautg\n'), ((60559, 60580), 'keras.models.load_model', 'load_model', (['modelname'], {}), '(modelname)\n', (60569, 60580), False, 'from keras.models import load_model\n'), ((7859, 7882), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (7870, 7882), True, 'import pandas as pd\n'), ((51976, 52154), 'train_autokaggle.train_autokaggle', 'autokag.train_autokaggle', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (52000, 52154), True, 'import train_autokaggle as autokag\n'), ((52254, 52434), 'train_autokeras.train_autokeras', 'autokeras_.train_autokeras', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (52280, 52434), True, 'import train_autokeras as autokeras_\n'), ((52525, 52699), 'train_automl.train_automl', 'auto_ml.train_automl', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (52545, 52699), True, 'import train_automl as auto_ml\n'), ((8484, 8507), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (8495, 8507), True, 'import pandas as pd\n'), ((53084, 53266), 'train_autopytorch.train_autopytorch', 'autotorch_.train_autopytorch', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (53112, 53266), True, 'import train_autopytorch as autotorch_\n'), ((53348, 53516), 'train_btb.train_btb', 'tbtb.train_btb', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (53362, 53516), True, 'import train_btb as tbtb\n'), ((53606, 
53778), 'train_cvopt.train_cvopt', 'tcvopt.train_cvopt', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (53624, 53778), True, 'import train_cvopt as tcvopt\n'), ((53863, 54031), 'train_devol.train_devol', 'td.train_devol', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (53877, 54031), True, 'import train_devol as td\n'), ((54116, 54286), 'train_gama.train_gama', 'tgama.train_gama', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (54132, 54286), True, 'import train_gama as tgama\n'), ((54378, 54552), 'train_gentun.train_gentun', 'tgentun.train_gentun', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (54398, 54552), True, 'import train_gentun as tgentun\n'), ((54650, 54826), 'train_hyperband.train_hyperband', 'thband.train_hyperband', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (54672, 54826), True, 'import train_hyperband as thband\n'), ((54921, 55094), 'train_hypsklearn.train_hypsklearn', 'th.train_hypsklearn', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (54940, 55094), True, 'import train_hypsklearn as th\n'), ((55191, 55367), 'train_hungabunga.train_hungabunga', 'thung.train_hungabunga', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (55213, 55367), True, 'import train_hungabunga as thung\n'), ((55461, 55635), 'train_imbalance.train_imbalance', 'timb.train_imbalance', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, 
problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (55481, 55635), True, 'import train_imbalance as timb\n'), ((55719, 55887), 'train_keras.train_keras', 'tk.train_keras', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (55733, 55887), True, 'import train_keras as tk\n'), ((55973, 56142), 'train_ludwig.train_ludwig', 'tl.train_ludwig', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (55988, 56142), True, 'import train_ludwig as tl\n'), ((56233, 56405), 'train_mlblocks.train_mlblocks', 'mlb.train_mlblocks', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (56251, 56405), True, 'import train_mlblocks as mlb\n'), ((56493, 56665), 'train_mlbox.train_mlbox', 'mlbox_.train_mlbox', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (56511, 56665), True, 'import train_mlbox as mlbox_\n'), ((56918, 57096), 'train_neuraxle.train_neuraxle', 'tneuraxle.train_neuraxle', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (56942, 57096), True, 'import train_neuraxle as tneuraxle\n'), ((57387, 57564), 'train_pytorch.train_pytorch', 't_pytorch.train_pytorch', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (57410, 57564), True, 'import train_pytorch as t_pytorch\n'), ((57649, 57819), 'train_safe.train_safe', 'tsafe.train_safe', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (57665, 57819), True, 'import train_safe as tsafe\n'), ((57925, 58088), 'train_scsr.train_sc', 'scsr.train_sc', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 
'transform_model', 'settings', 'minlength'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n minlength)\n', (57938, 58088), True, 'import train_scsr as scsr\n'), ((58364, 58531), 'train_TPOT.train_TPOT', 'tt.train_TPOT', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (58377, 58531), True, 'import train_TPOT as tt\n'), ((58125, 58281), 'train_scsr.train_sr', 'scsr.train_sr', (['X_train', 'X_test', 'y_train', 'y_test', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'model_dir', 'settings'], {}), '(X_train, X_test, y_train, y_test, common_name_model,\n problemtype, classes, default_featurenames, transform_model, model_dir,\n settings)\n', (58138, 58281), True, 'import train_scsr as scsr\n')]
|
import numpy as np
import cvxpy as cvx
import util
def set_contains_array(S, a):
"""
:param S: list of np.ndarray
:param a: np.ndarray
:return: contains, 0 or 1
"""
contains = 0
for b in S:
        if not (a - b).any():  # a == b elementwise, so a is already in S
contains = 1
return contains
def set_sum_two(A, B):
"""
:param A: list of np.ndarray
:param B: list of np.ndarray
:return: list of np.ndarray
"""
C = []
for a in A:
for b in B:
if not set_contains_array(C, a + b):
C.append(a + b)
return C
def set_sum_list(Omega):
"""
Set sum of multiple set of np.ndarray
:param Omega: list of list of np.ndarray
:return: list of np.ndarray
"""
S = Omega[0]
# print 'len(Omega) =', len(Omega)
# print 0, 'S =', S
for i in range(1, len(Omega)):
# print i, 'Omega[i] =',Omega[i]
S = set_sum_two(S, Omega[i])
# print i, 'S =', S
return S
def pointwise_dominate(w, U):
"""
    Test if w is strictly point-wise dominated by any u in U
:param w: np.ndarray
:param U: list of np.ndarray
:return:
"""
for u in U:
if np.all(w < u):
return True
return False
def lp_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With LP in White & Clark
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
# print("LP dominate")
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
    constraints = [b @ (w - u) >= d for u in U] + [cvx.sum(b) == 1, b >= 0]
prob = cvx.Problem(objective, constraints)
result = prob.solve()
# print("d =", d.value)
    if d.value is not None and d.value >= 0:
return np.ravel(b.value)
else:
return None
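# Hedged usage sketch (illustrative only, not part of the original module):
# with w = (1, 0) and U = {(0, 1)}, w is better than every u in U at the
# belief concentrated on state 0, so lp_dominate should return a witness
# belief close to (1, 0). Call _demo_lp_dominate() manually to try it.
def _demo_lp_dominate():
    w = np.array([1.0, 0.0])
    U = [np.array([0.0, 1.0])]
    print('witness belief:', lp_dominate(w, U))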
def dec_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With Bender's decomposition (Walraven & Spaan, 2017)
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
    constraints = [cvx.sum(b) == 1, b >= 0]
b_ = np.random.random(S)
b_ = b_ / np.sum(b_)
U_ = []
while 1:
_b = b_
u_ = U[np.argmin([np.dot((w - U[i]), _b) for i in range(len(U))])]
        constraints += [d <= b @ (w - u_)]
U_.append(u_)
prob = cvx.Problem(objective, constraints)
_ = prob.solve()
b_ = np.ravel(b.value)
        if np.allclose(b_, _b):  # converged: the witness belief stopped changing
            break
    if d.value is not None and d.value >= 0:
return _b
else:
return None
def lex_less(u, w):
if w is None:
return False
for i in range(len(u)):
if u[i] > w[i]:
return False
return True
def best_point(b, U):
# print("Find best")
_max = -np.inf
w = None
for i in range(len(U)):
u = U[i]
# print("b", b)
# print("u", u)
x = np.dot(b, u)
# print("x", x)
if x > _max or (x == _max and lex_less(u, U[w])):
w = i
_max = x
# print("max", _max)
return w
def prune(W, A=None):
# print("prune", W)
D, E = [], []
while len(W) > 0:
w = W[-1]
if pointwise_dominate(w, D):
W.pop()
else:
# b = lp_dominate(w, D)
b = dec_dominate(w, D)
if b is None:
W.pop()
else:
i = best_point(b, W)
D.append(W[i])
if A is not None:
E.append(A[i])
W.pop(i)
if A is not None:
return D, E
else:
return D
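# Hedged sanity-check sketch (illustrative, not in the original module): of
# the three alpha vectors below, (0.4, 0.4) is beaten at every belief by one
# of the two extreme vectors, so prune should discard it. Note that prune
# mutates its argument, hence the copy. Call _demo_prune() manually to try it.
def _demo_prune():
    W = [np.array([1.0, 0.0]), np.array([0.4, 0.4]), np.array([0.0, 1.0])]
    print('undominated vectors:', prune(list(W)))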
def set_union(V):
V_ = []
for v in V:
V_ += v
return V_
class POMDP:
def __init__(self, P=None, Z=None, R=None, g=None, alpha=1.0):
self.P = P # m x n x n: a(t)->s(t)->s(t+1)
self.Z = Z # m x n x k: a(t)->s(t+1)->o(t+1)
        self.R = R  # m x n x n: a(t)->s(t)->s(t+1)
self.g = g # n x 1: s(T)
self.alpha = alpha # discount factor
self.nActions = self.Z.shape[0] # m
self.nStates = self.Z.shape[1] # n
self.nLevels = self.Z.shape[2] # k
if g is None:
self.g = np.zeros(self.nStates)
# print self.nActions, self.nStates, self.nLevels
def update_belief(self, b, a, o):
p = self.Z[a, :, o] * self.P[a].T.dot(b)
return p / p.sum()
def monahan_enumeration(self, V):
"""construct the set of Omega
:param V: input list of alpha vectors
"""
V_, A_ = [], []
for a in range(self.nActions):
# print("Action", a)
Va = []
_r = np.sum(self.P[a] * self.R[a], axis=1) / self.nLevels
# print("_r:", _r)
for z in range(self.nLevels):
# print("Obs", z)
Vaz = [_r + self.alpha * (self.Z[a,:,z] * v).dot(self.P[a]) for v in V]
# print("Vaz", Vaz)
if len(Va) > 0:
Va = prune(set_sum_two(Va, Vaz)) # incremental pruning
else:
Va = Vaz
A_ += [a for _ in Va]
V_ += Va
V_, A_ = prune(V_, A_)
return V_, A_
def transition(self, a, s):
return np.random.choice(self.nStates, p=self.P[a, s])
    def emission(self, a, s):
        return np.random.choice(self.nLevels, p=self.Z[a, s])
@staticmethod
def optimal_action(b, V, A):
assert len(V) == len(A)
values = [np.dot(b, v) for v in V]
opt_idx = np.argmax(values)
return A[opt_idx], V[opt_idx]
def solve(self, T):
        V = [self.g]  # the value function is a set of alpha vectors
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T-1-t] = V
Actions[T-1-t] = A
return Values, Actions
def plan(self, T, initial_belief=None, perform=False):
        V = [self.g]  # the value function is a set of alpha vectors
if initial_belief is None:
initial_belief = np.ones(self.nStates) / self.nStates
b = initial_belief
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T - 1 - t] = V
Actions[T - 1 - t] = A
a0, v0 = self.optimal_action(b, Values[0], Actions[0])
if not perform:
return a0, v0
s = np.random.choice(self.nStates, p=b)
actions, states, observations, reward = [], [], [], 0.0
for t in range(T):
a, v = self.optimal_action(b, Values[t], Actions[t])
# print('a', a)
# print('v', v)
_s = s
s = self.transition(a, s)
            o = self.emission(a, s)
b = self.update_belief(b, a, o)
states.append(_s)
            actions.append(a)
observations.append(o)
reward += self.R[a, _s, s] * self.alpha ** t
return a0, v0, actions, states, observations, reward
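# Hedged end-to-end sketch (hand-built illustrative numbers, not from the
# original module): one Bayes belief update,
#   b'(s') proportional to Z[a, s', o] * sum_s P[a, s, s'] * b(s),
# on a two-state, two-observation, one-action model. Call manually to try it.
def _demo_belief_update():
    P = np.array([[[0.9, 0.1], [0.2, 0.8]]])  # transition model, 1 action
    Z = np.array([[[0.7, 0.3], [0.4, 0.6]]])  # observation model
    R = np.zeros((1, 2, 2))                   # rewards unused here
    pomdp = POMDP(P, Z, R, g=np.zeros(2))
    b = np.array([0.5, 0.5])
    print('posterior belief:', pomdp.update_belief(b, a=0, o=0))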
def test_pomdp(nActions, nStates, nLevels, alpha):
# P = np.array([
# [[0.25, 0.75], [0.6 , 0.4 ]],
# [[0.5 , 0.5 ], [0.7 , 0.3 ]]])
# Z = np.array([
# [[0.55, 0.45], [0.3 , 0.7 ]],
# [[0.65, 0.35], [0.25, 0.75]]])
# R = np.array([
# [[2., 2. ], [ 0., 0.]],
# [[3., 3. ], [-1., -1.]]])
# g = np.array([2., -1.])
P = util.normalize(np.random.random(size=(nActions, nStates, nStates)), axis=2)
Z = util.normalize(np.random.random(size=(nActions, nStates, nLevels)), axis=2)
R = util.normalize(np.random.random(size=(nActions, nStates, nStates)), axis=2)
g = util.normalize(np.random.random(size=(nStates)), axis=0)
pomdp = POMDP(P, Z, R, g, alpha)
T = 10
    V = [pomdp.g]
a0, v0 = pomdp.plan(T, initial_belief=None, perform=False)
# a0, v0, actions, states, observations, reward = pomdp.plan(T, initial_belief=None, perform=True)
# print('a0 =', a0, 'v0 =', v0)
# print('actions:', actions)
# print('states:', states)
# print('observations:', observations)
# print('reward:', reward)
# for t in range(T):
# print("Iteration", t+1)
# V, A = pomdp.monahan_enumeration(V)
# for v, a in zip(V, A):
# print(v, a)
if __name__ == "__main__":
# import timeit
# print(timeit.timeit("main()"))
import time
for s in range(123, 133):
start_time = time.time()
np.random.seed(s)
print("===== SEED %d =====" %(s))
test_pomdp(nActions=2, nStates=3, nLevels=3, alpha=0.9975)
end_time = time.time()
print(end_time - start_time)
|
[
"cvxpy.Variable",
"cvxpy.Problem",
"numpy.ones",
"numpy.random.random",
"numpy.random.choice",
"numpy.argmax",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.random.seed",
"numpy.ravel",
"numpy.all",
"time.time",
"cvxpy.Maximize"
] |
[((1555, 1569), 'cvxpy.Variable', 'cvx.Variable', ([], {}), '()\n', (1567, 1569), True, 'import cvxpy as cvx\n'), ((1578, 1593), 'cvxpy.Variable', 'cvx.Variable', (['S'], {}), '(S)\n', (1590, 1593), True, 'import cvxpy as cvx\n'), ((1610, 1625), 'cvxpy.Maximize', 'cvx.Maximize', (['d'], {}), '(d)\n', (1622, 1625), True, 'import cvxpy as cvx\n'), ((1722, 1757), 'cvxpy.Problem', 'cvx.Problem', (['objective', 'constraints'], {}), '(objective, constraints)\n', (1733, 1757), True, 'import cvxpy as cvx\n'), ((2205, 2219), 'cvxpy.Variable', 'cvx.Variable', ([], {}), '()\n', (2217, 2219), True, 'import cvxpy as cvx\n'), ((2228, 2243), 'cvxpy.Variable', 'cvx.Variable', (['S'], {}), '(S)\n', (2240, 2243), True, 'import cvxpy as cvx\n'), ((2260, 2275), 'cvxpy.Maximize', 'cvx.Maximize', (['d'], {}), '(d)\n', (2272, 2275), True, 'import cvxpy as cvx\n'), ((2340, 2359), 'numpy.random.random', 'np.random.random', (['S'], {}), '(S)\n', (2356, 2359), True, 'import numpy as np\n'), ((1193, 1206), 'numpy.all', 'np.all', (['(w < u)'], {}), '(w < u)\n', (1199, 1206), True, 'import numpy as np\n'), ((1848, 1865), 'numpy.ravel', 'np.ravel', (['b.value'], {}), '(b.value)\n', (1856, 1865), True, 'import numpy as np\n'), ((2374, 2384), 'numpy.sum', 'np.sum', (['b_'], {}), '(b_)\n', (2380, 2384), True, 'import numpy as np\n'), ((2579, 2614), 'cvxpy.Problem', 'cvx.Problem', (['objective', 'constraints'], {}), '(objective, constraints)\n', (2590, 2614), True, 'import cvxpy as cvx\n'), ((2653, 2670), 'numpy.ravel', 'np.ravel', (['b.value'], {}), '(b.value)\n', (2661, 2670), True, 'import numpy as np\n'), ((3130, 3142), 'numpy.dot', 'np.dot', (['b', 'u'], {}), '(b, u)\n', (3136, 3142), True, 'import numpy as np\n'), ((5504, 5550), 'numpy.random.choice', 'np.random.choice', (['self.nStates'], {'p': 'self.P[a, s]'}), '(self.nStates, p=self.P[a, s])\n', (5520, 5550), True, 'import numpy as np\n'), ((5598, 5644), 'numpy.random.choice', 'np.random.choice', (['self.nStates'], {'p': 'self.Z[a, s]'}), '(self.nStates, p=self.Z[a, s])\n', (5614, 5644), True, 'import numpy as np\n'), ((5790, 5807), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (5799, 5807), True, 'import numpy as np\n'), ((6728, 6763), 'numpy.random.choice', 'np.random.choice', (['self.nStates'], {'p': 'b'}), '(self.nStates, p=b)\n', (6744, 6763), True, 'import numpy as np\n'), ((7729, 7780), 'numpy.random.random', 'np.random.random', ([], {'size': '(nActions, nStates, nStates)'}), '(size=(nActions, nStates, nStates))\n', (7745, 7780), True, 'import numpy as np\n'), ((7813, 7864), 'numpy.random.random', 'np.random.random', ([], {'size': '(nActions, nStates, nLevels)'}), '(size=(nActions, nStates, nLevels))\n', (7829, 7864), True, 'import numpy as np\n'), ((7897, 7948), 'numpy.random.random', 'np.random.random', ([], {'size': '(nActions, nStates, nStates)'}), '(size=(nActions, nStates, nStates))\n', (7913, 7948), True, 'import numpy as np\n'), ((7981, 8011), 'numpy.random.random', 'np.random.random', ([], {'size': 'nStates'}), '(size=nStates)\n', (7997, 8011), True, 'import numpy as np\n'), ((8743, 8754), 'time.time', 'time.time', ([], {}), '()\n', (8752, 8754), False, 'import time\n'), ((8763, 8780), 'numpy.random.seed', 'np.random.seed', (['s'], {}), '(s)\n', (8777, 8780), True, 'import numpy as np\n'), ((8909, 8920), 'time.time', 'time.time', ([], {}), '()\n', (8918, 8920), False, 'import time\n'), ((2315, 2324), 'numpy.sum', 'np.sum', (['b'], {}), '(b)\n', (2321, 2324), True, 'import numpy as np\n'), ((4441, 4463), 'numpy.zeros', 'np.zeros', 
(['self.nStates'], {}), '(self.nStates)\n', (4449, 4463), True, 'import numpy as np\n'), ((5747, 5759), 'numpy.dot', 'np.dot', (['b', 'v'], {}), '(b, v)\n', (5753, 5759), True, 'import numpy as np\n'), ((1695, 1704), 'numpy.sum', 'np.sum', (['b'], {}), '(b)\n', (1701, 1704), True, 'import numpy as np\n'), ((4905, 4942), 'numpy.sum', 'np.sum', (['(self.P[a] * self.R[a])'], {'axis': '(1)'}), '(self.P[a] * self.R[a], axis=1)\n', (4911, 4942), True, 'import numpy as np\n'), ((6298, 6319), 'numpy.ones', 'np.ones', (['self.nStates'], {}), '(self.nStates)\n', (6305, 6319), True, 'import numpy as np\n'), ((2452, 2472), 'numpy.dot', 'np.dot', (['(w - U[i])', '_b'], {}), '(w - U[i], _b)\n', (2458, 2472), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @author : wanglei
# @date : 2021/2/19 1:47 PM
# @description :
import numpy as np
"""
感应器对象
"""
class Perceptron(object):
"""
该方法为感应器的初始化方法
eta:学习速率
n_iter:学习次数(迭代次数)
"""
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
"""
该方法为模型训练的方法
shape[0]返回该矩阵有几行
shape[1]返回该矩阵有几列
在这个例子中X.shape[1]=2
np.zeros(1 + X.shape[1])是一个1行3列的元素都为零的列表
"""
def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1])  # initialize weights and threshold to zero
        self.errors_ = []  # records the number of misclassified samples per pass
        for _ in range(self.n_iter):  # repeat over the whole sample set
            errors = 0  # misclassification count for this pass
            for xi, target in zip(X, y):  # iterate over samples and their true labels
                update = self.eta * (
                    target - self.predict(xi))  # actual minus predicted: 0 when correct, otherwise scaled by the learning rate to give this step's weight/threshold update
                self.w_[1:] += update * xi  # weights unchanged when the prediction was correct (update == 0), otherwise adjusted
                self.w_[0] += update  # likewise for the threshold
                errors += int(update != 0.0)  # record one error whenever the update is nonzero
            self.errors_.append(errors)  # after the full pass, store this pass's error count
return self
"""
该方法为将一个样本的属性值进行处理的方法
X=array([[1,2,3,4],[5,6,7,8],...])
self.w_[1:]=array([0,0,0,0])
根据api:dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
np.dot(X,self.w_[1:])=array([[0],[0],...])【将每一个属性乘上权重再将每一个样本的每个属性值进行求和】
self.w_[0]=array([[0]])获取阈值
"""
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
"""
该方法为一个样本的预测结果输出方法
numpy.where(condition[, x, y])
就是一个三目运算,满足条件就输出x,否则输出y
"""
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
import pandas as pd
"""
读取数据源
"""
df = pd.read_csv("/Users/a1/Downloads/iris.data", header=None)
print(df.tail())  # print the last few rows
y = df.iloc[0:100, 4].values  # take column 4 (the class label) of the first 100 rows, which cover just two classes
print(y)
y = np.where(y == 'Iris-setosa', -1, 1)  # replace the text labels with numbers, splitting into two classes
X = df.iloc[0:100, [0, 2]].values  # take columns 0 and 2 of the first 100 rows, i.e. sepal length and petal length
print(X)
"""
对模型进行训练,查看训练时每次迭代的错误数量
"""
ppn= Perceptron(eta=0.1, n_iter=10)
ppn.fit(X,y)
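"""
Hedged sketch with synthetic data (the iris path above is machine-specific):
two linearly separable clusters, so the update rule w += eta * (y - y_hat) * x
should drive the per-pass error counts in errors_ down to zero. The names
X_demo and y_demo are illustrative only.
"""
np.random.seed(0)
X_demo = np.vstack([np.random.randn(20, 2) + [2.0, 2.0],
                    np.random.randn(20, 2) - [2.0, 2.0]])
y_demo = np.array([1] * 20 + [-1] * 20)
ppn_demo = Perceptron(eta=0.1, n_iter=10).fit(X_demo, y_demo)
print(ppn_demo.errors_)  # expected to reach 0 once the classes are separated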
|
[
"numpy.where",
"numpy.dot",
"numpy.zeros",
"pandas.read_csv"
] |
[((1812, 1869), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/a1/Downloads/iris.data"""'], {'header': 'None'}), "('/Users/a1/Downloads/iris.data', header=None)\n", (1823, 1869), True, 'import pandas as pd\n'), ((1969, 2004), 'numpy.where', 'np.where', (["(y == 'Iris-setosa')", '(-1)', '(1)'], {}), "(y == 'Iris-setosa', -1, 1)\n", (1977, 2004), True, 'import numpy as np\n'), ((505, 529), 'numpy.zeros', 'np.zeros', (['(1 + X.shape[1])'], {}), '(1 + X.shape[1])\n', (513, 529), True, 'import numpy as np\n'), ((1550, 1572), 'numpy.dot', 'np.dot', (['X', 'self.w_[1:]'], {}), '(X, self.w_[1:])\n', (1556, 1572), True, 'import numpy as np\n')]
|
########################################################################
# Copyright 2021, UChicago Argonne, LLC
#
# Licensed under the BSD-3 License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a
# copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
########################################################################
"""
date: 2021-08-12
author: matz
Cheng-Todreas correlation for flow split (1986)
"""
########################################################################
import numpy as np
from . import friction_ctd as ctd
applicability = ctd.applicability
########################################################################
# MODULE-WIDE CONSTANTS
_GAMMA = 1 / 3.0
_M = ctd._m
_EXP1 = {}
_EXP2 = {}
for regime in ctd._m.keys():
_EXP1[regime] = (1 + ctd._m[regime]) / (2 - ctd._m[regime])
_EXP2[regime] = 1 / (2 - ctd._m[regime])
########################################################################
def calculate_flow_split(asm_obj, regime=None, beta=1.0):
"""Calculate the flow split into the different types of
subchannels based on the Cheng-Todreas model
Parameters
----------
asm_obj : DASSH Assembly object
Contains the geometric description of the assembly
regime : str or NoneType
Indicate flow regime for which to calculate flow split
{'turbulent', 'laminar', None}; default = None
beta : float
        Beta is a factor used to combine the laminar and turbulent
        flow split terms in the transition region. It comes from
        Cheng's 1984 thesis, which recommends a value of 0.05.
        There, Figure 4.19 shows the edge flow split assuming
        beta=0.05. In practice, however, beta=0.05 gives unphysical
        results while beta=1.0 reproduces the figure, so it is set
        to 1.0 here by default.
Returns
-------
numpy.ndarray
Flow split between interior, edge, and corner coolant
subchannels
"""
try:
Re_bnds = asm_obj.corr_constants['fs']['Re_bnds']
except (KeyError, AttributeError):
Re_bnds = ctd.calculate_Re_bounds(asm_obj)
try:
Cf = asm_obj.corr_constants['fs']['Cf_sc']
except (KeyError, AttributeError):
Cf = ctd.calculate_subchannel_friction_factor_const(asm_obj)
if regime is not None:
return _calculate_flow_split(asm_obj, Cf, regime, Re_bnds, beta=beta)
elif asm_obj.coolant_int_params['Re'] <= Re_bnds[0]:
return _calculate_flow_split(asm_obj, Cf, 'laminar')
elif asm_obj.coolant_int_params['Re'] >= Re_bnds[1]:
return _calculate_flow_split(asm_obj, Cf, 'turbulent')
else:
return _calculate_flow_split(asm_obj, Cf, 'transition', Re_bnds, beta)
def _calculate_flow_split(asm_obj, Cf_dict, regime, Re_bnds=None, beta=1.0):
"""Worker function to calculate the flow split into the
different types of subchannels based on the Cheng-Todreas
model.
Parameters
----------
asm_obj : DASSH Assembly object
Contains the geometric description of the assembly
Cf_dict : dict
Dictionary containing subchannel friction factor constants;
keys: ['laminar', 'turbulent']
regime : str {'laminar', 'turbulent', 'transition'}
Flow regime with which to evaluate flow split ratios
Re_bnds : list (optional)
Reynolds number flow regime boundaries for calculating
intermittency factor in transition regime
beta : float
        Beta is a factor used to combine the laminar and turbulent
        flow split terms in the transition region. It comes from
        Cheng's 1984 thesis, which recommends a value of 0.05.
        There, Figure 4.19 shows the edge flow split assuming
        beta=0.05. In practice, however, beta=0.05 gives unphysical
        results while beta=1.0 reproduces the figure, so it is set
        to 1.0 here by default.
Returns
-------
numpy.ndarray
Flow split between interior, edge, and corner coolant
subchannels
Notes
-----
This method is imported by the flow split model in the
Upgraded Cheng-Todreas correlation (flowsplit_uctd)
"""
if regime == 'transition':
try:
na = asm_obj.corr_constants['fs']['na']
except (KeyError, AttributeError):
na = [asm_obj.subchannel.n_sc['coolant']['interior']
* asm_obj.params['area'][0],
asm_obj.subchannel.n_sc['coolant']['edge']
* asm_obj.params['area'][1],
asm_obj.subchannel.n_sc['coolant']['corner']
* asm_obj.params['area'][2]]
flow_split = np.zeros(3)
intf_b = ctd.calc_intermittency_factor(
asm_obj, Re_bnds[0], Re_bnds[1])
xratio_t = asm_obj.corr_constants['fs']['xr']['transition'].copy()
xratio_t[0] = (xratio_t[0]
* (1 - intf_b)**_GAMMA
/ asm_obj.coolant_int_params['Re'])
xratio_t[1] = (xratio_t[1]
* intf_b**_GAMMA
/ asm_obj.coolant_int_params['Re']**_M['turbulent']
)**_EXP2['turbulent']
# xratio = xratio_t1 + beta * xratio_t2
xratio = xratio_t[0] + beta * xratio_t[1]
x1x2 = xratio[1] / xratio[0] # Equation 4.51 in Cheng 1984
x3x2 = xratio[1] / xratio[2] # Equation 4.51 in Cheng 1984
flow_split[1] = (asm_obj.bundle_params['area']
/ (na[1] + x1x2 * na[0] + x3x2 * na[2]))
flow_split[0] = x1x2 * flow_split[1]
flow_split[2] = x3x2 * flow_split[1]
else:
flow_split = asm_obj.corr_constants['fs']['fs'][regime]
return flow_split
def calc_constants(asm_obj):
"""Calculate constants needed by the CTD flowsplit calculation"""
const = ctd.calc_constants(asm_obj)
del const['Cf_b']
# Total subchannel area for each subchannel type
const['na'] = [asm_obj.subchannel.n_sc['coolant']['interior']
* asm_obj.params['area'][0],
asm_obj.subchannel.n_sc['coolant']['edge']
* asm_obj.params['area'][1],
asm_obj.subchannel.n_sc['coolant']['corner']
* asm_obj.params['area'][2]]
# REGIME RATIO CONSTANTS
const['xr'] = _calc_regime_ratio_constants(asm_obj, const['Cf_sc'])
# Laminar/turbulent: constant flow split!
const['fs'] = _calc_constant_flowsplits(asm_obj, const)
return const
def _calc_regime_ratio_constants(asm_obj, Cf_sc):
"""Constant ratios for laminar, turbulent, and transition regimes"""
xr = {}
xr['transition'] = np.array([
(Cf_sc['laminar']
* asm_obj.bundle_params['de']
/ asm_obj.params['de']**2),
(Cf_sc['turbulent']
* asm_obj.bundle_params['de']**_M['turbulent']
/ asm_obj.params['de']**(_M['turbulent'] + 1))
])
# Laminar/turbulent regime
for k in ['laminar', 'turbulent']:
xr[k] = np.array([
((asm_obj.params['de'][0] / asm_obj.params['de'][1])**_EXP1[k]
* (Cf_sc[k][1] / Cf_sc[k][0])**_EXP2[k]),
((asm_obj.params['de'][2] / asm_obj.params['de'][1])**_EXP1[k]
* (Cf_sc[k][1] / Cf_sc[k][2])**_EXP2[k])
])
return xr
def _calc_constant_flowsplits(asm_obj, const):
"""Laminar and turbulent flowsplits are constant"""
fs = {}
for k in ['laminar', 'turbulent']:
fs[k] = np.zeros(3)
fs[k][1] = (asm_obj.bundle_params['area']
/ (const['na'][1]
+ const['xr'][k][0] * const['na'][0]
+ const['xr'][k][1] * const['na'][2]))
fs[k][0] = const['xr'][k][0] * fs[k][1]
fs[k][2] = const['xr'][k][1] * fs[k][1]
return fs
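# Hedged numeric sketch (placeholder values, no DASSH objects): the constant
# laminar/turbulent flow-split ratios above reduce to
#     x_i / x_2 = (De_i / De_2)**((1 + m) / (2 - m))
#                 * (Cf_2 / Cf_i)**(1 / (2 - m))
# (Equation 4.51 in Cheng 1984). The diameters and friction constants below
# are made up for illustration, not data from any real rod bundle.
if __name__ == '__main__':
    m = _M['turbulent']
    de = np.array([3.0e-3, 4.0e-3, 2.0e-3])  # interior, edge, corner De (m)
    cf = np.array([0.18, 0.20, 0.17])        # subchannel friction constants
    x1x2 = (de[0] / de[1])**((1 + m) / (2 - m)) * (cf[1] / cf[0])**(1 / (2 - m))
    x3x2 = (de[2] / de[1])**((1 + m) / (2 - m)) * (cf[1] / cf[2])**(1 / (2 - m))
    print('x1/x2 =', x1x2, 'x3/x2 =', x3x2)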
|
[
"numpy.array",
"numpy.zeros"
] |
[((8731, 8944), 'numpy.array', 'np.array', (["[Cf_sc['laminar'] * asm_obj.bundle_params['de'] / asm_obj.params['de'] ** 2,\n Cf_sc['turbulent'] * asm_obj.bundle_params['de'] ** _M['turbulent'] / \n asm_obj.params['de'] ** (_M['turbulent'] + 1)]"], {}), "([Cf_sc['laminar'] * asm_obj.bundle_params['de'] / asm_obj.params[\n 'de'] ** 2, Cf_sc['turbulent'] * asm_obj.bundle_params['de'] ** _M[\n 'turbulent'] / asm_obj.params['de'] ** (_M['turbulent'] + 1)])\n", (8739, 8944), True, 'import numpy as np\n'), ((5058, 5069), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5066, 5069), True, 'import numpy as np\n'), ((9077, 9313), 'numpy.array', 'np.array', (["[(asm_obj.params['de'][0] / asm_obj.params['de'][1]) ** _EXP1[k] * (Cf_sc[k\n ][1] / Cf_sc[k][0]) ** _EXP2[k], (asm_obj.params['de'][2] / asm_obj.\n params['de'][1]) ** _EXP1[k] * (Cf_sc[k][1] / Cf_sc[k][2]) ** _EXP2[k]]"], {}), "([(asm_obj.params['de'][0] / asm_obj.params['de'][1]) ** _EXP1[k] *\n (Cf_sc[k][1] / Cf_sc[k][0]) ** _EXP2[k], (asm_obj.params['de'][2] /\n asm_obj.params['de'][1]) ** _EXP1[k] * (Cf_sc[k][1] / Cf_sc[k][2]) **\n _EXP2[k]])\n", (9085, 9313), True, 'import numpy as np\n'), ((9544, 9555), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9552, 9555), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from rl.dataset import ReplayBuffer, RandomSampler
from rl.base_agent import BaseAgent
from rl.policies.mlp_actor_critic import MlpActor, MlpCritic
from util.logger import logger
from util.mpi import mpi_average
from util.pytorch import optimizer_cuda, count_parameters, \
compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, \
obs2tensor, to_tensor
from env.action_spec import ActionSpec
class MetaPPOAgent(BaseAgent):
""" Meta policy class. """
def __init__(self, config, ob_space):
super().__init__(config, ob_space)
if config.meta is None:
logger.warn('Creating a dummy meta policy.')
return
# parse body parts and skills
if config.subdiv:
# subdiv = ob1,ob2-ac1/ob3,ob4-ac2/...
clusters = config.subdiv.split('/')
clusters = [cluster.split('-')[1].split(',') for cluster in clusters]
else:
clusters = [ob_space.keys()]
if config.subdiv_skills:
subdiv_skills = config.subdiv_skills.split('/')
subdiv_skills = [skills.split(',') for skills in subdiv_skills]
else:
subdiv_skills = [['primitive']] * len(clusters)
self.subdiv_skills = subdiv_skills
assert len(subdiv_skills) == len(clusters), \
'subdiv_skills and clusters have different # subdivisions'
if config.meta == 'hard':
ac_space = ActionSpec(size=0)
for cluster, skills in zip(clusters, subdiv_skills):
ac_space.add(','.join(cluster), 'discrete', len(skills), 0, 1)
self.ac_space = ac_space
if config.diayn:
ob_clusters = config.subdiv.split('/')
ob_clusters = [cluster.split('-')[0].split(',') for cluster in ob_clusters]
for cluster, skills in zip(ob_clusters, subdiv_skills):
self.ac_space.add(','.join(cluster) + '_diayn', 'continuous', config.z_dim, 0, 1)
# build up networks
self._actor = MlpActor(config, ob_space, ac_space, tanh_policy=False)
self._old_actor = MlpActor(config, ob_space, ac_space, tanh_policy=False)
self._critic = MlpCritic(config, ob_space)
self._network_cuda(config.device)
self._actor_optim = optim.Adam(self._actor.parameters(), lr=config.lr_actor)
self._critic_optim = optim.Adam(self._critic.parameters(), lr=config.lr_critic)
sampler = RandomSampler()
self._buffer = ReplayBuffer(['ob', 'ac', 'done', 'rew', 'ret', 'adv',
'ac_before_activation', 'log_prob'],
config.buffer_size,
sampler.sample_func)
if config.is_chef:
logger.warn('Creating a meta PPO agent')
logger.info('The actor has %d parameters', count_parameters(self._actor))
logger.info('The critic has %d parameters', count_parameters(self._critic))
def store_episode(self, rollouts):
""" Stores @rollouts to replay buffer. """
self._compute_gae(rollouts)
self._buffer.store_episode(rollouts)
def _compute_gae(self, rollouts):
""" Computes GAE from @rollouts. """
T = len(rollouts['done'])
ob = rollouts['ob']
ob = self.normalize(ob)
ob = obs2tensor(ob, self._config.device)
vpred = self._critic(ob).detach().cpu().numpy()[:,0]
assert len(vpred) == T + 1
done = rollouts['done']
rew = rollouts['rew']
        adv = np.empty((T,), 'float32')
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1 - done[t]
delta = rew[t] + self._config.discount_factor * vpred[t + 1] * nonterminal - vpred[t]
adv[t] = lastgaelam = delta + self._config.discount_factor * self._config.gae_lambda * nonterminal * lastgaelam
ret = adv + vpred[:-1]
assert np.isfinite(adv).all()
assert np.isfinite(ret).all()
# update rollouts
if adv.std() == 0:
rollouts['adv'] = (adv * 0).tolist()
else:
rollouts['adv'] = ((adv - adv.mean()) / adv.std()).tolist()
rollouts['ret'] = ret.tolist()
def state_dict(self):
if self._config.meta is None:
return {}
return {
'actor_state_dict': self._actor.state_dict(),
'critic_state_dict': self._critic.state_dict(),
'actor_optim_state_dict': self._actor_optim.state_dict(),
'critic_optim_state_dict': self._critic_optim.state_dict(),
'ob_norm_state_dict': self._ob_norm.state_dict(),
}
def load_state_dict(self, ckpt):
if self._config.meta is None:
return
self._actor.load_state_dict(ckpt['actor_state_dict'])
self._critic.load_state_dict(ckpt['critic_state_dict'])
self._ob_norm.load_state_dict(ckpt['ob_norm_state_dict'])
self._network_cuda(self._config.device)
self._actor_optim.load_state_dict(ckpt['actor_optim_state_dict'])
self._critic_optim.load_state_dict(ckpt['critic_optim_state_dict'])
optimizer_cuda(self._actor_optim, self._config.device)
optimizer_cuda(self._critic_optim, self._config.device)
def _network_cuda(self, device):
self._actor.to(device)
self._old_actor.to(device)
self._critic.to(device)
def sync_networks(self):
sync_networks(self._actor)
sync_networks(self._critic)
def train(self):
self._copy_target_network(self._old_actor, self._actor)
for _ in range(self._config.num_batches):
transitions = self._buffer.sample(self._config.batch_size)
train_info = self._update_network(transitions)
self._buffer.clear()
train_info.update({
'actor_grad_norm': compute_gradient_norm(self._actor),
'actor_weight_norm': compute_weight_norm(self._actor),
'critic_grad_norm': compute_gradient_norm(self._critic),
'critic_weight_norm': compute_weight_norm(self._critic),
})
return train_info
def _update_network(self, transitions):
info = {}
# pre-process observations
o = transitions['ob']
o = self.normalize(o)
bs = len(transitions['done'])
_to_tensor = lambda x: to_tensor(x, self._config.device)
o = _to_tensor(o)
ac = _to_tensor(transitions['ac'])
z = _to_tensor(transitions['ac_before_activation'])
ret = _to_tensor(transitions['ret']).reshape(bs, 1)
adv = _to_tensor(transitions['adv']).reshape(bs, 1)
old_log_pi = _to_tensor(transitions['log_prob']).reshape(bs, 1)
log_pi, ent = self._actor.act_log(o, z)
if (log_pi - old_log_pi).max() > 20:
print('(log_pi - old_log_pi) is too large', (log_pi - old_log_pi).max())
import ipdb; ipdb.set_trace()
# the actor loss
entropy_loss = self._config.entropy_loss_coeff * ent.mean()
ratio = torch.exp(torch.clamp(log_pi - old_log_pi, -20, 20))
surr1 = ratio * adv
surr2 = torch.clamp(ratio, 1.0 - self._config.clip_param,
1.0 + self._config.clip_param) * adv
actor_loss = -torch.min(surr1, surr2).mean()
if not np.isfinite(ratio.cpu().detach()).all() or not np.isfinite(adv.cpu().detach()).all():
import ipdb; ipdb.set_trace()
info['entropy_loss'] = entropy_loss.cpu().item()
info['actor_loss'] = actor_loss.cpu().item()
actor_loss += entropy_loss
discriminator_loss = self._actor.discriminator_loss()
if discriminator_loss is not None:
actor_loss += discriminator_loss * self._config.discriminator_loss_weight
info['discriminator_loss'] = discriminator_loss.cpu().item()
# the q loss
value_pred = self._critic(o)
value_loss = self._config.value_loss_coeff * (ret - value_pred).pow(2).mean()
info['value_target'] = ret.mean().cpu().item()
info['value_predicted'] = value_pred.mean().cpu().item()
info['value_loss'] = value_loss.cpu().item()
# update the actor
self._actor_optim.zero_grad()
actor_loss.backward()
sync_grads(self._actor)
self._actor_optim.step()
# update the critic
self._critic_optim.zero_grad()
value_loss.backward()
sync_grads(self._critic)
self._critic_optim.step()
# include info from policy
info.update(self._actor.info)
return mpi_average(info)
def act(self, ob, is_train=True):
"""
Returns a set of actions and the actors' activations given an observation @ob.
"""
if self._config.meta:
ob = self.normalize(ob)
return self._actor.act(ob, is_train, return_log_prob=True)
else:
return [0], None, None
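# Hedged standalone sketch of the two core computations above (illustrative
# numbers only, not part of the original agent): generalized advantage
# estimation,
#     delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
#     adv_t   = delta_t + gamma * lam * (1 - done_t) * adv_{t+1},
# followed by the PPO clipped surrogate loss. Call manually to try it.
def _demo_gae_and_clip(gamma=0.99, lam=0.95, eps=0.2):
    rew = np.array([1.0, 0.0, 1.0], dtype=np.float32)
    done = np.array([0.0, 0.0, 1.0], dtype=np.float32)
    vpred = np.array([0.5, 0.4, 0.6, 0.0], dtype=np.float32)  # V(s_0) .. V(s_T)
    adv = np.empty(3, dtype=np.float32)
    lastgaelam = 0.0
    for t in reversed(range(3)):
        nonterminal = 1.0 - done[t]
        delta = rew[t] + gamma * vpred[t + 1] * nonterminal - vpred[t]
        adv[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    ratio = torch.exp(torch.tensor([0.1, -0.3, 0.0]))  # exp(log_pi - old_log_pi)
    adv_t = torch.tensor(adv)
    surr = -torch.min(ratio * adv_t,
                      torch.clamp(ratio, 1.0 - eps, 1.0 + eps) * adv_t).mean()
    return adv, surr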
|
[
"util.pytorch.compute_gradient_norm",
"rl.dataset.ReplayBuffer",
"util.pytorch.obs2tensor",
"env.action_spec.ActionSpec",
"torch.min",
"rl.policies.mlp_actor_critic.MlpActor",
"numpy.isfinite",
"util.pytorch.sync_networks",
"rl.policies.mlp_actor_critic.MlpCritic",
"util.pytorch.count_parameters",
"numpy.empty",
"util.pytorch.sync_grads",
"util.pytorch.to_tensor",
"ipdb.set_trace",
"util.logger.logger.warn",
"rl.dataset.RandomSampler",
"util.pytorch.compute_weight_norm",
"torch.clamp",
"util.pytorch.optimizer_cuda",
"util.mpi.mpi_average"
] |
[((2111, 2166), 'rl.policies.mlp_actor_critic.MlpActor', 'MlpActor', (['config', 'ob_space', 'ac_space'], {'tanh_policy': '(False)'}), '(config, ob_space, ac_space, tanh_policy=False)\n', (2119, 2166), False, 'from rl.policies.mlp_actor_critic import MlpActor, MlpCritic\n'), ((2193, 2248), 'rl.policies.mlp_actor_critic.MlpActor', 'MlpActor', (['config', 'ob_space', 'ac_space'], {'tanh_policy': '(False)'}), '(config, ob_space, ac_space, tanh_policy=False)\n', (2201, 2248), False, 'from rl.policies.mlp_actor_critic import MlpActor, MlpCritic\n'), ((2272, 2299), 'rl.policies.mlp_actor_critic.MlpCritic', 'MlpCritic', (['config', 'ob_space'], {}), '(config, ob_space)\n', (2281, 2299), False, 'from rl.policies.mlp_actor_critic import MlpActor, MlpCritic\n'), ((2535, 2550), 'rl.dataset.RandomSampler', 'RandomSampler', ([], {}), '()\n', (2548, 2550), False, 'from rl.dataset import ReplayBuffer, RandomSampler\n'), ((2574, 2715), 'rl.dataset.ReplayBuffer', 'ReplayBuffer', (["['ob', 'ac', 'done', 'rew', 'ret', 'adv', 'ac_before_activation', 'log_prob']", 'config.buffer_size', 'sampler.sample_func'], {}), "(['ob', 'ac', 'done', 'rew', 'ret', 'adv',\n 'ac_before_activation', 'log_prob'], config.buffer_size, sampler.\n sample_func)\n", (2586, 2715), False, 'from rl.dataset import ReplayBuffer, RandomSampler\n'), ((3434, 3469), 'util.pytorch.obs2tensor', 'obs2tensor', (['ob', 'self._config.device'], {}), '(ob, self._config.device)\n', (3444, 3469), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((3643, 3668), 'numpy.empty', 'np.empty', (['(T,)', '"""float32"""'], {}), "((T,), 'float32')\n", (3651, 3668), True, 'import numpy as np\n'), ((5260, 5314), 'util.pytorch.optimizer_cuda', 'optimizer_cuda', (['self._actor_optim', 'self._config.device'], {}), '(self._actor_optim, self._config.device)\n', (5274, 5314), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((5323, 5378), 'util.pytorch.optimizer_cuda', 'optimizer_cuda', (['self._critic_optim', 'self._config.device'], {}), '(self._critic_optim, self._config.device)\n', (5337, 5378), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((5553, 5579), 'util.pytorch.sync_networks', 'sync_networks', (['self._actor'], {}), '(self._actor)\n', (5566, 5579), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((5588, 5615), 'util.pytorch.sync_networks', 'sync_networks', (['self._critic'], {}), '(self._critic)\n', (5601, 5615), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((8410, 8433), 'util.pytorch.sync_grads', 'sync_grads', (['self._actor'], {}), '(self._actor)\n', (8420, 8433), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((8573, 8597), 'util.pytorch.sync_grads', 'sync_grads', (['self._critic'], {}), '(self._critic)\n', (8583, 8597), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, 
obs2tensor, to_tensor\n'), ((8722, 8739), 'util.mpi.mpi_average', 'mpi_average', (['info'], {}), '(info)\n', (8733, 8739), False, 'from util.mpi import mpi_average\n'), ((693, 737), 'util.logger.logger.warn', 'logger.warn', (['"""Creating a dummy meta policy."""'], {}), "('Creating a dummy meta policy.')\n", (704, 737), False, 'from util.logger import logger\n'), ((1529, 1547), 'env.action_spec.ActionSpec', 'ActionSpec', ([], {'size': '(0)'}), '(size=0)\n', (1539, 1547), False, 'from env.action_spec import ActionSpec\n'), ((2856, 2896), 'util.logger.logger.warn', 'logger.warn', (['"""Creating a meta PPO agent"""'], {}), "('Creating a meta PPO agent')\n", (2867, 2896), False, 'from util.logger import logger\n'), ((6480, 6513), 'util.pytorch.to_tensor', 'to_tensor', (['x', 'self._config.device'], {}), '(x, self._config.device)\n', (6489, 6513), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((7040, 7056), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (7054, 7056), False, 'import ipdb\n'), ((7177, 7218), 'torch.clamp', 'torch.clamp', (['(log_pi - old_log_pi)', '(-20)', '(20)'], {}), '(log_pi - old_log_pi, -20, 20)\n', (7188, 7218), False, 'import torch\n'), ((7264, 7349), 'torch.clamp', 'torch.clamp', (['ratio', '(1.0 - self._config.clip_param)', '(1.0 + self._config.clip_param)'], {}), '(ratio, 1.0 - self._config.clip_param, 1.0 + self._config.clip_param\n )\n', (7275, 7349), False, 'import torch\n'), ((7559, 7575), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (7573, 7575), False, 'import ipdb\n'), ((2952, 2981), 'util.pytorch.count_parameters', 'count_parameters', (['self._actor'], {}), '(self._actor)\n', (2968, 2981), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((3039, 3069), 'util.pytorch.count_parameters', 'count_parameters', (['self._critic'], {}), '(self._critic)\n', (3055, 3069), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((4039, 4055), 'numpy.isfinite', 'np.isfinite', (['adv'], {}), '(adv)\n', (4050, 4055), True, 'import numpy as np\n'), ((4077, 4093), 'numpy.isfinite', 'np.isfinite', (['ret'], {}), '(ret)\n', (4088, 4093), True, 'import numpy as np\n'), ((5973, 6007), 'util.pytorch.compute_gradient_norm', 'compute_gradient_norm', (['self._actor'], {}), '(self._actor)\n', (5994, 6007), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((6042, 6074), 'util.pytorch.compute_weight_norm', 'compute_weight_norm', (['self._actor'], {}), '(self._actor)\n', (6061, 6074), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((6108, 6143), 'util.pytorch.compute_gradient_norm', 'compute_gradient_norm', (['self._critic'], {}), '(self._critic)\n', (6129, 6143), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((6179, 6212), 'util.pytorch.compute_weight_norm', 'compute_weight_norm', (['self._critic'], {}), '(self._critic)\n', (6198, 6212), False, 'from util.pytorch import optimizer_cuda, 
count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((7401, 7424), 'torch.min', 'torch.min', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (7410, 7424), False, 'import torch\n')]
|
import math
import numpy as np
import cv2
import json
import argparse
def augment_homogeneous(V, augment):
""" Augment a 3xN array of vectors into a 4xN array of homogeneous coordinates
Args:
        V (np.array 3xN): Array of vectors
augment (float): The value to fill in for the W coordinate
Returns:
(np.array 4xN): New array of augmented vectors
"""
Vh = np.zeros((4, V.shape[1]))
Vh[0:3, :] = V[0:3, :]
Vh[3, :] = augment
return Vh
def batch_normalize_3d(V, w):
""" Normalize a 4xN array of vectors in their first three dimensions
Args:
V (np.array 4xN): Array of homogeneous coordinates
w (float): Value to fill in for w coordinate after normalization
Returns:
(np.array 4xN): New array of normalized vectors
"""
norms = np.linalg.norm(V[0:3, :], axis=0)
#norms = np.sqrt(np.sum(V[0:3,:]**2.0, 0))
N = np.copy(V)
for i in range(3):
N[i, :] /= norms
N[3, :] = w
return N
def batch_sphere_interior_intersect(P, V):
""" Compute intersections of a batch of rays against the unit sphere
In case of multiple intersections, the *last* intersection is returned
Args:
P (np.array 4xN): Array of ray origins
V (np.array 4xN): Array of ray directions
Returns:
(np.array N, np.array 4xN, np.array 4xN): Valid, intersections, normals
"""
P3 = P[0:3, :]
V3 = V[0:3, :]
# Parametrize ray as a function of t so that ray(t) = p + v*t
# Then solve for t' such that ||ray(t')||^2 = 1
# This resolves to a quadratic in t that can be solved w/ quadratic eq
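    # Expanding ||p + v*t||^2 = 1 gives (v.v) t^2 + 2 (p.v) t + (p.p - 1) = 0,
    # i.e. the A, B, C coefficients computed per ray below.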
A = np.sum(V3 * V3, 0) # = vx^2 + vy^2 + vz^2
B = 2.0 * np.sum(P3 * V3, 0) # = 2 * (x*vx + y*vy + z*vz)
C = np.sum(P3 * P3, 0) - 1.0 # = x^2 + y^2 + z^2 - 1
discriminant = B**2.0 - 4.0*A*C
valid_pts = discriminant >= 0.0
safe_discriminant = np.maximum(discriminant, 0.0)
# Use latest (largest t) intersection
t = (-B + np.sqrt(safe_discriminant)) / (2.0*A)
# t1 = (-B - np.sqrt(safe_discriminant)) / (2.0*A)
# t = np.maximum(t0, t1)
t[valid_pts == False] = 0.0
P_intersect = P + t*V
# sphere normals are just normalized intersection locations
N = batch_normalize_3d(P_intersect, 0.0)
return valid_pts, P_intersect, N
def batch_plane_intersect(P, V):
""" Compute intersections of a batch of rays against the XY plane
Args:
P (np.array 4xN): Array of ray origins
V (np.array 4xN): Array of ray directions
Returns:
(np.array N, np.array 4xN, np.array 4xN): Valid, intersections, normals
"""
    valid_pts = np.ones(P.shape[1]).astype(bool)
# ray(t) = p + vt, solve for t' s.t. ray(t').z = 0
# 0 = p.z + v.z * t --> t = -p.z / v.z
t = -(P[2,:] / V[2,:])
P_intersect = P + V * t
# plane normals are just z = 1
N = np.zeros(P.shape)
N[2,:] = 1.0
return valid_pts, P_intersect, N
def batch_reflect(V, N):
""" Reflect a batch of vectors by a batch of normals
Args:
V (np.array 4xN): Array of vectors
N (np.array 4xN): Array of normals
Returns:
(np.array 4xN): Array of reflected vectors
"""
    v_dot_n = np.sum(V[0:3, :] * N[0:3, :], axis=0)
# N(V⋅N) gives the component of the vector aligned with the normal
# V = (V - N(V⋅N)) + (N(V⋅N))
# parallel part perpendicular part
# To reflect, we negate the perpendicular part
# V_ref = (V - N(V⋅N)) - (N(V⋅N))
# V_ref = V - 2N(V⋅N)
return V - (2.0 * N * v_dot_n)
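# Illustrative sanity check (not part of the pipeline): reflecting direction
# (1, 0, -1) off a surface with normal (0, 0, 1) gives V - 2N(V.N) = (1, 0, 1),
# i.e. the z-component flips while the tangential components are preserved.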
def batch_transformed_intersect(T, P, V, intersect_func):
""" Compute transformed ray intersections in batch (vectorized)
Args:
T (np.array 4x4): Transform
P (np.array 4xN): Ray origins
V (np.array 4xN): Ray directions
intersect_func (function): Untransformed intersection function
Returns:
        (np.array N, np.array 4xN, np.array 4xN, np.array 4xN): valid, positions, local positions, normals
"""
T_inv = np.linalg.inv(T)
P_loc = T_inv @ P
V_loc = T_inv @ V
valid, P_i_loc, N_loc = intersect_func(P_loc, V_loc)
P_intersect = T @ P_i_loc
# Normals are pseudo-vectors, so we transform them by the inverse transpose
N = batch_normalize_3d(T_inv.T @ N_loc, 0.0)
return valid, P_intersect, P_i_loc, N
def forward_trace(T_ellipse, T_plane, P, V):
""" Trace rays to UV positions on the display plane in a Northstar configuration
Args:
T_ellipse (np.array 4x4): Reflector ellipse as transform of unit sphere
T_plane (np.array 4x4): Display plane as transform of unit XY planar patch
P (np.array 4xN): Ray origins
V (np.array 4xN): Ray directions
Returns:
(np.array N, np.array 2xN): valid, UVs
"""
P = augment_homogeneous(P, 1.0)
V = augment_homogeneous(V, 0.0)
valid, P_i_e, _, N_e = batch_transformed_intersect(T_ellipse, P, V, batch_sphere_interior_intersect)
V_ref = batch_reflect(V, N_e)
valid_p, _, UV, _ = batch_transformed_intersect(T_plane, P_i_e, V_ref, batch_plane_intersect)
## cleanup: scale UVs [-1,1] -> [0,1]; mark out-of-range UVs as invalid
UV = (UV * 0.5) + 0.5
valid = np.logical_and(valid, valid_p)
for i in range(2):
valid[UV[i, :] < 0.0] = False
valid[UV[i, :] > 1.0] = False
return valid, UV[0:2, :]
def rand_circular(n_samples):
""" Sample random points in a unit circle.
Args:
n_samples (int): Number of points to sample.
Returns:
(np.array 2xN): Array of samples.
"""
length = np.random.uniform(0.0, 1.0, (n_samples))
angle = np.pi * np.random.uniform(0.0, 2.0, (n_samples))
ret = np.zeros((2, n_samples))
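    # Taking sqrt of the uniformly sampled radius gives uniform density over the disc area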
ret[0, :] = np.sqrt(length) * np.cos(angle)
ret[1, :] = np.sqrt(length) * np.sin(angle)
return ret
def forward_perspective_trace(T_ellipse, T_plane, fov, resolution, jitter=0.0):
""" Trace UVs for a perspective camera located at the origin.
Args:
T_ellipse (np.array 4x4): Reflector ellipse as transform of unit sphere
T_plane (np.array 4x4): Display plane as transform of unit XY planar patch
fov (float): Field of view (square aspect ratio) in radians
resolution (int): Output resolution (square aspect ratio) in pixels
jitter (float): Amount to randomly jitter each sample point origin XY
Returns:
(np.array NxN, np.array NxN, np.array NxN): valid, U, V
"""
view_limit = math.tan(fov / 2.0)
spts = np.linspace(-view_limit, view_limit, resolution)
X, Y = np.meshgrid(spts, -spts)
P = np.zeros((3, X.size))
if jitter > 0.0:
P[0:2, :] += rand_circular(P.shape[1]) * jitter
V = np.zeros((3, X.size))
V[0, :] = X.reshape(-1)
V[1, :] = Y.reshape(-1)
V[2, :] = -1.0
valid_pts, UV = forward_trace(T_ellipse, T_plane, P, V)
U = UV[0, :].reshape(X.shape)
V = UV[1, :].reshape(X.shape)
valid_mask = valid_pts.reshape(X.shape)
U[valid_mask == False] = 0.0
V[valid_mask == False] = 0.0
return valid_mask, U, V
def invert_map(x_vals, y_vals, target_vals, dest_size):
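    """ Interpolate scattered (x, y) -> target samples onto a regular dest_size grid.
    Used below to build the inverse (display -> view) lookup from forward-traced UVs.
    """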
import scipy
import scipy.interpolate
interpolator = scipy.interpolate.interp2d(x_vals, y_vals, target_vals, kind='cubic')
    # The interpolator returned by interp2d only accepts monotonically
# increasing inputs, so we will need to flip vertically later to
# account for our UV convention of lower-left origin
x_vals = np.linspace(0.0, 1.0, dest_size)
y_vals = np.linspace(0.0, 1.0, dest_size)
inv_map = interpolator(x_vals, y_vals)
inv_map = np.maximum(0.0, np.minimum(1.0, inv_map))
return inv_map
def compute_inverse_maps(valid, u_map, v_map, dest_size):
idim = u_map.shape[0]
src_u, src_v = np.meshgrid(np.linspace(0.0, 1.0, idim),
np.linspace(1.0, 0.0, idim))
inv_u = invert_map(u_map[valid], v_map[valid], src_u[valid], dest_size)
inv_v = invert_map(u_map[valid], v_map[valid], src_v[valid], dest_size)
# Flip V map to account for lower-left origin UVs
inv_v = np.flip(inv_v, 0)
return inv_u, inv_v
def map_image(u_map, v_map, im):
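    """ Warp image `im` through the given UV maps using cv2.remap.
    V is flipped because the UV convention above uses a lower-left origin.
    """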
u_pixel = (u_map * im.shape[1]).astype(np.float32)
v_pixel = ((1.0 - v_map) * im.shape[0]).astype(np.float32)
im_mapped = cv2.remap(im, u_pixel, v_pixel, cv2.INTER_CUBIC)
return im_mapped
def main():
parser = argparse.ArgumentParser(description='Compute Northstar forward/inverse distortion maps.')
parser.add_argument('configfile',
help='Configuration .json to use')
parser.add_argument('--quality', type=int, default=64,
help='Intermediate interpolation resolution (>128 will be very slow)')
parser.add_argument('--testimage', default='uvgrid.png',
help='Image to use for testing projections.')
parser.add_argument('--outformat', default='exr',
help='Output format (exr/png16/png8)')
args = parser.parse_args()
#rendering
view_fov = math.pi / 2.0 # 90 degrees fov
compute_res = 64
forward_res = 1024
dest_size = 1024
# ellipse parameters
e_a = 0.665 #2.5
e_b = 0.528 #2.0
e_f = math.sqrt(e_a**2.0 - e_b**2.0) # focus
ellipse_tf = np.array([[e_a, 0.0, 0.0, -e_f],
[0.0, e_b, 0.0, 0.0],
[0.0, 0.0, e_b, 0.0],
[0.0, 0.0, 0.0, 1.0]])
psize = 0.3
plane_tf = np.array([[psize, 0.0, 0.0, 0.0],
[0.0, psize, 0.0, 0.0],
[0.0, 0.0, psize, 0.0],
[0.0, 0.0, 0.0, 1.0]])
th = -1.0 + math.pi
rotation_mat = np.array([[math.cos(th), 0.0, math.sin(th), 0.0],
[0.0, 1.0, 0.0, 0.0],
[-math.sin(th), 0.0, math.cos(th), 0.0],
[0.0, 0.0, 0.0, 1.0]])
plane_tf = rotation_mat @ plane_tf
plane_tf[0:3, 3] = np.array([-0.2, 0.0, -0.25])
valid, f_u, f_v = forward_perspective_trace(ellipse_tf, plane_tf,
view_fov,
compute_res)
print("Computing inverse maps")
inv_u, inv_v = compute_inverse_maps(valid, f_u, f_v, dest_size)
print("Generating test images")
valid, f_u, f_v = forward_perspective_trace(ellipse_tf, plane_tf,
view_fov,
forward_res)
uv_im = cv2.imread("uv.png")
forward_im = map_image(f_u, f_v, uv_im)
cv2.imwrite("forward_test.png", forward_im)
inv_im = map_image(inv_u, inv_v, uv_im)
cv2.imwrite("inv_test.png", inv_im)
round_trip_im = map_image(f_u, f_v, inv_im)
cv2.imwrite("round_trip_test.png", round_trip_im)
print("Generating miscalibrated IPD image")
ellipse_tf_ipd = np.array([[e_a, 0.0, 0.0, -e_f + 0.01],
[0.0, e_b, 0.0, 0.0],
[0.0, 0.0, e_b, 0.0],
[0.0, 0.0, 0.0, 1.0]])
valid, f_u, f_v = forward_perspective_trace(ellipse_tf_ipd, plane_tf,
view_fov,
forward_res)
round_trip_im = map_image(f_u, f_v, inv_im)
cv2.imwrite("round_trip_test_incorrect_ipd.png", round_trip_im)
print("Generating focus image.")
n_samples = 100
accum_image = np.zeros((f_u.shape[0], f_u.shape[1], 3))
for i in range(n_samples):
valid, f_u, f_v = forward_perspective_trace(ellipse_tf, plane_tf,
view_fov,
forward_res, 0.01)
accum_image += map_image(f_u, f_v, uv_im)
cv2.imwrite("focus_test.png", (accum_image / n_samples).astype(np.uint8))
print("Done")
if __name__ == '__main__':
main()
|
[
"numpy.sqrt",
"cv2.remap",
"math.sqrt",
"math.cos",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"scipy.interpolate.interp2d",
"numpy.flip",
"math.tan",
"argparse.ArgumentParser",
"numpy.linspace",
"numpy.meshgrid",
"numpy.maximum",
"numpy.ones",
"numpy.cos",
"cv2.imread",
"numpy.copy",
"cv2.imwrite",
"numpy.minimum",
"numpy.logical_and",
"numpy.sum",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.random.uniform",
"math.sin"
] |
[((397, 422), 'numpy.zeros', 'np.zeros', (['(4, V.shape[1])'], {}), '((4, V.shape[1]))\n', (405, 422), True, 'import numpy as np\n'), ((823, 856), 'numpy.linalg.norm', 'np.linalg.norm', (['V[0:3, :]'], {'axis': '(0)'}), '(V[0:3, :], axis=0)\n', (837, 856), True, 'import numpy as np\n'), ((912, 922), 'numpy.copy', 'np.copy', (['V'], {}), '(V)\n', (919, 922), True, 'import numpy as np\n'), ((1640, 1658), 'numpy.sum', 'np.sum', (['(V3 * V3)', '(0)'], {}), '(V3 * V3, 0)\n', (1646, 1658), True, 'import numpy as np\n'), ((1906, 1935), 'numpy.maximum', 'np.maximum', (['discriminant', '(0.0)'], {}), '(discriminant, 0.0)\n', (1916, 1935), True, 'import numpy as np\n'), ((2884, 2901), 'numpy.zeros', 'np.zeros', (['P.shape'], {}), '(P.shape)\n', (2892, 2901), True, 'import numpy as np\n'), ((4020, 4036), 'numpy.linalg.inv', 'np.linalg.inv', (['T'], {}), '(T)\n', (4033, 4036), True, 'import numpy as np\n'), ((5226, 5256), 'numpy.logical_and', 'np.logical_and', (['valid', 'valid_p'], {}), '(valid, valid_p)\n', (5240, 5256), True, 'import numpy as np\n'), ((5603, 5641), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', 'n_samples'], {}), '(0.0, 1.0, n_samples)\n', (5620, 5641), True, 'import numpy as np\n'), ((5715, 5739), 'numpy.zeros', 'np.zeros', (['(2, n_samples)'], {}), '((2, n_samples))\n', (5723, 5739), True, 'import numpy as np\n'), ((6497, 6516), 'math.tan', 'math.tan', (['(fov / 2.0)'], {}), '(fov / 2.0)\n', (6505, 6516), False, 'import math\n'), ((6528, 6576), 'numpy.linspace', 'np.linspace', (['(-view_limit)', 'view_limit', 'resolution'], {}), '(-view_limit, view_limit, resolution)\n', (6539, 6576), True, 'import numpy as np\n'), ((6588, 6612), 'numpy.meshgrid', 'np.meshgrid', (['spts', '(-spts)'], {}), '(spts, -spts)\n', (6599, 6612), True, 'import numpy as np\n'), ((6621, 6642), 'numpy.zeros', 'np.zeros', (['(3, X.size)'], {}), '((3, X.size))\n', (6629, 6642), True, 'import numpy as np\n'), ((6728, 6749), 'numpy.zeros', 'np.zeros', (['(3, X.size)'], {}), '((3, X.size))\n', (6736, 6749), True, 'import numpy as np\n'), ((7213, 7282), 'scipy.interpolate.interp2d', 'scipy.interpolate.interp2d', (['x_vals', 'y_vals', 'target_vals'], {'kind': '"""cubic"""'}), "(x_vals, y_vals, target_vals, kind='cubic')\n", (7239, 7282), False, 'import scipy\n'), ((7493, 7525), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'dest_size'], {}), '(0.0, 1.0, dest_size)\n', (7504, 7525), True, 'import numpy as np\n'), ((7539, 7571), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'dest_size'], {}), '(0.0, 1.0, dest_size)\n', (7550, 7571), True, 'import numpy as np\n'), ((8114, 8131), 'numpy.flip', 'np.flip', (['inv_v', '(0)'], {}), '(inv_v, 0)\n', (8121, 8131), True, 'import numpy as np\n'), ((8324, 8372), 'cv2.remap', 'cv2.remap', (['im', 'u_pixel', 'v_pixel', 'cv2.INTER_CUBIC'], {}), '(im, u_pixel, v_pixel, cv2.INTER_CUBIC)\n', (8333, 8372), False, 'import cv2\n'), ((8420, 8514), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute Northstar forward/inverse distortion maps."""'}), "(description=\n 'Compute Northstar forward/inverse distortion maps.')\n", (8443, 8514), False, 'import argparse\n'), ((9247, 9281), 'math.sqrt', 'math.sqrt', (['(e_a ** 2.0 - e_b ** 2.0)'], {}), '(e_a ** 2.0 - e_b ** 2.0)\n', (9256, 9281), False, 'import math\n'), ((9304, 9407), 'numpy.array', 'np.array', (['[[e_a, 0.0, 0.0, -e_f], [0.0, e_b, 0.0, 0.0], [0.0, 0.0, e_b, 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {}), '([[e_a, 0.0, 0.0, -e_f], [0.0, e_b, 0.0, 0.0], [0.0, 0.0, e_b, 0.0],\n 
[0.0, 0.0, 0.0, 1.0]])\n', (9312, 9407), True, 'import numpy as np\n'), ((9484, 9592), 'numpy.array', 'np.array', (['[[psize, 0.0, 0.0, 0.0], [0.0, psize, 0.0, 0.0], [0.0, 0.0, psize, 0.0], [\n 0.0, 0.0, 0.0, 1.0]]'], {}), '([[psize, 0.0, 0.0, 0.0], [0.0, psize, 0.0, 0.0], [0.0, 0.0, psize,\n 0.0], [0.0, 0.0, 0.0, 1.0]])\n', (9492, 9592), True, 'import numpy as np\n'), ((9926, 9954), 'numpy.array', 'np.array', (['[-0.2, 0.0, -0.25]'], {}), '([-0.2, 0.0, -0.25])\n', (9934, 9954), True, 'import numpy as np\n'), ((10555, 10575), 'cv2.imread', 'cv2.imread', (['"""uv.png"""'], {}), "('uv.png')\n", (10565, 10575), False, 'import cv2\n'), ((10624, 10667), 'cv2.imwrite', 'cv2.imwrite', (['"""forward_test.png"""', 'forward_im'], {}), "('forward_test.png', forward_im)\n", (10635, 10667), False, 'import cv2\n'), ((10716, 10751), 'cv2.imwrite', 'cv2.imwrite', (['"""inv_test.png"""', 'inv_im'], {}), "('inv_test.png', inv_im)\n", (10727, 10751), False, 'import cv2\n'), ((10804, 10853), 'cv2.imwrite', 'cv2.imwrite', (['"""round_trip_test.png"""', 'round_trip_im'], {}), "('round_trip_test.png', round_trip_im)\n", (10815, 10853), False, 'import cv2\n'), ((10924, 11034), 'numpy.array', 'np.array', (['[[e_a, 0.0, 0.0, -e_f + 0.01], [0.0, e_b, 0.0, 0.0], [0.0, 0.0, e_b, 0.0],\n [0.0, 0.0, 0.0, 1.0]]'], {}), '([[e_a, 0.0, 0.0, -e_f + 0.01], [0.0, e_b, 0.0, 0.0], [0.0, 0.0,\n e_b, 0.0], [0.0, 0.0, 0.0, 1.0]])\n', (10932, 11034), True, 'import numpy as np\n'), ((11408, 11471), 'cv2.imwrite', 'cv2.imwrite', (['"""round_trip_test_incorrect_ipd.png"""', 'round_trip_im'], {}), "('round_trip_test_incorrect_ipd.png', round_trip_im)\n", (11419, 11471), False, 'import cv2\n'), ((11548, 11589), 'numpy.zeros', 'np.zeros', (['(f_u.shape[0], f_u.shape[1], 3)'], {}), '((f_u.shape[0], f_u.shape[1], 3))\n', (11556, 11589), True, 'import numpy as np\n'), ((1703, 1721), 'numpy.sum', 'np.sum', (['(P3 * V3)', '(0)'], {}), '(P3 * V3, 0)\n', (1709, 1721), True, 'import numpy as np\n'), ((1760, 1778), 'numpy.sum', 'np.sum', (['(P3 * P3)', '(0)'], {}), '(P3 * P3, 0)\n', (1766, 1778), True, 'import numpy as np\n'), ((5664, 5702), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(2.0)', 'n_samples'], {}), '(0.0, 2.0, n_samples)\n', (5681, 5702), True, 'import numpy as np\n'), ((5756, 5771), 'numpy.sqrt', 'np.sqrt', (['length'], {}), '(length)\n', (5763, 5771), True, 'import numpy as np\n'), ((5774, 5787), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5780, 5787), True, 'import numpy as np\n'), ((5804, 5819), 'numpy.sqrt', 'np.sqrt', (['length'], {}), '(length)\n', (5811, 5819), True, 'import numpy as np\n'), ((5822, 5835), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5828, 5835), True, 'import numpy as np\n'), ((7645, 7669), 'numpy.minimum', 'np.minimum', (['(1.0)', 'inv_map'], {}), '(1.0, inv_map)\n', (7655, 7669), True, 'import numpy as np\n'), ((7806, 7833), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'idim'], {}), '(0.0, 1.0, idim)\n', (7817, 7833), True, 'import numpy as np\n'), ((7866, 7893), 'numpy.linspace', 'np.linspace', (['(1.0)', '(0.0)', 'idim'], {}), '(1.0, 0.0, idim)\n', (7877, 7893), True, 'import numpy as np\n'), ((1992, 2018), 'numpy.sqrt', 'np.sqrt', (['safe_discriminant'], {}), '(safe_discriminant)\n', (1999, 2018), True, 'import numpy as np\n'), ((2647, 2666), 'numpy.ones', 'np.ones', (['P.shape[1]'], {}), '(P.shape[1])\n', (2654, 2666), True, 'import numpy as np\n'), ((9691, 9703), 'math.cos', 'math.cos', (['th'], {}), '(th)\n', (9699, 9703), False, 'import math\n'), ((9710, 9722), 
'math.sin', 'math.sin', (['th'], {}), '(th)\n', (9718, 9722), False, 'import math\n'), ((9805, 9817), 'math.cos', 'math.cos', (['th'], {}), '(th)\n', (9813, 9817), False, 'import math\n'), ((9786, 9798), 'math.sin', 'math.sin', (['th'], {}), '(th)\n', (9794, 9798), False, 'import math\n')]
|
# Copyright 2019, FBPIC contributors
# Authors: <NAME>, <NAME>
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines the picmi Simulation interface
"""
import numpy as np
from scipy.constants import c, e, m_e
from .particle_charge_and_mass import particle_charge, particle_mass
# Import relevant fbpic object
from fbpic.main import Simulation as FBPICSimulation
from fbpic.fields.smoothing import BinomialSmoother
from fbpic.lpa_utils.laser import add_laser_pulse, GaussianLaser
from fbpic.lpa_utils.bunch import add_particle_bunch_gaussian
from fbpic.openpmd_diag import FieldDiagnostic, ParticleDiagnostic
# Import picmi base class
from picmistandard import PICMI_Simulation, PICMI_CylindricalGrid
from picmistandard import PICMI_AnalyticDistribution, PICMI_UniformDistribution, PICMI_GriddedLayout
from picmistandard import PICMI_PseudoRandomLayout, PICMI_GaussianBunchDistribution
from picmistandard import PICMI_LaserAntenna, PICMI_GaussianLaser
from picmistandard import PICMI_Species, PICMI_MultiSpecies
from picmistandard import PICMI_FieldDiagnostic, PICMI_ParticleDiagnostic
# Define a new simulation object for picmi, that derives from PICMI_Simulation
class Simulation( PICMI_Simulation ):
# Redefine the `init` method, as required by the picmi `_ClassWithInit`
def init(self, kw):
# Get the grid
grid = self.solver.grid
if not type(grid) == PICMI_CylindricalGrid:
raise ValueError('When using fbpic with PICMI, '
'the grid needs to be a CylindricalGrid object.')
# Check rmin and boundary conditions
assert grid.rmin == 0.
assert grid.bc_zmin == grid.bc_zmax
assert grid.bc_zmax in ['periodic', 'open']
assert grid.bc_rmax in ['reflective', 'open']
# Determine timestep
if self.solver.cfl is not None:
dz = (grid.zmax-grid.zmin)/grid.nz
dt = self.solver.cfl * dz / c
elif self.time_step_size is not None:
dt = self.time_step_size
else:
raise ValueError(
'You need to either set the `cfl` of the solver\n'
                'or the `time_step_size` of the `Simulation`.')
# Convert API for the smoother
if self.solver.source_smoother is None:
smoother = BinomialSmoother()
else:
smoother = BinomialSmoother(
n_passes=self.solver.source_smoother.n_pass,
compensator=self.solver.source_smoother.compensation )
# Order of the stencil for z derivatives in the Maxwell solver
if self.solver.stencil_order is None:
n_order = -1
else:
n_order = self.solver.stencil_order[-1]
# Initialize and store the FBPIC simulation object
self.fbpic_sim = FBPICSimulation(
Nz=int(grid.nz), zmin=grid.zmin, zmax=grid.zmax,
Nr=int(grid.nr), rmax=grid.rmax, Nm=grid.n_azimuthal_modes,
dt=dt, use_cuda=True, smoother=smoother, n_order=n_order,
boundaries={'z':grid.bc_zmax, 'r':grid.bc_rmax} )
# Set the moving window
if grid.moving_window_zvelocity is not None:
self.fbpic_sim.set_moving_window(grid.moving_window_zvelocity)
# Redefine the method `add_laser` from the PICMI Simulation class
def add_laser( self, laser, injection_method ):
# Call method of parent class
PICMI_Simulation.add_laser( self, laser, injection_method )
# Handle injection method
assert type(injection_method) == PICMI_LaserAntenna
# Handle laser profile method
if type(laser) == PICMI_GaussianLaser:
assert laser.propagation_direction[0] == 0.
assert laser.propagation_direction[1] == 0.
assert (laser.zeta is None) or (laser.zeta == 0)
assert (laser.beta is None) or (laser.beta == 0)
phi2_chirp = laser.phi2
if phi2_chirp is None:
phi2_chirp = 0
polarization_angle = np.arctan2(laser.polarization_direction[1],
laser.polarization_direction[0])
laser_profile = GaussianLaser( a0=laser.a0, waist=laser.waist,
z0=laser.centroid_position[-1], zf=laser.focal_position[-1],
tau=laser.duration, theta_pol=polarization_angle,
phi2_chirp=phi2_chirp )
else:
            raise ValueError('Unknown laser profile: %s' %type(laser))
# Inject the laser
add_laser_pulse( self.fbpic_sim, laser_profile, method='antenna',
z0_antenna=injection_method.position[-1] )
# Redefine the method `add_species` from the PICMI Simulation class
def add_species( self, species, layout, initialize_self_field=False ):
# Call method of parent class
PICMI_Simulation.add_species( self, species, layout,
initialize_self_field )
# Extract list of species
if type(species) == PICMI_Species:
species_instances_list = [species]
elif type(species) == PICMI_MultiSpecies:
species_instances_list = species.species_instances_list
else:
raise ValueError('Unknown type: %s' %type(species))
# Loop over species and create FBPIC species
for s in species_instances_list:
# Get their charge and mass
if s.particle_type is not None:
s.charge = particle_charge[s.particle_type]
s.mass = particle_mass[s.particle_type]
# If `charge_state` is set, redefine the charge and mass
if s.charge_state is not None:
s.charge = s.charge_state*e
s.mass -= s.charge_state*m_e
# Add the species to the FBPIC simulation
fbpic_species = self._create_new_fbpic_species(s,
layout, initialize_self_field)
# Register a pointer to the FBPIC species in the PICMI species itself
# (Useful for particle diagnostics later on)
s.fbpic_species = fbpic_species
# Loop over species and handle ionization
for s in species_instances_list:
for interaction in s.interactions:
assert interaction[0] == 'ionization'
assert interaction[1] == 'ADK'
picmi_target = interaction[2]
if not hasattr( picmi_target, 'fbpic_species' ):
raise RuntimeError('For ionization with PICMI+FBPIC:\n'
'You need to add the target species to the simulation,'
' before the other species.')
fbpic_target = picmi_target.fbpic_species
fbpic_source = s.fbpic_species
fbpic_source.make_ionizable( element=s.particle_type,
level_start=s.charge_state,
target_species=fbpic_target )
def _create_new_fbpic_species(self, s, layout, initialize_self_field):
# - For the case of a plasma defined in a gridded layout
if type(layout) == PICMI_GriddedLayout:
assert initialize_self_field == False
# - Uniform distribution
if type(s.initial_distribution)==PICMI_UniformDistribution:
n0 = s.initial_distribution.density
dens_func = None
# - Analytic distribution
elif type(s.initial_distribution)==PICMI_AnalyticDistribution:
import numexpr
density_expression = s.initial_distribution.density_expression
if s.density_scale is not None:
n0 = s.density_scale
else:
n0 = 1.
def dens_func(z, r):
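                    # numexpr.evaluate resolves `z` and `r` from the enclosing local
                    # frame, so the PICMI density expression can reference them directly.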
n = numexpr.evaluate(density_expression)
return n
else:
raise ValueError('Unknown combination of layout and distribution')
p_nr = layout.n_macroparticle_per_cell[0]
p_nt = layout.n_macroparticle_per_cell[1]
p_nz = layout.n_macroparticle_per_cell[2]
fbpic_species = self.fbpic_sim.add_new_species(
q=s.charge, m=s.mass, n=n0,
dens_func=dens_func, p_nz=p_nz, p_nr=p_nr, p_nt=p_nt,
p_zmin=s.initial_distribution.lower_bound[-1],
p_zmax=s.initial_distribution.upper_bound[-1],
continuous_injection=s.initial_distribution.fill_in )
# - For the case of a Gaussian beam
elif (type(s.initial_distribution)==PICMI_GaussianBunchDistribution) \
and (type(layout) == PICMI_PseudoRandomLayout):
dist = s.initial_distribution
gamma0_beta0 = dist.centroid_velocity[-1]/c
gamma0 = ( 1 + gamma0_beta0**2 )**.5
sig_r = dist.rms_bunch_size[0]
sig_z = dist.rms_bunch_size[-1]
sig_gamma = dist.rms_velocity[-1]/c
sig_vr = dist.rms_velocity[0] / gamma0
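            # Infer the time (tf) and z position (zf) of the beam focus from the
            # r/vr correlation, then the focal spot size and normalized emittance
            # expected by FBPIC's Gaussian bunch helper.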
if sig_vr != 0:
tf = - sig_r**2/sig_vr**2 * dist.velocity_divergence[0]
else:
tf = 0.
zf = dist.centroid_position[-1] + \
dist.centroid_velocity[-1]/gamma0 * tf
# Calculate size at focus and emittance
sig_r0 = (sig_r**2 - (sig_vr*tf)**2)**0.5
n_emit = gamma0 * sig_r0 * sig_vr/c
# Get the number of physical particles
n_physical_particles = dist.n_physical_particles
if s.density_scale is not None:
n_physical_particles *= s.density_scale
fbpic_species = add_particle_bunch_gaussian( self.fbpic_sim,
q=s.charge, m=s.mass,
gamma0=gamma0, sig_gamma=sig_gamma,
sig_r=sig_r0, sig_z=sig_z, n_emit=n_emit,
n_physical_particles=n_physical_particles,
n_macroparticles=layout.n_macroparticles,
zf=zf, tf=tf,
initialize_self_field=initialize_self_field )
# - For the case of an empty species
elif (s.initial_distribution is None) and (layout is None):
fbpic_species = self.fbpic_sim.add_new_species(q=s.charge, m=s.mass)
else:
raise ValueError('Unknown combination of layout and distribution')
return fbpic_species
# Redefine the method `add_diagnostic` of the parent class
def add_diagnostic(self, diagnostic):
# Call method of parent class
PICMI_Simulation.add_diagnostic( self, diagnostic )
# Handle diagnostic
if diagnostic.step_min is None:
iteration_min = 0
else:
iteration_min = diagnostic.step_min
if diagnostic.step_max is None:
iteration_max = np.inf
else:
iteration_max = diagnostic.step_max
# Register field diagnostic
if type(diagnostic) == PICMI_FieldDiagnostic:
diag = FieldDiagnostic(
period=diagnostic.period,
fldobject=self.fbpic_sim.fld,
comm=self.fbpic_sim.comm,
fieldtypes=diagnostic.data_list,
write_dir=diagnostic.write_dir,
iteration_min=iteration_min,
iteration_max=iteration_max)
# Register particle diagnostic
elif type(diagnostic) == PICMI_ParticleDiagnostic:
species_dict = {}
for s in diagnostic.species:
if s.name is None:
raise ValueError('When using a species in a diagnostic, '
'its name must be set.')
species_dict[s.name] = s.fbpic_species
diag = ParticleDiagnostic(
period=diagnostic.period,
species=species_dict,
comm=self.fbpic_sim.comm,
particle_data=diagnostic.data_list,
write_dir=diagnostic.write_dir,
iteration_min=iteration_min,
iteration_max=iteration_max)
# Add it to the FBPIC simulation
self.fbpic_sim.diags.append( diag )
# Redefine the method `step` of the parent class
def step(self, nsteps=None):
if nsteps is None:
nsteps = self.max_steps
self.fbpic_sim.step( nsteps )
|
[
"picmistandard.PICMI_Simulation.add_laser",
"picmistandard.PICMI_Simulation.add_diagnostic",
"fbpic.openpmd_diag.FieldDiagnostic",
"fbpic.lpa_utils.bunch.add_particle_bunch_gaussian",
"fbpic.lpa_utils.laser.GaussianLaser",
"fbpic.lpa_utils.laser.add_laser_pulse",
"fbpic.openpmd_diag.ParticleDiagnostic",
"numpy.arctan2",
"numexpr.evaluate",
"fbpic.fields.smoothing.BinomialSmoother",
"picmistandard.PICMI_Simulation.add_species"
] |
[((3469, 3526), 'picmistandard.PICMI_Simulation.add_laser', 'PICMI_Simulation.add_laser', (['self', 'laser', 'injection_method'], {}), '(self, laser, injection_method)\n', (3495, 3526), False, 'from picmistandard import PICMI_Simulation, PICMI_CylindricalGrid\n'), ((4589, 4700), 'fbpic.lpa_utils.laser.add_laser_pulse', 'add_laser_pulse', (['self.fbpic_sim', 'laser_profile'], {'method': '"""antenna"""', 'z0_antenna': 'injection_method.position[-1]'}), "(self.fbpic_sim, laser_profile, method='antenna', z0_antenna\n =injection_method.position[-1])\n", (4604, 4700), False, 'from fbpic.lpa_utils.laser import add_laser_pulse, GaussianLaser\n'), ((4905, 4979), 'picmistandard.PICMI_Simulation.add_species', 'PICMI_Simulation.add_species', (['self', 'species', 'layout', 'initialize_self_field'], {}), '(self, species, layout, initialize_self_field)\n', (4933, 4979), False, 'from picmistandard import PICMI_Simulation, PICMI_CylindricalGrid\n'), ((10795, 10844), 'picmistandard.PICMI_Simulation.add_diagnostic', 'PICMI_Simulation.add_diagnostic', (['self', 'diagnostic'], {}), '(self, diagnostic)\n', (10826, 10844), False, 'from picmistandard import PICMI_Simulation, PICMI_CylindricalGrid\n'), ((2356, 2374), 'fbpic.fields.smoothing.BinomialSmoother', 'BinomialSmoother', ([], {}), '()\n', (2372, 2374), False, 'from fbpic.fields.smoothing import BinomialSmoother\n'), ((2412, 2532), 'fbpic.fields.smoothing.BinomialSmoother', 'BinomialSmoother', ([], {'n_passes': 'self.solver.source_smoother.n_pass', 'compensator': 'self.solver.source_smoother.compensation'}), '(n_passes=self.solver.source_smoother.n_pass, compensator=\n self.solver.source_smoother.compensation)\n', (2428, 2532), False, 'from fbpic.fields.smoothing import BinomialSmoother\n'), ((4078, 4154), 'numpy.arctan2', 'np.arctan2', (['laser.polarization_direction[1]', 'laser.polarization_direction[0]'], {}), '(laser.polarization_direction[1], laser.polarization_direction[0])\n', (4088, 4154), True, 'import numpy as np\n'), ((4227, 4416), 'fbpic.lpa_utils.laser.GaussianLaser', 'GaussianLaser', ([], {'a0': 'laser.a0', 'waist': 'laser.waist', 'z0': 'laser.centroid_position[-1]', 'zf': 'laser.focal_position[-1]', 'tau': 'laser.duration', 'theta_pol': 'polarization_angle', 'phi2_chirp': 'phi2_chirp'}), '(a0=laser.a0, waist=laser.waist, z0=laser.centroid_position[-1\n ], zf=laser.focal_position[-1], tau=laser.duration, theta_pol=\n polarization_angle, phi2_chirp=phi2_chirp)\n', (4240, 4416), False, 'from fbpic.lpa_utils.laser import add_laser_pulse, GaussianLaser\n'), ((11254, 11488), 'fbpic.openpmd_diag.FieldDiagnostic', 'FieldDiagnostic', ([], {'period': 'diagnostic.period', 'fldobject': 'self.fbpic_sim.fld', 'comm': 'self.fbpic_sim.comm', 'fieldtypes': 'diagnostic.data_list', 'write_dir': 'diagnostic.write_dir', 'iteration_min': 'iteration_min', 'iteration_max': 'iteration_max'}), '(period=diagnostic.period, fldobject=self.fbpic_sim.fld,\n comm=self.fbpic_sim.comm, fieldtypes=diagnostic.data_list, write_dir=\n diagnostic.write_dir, iteration_min=iteration_min, iteration_max=\n iteration_max)\n', (11269, 11488), False, 'from fbpic.openpmd_diag import FieldDiagnostic, ParticleDiagnostic\n'), ((9809, 10115), 'fbpic.lpa_utils.bunch.add_particle_bunch_gaussian', 'add_particle_bunch_gaussian', (['self.fbpic_sim'], {'q': 's.charge', 'm': 's.mass', 'gamma0': 'gamma0', 'sig_gamma': 'sig_gamma', 'sig_r': 'sig_r0', 'sig_z': 'sig_z', 'n_emit': 'n_emit', 'n_physical_particles': 'n_physical_particles', 'n_macroparticles': 'layout.n_macroparticles', 'zf': 'zf', 'tf': 
'tf', 'initialize_self_field': 'initialize_self_field'}), '(self.fbpic_sim, q=s.charge, m=s.mass, gamma0=\n gamma0, sig_gamma=sig_gamma, sig_r=sig_r0, sig_z=sig_z, n_emit=n_emit,\n n_physical_particles=n_physical_particles, n_macroparticles=layout.\n n_macroparticles, zf=zf, tf=tf, initialize_self_field=initialize_self_field\n )\n', (9836, 10115), False, 'from fbpic.lpa_utils.bunch import add_particle_bunch_gaussian\n'), ((12035, 12268), 'fbpic.openpmd_diag.ParticleDiagnostic', 'ParticleDiagnostic', ([], {'period': 'diagnostic.period', 'species': 'species_dict', 'comm': 'self.fbpic_sim.comm', 'particle_data': 'diagnostic.data_list', 'write_dir': 'diagnostic.write_dir', 'iteration_min': 'iteration_min', 'iteration_max': 'iteration_max'}), '(period=diagnostic.period, species=species_dict, comm=\n self.fbpic_sim.comm, particle_data=diagnostic.data_list, write_dir=\n diagnostic.write_dir, iteration_min=iteration_min, iteration_max=\n iteration_max)\n', (12053, 12268), False, 'from fbpic.openpmd_diag import FieldDiagnostic, ParticleDiagnostic\n'), ((7952, 7988), 'numexpr.evaluate', 'numexpr.evaluate', (['density_expression'], {}), '(density_expression)\n', (7968, 7988), False, 'import numexpr\n')]
|
import math
import numpy as np
import os
import pickle
from pymatgen.core.surface import SlabGenerator, get_symmetrically_distinct_miller_indices
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from .constants import MAX_MILLER, COVALENT_MATERIALS_MPIDS
class Bulk():
'''
This class handles all things with the bulk.
It also provides possible surfaces, later used to create a Surface object.
Attributes
----------
precomputed_structures : str
root dir of precomputed structures
bulk_atoms : Atoms
actual atoms of the bulk
mpid : str
mpid of the bulk
bulk_sampling_str : str
string capturing the bulk index and number of possible bulks
index_of_bulk_atoms : int
index of bulk in the db
n_elems : int
number of elements of the bulk
elem_sampling_str : str
string capturing n_elems and the max possible elements
Public methods
--------------
get_possible_surfaces()
returns a list of possible surfaces for this bulk instance
'''
def __init__(self, bulk_database, precomputed_structures=None, bulk_index=None, max_elems=3):
'''
Initializes the object by choosing or sampling from the bulk database
Args:
bulk_database: either a list of dict of bulks
precomputed_structures: Root directory of precomputed structures for
surface enumeration
bulk_index: index of bulk to select if not doing a random sample
max_elems: max number of elements for any bulk
'''
self.precomputed_structures = precomputed_structures
self.choose_bulk_pkl(bulk_database, bulk_index, max_elems)
def choose_bulk_pkl(self, bulk_db, bulk_index, max_elems):
'''
Chooses a bulk from our pkl file at random as long as the bulk contains
the specified number of elements in any composition.
Args:
bulk_db Unpickled dict or list of bulks
bulk_index Index of which bulk to select. If None, randomly sample one.
max_elems Max elems for any bulk structure. Currently it is 3 by default.
Sets as class attributes:
bulk_atoms `ase.Atoms` of the chosen bulk structure.
mpid A string indicating which MPID the bulk is
bulk_sampling_str A string to enumerate the sampled structure
index_of_bulk_atoms Index of the chosen bulk in the array (should match
bulk_index if provided)
'''
try:
if bulk_index is not None:
assert len(bulk_db) > max_elems, f'Bulk db only has {len(bulk_db)} entries. Did you pass in the correct bulk database?'
assert isinstance(bulk_db[bulk_index], tuple)
self.bulk_atoms, self.mpid, self.bulk_sampling_str, self.index_of_bulk_atoms = bulk_db[bulk_index]
self.n_elems = len(set(self.bulk_atoms.symbols)) # 1, 2, or 3
self.elem_sampling_str = f'{self.n_elems}/{max_elems}'
else:
self.sample_n_elems()
assert isinstance(bulk_db, dict), 'Did you pass in the correct bulk database?'
assert self.n_elems in bulk_db.keys(), f'Bulk db does not have bulks of {self.n_elems} elements'
assert isinstance(bulk_db[self.n_elems], list), 'Did you pass in the correct bulk database?'
total_elements_for_key = len(bulk_db[self.n_elems])
row_bulk_index = np.random.choice(total_elements_for_key)
self.bulk_atoms, self.mpid, self.bulk_sampling_str, self.index_of_bulk_atoms = bulk_db[self.n_elems][row_bulk_index]
except IndexError:
raise ValueError('Randomly chose to look for a %i-component material, '
'but no such materials exist. Please add one '
'to the database or change the weights to exclude '
'this number of components.'
% self.n_elems)
def sample_n_elems(self, n_cat_elems_weights={1: 0.05, 2: 0.65, 3: 0.3}):
'''
Chooses the number of species we should look for in this sample.
Arg:
n_cat_elems_weights A dictionary whose keys are integers containing the
number of species you want to consider and whose
values are the probabilities of selecting this
number. The probabilities must sum to 1.
Sets:
n_elems An integer showing how many species have been chosen.
elem_sampling_str Enum string of [chosen n_elems]/[total number of choices]
'''
possible_n_elems = list(n_cat_elems_weights.keys())
weights = list(n_cat_elems_weights.values())
assert math.isclose(sum(weights), 1)
self.n_elems = np.random.choice(possible_n_elems, p=weights)
self.elem_sampling_str = str(self.n_elems) + "/" + str(len(possible_n_elems))
def get_possible_surfaces(self):
'''
Returns a list of possible surfaces for this bulk instance.
This can be later used to iterate through all surfaces,
or select one at random, to make a Surface object.
'''
if self.precomputed_structures:
surfaces_info = self.read_from_precomputed_enumerations(self.index_of_bulk_atoms)
else:
surfaces_info = self.enumerate_surfaces()
return surfaces_info
def read_from_precomputed_enumerations(self, index):
'''
Loads relevant pickle of precomputed surfaces.
Args:
index: bulk index
Returns:
surfaces_info: a list of surface_info tuples (atoms, miller, shift, top)
'''
with open(os.path.join(self.precomputed_structures, str(index) + ".pkl"), "rb") as f:
surfaces_info = pickle.load(f)
return surfaces_info
def enumerate_surfaces(self, max_miller=MAX_MILLER):
'''
Enumerate all the symmetrically distinct surfaces of a bulk structure. It
will not enumerate surfaces with Miller indices above the `max_miller`
argument. Note that we also look at the bottoms of surfaces if they are
distinct from the top. If they are distinct, we flip the surface so the bottom
is pointing upwards.
Args:
bulk_atoms `ase.Atoms` object of the bulk you want to enumerate
surfaces from.
max_miller An integer indicating the maximum Miller index of the surfaces
you are willing to enumerate. Increasing this argument will
increase the number of surfaces, but the surfaces will
generally become larger.
Returns:
all_slabs_info A list of 4-tuples containing: `pymatgen.Structure`
objects for surfaces we have enumerated, the Miller
indices, floats for the shifts, and Booleans for "top".
'''
bulk_struct = self.standardize_bulk(self.bulk_atoms)
all_slabs_info = []
for millers in get_symmetrically_distinct_miller_indices(bulk_struct, MAX_MILLER):
slab_gen = SlabGenerator(initial_structure=bulk_struct,
miller_index=millers,
min_slab_size=7.,
min_vacuum_size=20.,
lll_reduce=False,
center_slab=True,
primitive=True,
max_normal_search=1)
slabs = slab_gen.get_slabs(tol=0.3,
bonds=None,
max_broken_bonds=0,
symmetrize=False)
# Additional filtering for the 2D materials' slabs
if self.mpid in COVALENT_MATERIALS_MPIDS:
                slabs = [slab for slab in slabs if self.is_2D_slab_reasonable(slab)]
# If the bottoms of the slabs are different than the tops, then we want
# to consider them, too
if len(slabs) != 0:
flipped_slabs_info = [(self.flip_struct(slab), millers, slab.shift, False)
for slab in slabs if self.is_structure_invertible(slab) is False]
# Concatenate all the results together
slabs_info = [(slab, millers, slab.shift, True) for slab in slabs]
all_slabs_info.extend(slabs_info + flipped_slabs_info)
return all_slabs_info
    def is_2D_slab_reasonable(self, struct):
'''
        There are 400+ 2D bulk materials whose slabs generated by pymatgen require
additional filtering: some slabs are cleaved where one or more surface atoms
have no bonds with other atoms on the slab.
Arg:
struct `pymatgen.Structure` object of a slab
Returns:
A boolean indicating whether or not the slab is
reasonable.
'''
for site in struct:
if len(struct.get_neighbors(site, 3)) == 0:
return False
return True
def standardize_bulk(self, atoms):
'''
There are many ways to define a bulk unit cell. If you change the unit cell
itself but also change the locations of the atoms within the unit cell, you
can get effectively the same bulk structure. To address this, there is a
standardization method used to reduce the degrees of freedom such that each
unit cell only has one "true" configuration. This function will align a
unit cell you give it to fit within this standardization.
Args:
atoms: `ase.Atoms` object of the bulk you want to standardize
Returns:
standardized_struct: `pymatgen.Structure` of the standardized bulk
'''
struct = AseAtomsAdaptor.get_structure(atoms)
sga = SpacegroupAnalyzer(struct, symprec=0.1)
standardized_struct = sga.get_conventional_standard_structure()
return standardized_struct
def flip_struct(self, struct):
'''
Flips an atoms object upside down. Normally used to flip surfaces.
Arg:
struct `pymatgen.Structure` object
Returns:
            flipped_struct: A new `pymatgen.Structure` object, equivalent to the
                 input structure but flipped upside down.
'''
atoms = AseAtomsAdaptor.get_atoms(struct)
        # Flip the slab by rotating 180 degrees about the x-axis (cell included),
        # then adjust the cell vectors so the surface normal points along +z.
atoms.wrap()
atoms.rotate(180, 'x', rotate_cell=True, center='COM')
if atoms.cell[2][2] < 0.:
atoms.cell[2] = -atoms.cell[2]
if np.cross(atoms.cell[0], atoms.cell[1])[2] < 0.0:
atoms.cell[1] = -atoms.cell[1]
atoms.center()
atoms.wrap()
flipped_struct = AseAtomsAdaptor.get_structure(atoms)
return flipped_struct
def is_structure_invertible(self, structure):
'''
        This function figures out whether or not a `pymatgen.Structure` object is
        symmetric in the z-direction. For each symmetry operation, the affine
        matrix is multiplied with the XYZ positions of the crystal. If the z,z
        component of that matrix is negative, a symmetry operation exists (it
        could be a mirror, or one involving multiple rotations) that maps the
        top of the structure onto the bottom and vice-versa, i.e. the structure
        is symmetric: structure_XYZ = structure_XYZ*M.
In short: If this function returns `False`, then the input structure can
be flipped in the z-direction to create a new structure.
Arg:
structure: A `pymatgen.Structure` object.
        Returns:
            A boolean indicating whether or not the `pymatgen.Structure` object is
symmetric in z-direction (i.e. symmetric with respect to x-y plane).
'''
# If any of the operations involve a transformation in the z-direction,
# then the structure is invertible.
sga = SpacegroupAnalyzer(structure, symprec=0.1)
for operation in sga.get_symmetry_operations():
xform_matrix = operation.affine_matrix
z_xform = xform_matrix[2, 2]
if z_xform == -1:
return True
return False
|
[
"pymatgen.io.ase.AseAtomsAdaptor.get_atoms",
"numpy.cross",
"numpy.random.choice",
"pickle.load",
"pymatgen.core.surface.SlabGenerator",
"pymatgen.core.surface.get_symmetrically_distinct_miller_indices",
"pymatgen.symmetry.analyzer.SpacegroupAnalyzer",
"pymatgen.io.ase.AseAtomsAdaptor.get_structure"
] |
[((5132, 5177), 'numpy.random.choice', 'np.random.choice', (['possible_n_elems'], {'p': 'weights'}), '(possible_n_elems, p=weights)\n', (5148, 5177), True, 'import numpy as np\n'), ((7442, 7508), 'pymatgen.core.surface.get_symmetrically_distinct_miller_indices', 'get_symmetrically_distinct_miller_indices', (['bulk_struct', 'MAX_MILLER'], {}), '(bulk_struct, MAX_MILLER)\n', (7483, 7508), False, 'from pymatgen.core.surface import SlabGenerator, get_symmetrically_distinct_miller_indices\n'), ((10323, 10359), 'pymatgen.io.ase.AseAtomsAdaptor.get_structure', 'AseAtomsAdaptor.get_structure', (['atoms'], {}), '(atoms)\n', (10352, 10359), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((10374, 10413), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['struct'], {'symprec': '(0.1)'}), '(struct, symprec=0.1)\n', (10392, 10413), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((10890, 10923), 'pymatgen.io.ase.AseAtomsAdaptor.get_atoms', 'AseAtomsAdaptor.get_atoms', (['struct'], {}), '(struct)\n', (10915, 10923), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((11332, 11368), 'pymatgen.io.ase.AseAtomsAdaptor.get_structure', 'AseAtomsAdaptor.get_structure', (['atoms'], {}), '(atoms)\n', (11361, 11368), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((12558, 12600), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['structure'], {'symprec': '(0.1)'}), '(structure, symprec=0.1)\n', (12576, 12600), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((6154, 6168), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6165, 6168), False, 'import pickle\n'), ((7533, 7722), 'pymatgen.core.surface.SlabGenerator', 'SlabGenerator', ([], {'initial_structure': 'bulk_struct', 'miller_index': 'millers', 'min_slab_size': '(7.0)', 'min_vacuum_size': '(20.0)', 'lll_reduce': '(False)', 'center_slab': '(True)', 'primitive': '(True)', 'max_normal_search': '(1)'}), '(initial_structure=bulk_struct, miller_index=millers,\n min_slab_size=7.0, min_vacuum_size=20.0, lll_reduce=False, center_slab=\n True, primitive=True, max_normal_search=1)\n', (7546, 7722), False, 'from pymatgen.core.surface import SlabGenerator, get_symmetrically_distinct_miller_indices\n'), ((3706, 3746), 'numpy.random.choice', 'np.random.choice', (['total_elements_for_key'], {}), '(total_elements_for_key)\n', (3722, 3746), True, 'import numpy as np\n'), ((11170, 11208), 'numpy.cross', 'np.cross', (['atoms.cell[0]', 'atoms.cell[1]'], {}), '(atoms.cell[0], atoms.cell[1])\n', (11178, 11208), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
# Copyright 2018 The Yawn Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and exports a saved-model based on some test data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from data.stochastic_quantized_sine_wave import get_numpy_data
from model.wavenet_model import WaveNetModel
# TODO: Figure out some util file for this function.
def data_format_to_shape(
batch_length=None,
sequence_length=None,
channel_length=None,
data_format='channels_first'
):
"""."""
shape = [batch_length, None, None]
channel_axis = 1 if data_format == 'channels_first' else 2
sequence_axis = 2 if data_format == 'channels_first' else 1
shape[sequence_axis] = sequence_length
shape[channel_axis] = channel_length
return tuple(shape)
def main(FLAGS):
"""."""
input_channels = 1
label_channels = 1
quantization = 64
scale = 4
num_mixtures = 2
filters = 8
initial_kernel = 8
kernel_size = 2
dilation_powers = [0, 1, 2, 3, 4, 5, 6]
dilations = [kernel_size**power for power in dilation_powers]
# data, data_labels, bins = get_numpy_data('../data.npy', quantization)
data, data_labels, bins = get_numpy_data(2000, quantization, scale=scale)
dataset_size = len(data)
mock_epochs = 2000
assert np.all(np.diff(bins) > 0.0)
model = WaveNetModel(
filters=filters,
initial_kernel=initial_kernel,
kernel_size=kernel_size,
dilations=dilations,
quantization=quantization,
num_mixtures=num_mixtures,
bins=bins,
data_format='channels_last',
version='mixture'
)
batch_size = 2
sequence_length = model.receptive_field + min(512, model.receptive_field)
sampled_data = []
sampled_labels = []
for i in range(mock_epochs*dataset_size//sequence_length):
index = np.random.randint(dataset_size-sequence_length)
sampled_data.append(data[index:index+sequence_length])
sampled_labels.append(data_labels[index:index+sequence_length])
data = np.array(sampled_data)
data_labels = np.array(sampled_labels)
data = data.reshape(
data_format_to_shape(-1, sequence_length, input_channels, data_format=model.data_format)
)
data_labels = data_labels.reshape(-1, sequence_length, label_channels)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
classifier = tf.estimator.Estimator(
model_dir=FLAGS.model_dir,
model_fn=model.model_fn,
params=dict(
learning_rate=1e-4,
add_summaries=True
),
config=tf.estimator.RunConfig(session_config=config)
)
classifier.train(
input_fn=tf.estimator.inputs.numpy_input_fn(
data, data_labels, batch_size=batch_size, shuffle=True,
num_epochs=2
)
)
def serving_input_receiver_fn():
features = tf.placeholder(
dtype=tf.float32,
shape=data_format_to_shape(
None, 1+model.receptive_field, input_channels, data_format=model.data_format
),
name='inputs'
)
return tf.estimator.export.TensorServingInputReceiver(
features=features,
receiver_tensors=features
)
classifier.export_savedmodel(
export_dir_base='/tmp/wavenet',
serving_input_receiver_fn=serving_input_receiver_fn
)
return 0
if __name__ == '__main__':
import argparse
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
        '--model_dir', type=str, metavar='DIR', default=None,
help='Estimator model directory.'
)
args = parser.parse_args()
exit(main(args))
|
[
"tensorflow.estimator.RunConfig",
"argparse.ArgumentParser",
"data.stochastic_quantized_sine_wave.get_numpy_data",
"model.wavenet_model.WaveNetModel",
"tensorflow.logging.set_verbosity",
"numpy.diff",
"tensorflow.estimator.inputs.numpy_input_fn",
"numpy.array",
"numpy.random.randint",
"tensorflow.ConfigProto",
"tensorflow.estimator.export.TensorServingInputReceiver"
] |
[((1939, 1986), 'data.stochastic_quantized_sine_wave.get_numpy_data', 'get_numpy_data', (['(2000)', 'quantization'], {'scale': 'scale'}), '(2000, quantization, scale=scale)\n', (1953, 1986), False, 'from data.stochastic_quantized_sine_wave import get_numpy_data\n'), ((2092, 2324), 'model.wavenet_model.WaveNetModel', 'WaveNetModel', ([], {'filters': 'filters', 'initial_kernel': 'initial_kernel', 'kernel_size': 'kernel_size', 'dilations': 'dilations', 'quantization': 'quantization', 'num_mixtures': 'num_mixtures', 'bins': 'bins', 'data_format': '"""channels_last"""', 'version': '"""mixture"""'}), "(filters=filters, initial_kernel=initial_kernel, kernel_size=\n kernel_size, dilations=dilations, quantization=quantization,\n num_mixtures=num_mixtures, bins=bins, data_format='channels_last',\n version='mixture')\n", (2104, 2324), False, 'from model.wavenet_model import WaveNetModel\n'), ((2809, 2831), 'numpy.array', 'np.array', (['sampled_data'], {}), '(sampled_data)\n', (2817, 2831), True, 'import numpy as np\n'), ((2850, 2874), 'numpy.array', 'np.array', (['sampled_labels'], {}), '(sampled_labels)\n', (2858, 2874), True, 'import numpy as np\n'), ((3093, 3109), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (3107, 3109), True, 'import tensorflow as tf\n'), ((4246, 4287), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (4270, 4287), True, 'import tensorflow as tf\n'), ((4302, 4327), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4325, 4327), False, 'import argparse\n'), ((2614, 2663), 'numpy.random.randint', 'np.random.randint', (['(dataset_size - sequence_length)'], {}), '(dataset_size - sequence_length)\n', (2631, 2663), True, 'import numpy as np\n'), ((3912, 4008), 'tensorflow.estimator.export.TensorServingInputReceiver', 'tf.estimator.export.TensorServingInputReceiver', ([], {'features': 'features', 'receiver_tensors': 'features'}), '(features=features,\n receiver_tensors=features)\n', (3958, 4008), True, 'import tensorflow as tf\n'), ((2058, 2071), 'numpy.diff', 'np.diff', (['bins'], {}), '(bins)\n', (2065, 2071), True, 'import numpy as np\n'), ((3373, 3418), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'session_config': 'config'}), '(session_config=config)\n', (3395, 3418), True, 'import tensorflow as tf\n'), ((3465, 3573), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', (['data', 'data_labels'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_epochs': '(2)'}), '(data, data_labels, batch_size=batch_size,\n shuffle=True, num_epochs=2)\n', (3499, 3573), True, 'import tensorflow as tf\n')]
|
"""Evaluate outcome (+CATE) of datasets"""
from scipy.stats import spearmanr
import numpy as np
def safe_spearmanr(arr_a, arr_b):
"Compute the spearman-R correlation, but 0 if all equal"
if np.all(arr_a[0] == arr_a) or np.all(arr_b[0] == arr_b):
return 0
return spearmanr(arr_a, arr_b).correlation
def evaluate_outcome(model, dataset, k=5, n=1):
"""Evaluate the outcome of a model with a dataset
Arguments
---------
model: BaseModel
model to be trained and evaluated on the dataset.
dataset: BaseDataset
Dataset on which the model is evaluated.
k: int
Number of folds
n: int
Number of iterations to evaluate over
"""
results = []
for _ in range(n):
for train_data, test_data in dataset.kfolds(k=k):
model.train(model.preprocess(train_data.standard_df))
model_outcome = model.predict_outcome(test_data.standard_df)
if (np.all(model_outcome == model_outcome[0])
or np.all(test_data.outcome == test_data.outcome[0])):
corr = 0
else:
corr = spearmanr(model_outcome, test_data.outcome).correlation
results.append(corr)
return results
def evaluate_performance(model, dataset, k=5, n=1):
"""Evaluate the outcome + CATE of a model with a dataset
Arguments
---------
model: BaseModel
model to be trained and evaluated on the dataset.
dataset: BaseDataset
Dataset on which the model is evaluated.
k: int
Number of folds
n: int
Number of iterations to evaluate over
"""
cate_corr = []
outcome_corr = []
for _ in range(n):
for train_data, test_data in dataset.kfolds(k=k):
model.train(model.preprocess(train_data.standard_df))
test_df = test_data.standard_df
cate = model.predict_cate(test_df)
outcome = model.predict_outcome(test_df)
cate_corr.append(safe_spearmanr(cate, test_data.cate))
outcome_corr.append(safe_spearmanr(outcome, test_data.outcome))
return cate_corr, outcome_corr
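# Minimal usage sketch (illustrative only; `model` and `dataset` are assumed to
# follow the BaseModel/BaseDataset interfaces referenced in the docstrings above):
#
#     cate_corr, outcome_corr = evaluate_performance(model, dataset, k=5, n=3)
#     print(np.mean(cate_corr), np.mean(outcome_corr))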
|
[
"scipy.stats.spearmanr",
"numpy.all"
] |
[((201, 226), 'numpy.all', 'np.all', (['(arr_a[0] == arr_a)'], {}), '(arr_a[0] == arr_a)\n', (207, 226), True, 'import numpy as np\n'), ((230, 255), 'numpy.all', 'np.all', (['(arr_b[0] == arr_b)'], {}), '(arr_b[0] == arr_b)\n', (236, 255), True, 'import numpy as np\n'), ((285, 308), 'scipy.stats.spearmanr', 'spearmanr', (['arr_a', 'arr_b'], {}), '(arr_a, arr_b)\n', (294, 308), False, 'from scipy.stats import spearmanr\n'), ((960, 1001), 'numpy.all', 'np.all', (['(model_outcome == model_outcome[0])'], {}), '(model_outcome == model_outcome[0])\n', (966, 1001), True, 'import numpy as np\n'), ((1025, 1074), 'numpy.all', 'np.all', (['(test_data.outcome == test_data.outcome[0])'], {}), '(test_data.outcome == test_data.outcome[0])\n', (1031, 1074), True, 'import numpy as np\n'), ((1143, 1186), 'scipy.stats.spearmanr', 'spearmanr', (['model_outcome', 'test_data.outcome'], {}), '(model_outcome, test_data.outcome)\n', (1152, 1186), False, 'from scipy.stats import spearmanr\n')]
|
import random
import torch
import numpy as np
import time
import os
from model.net import Net
from model.loss import Loss
from torch.autograd import Variable
import itertools
import pandas as pd
from main.dataset import LunaDataSet
from torch.utils.data import DataLoader
from configs import VAL_PCT, TOTAL_EPOCHS, DEFAULT_LR, OUTPUT_PATH
from glob import glob
def get_lr(epoch):
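    # Step decay: full DEFAULT_LR for the first half of training,
    # 0.1x until 80% of TOTAL_EPOCHS, then 0.01x for the remainder.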
if epoch <= TOTAL_EPOCHS * 0.5:
lr = DEFAULT_LR
elif epoch <= TOTAL_EPOCHS * 0.8:
lr = 0.1 * DEFAULT_LR
else:
lr = 0.01 * DEFAULT_LR
return lr
def train(data_loader, net, loss, epoch, optimizer, get_lr, save_dir='./models/'):
print("****************training:*******************")
start_time = time.time()
net.train()
lr = get_lr(epoch)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
metrics = []
for i, (data, target, coord) in enumerate(data_loader):
if torch.cuda.is_available():
data = Variable(data.cuda())
target = Variable(target.cuda())
coord = Variable(coord.cuda())
data = data.float()
target = target.float()
coord = coord.float()
output = net(data, coord)
loss_output = loss(output, target)
optimizer.zero_grad()
loss_output[0].backward()
optimizer.step()
loss_output[0] = loss_output[0].item()
metrics.append(loss_output)
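        # NOTE: this break means only the first batch of each epoch is processed.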
break
metrics = np.asarray(metrics, np.float32)
if epoch % 10 == 0:
net_state_dict = net.state_dict()
for key in net_state_dict.keys():
net_state_dict[key] = net_state_dict[key].cpu()
torch.save({
'epoch': epoch,
'save_dir': save_dir,
'model_state_dict': net_state_dict,
'optimizer_state_dict': optimizer.state_dict(),
'loss': np.mean(metrics[:, 0])}, os.path.join(save_dir, f'''{epoch}.ckpt'''))
end_time = time.time()
print(f'''Epoch {epoch} (lr {lr})''')
print(f'''Train: tpr {100.0 * np.sum(metrics[:, 6]) / np.sum(metrics[:, 7])},
tnr {100.0 * np.sum(metrics[:, 8]) / np.sum(metrics[:, 9])},
total pos {np.sum(metrics[:, 7])}, total neg {np.sum(metrics[:, 9])},
time {end_time - start_time}''')
print(f'''loss {np.mean(metrics[:, 0])}, classify loss {np.mean(metrics[:, 1])},
regress loss {np.mean(metrics[:, 2])}, {np.mean(metrics[:, 3])},
{np.mean(metrics[:, 4])}, {np.mean(metrics[:, 5])}''')
def validate(data_loader, net, loss):
print("****************validation:*******************")
start_time = time.time()
net.eval()
metrics = []
for i, (data, target, coord) in enumerate(data_loader):
if torch.cuda.is_available():
data = Variable(data.cuda())
target = Variable(target.cuda())
coord = Variable(coord.cuda())
data = data.float()
target = target.float()
coord = coord.float()
output = net(data, coord)
loss_output = loss(output, target, train=False)
loss_output[0] = loss_output[0].item()
metrics.append(loss_output)
        break  # note: breaks after the first batch, so validation evaluates a single batch
end_time = time.time()
metrics = np.asarray(metrics, np.float32)
print(f'''time {end_time - start_time}''')
print(f'''loss {np.mean(metrics[:, 0])}, classify loss {np.mean(metrics[:, 1])},
regress loss {np.mean(metrics[:, 2])}, {np.mean(metrics[:, 3])},
{np.mean(metrics[:, 4])}, {np.mean(metrics[:, 5])}''')
def run(load_last_checkpoint=False):
save_dir = f'{OUTPUT_PATH}/models/'
os.makedirs(save_dir, exist_ok=True)
neural_net = Net()
loss_fn = Loss()
optim = torch.optim.SGD(neural_net.parameters(), DEFAULT_LR, momentum=0.9, weight_decay=1e-4)
starting_epoch = 0
initial_loss = None
if load_last_checkpoint:
model_paths = glob(f'''{save_dir}*.ckpt''')
model_names = [int(i.split('/')[-1][:-5]) for i in model_paths]
latest_model_path = f'''{save_dir}{max(model_names)}.ckpt'''
print('loading latest model from:', latest_model_path)
checkpoint = torch.load(latest_model_path)
neural_net.load_state_dict(checkpoint['model_state_dict'])
optim.load_state_dict(checkpoint['optimizer_state_dict'])
starting_epoch = checkpoint['epoch']
initial_loss = checkpoint['loss']
if torch.cuda.is_available():
neural_net = neural_net.cuda()
loss_fn = loss_fn.cuda()
print(f'''Training from epoch: {starting_epoch} towards: {TOTAL_EPOCHS},
with learning rate starting from: {get_lr(starting_epoch)}, and loss: {initial_loss}''')
meta = pd.read_csv(f'{OUTPUT_PATH}/augmented_meta.csv', index_col=0).sample(frac=1).reset_index(drop=True)
meta_group_by_series = meta.groupby(['seriesuid']).indices
list_of_groups = [{i: list(meta_group_by_series[i])} for i in meta_group_by_series.keys()]
random.Random(0).shuffle(list_of_groups)
val_split = int(VAL_PCT * len(list_of_groups))
val_indices = list(itertools.chain(*[list(i.values())[0] for i in list_of_groups[:val_split]]))
train_indices = list(itertools.chain(*[list(i.values())[0] for i in list_of_groups[val_split:]]))
ltd = LunaDataSet(train_indices, meta)
lvd = LunaDataSet(val_indices, meta)
train_loader = DataLoader(ltd, batch_size=1, shuffle=False)
val_loader = DataLoader(lvd, batch_size=1, shuffle=False)
for ep in range(starting_epoch, TOTAL_EPOCHS):
train(train_loader, neural_net, loss_fn, ep, optim, get_lr, save_dir=save_dir)
validate(train_loader, neural_net, loss_fn)
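# Illustrative sketch of the series-level split used in run(): rows are grouped by
# `seriesuid` before shuffling, so every slice from one CT series lands entirely in
# either the training or the validation set. This hypothetical helper reproduces the
# same idea for a generic DataFrame with a `seriesuid` column; the default split
# fraction is an assumption chosen only for illustration.
def _series_level_split(meta, val_pct=0.2, seed=0):
    """Return (train_indices, val_indices) split at series level, not row level."""
    groups = [list(idx) for idx in meta.groupby('seriesuid').indices.values()]
    random.Random(seed).shuffle(groups)
    n_val = int(val_pct * len(groups))
    val_indices = list(itertools.chain(*groups[:n_val]))
    train_indices = list(itertools.chain(*groups[n_val:]))
    return train_indices, val_indices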
if __name__ == '__main__':
run(load_last_checkpoint=False)
|
[
"numpy.mean",
"os.makedirs",
"torch.utils.data.DataLoader",
"random.Random",
"pandas.read_csv",
"torch.load",
"numpy.asarray",
"os.path.join",
"model.net.Net",
"numpy.sum",
"torch.cuda.is_available",
"model.loss.Loss",
"time.time",
"glob.glob",
"main.dataset.LunaDataSet"
] |
[((726, 737), 'time.time', 'time.time', ([], {}), '()\n', (735, 737), False, 'import time\n'), ((1470, 1501), 'numpy.asarray', 'np.asarray', (['metrics', 'np.float32'], {}), '(metrics, np.float32)\n', (1480, 1501), True, 'import numpy as np\n'), ((1967, 1978), 'time.time', 'time.time', ([], {}), '()\n', (1976, 1978), False, 'import time\n'), ((2589, 2600), 'time.time', 'time.time', ([], {}), '()\n', (2598, 2600), False, 'import time\n'), ((3156, 3167), 'time.time', 'time.time', ([], {}), '()\n', (3165, 3167), False, 'import time\n'), ((3183, 3214), 'numpy.asarray', 'np.asarray', (['metrics', 'np.float32'], {}), '(metrics, np.float32)\n', (3193, 3214), True, 'import numpy as np\n'), ((3550, 3586), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (3561, 3586), False, 'import os\n'), ((3604, 3609), 'model.net.Net', 'Net', ([], {}), '()\n', (3607, 3609), False, 'from model.net import Net\n'), ((3624, 3630), 'model.loss.Loss', 'Loss', ([], {}), '()\n', (3628, 3630), False, 'from model.loss import Loss\n'), ((4339, 4364), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4362, 4364), False, 'import torch\n'), ((5181, 5213), 'main.dataset.LunaDataSet', 'LunaDataSet', (['train_indices', 'meta'], {}), '(train_indices, meta)\n', (5192, 5213), False, 'from main.dataset import LunaDataSet\n'), ((5224, 5254), 'main.dataset.LunaDataSet', 'LunaDataSet', (['val_indices', 'meta'], {}), '(val_indices, meta)\n', (5235, 5254), False, 'from main.dataset import LunaDataSet\n'), ((5274, 5318), 'torch.utils.data.DataLoader', 'DataLoader', (['ltd'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(ltd, batch_size=1, shuffle=False)\n', (5284, 5318), False, 'from torch.utils.data import DataLoader\n'), ((5336, 5380), 'torch.utils.data.DataLoader', 'DataLoader', (['lvd'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(lvd, batch_size=1, shuffle=False)\n', (5346, 5380), False, 'from torch.utils.data import DataLoader\n'), ((945, 970), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (968, 970), False, 'import torch\n'), ((2706, 2731), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2729, 2731), False, 'import torch\n'), ((3827, 3852), 'glob.glob', 'glob', (['f"""{save_dir}*.ckpt"""'], {}), "(f'{save_dir}*.ckpt')\n", (3831, 3852), False, 'from glob import glob\n'), ((4082, 4111), 'torch.load', 'torch.load', (['latest_model_path'], {}), '(latest_model_path)\n', (4092, 4111), False, 'import torch\n'), ((1906, 1945), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{epoch}.ckpt"""'], {}), "(save_dir, f'{epoch}.ckpt')\n", (1918, 1945), False, 'import os\n'), ((4877, 4893), 'random.Random', 'random.Random', (['(0)'], {}), '(0)\n', (4890, 4893), False, 'import random\n'), ((1881, 1903), 'numpy.mean', 'np.mean', (['metrics[:, 0]'], {}), '(metrics[:, 0])\n', (1888, 1903), True, 'import numpy as np\n'), ((2186, 2207), 'numpy.sum', 'np.sum', (['metrics[:, 7]'], {}), '(metrics[:, 7])\n', (2192, 2207), True, 'import numpy as np\n'), ((2221, 2242), 'numpy.sum', 'np.sum', (['metrics[:, 9]'], {}), '(metrics[:, 9])\n', (2227, 2242), True, 'import numpy as np\n'), ((2287, 2309), 'numpy.mean', 'np.mean', (['metrics[:, 0]'], {}), '(metrics[:, 0])\n', (2294, 2309), True, 'import numpy as np\n'), ((2327, 2349), 'numpy.mean', 'np.mean', (['metrics[:, 1]'], {}), '(metrics[:, 1])\n', (2334, 2349), True, 'import numpy as np\n'), ((2377, 2399), 'numpy.mean', 'np.mean', (['metrics[:, 2]'], {}), '(metrics[:, 2])\n', (2384, 
2399), True, 'import numpy as np\n'), ((2403, 2425), 'numpy.mean', 'np.mean', (['metrics[:, 3]'], {}), '(metrics[:, 3])\n', (2410, 2425), True, 'import numpy as np\n'), ((2429, 2451), 'numpy.mean', 'np.mean', (['metrics[:, 4]'], {}), '(metrics[:, 4])\n', (2436, 2451), True, 'import numpy as np\n'), ((2455, 2477), 'numpy.mean', 'np.mean', (['metrics[:, 5]'], {}), '(metrics[:, 5])\n', (2462, 2477), True, 'import numpy as np\n'), ((3282, 3304), 'numpy.mean', 'np.mean', (['metrics[:, 0]'], {}), '(metrics[:, 0])\n', (3289, 3304), True, 'import numpy as np\n'), ((3322, 3344), 'numpy.mean', 'np.mean', (['metrics[:, 1]'], {}), '(metrics[:, 1])\n', (3329, 3344), True, 'import numpy as np\n'), ((3372, 3394), 'numpy.mean', 'np.mean', (['metrics[:, 2]'], {}), '(metrics[:, 2])\n', (3379, 3394), True, 'import numpy as np\n'), ((3398, 3420), 'numpy.mean', 'np.mean', (['metrics[:, 3]'], {}), '(metrics[:, 3])\n', (3405, 3420), True, 'import numpy as np\n'), ((3424, 3446), 'numpy.mean', 'np.mean', (['metrics[:, 4]'], {}), '(metrics[:, 4])\n', (3431, 3446), True, 'import numpy as np\n'), ((3450, 3472), 'numpy.mean', 'np.mean', (['metrics[:, 5]'], {}), '(metrics[:, 5])\n', (3457, 3472), True, 'import numpy as np\n'), ((2079, 2100), 'numpy.sum', 'np.sum', (['metrics[:, 7]'], {}), '(metrics[:, 7])\n', (2085, 2100), True, 'import numpy as np\n'), ((2151, 2172), 'numpy.sum', 'np.sum', (['metrics[:, 9]'], {}), '(metrics[:, 9])\n', (2157, 2172), True, 'import numpy as np\n'), ((4615, 4676), 'pandas.read_csv', 'pd.read_csv', (['f"""{OUTPUT_PATH}/augmented_meta.csv"""'], {'index_col': '(0)'}), "(f'{OUTPUT_PATH}/augmented_meta.csv', index_col=0)\n", (4626, 4676), True, 'import pandas as pd\n'), ((2055, 2076), 'numpy.sum', 'np.sum', (['metrics[:, 6]'], {}), '(metrics[:, 6])\n', (2061, 2076), True, 'import numpy as np\n'), ((2127, 2148), 'numpy.sum', 'np.sum', (['metrics[:, 8]'], {}), '(metrics[:, 8])\n', (2133, 2148), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
"""Plot histograms of images. Possible nans and infinities are ignored."""
import argparse
from collections import OrderedDict
import logging
import numpy as np
import pylab as pl
from scipy import interpolate
import dwi.files
import dwi.util
def parse_args():
"""Parse command-line arguments."""
p = argparse.ArgumentParser(description=__doc__)
p.add_argument('--verbose', '-v', action='count',
help='increase verbosity')
p.add_argument('--input', nargs='+',
help='input files')
p.add_argument('--param', type=int, nargs='*',
help='image parameter index to use')
p.add_argument('--fig', required=True,
help='output figure file')
p.add_argument('--smooth', action='store_true',
help='smoothen the histogram by spline interpolation')
return p.parse_args()
def histogram(a, m1=None, m2=None, inclusive=True, bins='doane'):
"""Create histogram from data between (m1, m2), with bin centers."""
a = np.asarray(a)
if m1 is not None:
if inclusive:
a = a[a >= m1]
else:
a = a[a > m1]
if m2 is not None:
if inclusive:
a = a[a <= m2]
else:
a = a[a < m2]
mn, mx = a.min(), a.max()
hist, bin_edges = np.histogram(a, bins=bins, density=False)
bin_centers = [np.mean(t) for t in zip(bin_edges, bin_edges[1:])]
return hist, bin_centers, mn, mx
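# Illustrative usage sketch (hypothetical helper): histogram() restricts the data to
# [m1, m2] (or the open interval when inclusive=False) before binning, so a
# percentile-trimmed histogram can be built as below. The 1/99 bounds are arbitrary
# example values.
def _robust_histogram(a):
    """Histogram restricted to the 1st-99th percentile range of `a`."""
    lo, hi = np.percentile(a, [1, 99])
    return histogram(a, lo, hi, inclusive=True)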
def smoothen(x, y):
"""Smoothen histogram."""
x_smooth = np.linspace(min(x), max(x), 300)
y_smooth = interpolate.spline(x, y, x_smooth)
y_smooth[y_smooth < 0] = 0 # Don't let it dive negative.
return x_smooth, y_smooth
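# Note: scipy.interpolate.spline (used above) was removed in SciPy 1.0. A rough
# equivalent for newer SciPy releases could use make_interp_spline; this sketch is
# an alternative for such versions, not what the script itself calls.
def _smoothen_modern(x, y, n=300):
    """Spline-smooth a histogram on SciPy versions without interpolate.spline."""
    x_smooth = np.linspace(min(x), max(x), n)
    y_smooth = interpolate.make_interp_spline(x, y, k=3)(x_smooth)
    y_smooth[y_smooth < 0] = 0  # don't let the spline dip below zero
    return x_smooth, y_smooth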
def plot_histograms(Histograms, outfile, smooth=False):
"""Plot subfigures, each having several histograms bundled together."""
nrows = len({x[0] for x in Histograms})
ncols = len({x[1] for x in Histograms})
# logging.warning('## %s ', [nrows, ncols])
fig = pl.figure(figsize=(ncols * 6, nrows * 6))
# pl.yscale('log')
for i, ((param, rng), histograms) in enumerate(Histograms.items(), 1):
# logging.warning('#### %s ', [i, param, rng, len(histograms)])
if histograms:
fig.add_subplot(nrows, ncols, i)
minmin, maxmax = None, None
for hist, bins, mn, mx in histograms:
x, y = bins, hist
if smooth:
x, y = smoothen(x, y)
pl.plot(x, y)
# pl.bar(x, y, width=x[1] - x[0])
if minmin is None:
minmin = mn
if maxmax is None:
maxmax = mx
minmin = min(minmin, mn)
maxmax = max(maxmax, mx)
pl.title(f'{param}; {len(histograms)}; {rng}; '
f'[{minmin:.5g}, {maxmax:.5g}]')
# pl.tight_layout()
logging.info('Plotting to %s...', outfile)
pl.savefig(outfile, bbox_inches='tight')
pl.close()
def add_histograms(hists, path, img, param, ranges, verbose):
"""Add histograms for a file."""
original_shape, original_size = img.shape, img.size
img = img[dwi.util.bbox(img)]
img = img[np.isfinite(img)]
if np.any(img < 0):
# negatives = img[img < 0]
logging.warning('Image contains negatives: %s', path)
if verbose:
print(f'Read {original_shape}, {img.dtype}, '
f'{img.size / original_size:.1%}, {np.mean(img):.4g}, '
f'{dwi.util.fivenums(img)}, {param}, {path}')
for rng in ranges:
if isinstance(rng, list):
incl = True
if isinstance(rng, tuple):
incl = False
m1, m2 = np.percentile(img, rng)
key = param, str(rng)
hists.setdefault(key, []).append(histogram(img, m1, m2, incl))
# hists[0].append(histogram(img, None, None))
# hists[1].append(histogram(img, 0, 100))
# hists[2].append(histogram(img, 0.1, 99.9))
# hists[3].append(histogram(img, 1, 99))
# hists[4].append(histogram(img, 2, 98))
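# Note on the ranges passed to add_histograms(): the Python type of each range
# encodes inclusivity -- a list such as [0, 100] keeps values equal to the
# percentile bounds (inclusive=True), while a tuple such as (1, 95) drops them
# (inclusive=False). For example:
#   ranges = [[0, 100], (1, 99)]   # inclusive full range, exclusive 1-99%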
def main():
"""Main."""
args = parse_args()
logging.basicConfig(level=logging.INFO)
ranges = [[0, 100], (0, 100), [0, 99], (1, 95)]
hists = OrderedDict()
for path in args.input:
img, attrs = dwi.files.read_pmap(path, params=args.param,
dtype=np.float32)
for i, param in enumerate(attrs['parameters']):
add_histograms(hists, path, img[..., i], param, ranges,
args.verbose)
plot_histograms(hists, args.fig, smooth=args.smooth)
if __name__ == '__main__':
main()
|
[
"logging.basicConfig",
"numpy.mean",
"numpy.histogram",
"collections.OrderedDict",
"argparse.ArgumentParser",
"pylab.plot",
"pylab.savefig",
"numpy.asarray",
"logging.warning",
"numpy.any",
"pylab.close",
"pylab.figure",
"numpy.isfinite",
"scipy.interpolate.spline",
"numpy.percentile",
"logging.info"
] |
[((334, 378), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (357, 378), False, 'import argparse\n'), ((1056, 1069), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (1066, 1069), True, 'import numpy as np\n'), ((1346, 1387), 'numpy.histogram', 'np.histogram', (['a'], {'bins': 'bins', 'density': '(False)'}), '(a, bins=bins, density=False)\n', (1358, 1387), True, 'import numpy as np\n'), ((1610, 1644), 'scipy.interpolate.spline', 'interpolate.spline', (['x', 'y', 'x_smooth'], {}), '(x, y, x_smooth)\n', (1628, 1644), False, 'from scipy import interpolate\n'), ((2017, 2058), 'pylab.figure', 'pl.figure', ([], {'figsize': '(ncols * 6, nrows * 6)'}), '(figsize=(ncols * 6, nrows * 6))\n', (2026, 2058), True, 'import pylab as pl\n'), ((2928, 2970), 'logging.info', 'logging.info', (['"""Plotting to %s..."""', 'outfile'], {}), "('Plotting to %s...', outfile)\n", (2940, 2970), False, 'import logging\n'), ((2975, 3015), 'pylab.savefig', 'pl.savefig', (['outfile'], {'bbox_inches': '"""tight"""'}), "(outfile, bbox_inches='tight')\n", (2985, 3015), True, 'import pylab as pl\n'), ((3020, 3030), 'pylab.close', 'pl.close', ([], {}), '()\n', (3028, 3030), True, 'import pylab as pl\n'), ((3261, 3276), 'numpy.any', 'np.any', (['(img < 0)'], {}), '(img < 0)\n', (3267, 3276), True, 'import numpy as np\n'), ((4151, 4190), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (4170, 4190), False, 'import logging\n'), ((4256, 4269), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4267, 4269), False, 'from collections import OrderedDict\n'), ((1407, 1417), 'numpy.mean', 'np.mean', (['t'], {}), '(t)\n', (1414, 1417), True, 'import numpy as np\n'), ((3236, 3252), 'numpy.isfinite', 'np.isfinite', (['img'], {}), '(img)\n', (3247, 3252), True, 'import numpy as np\n'), ((3321, 3374), 'logging.warning', 'logging.warning', (['"""Image contains negatives: %s"""', 'path'], {}), "('Image contains negatives: %s', path)\n", (3336, 3374), False, 'import logging\n'), ((3733, 3756), 'numpy.percentile', 'np.percentile', (['img', 'rng'], {}), '(img, rng)\n', (3746, 3756), True, 'import numpy as np\n'), ((2506, 2519), 'pylab.plot', 'pl.plot', (['x', 'y'], {}), '(x, y)\n', (2513, 2519), True, 'import pylab as pl\n'), ((3494, 3506), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (3501, 3506), True, 'import numpy as np\n')]
|
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
import pdb
import cv2
import time
import json
import torch
import random
import scipy
import logging
import traceback
import numpy as np
from datetime import datetime
# from config import HOME
from tensorboard_logger import log_value, log_images
from matplotlib import pyplot as plt
from sklearn.metrics import roc_curve  # used by plot_ROC below
plt.switch_backend("agg")
def logger_init(save_folder):
mkdir(save_folder)
logging.basicConfig(
filename=os.path.join(save_folder, "log.txt"),
filemode="a",
level=logging.DEBUG,
format="%(asctime)s %(message)s",
datefmt="%H:%M:%S",
)
console = logging.StreamHandler()
logger = logging.getLogger(__name__)
logger.addHandler(console)
return logger
def plot_ROC(roc, targets, predictions, phase, epoch, folder):
roc_plot_folder = os.path.join(folder, "ROC_plots")
mkdir(os.path.join(roc_plot_folder))
fpr, tpr, thresholds = roc_curve(targets, predictions)
roc_plot_name = "ROC_%s_%s_%0.4f" % (phase, epoch, roc)
roc_plot_path = os.path.join(roc_plot_folder, roc_plot_name + ".jpg")
fig = plt.figure(figsize=(10, 5))
plt.plot([0, 1], [0, 1], linestyle="--")
plt.plot(fpr, tpr, marker=".")
plt.legend(["diagonal-line", roc_plot_name])
fig.savefig(roc_plot_path, bbox_inches="tight", pad_inches=0)
plt.close(fig) # see footnote [1]
plot = cv2.imread(roc_plot_path)
log_images(roc_plot_name, [plot], epoch)
def print_time(log, start, string):
diff = time.time() - start
log(string + ": %02d:%02d" % (diff // 60, diff % 60))
def iter_log(log, phase, epoch, iteration, epoch_size, loss, start):
diff = time.time() - start
log(
"%s epoch: %d (%d/%d) loss: %.4f || %02d:%02d",
phase,
epoch,
iteration,
epoch_size,
loss.item(),
diff // 60,
diff % 60,
)
def mkdir(folder):
if not os.path.exists(folder):
os.mkdir(folder)
def save_hyperparameters(trainer, remark):
hp_file = os.path.join(trainer.save_folder, "parameters.txt")
time_now = datetime.now()
augmentations = trainer.dataloaders["train"].dataset.transforms.transforms
# pdb.set_trace()
string_to_write = (
f"Time: {time_now}\n"
+ f"model_name: {trainer.model_name}\n"
+ f"train_df_name: {trainer.train_df_name}\n"
#+ f"images_folder: {trainer.images_folder}\n"
+ f"resume: {trainer.resume}\n"
+ f"pretrained: {trainer.pretrained}\n"
+ f"pretrained_path: {trainer.pretrained_path}\n"
+ f"folder: {trainer.folder}\n"
+ f"fold: {trainer.fold}\n"
+ f"total_folds: {trainer.total_folds}\n"
+ f"num_samples: {trainer.num_samples}\n"
+ f"sampling class weights: {trainer.class_weights}\n"
+ f"size: {trainer.size}\n"
+ f"top_lr: {trainer.top_lr}\n"
+ f"base_lr: {trainer.base_lr}\n"
+ f"num_workers: {trainer.num_workers}\n"
+ f"batchsize: {trainer.batch_size}\n"
+ f"momentum: {trainer.momentum}\n"
+ f"mean: {trainer.mean}\n"
+ f"std: {trainer.std}\n"
+ f"start_epoch: {trainer.start_epoch}\n"
+ f"augmentations: {augmentations}\n"
+ f"criterion: {trainer.criterion}\n"
+ f"optimizer: {trainer.optimizer}\n"
+ f"remark: {remark}\n"
)
with open(hp_file, "a") as f:
f.write(string_to_write)
print(string_to_write)
def seed_pytorch(seed=69):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
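# A more complete seeding sketch: seed_pytorch() above covers Python, NumPy and the
# CUDA RNG; for reproducible CPU runs one would typically also seed the CPU
# generator. This hypothetical variant is illustrative only and is not called
# anywhere in this module.
def seed_everything(seed=69):
    """Seed Python, NumPy and torch (CPU + CUDA) RNGs."""
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True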
|
[
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"matplotlib.pyplot.plot",
"os.path.join",
"tensorboard_logger.log_images",
"random.seed",
"time.time",
"matplotlib.pyplot.close",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"os.mkdir",
"matplotlib.pyplot.switch_backend",
"torch.cuda.manual_seed",
"cv2.imread",
"matplotlib.pyplot.legend"
] |
[((366, 391), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (384, 391), True, 'from matplotlib import pyplot as plt\n'), ((668, 691), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (689, 691), False, 'import logging\n'), ((705, 732), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (722, 732), False, 'import logging\n'), ((870, 903), 'os.path.join', 'os.path.join', (['folder', '"""ROC_plots"""'], {}), "(folder, 'ROC_plots')\n", (882, 903), False, 'import os\n'), ((1084, 1137), 'os.path.join', 'os.path.join', (['roc_plot_folder', "(roc_plot_name + '.jpg')"], {}), "(roc_plot_folder, roc_plot_name + '.jpg')\n", (1096, 1137), False, 'import os\n'), ((1148, 1175), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1158, 1175), True, 'from matplotlib import pyplot as plt\n'), ((1180, 1220), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'linestyle': '"""--"""'}), "([0, 1], [0, 1], linestyle='--')\n", (1188, 1220), True, 'from matplotlib import pyplot as plt\n'), ((1225, 1255), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'marker': '"""."""'}), "(fpr, tpr, marker='.')\n", (1233, 1255), True, 'from matplotlib import pyplot as plt\n'), ((1260, 1304), 'matplotlib.pyplot.legend', 'plt.legend', (["['diagonal-line', roc_plot_name]"], {}), "(['diagonal-line', roc_plot_name])\n", (1270, 1304), True, 'from matplotlib import pyplot as plt\n'), ((1375, 1389), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1384, 1389), True, 'from matplotlib import pyplot as plt\n'), ((1422, 1447), 'cv2.imread', 'cv2.imread', (['roc_plot_path'], {}), '(roc_plot_path)\n', (1432, 1447), False, 'import cv2\n'), ((1452, 1492), 'tensorboard_logger.log_images', 'log_images', (['roc_plot_name', '[plot]', 'epoch'], {}), '(roc_plot_name, [plot], epoch)\n', (1462, 1492), False, 'from tensorboard_logger import log_value, log_images\n'), ((2063, 2114), 'os.path.join', 'os.path.join', (['trainer.save_folder', '"""parameters.txt"""'], {}), "(trainer.save_folder, 'parameters.txt')\n", (2075, 2114), False, 'import os\n'), ((2130, 2144), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2142, 2144), False, 'from datetime import datetime\n'), ((3525, 3542), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3536, 3542), False, 'import random\n'), ((3592, 3612), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3606, 3612), True, 'import numpy as np\n'), ((3617, 3645), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (3639, 3645), False, 'import torch\n'), ((914, 943), 'os.path.join', 'os.path.join', (['roc_plot_folder'], {}), '(roc_plot_folder)\n', (926, 943), False, 'import os\n'), ((1542, 1553), 'time.time', 'time.time', ([], {}), '()\n', (1551, 1553), False, 'import time\n'), ((1703, 1714), 'time.time', 'time.time', ([], {}), '()\n', (1712, 1714), False, 'import time\n'), ((1955, 1977), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1969, 1977), False, 'import os\n'), ((1987, 2003), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (1995, 2003), False, 'import os\n'), ((489, 525), 'os.path.join', 'os.path.join', (['save_folder', '"""log.txt"""'], {}), "(save_folder, 'log.txt')\n", (501, 525), False, 'import os\n')]
|
import numpy as np
from scipy.ndimage import convolve, maximum_filter
def gauss2d(sigma, fsize):
""" Create a 2D Gaussian filter
Args:
sigma: width of the Gaussian filter
fsize: (w, h) dimensions of the filter
Returns:
*normalized* Gaussian filter as (h, w) np.array
"""
m, n = fsize
x = np.arange(-m / 2 + 0.5, m / 2)
y = np.arange(-n / 2 + 0.5, n / 2)
xx, yy = np.meshgrid(x, y, sparse=True)
g = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))
return g / np.sum(g)
def derivative_filters():
""" Create derivative filters for x and y direction
Returns:
fx: derivative filter in x direction
fy: derivative filter in y direction
"""
fx = np.array([[0.5, 0, -0.5]])
fy = fx.transpose()
return fx, fy
def compute_hessian(img, gauss, fx, fy):
""" Compute elements of the Hessian matrix
Args:
img:
gauss: Gaussian filter
fx: derivative filter in x direction
fy: derivative filter in y direction
Returns:
I_xx: (h, w) np.array of 2nd derivatives in x direction
I_yy: (h, w) np.array of 2nd derivatives in y direction
I_xy: (h, w) np.array of 2nd derivatives in x-y direction
"""
#
# You code here
#
# set mode
mode = "mirror"
# smooth image
img = convolve(img, gauss, mode = mode, cval=0)
# first derivatives
I_x = convolve(img, fx, mode = mode, cval=0)
I_y = convolve(img, fy, mode = mode, cval=0)
# second derivatives
I_xx = convolve(I_x, fx, mode = mode, cval=0)
I_xy = convolve(I_x, fy, mode = mode, cval=0)
I_yy = convolve(I_y, fy, mode = mode, cval=0)
return I_xx,I_yy,I_xy
def compute_criterion(I_xx, I_yy, I_xy, sigma):
""" Compute criterion function
Args:
I_xx: (h, w) np.array of 2nd derivatives in x direction
I_yy: (h, w) np.array of 2nd derivatives in y direction
I_xy: (h, w) np.array of 2nd derivatives in x-y direction
sigma: scaling factor
Returns:
criterion: (h, w) np.array of scaled determinant of Hessian matrix
"""
#
# You code here
#
det = I_xx * I_yy - I_xy ** 2
return sigma ** 4 * det
def nonmaxsuppression(criterion, threshold):
""" Apply non-maximum suppression to criterion values
and return Hessian interest points
Args:
criterion: (h, w) np.array of criterion function values
threshold: criterion threshold
Returns:
rows: (n,) np.array with y-positions of interest points
cols: (n,) np.array with x-positions of interest points
"""
#
# You code here
#
criterion_max = maximum_filter(criterion, (5,5), mode= "mirror")
criterion_thresh = np.logical_and(criterion_max > threshold, criterion >= criterion_max)
mask = np.zeros_like(criterion_thresh)
mask[5:-5, 5:-5] = criterion_thresh[5:-5, 5:-5]
rows, cols = np.nonzero(mask)
return rows, cols
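# End-to-end sketch (hypothetical helper): the functions above chain into a simple
# Hessian interest-point detector. Filter size, sigma and threshold below are
# illustrative assumptions, not values prescribed by the assignment.
def _detect_hessian_points(img, sigma=2.0, fsize=(25, 25), threshold=1e-3):
    """Return (rows, cols) of Hessian interest points for a grayscale image."""
    gauss = gauss2d(sigma, fsize)
    fx, fy = derivative_filters()
    I_xx, I_yy, I_xy = compute_hessian(img, gauss, fx, fy)
    criterion = compute_criterion(I_xx, I_yy, I_xy, sigma)
    return nonmaxsuppression(criterion, threshold)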
|
[
"numpy.logical_and",
"scipy.ndimage.convolve",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.nonzero",
"scipy.ndimage.maximum_filter",
"numpy.meshgrid",
"numpy.zeros_like",
"numpy.arange"
] |
[((339, 369), 'numpy.arange', 'np.arange', (['(-m / 2 + 0.5)', '(m / 2)'], {}), '(-m / 2 + 0.5, m / 2)\n', (348, 369), True, 'import numpy as np\n'), ((378, 408), 'numpy.arange', 'np.arange', (['(-n / 2 + 0.5)', '(n / 2)'], {}), '(-n / 2 + 0.5, n / 2)\n', (387, 408), True, 'import numpy as np\n'), ((422, 452), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(True)'}), '(x, y, sparse=True)\n', (433, 452), True, 'import numpy as np\n'), ((461, 508), 'numpy.exp', 'np.exp', (['(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))'], {}), '(-(xx ** 2 + yy ** 2) / (2 * sigma ** 2))\n', (467, 508), True, 'import numpy as np\n'), ((739, 765), 'numpy.array', 'np.array', (['[[0.5, 0, -0.5]]'], {}), '([[0.5, 0, -0.5]])\n', (747, 765), True, 'import numpy as np\n'), ((1358, 1397), 'scipy.ndimage.convolve', 'convolve', (['img', 'gauss'], {'mode': 'mode', 'cval': '(0)'}), '(img, gauss, mode=mode, cval=0)\n', (1366, 1397), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1435, 1471), 'scipy.ndimage.convolve', 'convolve', (['img', 'fx'], {'mode': 'mode', 'cval': '(0)'}), '(img, fx, mode=mode, cval=0)\n', (1443, 1471), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1484, 1520), 'scipy.ndimage.convolve', 'convolve', (['img', 'fy'], {'mode': 'mode', 'cval': '(0)'}), '(img, fy, mode=mode, cval=0)\n', (1492, 1520), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1560, 1596), 'scipy.ndimage.convolve', 'convolve', (['I_x', 'fx'], {'mode': 'mode', 'cval': '(0)'}), '(I_x, fx, mode=mode, cval=0)\n', (1568, 1596), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1610, 1646), 'scipy.ndimage.convolve', 'convolve', (['I_x', 'fy'], {'mode': 'mode', 'cval': '(0)'}), '(I_x, fy, mode=mode, cval=0)\n', (1618, 1646), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((1660, 1696), 'scipy.ndimage.convolve', 'convolve', (['I_y', 'fy'], {'mode': 'mode', 'cval': '(0)'}), '(I_y, fy, mode=mode, cval=0)\n', (1668, 1696), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((2727, 2775), 'scipy.ndimage.maximum_filter', 'maximum_filter', (['criterion', '(5, 5)'], {'mode': '"""mirror"""'}), "(criterion, (5, 5), mode='mirror')\n", (2741, 2775), False, 'from scipy.ndimage import convolve, maximum_filter\n'), ((2799, 2868), 'numpy.logical_and', 'np.logical_and', (['(criterion_max > threshold)', '(criterion >= criterion_max)'], {}), '(criterion_max > threshold, criterion >= criterion_max)\n', (2813, 2868), True, 'import numpy as np\n'), ((2881, 2912), 'numpy.zeros_like', 'np.zeros_like', (['criterion_thresh'], {}), '(criterion_thresh)\n', (2894, 2912), True, 'import numpy as np\n'), ((2982, 2998), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (2992, 2998), True, 'import numpy as np\n'), ((524, 533), 'numpy.sum', 'np.sum', (['g'], {}), '(g)\n', (530, 533), True, 'import numpy as np\n')]
|
from builtins import range
from builtins import object
import numpy as np
from comp411.layers import *
from comp411.layer_utils import *
class ThreeLayerNet(object):
"""
A three-layer fully-connected neural network with Leaky ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
    of D, a hidden dimension given as a tuple (H1, H2) specifying the sizes of the
    first and second hidden layers respectively, and perform classification over C classes.
The architecture should be affine - leakyrelu - affine - leakyrelu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self, input_dim=3*32*32, hidden_dim=(64, 32), num_classes=10,
weight_scale=1e-3, reg=0.0, alpha=1e-3):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: A tuple giving the size of the first and second hidden layer respectively
- num_classes: An integer giving the number of classes to classify
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
- alpha: negative slope of Leaky ReLU layers
"""
self.params = {}
self.reg = reg
self.alpha = alpha
############################################################################
# TODO: Initialize the weights and biases of the three-layer net. Weights #
# should be initialized from a Gaussian centered at 0.0 with #
# standard deviation equal to weight_scale, and biases should be #
# initialized to zero. All weights and biases should be stored in the #
# dictionary self.params, with first layer weights #
# and biases using the keys 'W1' and 'b1', second layer #
# weights and biases using the keys 'W2' and 'b2', #
# and third layer weights and biases using the keys 'W3' and 'b3. #
# #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
self.params['W1'] = weight_scale * np.random.randn(input_dim,hidden_dim[0])
self.params['W2'] = weight_scale * np.random.randn(hidden_dim[0],hidden_dim[1])
self.params['W3'] = weight_scale * np.random.randn(hidden_dim[1],num_classes)
self.params['b1'] = np.zeros(hidden_dim[0])
self.params['b2'] = np.zeros(hidden_dim[1])
self.params['b3'] = np.zeros(num_classes)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
scores = None
############################################################################
# TODO: Implement the forward pass for the three-layer net, computing the #
# class scores for X and storing them in the scores variable. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
W1 = self.params['W1']
W2 = self.params['W2']
W3 = self.params['W3']
b1 = self.params['b1']
b2 = self.params['b2']
b3 = self.params['b3']
X2 , lrelu_cache1 = affine_lrelu_forward(X,W1,b1,{"alpha": self.alpha})
X3 , lrelu_cache2 = affine_lrelu_forward(X2,W2,b2,{"alpha": self.alpha})
scores, affine_cache = affine_forward(X3,W3,b3)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
# If y is None then we are in test mode so just return scores
if y is None:
return scores
loss, grads = 0, {}
############################################################################
# TODO: Implement the backward pass for the three-layer net. Store the loss#
# in the loss variable and gradients in the grads dictionary. Compute data #
# loss using softmax, and make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
loss, softmax_grad = softmax_loss(scores, y)
loss += 0.5 * self.reg * ( np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) )
dx3, dw3, db3 = affine_backward(softmax_grad, affine_cache)
dx2, dw2, db2 = affine_lrelu_backward(dx3, lrelu_cache2)
dx1, dw1, db1 = affine_lrelu_backward(dx2, lrelu_cache1)
grads['W3'] = dw3 + self.reg * W3
grads['b3'] = db3
grads['W2'] = dw2 + self.reg * W2
grads['b2'] = db2
grads['W1'] = dw1 + self.reg * W1
grads['b1'] = db1
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
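# Smoke-test sketch for the class above (hypothetical helper): runs one
# forward/backward pass on random data. The batch size and shapes are arbitrary
# illustration choices.
def _three_layer_smoke_test():
    """Forward/backward pass of ThreeLayerNet on random inputs."""
    np.random.seed(0)
    model = ThreeLayerNet(input_dim=3 * 32 * 32, hidden_dim=(64, 32), num_classes=10)
    X = np.random.randn(5, 3 * 32 * 32)
    y = np.random.randint(10, size=5)
    loss, grads = model.loss(X, y)
    return loss, sorted(grads.keys())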
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of hidden layers,
LeakyReLU nonlinearities, and a softmax loss function. This will also implement
dropout optionally. For a network with L layers, the architecture will be
{affine - leakyrelu - [dropout]} x (L - 1) - affine - softmax
where dropout is optional, and the {...} block is repeated L - 1 times.
Similar to the ThreeLayerNet above, learnable parameters are stored in the
self.params dictionary and will be learned using the Solver class.
"""
def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
dropout=1, reg=0.0, alpha=1e-2,
weight_scale=1e-2, dtype=np.float32, seed=None):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to classify.
- dropout: Scalar between 0 and 1 giving dropout strength. If dropout=1 then
the network should not use dropout at all.
- reg: Scalar giving L2 regularization strength.
- alpha: negative slope of Leaky ReLU layers
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- dtype: A numpy datatype object; all computations will be performed using
this datatype. float32 is faster but less accurate, so you should use
float64 for numeric gradient checking.
- seed: If not None, then pass this random seed to the dropout layers. This
will make the dropout layers deterministic so we can gradient check the
model.
"""
self.use_dropout = dropout != 1
self.reg = reg
self.alpha = alpha
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
############################################################################
# TODO: Initialize the parameters of the network, storing all values in #
# the self.params dictionary. Store weights and biases for the first layer #
# in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
# initialized from a normal distribution centered at 0 with standard #
# deviation equal to weight_scale. Biases should be initialized to zero. #
# #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
dims = np.hstack((input_dim, hidden_dims, num_classes))
for i in range(self.num_layers):
self.params['W%d' % (i + 1)] = weight_scale * np.random.randn(dims[i], dims[i+1])
self.params['b%d' % (i + 1)] = np.zeros(dims[i+1])
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
# When using dropout we need to pass a dropout_param dictionary to each
# dropout layer so that the layer knows the dropout probability and the mode
# (train / test). You can pass the same dropout_param to each dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {'mode': 'train', 'p': dropout}
if seed is not None:
self.dropout_param['seed'] = seed
# Cast all parameters to the correct datatype
for k, v in self.params.items():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as ThreeLayerNet above.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
# Set train/test mode for dropout param since it
# behaves differently during training and testing.
if self.use_dropout:
self.dropout_param['mode'] = mode
scores = None
############################################################################
# TODO: Implement the forward pass for the fully-connected net, computing #
# the class scores for X and storing them in the scores variable. #
# #
# When using dropout, you'll need to pass self.dropout_param to each #
# dropout forward pass. #
# #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
hidden_num = self.num_layers - 1
scores = X
cache_history = []
L2reg = 0
for i in range(hidden_num):
scores, cache = affine_lrelu_forward(scores, self.params['W%d' % (i + 1)], self.params['b%d' % (i + 1)],{"alpha": self.alpha})
cache_history.append(cache)
if self.use_dropout:
scores, cache = dropout_forward(scores, self.dropout_param)
cache_history.append(cache)
L2reg += np.sum(self.params['W%d' % (i + 1)] ** 2)
i += 1
scores, cache = affine_forward(scores, self.params['W%d' % (i + 1)],
self.params['b%d' % (i + 1)])
cache_history.append(cache)
L2reg += np.sum(self.params['W%d' % (i + 1)] ** 2)
L2reg *= 0.5 * self.reg
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
# If test mode return early
if mode == 'test':
return scores
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the backward pass for the fully-connected net. Store the #
# loss in the loss variable and gradients in the grads dictionary. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
# #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
loss, dout = softmax_loss(scores, y)
loss += L2reg
dout, grads['W%d' % (i + 1)], grads['b%d' % (i + 1)] = affine_backward(dout, cache_history.pop())
grads['W%d' % (i + 1)] += self.reg * self.params['W%d' % (i + 1)]
i -= 1
while i >= 0:
if self.use_dropout:
dout = dropout_backward(dout, cache_history.pop())
#else:
dout, grads['W%d' % (i + 1)], grads['b%d' % (i + 1)] = affine_lrelu_backward(dout, cache_history.pop())
grads['W%d' % (i + 1)] += self.reg * self.params['W%d' % (i + 1)]
i -= 1
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
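# Smoke-test sketch for FullyConnectedNet with dropout enabled (hypothetical
# helper); hidden sizes, dropout rate and regularization strength are arbitrary
# illustration choices.
def _fully_connected_smoke_test():
    """Forward/backward pass of a two-hidden-layer FullyConnectedNet."""
    np.random.seed(0)
    model = FullyConnectedNet([100, 50], input_dim=3 * 32 * 32, num_classes=10,
                              dropout=0.5, reg=0.1, dtype=np.float64, seed=0)
    X = np.random.randn(4, 3 * 32 * 32)
    y = np.random.randint(10, size=4)
    return model.loss(X, y)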
|
[
"numpy.hstack",
"numpy.sum",
"builtins.range",
"numpy.zeros",
"numpy.random.randn"
] |
[((2953, 2976), 'numpy.zeros', 'np.zeros', (['hidden_dim[0]'], {}), '(hidden_dim[0])\n', (2961, 2976), True, 'import numpy as np\n'), ((3005, 3028), 'numpy.zeros', 'np.zeros', (['hidden_dim[1]'], {}), '(hidden_dim[1])\n', (3013, 3028), True, 'import numpy as np\n'), ((3057, 3078), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (3065, 3078), True, 'import numpy as np\n'), ((10193, 10241), 'numpy.hstack', 'np.hstack', (['(input_dim, hidden_dims, num_classes)'], {}), '((input_dim, hidden_dims, num_classes))\n', (10202, 10241), True, 'import numpy as np\n'), ((10260, 10282), 'builtins.range', 'range', (['self.num_layers'], {}), '(self.num_layers)\n', (10265, 10282), False, 'from builtins import range\n'), ((12712, 12729), 'builtins.range', 'range', (['hidden_num'], {}), '(hidden_num)\n', (12717, 12729), False, 'from builtins import range\n'), ((13348, 13389), 'numpy.sum', 'np.sum', (["(self.params['W%d' % (i + 1)] ** 2)"], {}), "(self.params['W%d' % (i + 1)] ** 2)\n", (13354, 13389), True, 'import numpy as np\n'), ((2710, 2751), 'numpy.random.randn', 'np.random.randn', (['input_dim', 'hidden_dim[0]'], {}), '(input_dim, hidden_dim[0])\n', (2725, 2751), True, 'import numpy as np\n'), ((2794, 2839), 'numpy.random.randn', 'np.random.randn', (['hidden_dim[0]', 'hidden_dim[1]'], {}), '(hidden_dim[0], hidden_dim[1])\n', (2809, 2839), True, 'import numpy as np\n'), ((2882, 2925), 'numpy.random.randn', 'np.random.randn', (['hidden_dim[1]', 'num_classes'], {}), '(hidden_dim[1], num_classes)\n', (2897, 2925), True, 'import numpy as np\n'), ((10421, 10442), 'numpy.zeros', 'np.zeros', (['dims[i + 1]'], {}), '(dims[i + 1])\n', (10429, 10442), True, 'import numpy as np\n'), ((13084, 13125), 'numpy.sum', 'np.sum', (["(self.params['W%d' % (i + 1)] ** 2)"], {}), "(self.params['W%d' % (i + 1)] ** 2)\n", (13090, 13125), True, 'import numpy as np\n'), ((6601, 6616), 'numpy.sum', 'np.sum', (['(W3 * W3)'], {}), '(W3 * W3)\n', (6607, 6616), True, 'import numpy as np\n'), ((10342, 10379), 'numpy.random.randn', 'np.random.randn', (['dims[i]', 'dims[i + 1]'], {}), '(dims[i], dims[i + 1])\n', (10357, 10379), True, 'import numpy as np\n'), ((6565, 6580), 'numpy.sum', 'np.sum', (['(W1 * W1)'], {}), '(W1 * W1)\n', (6571, 6580), True, 'import numpy as np\n'), ((6583, 6598), 'numpy.sum', 'np.sum', (['(W2 * W2)'], {}), '(W2 * W2)\n', (6589, 6598), True, 'import numpy as np\n')]
|
# coding: utf-8
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
def card_num_distribution():
"""Plot `Std-CardNumDistribution.png`."""
total = np.fromstring('12 14 12 12 13 12 12 12 12 12 12 14 12', sep=' ')
jb = np.fromstring('0 6 6 6 6 8 9 11 11 10 7 3 2', sep=' ')
jn = np.fromstring('7 1 6 6 0 3 3 1 1 2 5 9 6', sep=' ')
zb = np.fromstring('5 7 0 0 7 1 0 0 0 0 0 2 4', sep=' ')
jb /= total
jn /= total
zb /= total
x = np.arange(1, 14, 1)
xlabels = 'A 2 3 4 5 6 7 8 9 10 J Q K'.split()
    plt.plot(x, jb, '*-', color='k', label='基本牌')  # basic cards
    plt.plot(x, jn, 'o-', color='b', label='锦囊牌')  # trick cards
    plt.plot(x, zb, '+-', color='r', label='装备牌')  # equipment cards
plt.legend()
plt.grid()
plt.ylim(ymin=-0.01, ymax=1.01)
ax = plt.gca()
ax.yaxis.set_major_formatter(mticker.PercentFormatter(1.0))
plt.xticks(x, xlabels)
plt.show()
def card_suit_distribution():
"""Plot `Std-CardSuitDistribution.png`."""
jb = np.fromstring('14 22 20 29', sep=' ')
jn = np.fromstring('16 15 14 5', sep=' ')
zb = np.fromstring('10 3 6 7', sep=' ')
total = np.fromstring('40 40 40 41', sep=' ')
jb /= total
jn /= total
zb /= total
x = np.arange(1, 5, 1)
    xlabels = '黑桃 红桃 草花 方片'.split()  # spades, hearts, clubs, diamonds
plt.bar(x - 0.2, jb, color='k', width=0.2, label='基本牌')
plt.bar(x, jn, color='b', width=0.2, label='锦囊牌')
plt.bar(x + 0.2, zb, color='r', width=0.2, label='装备牌')
plt.legend()
plt.grid()
ax = plt.gca()
ax.yaxis.set_major_formatter(mticker.PercentFormatter(1.0))
plt.xticks(x, xlabels)
plt.show()
def main():
matplotlib.rc('font',**{
'sans-serif': 'Microsoft YaHei'
})
# card_num_distribution()
card_suit_distribution()
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xticks",
"numpy.arange",
"matplotlib.ticker.PercentFormatter",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.bar",
"matplotlib.rc",
"matplotlib.pyplot.ylim",
"numpy.fromstring",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((210, 274), 'numpy.fromstring', 'np.fromstring', (['"""12 14 12 12 13 12 12 12 12 12 12 14 12"""'], {'sep': '""" """'}), "('12 14 12 12 13 12 12 12 12 12 12 14 12', sep=' ')\n", (223, 274), True, 'import numpy as np\n'), ((285, 339), 'numpy.fromstring', 'np.fromstring', (['"""0 6 6 6 6 8 9 11 11 10 7 3 2"""'], {'sep': '""" """'}), "('0 6 6 6 6 8 9 11 11 10 7 3 2', sep=' ')\n", (298, 339), True, 'import numpy as np\n'), ((349, 400), 'numpy.fromstring', 'np.fromstring', (['"""7 1 6 6 0 3 3 1 1 2 5 9 6"""'], {'sep': '""" """'}), "('7 1 6 6 0 3 3 1 1 2 5 9 6', sep=' ')\n", (362, 400), True, 'import numpy as np\n'), ((410, 461), 'numpy.fromstring', 'np.fromstring', (['"""5 7 0 0 7 1 0 0 0 0 0 2 4"""'], {'sep': '""" """'}), "('5 7 0 0 7 1 0 0 0 0 0 2 4', sep=' ')\n", (423, 461), True, 'import numpy as np\n'), ((520, 539), 'numpy.arange', 'np.arange', (['(1)', '(14)', '(1)'], {}), '(1, 14, 1)\n', (529, 539), True, 'import numpy as np\n'), ((595, 640), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'jb', '"""*-"""'], {'color': '"""k"""', 'label': '"""基本牌"""'}), "(x, jb, '*-', color='k', label='基本牌')\n", (603, 640), True, 'import matplotlib.pyplot as plt\n'), ((645, 690), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'jn', '"""o-"""'], {'color': '"""b"""', 'label': '"""锦囊牌"""'}), "(x, jn, 'o-', color='b', label='锦囊牌')\n", (653, 690), True, 'import matplotlib.pyplot as plt\n'), ((695, 740), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'zb', '"""+-"""'], {'color': '"""r"""', 'label': '"""装备牌"""'}), "(x, zb, '+-', color='r', label='装备牌')\n", (703, 740), True, 'import matplotlib.pyplot as plt\n'), ((746, 758), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (756, 758), True, 'import matplotlib.pyplot as plt\n'), ((763, 773), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (771, 773), True, 'import matplotlib.pyplot as plt\n'), ((778, 809), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(-0.01)', 'ymax': '(1.01)'}), '(ymin=-0.01, ymax=1.01)\n', (786, 809), True, 'import matplotlib.pyplot as plt\n'), ((819, 828), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (826, 828), True, 'import matplotlib.pyplot as plt\n'), ((897, 919), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'xlabels'], {}), '(x, xlabels)\n', (907, 919), True, 'import matplotlib.pyplot as plt\n'), ((925, 935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (933, 935), True, 'import matplotlib.pyplot as plt\n'), ((1024, 1061), 'numpy.fromstring', 'np.fromstring', (['"""14 22 20 29"""'], {'sep': '""" """'}), "('14 22 20 29', sep=' ')\n", (1037, 1061), True, 'import numpy as np\n'), ((1071, 1107), 'numpy.fromstring', 'np.fromstring', (['"""16 15 14 5"""'], {'sep': '""" """'}), "('16 15 14 5', sep=' ')\n", (1084, 1107), True, 'import numpy as np\n'), ((1117, 1151), 'numpy.fromstring', 'np.fromstring', (['"""10 3 6 7"""'], {'sep': '""" """'}), "('10 3 6 7', sep=' ')\n", (1130, 1151), True, 'import numpy as np\n'), ((1164, 1201), 'numpy.fromstring', 'np.fromstring', (['"""40 40 40 41"""'], {'sep': '""" """'}), "('40 40 40 41', sep=' ')\n", (1177, 1201), True, 'import numpy as np\n'), ((1260, 1278), 'numpy.arange', 'np.arange', (['(1)', '(5)', '(1)'], {}), '(1, 5, 1)\n', (1269, 1278), True, 'import numpy as np\n'), ((1320, 1375), 'matplotlib.pyplot.bar', 'plt.bar', (['(x - 0.2)', 'jb'], {'color': '"""k"""', 'width': '(0.2)', 'label': '"""基本牌"""'}), "(x - 0.2, jb, color='k', width=0.2, label='基本牌')\n", (1327, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1380, 1429), 'matplotlib.pyplot.bar', 
'plt.bar', (['x', 'jn'], {'color': '"""b"""', 'width': '(0.2)', 'label': '"""锦囊牌"""'}), "(x, jn, color='b', width=0.2, label='锦囊牌')\n", (1387, 1429), True, 'import matplotlib.pyplot as plt\n'), ((1434, 1489), 'matplotlib.pyplot.bar', 'plt.bar', (['(x + 0.2)', 'zb'], {'color': '"""r"""', 'width': '(0.2)', 'label': '"""装备牌"""'}), "(x + 0.2, zb, color='r', width=0.2, label='装备牌')\n", (1441, 1489), True, 'import matplotlib.pyplot as plt\n'), ((1495, 1507), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1505, 1507), True, 'import matplotlib.pyplot as plt\n'), ((1512, 1522), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1520, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1532, 1541), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1539, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1632), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'xlabels'], {}), '(x, xlabels)\n', (1620, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1648), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1646, 1648), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1725), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **{'sans-serif': 'Microsoft YaHei'})\n", (1680, 1725), False, 'import matplotlib\n'), ((862, 891), 'matplotlib.ticker.PercentFormatter', 'mticker.PercentFormatter', (['(1.0)'], {}), '(1.0)\n', (886, 891), True, 'import matplotlib.ticker as mticker\n'), ((1575, 1604), 'matplotlib.ticker.PercentFormatter', 'mticker.PercentFormatter', (['(1.0)'], {}), '(1.0)\n', (1599, 1604), True, 'import matplotlib.ticker as mticker\n')]
|
#!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
import cellranger.analysis.io as analysis_io
import cellranger.analysis.constants as analysis_constants
import cellranger.h5_constants as h5_constants
import cellranger.io as cr_io
import cellranger.analysis.stats as analysis_stats
import collections
from irlb import irlb
import numpy as np
import os
import tables
# The RUNPCA stage attempts to run the PCA at this threshold, and if that
# fails it reruns at zero. In the event thresholding prevents us from
# returning the requested number of components and we are at this threshold
# value, we throw an exception.
DEFAULT_RUNPCA_THRESHOLD = 2
from sklearn.utils import sparsefuncs
class MatrixRankTooSmallException(Exception):
pass
PCA = collections.namedtuple('PCA', ['transformed_pca_matrix', 'components', 'variance_explained', 'dispersion', 'features_selected'])
def get_original_columns_used(cols_not_removed, cols_used_after_removal):
"""If a matrix is subset down to only have columns indexed by cols_not_removed, and then is further subset to
only contain cols_used_after removal, in that order, than this method returns the index of which columns in the old
matrix correspond the the columns in the new matrix."""
return [cols_not_removed[x] for x in cols_used_after_removal]
def run_pca(matrix, pca_features=None, pca_bcs=None, n_pca_components=None, random_state=None, min_count_threshold=0):
""" Run a PCA on the matrix using the IRLBA matrix factorization algorithm. Prior to the PCA analysis, the
matrix is modified so that all barcodes/columns have the same counts, and then the counts are transformed
by a log2(1+X) operation.
If desired, only a subset of features (e.g. sample rows) can be selected for PCA analysis. Each feature is ranked
by its dispersion relative to other features that have a similar mean count. The top `pca_features` as ranked by
this method will then be used for the PCA.
One can also select to subset number of barcodes to use (e.g. sample columns), but in this case they are simply
randomly sampled.
Args:
matrix (CountMatrix): The matrix to perform PCA on.
pca_features (int): Number of features to subset from matrix and use in PCA. The top pca_features ranked by
dispersion are used
pca_bcs (int): Number of barcodes to randomly sample for the matrix.
n_pca_components (int): How many PCA components should be used.
random_state (int): The seed for the RNG
min_count_threshold (int): The minimum sum of each row/column for that row/column to be passed to PCA
(this filter is prior to any subsetting that occurs).
Returns:
A PCA object
"""
if random_state is None:
random_state=analysis_constants.RANDOM_STATE
np.random.seed(0)
# Threshold the rows/columns of matrix, will throw error if an empty matrix results.
thresholded_matrix, _, thresholded_features = matrix.select_axes_above_threshold(min_count_threshold)
# If requested, we can subsample some of the barcodes to get a smaller matrix for PCA
pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
if pca_bcs is None:
pca_bcs = thresholded_matrix.bcs_dim
pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
elif pca_bcs < thresholded_matrix.bcs_dim:
pca_bc_indices = np.sort(np.random.choice(np.arange(thresholded_matrix.bcs_dim), size=pca_bcs, replace=False))
elif pca_bcs > thresholded_matrix.bcs_dim:
msg = ("You requested {} barcodes but the matrix after thresholding only "
"included {}, so the smaller amount is being used.").format(pca_bcs, thresholded_matrix.bcs_dim)
print(msg)
pca_bcs = thresholded_matrix.bcs_dim
pca_bc_indices = np.arange(thresholded_matrix.bcs_dim)
# If requested, select fewer features to use by selecting the features with highest normalized dispersion
if pca_features is None:
pca_features = thresholded_matrix.features_dim
elif pca_features > thresholded_matrix.features_dim:
msg = ("You requested {} features but the matrix after thresholding only included {} features,"
"so the smaller amount is being used.").format(pca_features, thresholded_matrix.features_dim)
print(msg)
pca_features = thresholded_matrix.features_dim
# Calc mean and variance of counts after normalizing
# But don't transform to log space, in order to preserve the mean-variance relationship
m = analysis_stats.normalize_by_umi(thresholded_matrix)
# Get mean and variance of rows
(mu, var) = analysis_stats.summarize_columns(m.T)
dispersion = analysis_stats.get_normalized_dispersion(mu.squeeze(), var.squeeze()) # TODO set number of bins?
pca_feature_indices = np.argsort(dispersion)[-pca_features:]
# Now determine how many components.
if n_pca_components is None:
n_pca_components = analysis_constants.PCA_N_COMPONENTS_DEFAULT
likely_matrix_rank = min(pca_features, pca_bcs)
if likely_matrix_rank < n_pca_components:
if min_count_threshold == DEFAULT_RUNPCA_THRESHOLD:
# Kick back to run_pca stage so it can retry with no threshold, this is for historical reasons
raise MatrixRankTooSmallException()
else:
print(("There are fewer nonzero features or barcodes ({}) than requested "
"PCA components ({}); reducing the number of components.").format(likely_matrix_rank, n_pca_components))
n_pca_components = likely_matrix_rank
if (likely_matrix_rank * 0.5) <= float(n_pca_components):
print("Requested number of PCA components is large relative to the matrix size, an exact approach to matrix factorization may be faster.")
# Note, after subsetting it is possible some rows/cols in pca_mat have counts below the threshold.
# However, we are not performing a second thresholding as in practice subsetting is not used and we explain
# that thresholding occurs prior to subsetting in the doc string.
pca_mat = thresholded_matrix.select_barcodes(pca_bc_indices).select_features(pca_feature_indices)
(pca_norm_mat, pca_center, pca_scale) = normalize_and_transpose(pca_mat)
(u, d, v, _, _) = irlb(pca_norm_mat, n_pca_components, center=pca_center.squeeze(), scale=pca_scale.squeeze(), random_state=random_state)
# make sure to project the matrix before centering, to avoid densification
(full_norm_mat, full_center, full_scale) = normalize_and_transpose(matrix)
sparsefuncs.inplace_column_scale(full_norm_mat, 1 / full_scale.squeeze()) # can have some zeros here
# Get a coordinate map so we know which columns in the old matrix correspond to columns in the new
org_cols_used = get_original_columns_used(thresholded_features, pca_feature_indices)
transformed_irlba_matrix = full_norm_mat[:,org_cols_used].dot(v) - (full_center / full_scale)[:,org_cols_used].dot(v)
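    # A hedged note on the identity used above: for column centering c and scaling s,
    # ((X - c) / s) @ v == (X / s) @ v - (c / s) @ v, so the sparse matrix can be
    # scaled in place and the dense rank-one centering term subtracted after the
    # projection, which avoids ever densifying X.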
irlba_components = np.zeros((n_pca_components, matrix.features_dim))
irlba_components[:,org_cols_used] = v.T
# calc proportion of variance explained
variance_sum = len(pca_feature_indices) # each feature has variance=1, mean=0 after normalization
variance_explained = np.square(d)/((len(pca_bc_indices)-1) * variance_sum)
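    # Why this is a proportion of variance: each of the len(pca_feature_indices)
    # normalized features has unit variance, so the total variance equals their
    # count, and the variance captured along PC i is d[i]**2 / (n_barcodes - 1).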
features_selected = np.array([f.id for f in matrix.feature_ref.feature_defs])[org_cols_used]
    # Project the dispersion back up to the full feature space for the return value.
full_dispersion = np.empty(matrix.features_dim)
full_dispersion[:] = np.nan
full_dispersion[thresholded_features] = dispersion
# sanity check dimensions
assert transformed_irlba_matrix.shape == (matrix.bcs_dim, n_pca_components)
assert irlba_components.shape == (n_pca_components, matrix.features_dim)
assert variance_explained.shape == (n_pca_components,)
return PCA(transformed_irlba_matrix, irlba_components, variance_explained, full_dispersion, features_selected)
def normalize_and_transpose(matrix):
matrix.tocsc()
m = analysis_stats.normalize_by_umi(matrix)
# Use log counts
m.data = np.log2(1 + m.data)
# Transpose
m = m.T
# compute centering (mean) and scaling (stdev)
(c,v) = analysis_stats.summarize_columns(m)
# TODO: Inputs to this function shouldn't have zero variance columns
v[np.where(v == 0.0)] = 1.0
s = np.sqrt(v)
return (m, c, s)
def get_irlb_mem_gb_from_matrix_dim(nonzero_entries):
irlba_mem_gb = round(np.ceil(1.0 * nonzero_entries / analysis_constants.NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB)) + analysis_constants.IRLB_BASE_MEM_GB
return h5_constants.MATRIX_MEM_GB_MULTIPLIER * max(h5_constants.MIN_MEM_GB, irlba_mem_gb)
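# Illustrative arithmetic (the constant values below are assumptions, not the
# real analysis_constants): with NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB = 8e6 and
# IRLB_BASE_MEM_GB = 1, a matrix holding 4e7 nonzero entries would request
# ceil(4e7 / 8e6) + 1 = 6 GB, before the multiplier and minimum are applied.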
def save_pca_csv(pca_map, matrix, base_dir):
save_pca_csv_with_bc_feature(pca_map, matrix.bcs, matrix.feature_ref.feature_defs, base_dir)
def save_pca_csv_with_bc_feature(pca_map, barcodes, features, base_dir):
    for n_components, pca in pca_map.items():
n_components_dir = os.path.join(base_dir, '%d_components' % n_components)
cr_io.makedirs(n_components_dir, allow_existing=True)
matrix_fn = os.path.join(n_components_dir, 'projection.csv')
n_columns = pca.transformed_pca_matrix.shape[1]
assert n_columns <= n_components
        matrix_header = ['Barcode'] + ['PC-%d' % (i+1) for i in range(n_columns)]
analysis_io.save_matrix_csv(matrix_fn, pca.transformed_pca_matrix, matrix_header,
barcodes)
# FBPCA presently provides 0-sized entries for the following PCA() member variables.
# This allows us to distinguish FBPCA from IRLBA, and also avoids weird empty files.
if pca.components.size > 0:
components_fn = os.path.join(n_components_dir, 'components.csv')
components_header = ['PC'] + [f.id for f in features]
analysis_io.save_matrix_csv(components_fn, pca.components, components_header,
range(1, n_components+1))
if pca.variance_explained.size > 0:
variance_fn = os.path.join(n_components_dir, 'variance.csv')
variance_header = ['PC','Proportion.Variance.Explained']
analysis_io.save_matrix_csv(variance_fn, pca.variance_explained, variance_header,
range(1, n_components+1))
if pca.dispersion.size > 0:
dispersion_fn = os.path.join(n_components_dir, 'dispersion.csv')
dispersion_header = ['Feature','Normalized.Dispersion']
analysis_io.save_matrix_csv(dispersion_fn, pca.dispersion, dispersion_header,
[f.id for f in features])
if pca.features_selected.size > 0:
features_fn = os.path.join(n_components_dir, 'features_selected.csv')
# TODO: there are two columns here, but only 1 entry in the header...BAD
features_header = ['Feature']
analysis_io.save_matrix_csv(features_fn, pca.features_selected, features_header, range(1, len(pca.features_selected)+1))
def save_pca_h5(pca_map, f):
group = f.create_group(f.root, analysis_constants.ANALYSIS_H5_PCA_GROUP)
    for n_components, pca in pca_map.items():
analysis_io.save_h5(f, group, str(n_components), pca)
def load_pca_from_h5(filename):
""" Load just the PCA info from an analysis h5 """
with tables.open_file(filename, 'r') as f:
group = f.root._v_groups[analysis_constants.ANALYSIS_H5_PCA_GROUP]
# Just take the first PCA object, assuming we never have multiple
for _, pca in analysis_io.load_h5_iter(group, PCA):
return pca
|
[
"cellranger.analysis.io.load_h5_iter",
"numpy.sqrt",
"cellranger.analysis.io.save_matrix_csv",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"numpy.where",
"cellranger.io.makedirs",
"numpy.empty",
"numpy.random.seed",
"numpy.ceil",
"collections.namedtuple",
"cellranger.analysis.stats.normalize_by_umi",
"tables.open_file",
"numpy.square",
"numpy.log2",
"cellranger.analysis.stats.summarize_columns",
"os.path.join",
"numpy.zeros"
] |
[((790, 922), 'collections.namedtuple', 'collections.namedtuple', (['"""PCA"""', "['transformed_pca_matrix', 'components', 'variance_explained', 'dispersion',\n 'features_selected']"], {}), "('PCA', ['transformed_pca_matrix', 'components',\n 'variance_explained', 'dispersion', 'features_selected'])\n", (812, 922), False, 'import collections\n'), ((2911, 2928), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2925, 2928), True, 'import numpy as np\n'), ((3237, 3274), 'numpy.arange', 'np.arange', (['thresholded_matrix.bcs_dim'], {}), '(thresholded_matrix.bcs_dim)\n', (3246, 3274), True, 'import numpy as np\n'), ((4639, 4690), 'cellranger.analysis.stats.normalize_by_umi', 'analysis_stats.normalize_by_umi', (['thresholded_matrix'], {}), '(thresholded_matrix)\n', (4670, 4690), True, 'import cellranger.analysis.stats as analysis_stats\n'), ((4743, 4780), 'cellranger.analysis.stats.summarize_columns', 'analysis_stats.summarize_columns', (['m.T'], {}), '(m.T)\n', (4775, 4780), True, 'import cellranger.analysis.stats as analysis_stats\n'), ((7113, 7162), 'numpy.zeros', 'np.zeros', (['(n_pca_components, matrix.features_dim)'], {}), '((n_pca_components, matrix.features_dim))\n', (7121, 7162), True, 'import numpy as np\n'), ((7605, 7634), 'numpy.empty', 'np.empty', (['matrix.features_dim'], {}), '(matrix.features_dim)\n', (7613, 7634), True, 'import numpy as np\n'), ((8151, 8190), 'cellranger.analysis.stats.normalize_by_umi', 'analysis_stats.normalize_by_umi', (['matrix'], {}), '(matrix)\n', (8182, 8190), True, 'import cellranger.analysis.stats as analysis_stats\n'), ((8226, 8245), 'numpy.log2', 'np.log2', (['(1 + m.data)'], {}), '(1 + m.data)\n', (8233, 8245), True, 'import numpy as np\n'), ((8339, 8374), 'cellranger.analysis.stats.summarize_columns', 'analysis_stats.summarize_columns', (['m'], {}), '(m)\n', (8371, 8374), True, 'import cellranger.analysis.stats as analysis_stats\n'), ((8489, 8499), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (8496, 8499), True, 'import numpy as np\n'), ((3369, 3406), 'numpy.arange', 'np.arange', (['thresholded_matrix.bcs_dim'], {}), '(thresholded_matrix.bcs_dim)\n', (3378, 3406), True, 'import numpy as np\n'), ((4922, 4944), 'numpy.argsort', 'np.argsort', (['dispersion'], {}), '(dispersion)\n', (4932, 4944), True, 'import numpy as np\n'), ((7379, 7391), 'numpy.square', 'np.square', (['d'], {}), '(d)\n', (7388, 7391), True, 'import numpy as np\n'), ((7457, 7514), 'numpy.array', 'np.array', (['[f.id for f in matrix.feature_ref.feature_defs]'], {}), '([f.id for f in matrix.feature_ref.feature_defs])\n', (7465, 7514), True, 'import numpy as np\n'), ((8454, 8472), 'numpy.where', 'np.where', (['(v == 0.0)'], {}), '(v == 0.0)\n', (8462, 8472), True, 'import numpy as np\n'), ((9115, 9169), 'os.path.join', 'os.path.join', (['base_dir', "('%d_components' % n_components)"], {}), "(base_dir, '%d_components' % n_components)\n", (9127, 9169), False, 'import os\n'), ((9178, 9231), 'cellranger.io.makedirs', 'cr_io.makedirs', (['n_components_dir'], {'allow_existing': '(True)'}), '(n_components_dir, allow_existing=True)\n', (9192, 9231), True, 'import cellranger.io as cr_io\n'), ((9253, 9301), 'os.path.join', 'os.path.join', (['n_components_dir', '"""projection.csv"""'], {}), "(n_components_dir, 'projection.csv')\n", (9265, 9301), False, 'import os\n'), ((9490, 9585), 'cellranger.analysis.io.save_matrix_csv', 'analysis_io.save_matrix_csv', (['matrix_fn', 'pca.transformed_pca_matrix', 'matrix_header', 'barcodes'], {}), '(matrix_fn, pca.transformed_pca_matrix,\n 
matrix_header, barcodes)\n', (9517, 9585), True, 'import cellranger.analysis.io as analysis_io\n'), ((11505, 11536), 'tables.open_file', 'tables.open_file', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (11521, 11536), False, 'import tables\n'), ((11714, 11750), 'cellranger.analysis.io.load_h5_iter', 'analysis_io.load_h5_iter', (['group', 'PCA'], {}), '(group, PCA)\n', (11738, 11750), True, 'import cellranger.analysis.io as analysis_io\n'), ((8601, 8692), 'numpy.ceil', 'np.ceil', (['(1.0 * nonzero_entries / analysis_constants.NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB)'], {}), '(1.0 * nonzero_entries / analysis_constants.\n NUM_IRLB_MATRIX_ENTRIES_PER_MEM_GB)\n', (8608, 8692), True, 'import numpy as np\n'), ((9865, 9913), 'os.path.join', 'os.path.join', (['n_components_dir', '"""components.csv"""'], {}), "(n_components_dir, 'components.csv')\n", (9877, 9913), False, 'import os\n'), ((10201, 10247), 'os.path.join', 'os.path.join', (['n_components_dir', '"""variance.csv"""'], {}), "(n_components_dir, 'variance.csv')\n", (10213, 10247), False, 'import os\n'), ((10536, 10584), 'os.path.join', 'os.path.join', (['n_components_dir', '"""dispersion.csv"""'], {}), "(n_components_dir, 'dispersion.csv')\n", (10548, 10584), False, 'import os\n'), ((10665, 10772), 'cellranger.analysis.io.save_matrix_csv', 'analysis_io.save_matrix_csv', (['dispersion_fn', 'pca.dispersion', 'dispersion_header', '[f.id for f in features]'], {}), '(dispersion_fn, pca.dispersion,\n dispersion_header, [f.id for f in features])\n', (10692, 10772), True, 'import cellranger.analysis.io as analysis_io\n'), ((10873, 10928), 'os.path.join', 'os.path.join', (['n_components_dir', '"""features_selected.csv"""'], {}), "(n_components_dir, 'features_selected.csv')\n", (10885, 10928), False, 'import os\n'), ((3905, 3942), 'numpy.arange', 'np.arange', (['thresholded_matrix.bcs_dim'], {}), '(thresholded_matrix.bcs_dim)\n', (3914, 3942), True, 'import numpy as np\n'), ((3504, 3541), 'numpy.arange', 'np.arange', (['thresholded_matrix.bcs_dim'], {}), '(thresholded_matrix.bcs_dim)\n', (3513, 3541), True, 'import numpy as np\n')]
|
# UDI dataset processing module
# modified from nuscenes_dataset.py
import json
import pickle
import time
import random
from copy import deepcopy
from functools import partial
from pathlib import Path
import subprocess
import fire
import numpy as np
import os
from second.core import box_np_ops
from second.core import preprocess as prep
from second.data import kitti_common as kitti
from second.data.dataset import Dataset, register_dataset
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.utils.progress_bar import progress_bar_iter as prog_bar
from second.utils.timer import simple_timer
@register_dataset
class UDIDataset(Dataset):
NumPointFeatures = 4
NameMapping = {
'car': 'car',
'pedestrian': 'pedestrian',
'cyclist': 'cyclist',
'truck': 'truck',
'forklift': 'forklift',
'golf car': 'golf car',
'motorcyclist': 'motorcyclist',
'bicycle': 'bicycle',
'motorbike': 'motorbike'
}
DefaultAttribute = {
"car": "object_action_parked",
"pedestrain": "object_action_walking",
"bicycle": "object_action_driving_straight_forward",
"motorcycle": "object_action_parked",
"other_vehicle": "object_action_driving_straight_forward",
"emergency_vehicle": "object_action_driving_straight_forward",
"truck": "object_action_parked",
"animal": "",
"bus": "object_action_driving_straight_forward",
}
def __init__(self,
root_path,
info_path,
class_names=None,
prep_func=None,
num_point_features=None):
self._root_path = Path(root_path)
self._info_path = Path(info_path)
with open(info_path, 'rb') as f:
data = pickle.load(f)
self._udi_infos = data["infos"]
self._metadata = data["metadata"]
self._class_names = class_names
self._prep_func = prep_func
self.version = self._metadata["version"]
self._with_velocity = False
def __len__(self):
return len(self._udi_infos)
def __getitem__(self, idx):
input_dict = self.get_sensor_data(idx)
example = self._prep_func(input_dict=input_dict)
example["metadata"] = input_dict["metadata"]
if "anchors_mask" in example:
example["anchors_mask"] = example["anchors_mask"].astype(np.uint8)
return example
def get_sensor_data(self, query):
idx = query
if isinstance(query, dict):
assert "lidar" in query
idx = query["lidar"]["idx"]
info = self._udi_infos[idx]
res = {
"lidar": {
"type": "lidar",
"points": None,
},
"metadata": {
"token": info["token"]
},
}
lidar_path = Path(info['lidar_path'])
points = np.fromfile(str(lidar_path), dtype=np.float32).reshape((-1,4))
points[:, 3] /= 255
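        # Hedged assumption: the raw intensity channel is stored as 0-255, and
        # dividing by 255 rescales it to [0, 1] before it reaches the network.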
res["lidar"]["points"] = points
if 'gt_boxes' in info:
res["lidar"]["annotations"] = {
'boxes': info["gt_boxes"],
'names': info["gt_names"]
}
return res
def evaluation_udi(self, detections, output_dir):
version = self.version
eval_set_map = {
# "v1.0-mini": "mini_train",
"v1.0-trainval": "val",
}
# gt_annos = self.ground_truth_annotations
# if gt_annos is None:
# return None
udi_annos = {}
mapped_class_names = self._class_names
token2info = {}
for info in self._udi_infos:
token2info[info["token"]] = info
for det in detections:
annos = []
boxes = _second_det_to_udi_box(det)
            for i, box in enumerate(boxes):
                name = mapped_class_names[box.label]
                velocity = box.velocity[:2].tolist()
                box.velocity = np.array([*velocity, 0.0])
nusc_anno = {
"sample_token": det["metadata"]["token"],
"translation": box.center.tolist(),
"size": box.wlh.tolist(),
"rotation": box.orientation.elements.tolist(),
"velocity": velocity,
"detection_name": name,
"detection_score": box.score,
"attribute_name": "",
}
annos.append(nusc_anno)
udi_annos[det["metadata"]["token"]] = annos
nusc_submissions = {
"meta": {
"use_camera": False,
"use_lidar": False,
"use_radar": False,
"use_map": False,
"use_external": False,
},
"results": udi_annos,
}
res_path = Path(output_dir) / "results_udi.json"
with open(res_path, "w") as f:
json.dump(nusc_submissions, f)
eval_main_file = Path(__file__).resolve().parent / "udi_eval.py"
# why add \"{}\"? to support path with spaces.
cmd = f"python3 {str(eval_main_file)} --root_path=\"{str(self._root_path)}\""
cmd += f" --info_path=\"{str(self._info_path)}\""
cmd += f" --version={self.version}"
cmd += f" --res_path=\"{str(res_path)}\" --eval_set={eval_set_map[self.version]}"
cmd += f" --output_dir=\"{output_dir}\""
        # Running the evaluation in a subprocess releases all nusc memory afterwards.
subprocess.check_output(cmd, shell=True)
with open(Path(output_dir) / "metrics_summary.json", "r") as f:
metrics = json.load(f)
detail = {}
        res_path.unlink()  # delete results_udi.json since it's very large
result = f"Nusc {version} Evaluation\n"
for name in mapped_class_names:
detail[name] = {}
for k, v in metrics["label_aps"][name].items():
detail[name][f"dist@{k}"] = v
tp_errs = []
tp_names = []
for k, v in metrics["label_tp_errors"][name].items():
detail[name][k] = v
tp_errs.append(f"{v:.4f}")
tp_names.append(k)
threshs = ', '.join(list(metrics["label_aps"][name].keys()))
scores = list(metrics["label_aps"][name].values())
scores = ', '.join([f"{s * 100:.2f}" for s in scores])
result += f"{name} Nusc dist AP@{threshs} and TP errors\n"
result += scores
result += "\n"
result += ', '.join(tp_names) + ": " + ', '.join(tp_errs)
result += "\n"
return {
"results": {
"nusc": result
},
"detail": {
"nusc": detail
},
}
def evaluation(self, detections, output_dir):
res_udi = self.evaluation_udi(detections, output_dir)
res = {
"results": {
"nusc": res_udi["result"]["nusc"],
},
"detail": {
"eval.nusc": res_udi["detail"]["nusc"],
},
}
return res
def _second_det_to_udi_box(detection):
from udi_eval import Box
import pyquaternion
box3d = detection["box3d_lidar"].detach().cpu().numpy()
scores = detection["scores"].detach().cpu().numpy()
labels = detection["label_preds"].detach().cpu().numpy()
box3d[:, 6] = -box3d[:, 6] - np.pi/2
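    # This exactly inverts the label-side conversion in _fill_train_infos
    # (gt yaw is stored as -yaw - pi/2): -(-yaw - pi/2) - pi/2 == yaw, so the
    # predicted boxes are mapped back to the original annotation convention.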
box_list = []
for i in range(box3d.shape[0]):
quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box3d[i,6])
velocity = (np.nan, np.nan, np.nan)
# if box3d.shape[1] == 9:
# velocity = (*box3d[i, 7:9], 0.0)
box = Box(
box3d[i, :3],
box3d[i, 3:6],
quat,
label=labels[i],
score=scores[i],
velocity=velocity)
box_list.append(box)
return box_list
# def _lidar_nusc_box_to_global(info, boxes, classes, eval_version="ICLR 2019"):
# import pyquaternion
# box_list = []
# for box in boxes:
# box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation']))
# box.translate(np.array(info['lidar2ego_translation']))
# box.rotate(pyquaternion.Quaternion(info['ego2global_rotation']))
# box.translate(np.array(info['ego2global_translation']))
# box_list.append(box)
# return box_list
# def _get_available_scenes(lyft):
# available_scenes = []
# print("total scene num:", len(lyft.scene))
# for scene in lyft.scene:
# scene_token = scene["token"]
# scene_rec = lyft.get('scene', scene_token)
# sample_rec = lyft.get('sample', scene_rec['first_sample_token'])
# sd_rec = lyft.get('sample_data', sample_rec['data']["LIDAR_TOP"])
# has_more_frames = True
# scene_not_exist = False
# while has_more_frames:
# lidar_path, boxes, _ = lyft.get_sample_data(sd_rec['token'])
# if not Path(lidar_path).exists():
# scenes_not_exist = True
# break
# else:
# break
# if not sd_rec['next'] == "":
# sd_rec = lyft.get('sample_data', sd_rec['next'])
# else:
# has_more_frames = False
# if scene_not_exist:
# continue
# available_scenes.append(scene)
# print("exist scene num:", len(available_scenes))
# return available_scenes
def _fill_train_infos(root_path):
train_udi_infos = []
lidar_root_path = root_path+ "/lidar"
label_root_path = root_path + "/label"
img_root_path = root_path + "/image"
filenames = os.listdir(lidar_root_path)
for filename in prog_bar(filenames):
index = filename.split(".")[0]
lidar_path = lidar_root_path + "/" + index + ".bin"
cam_path = img_root_path + "/" + index + ".jpg"
label_path = label_root_path + "/" + index + "_bin.json"
assert Path(lidar_path).exists()
assert Path(cam_path).exists()
assert Path(label_path).exists()
with open(label_path, encoding='utf-8') as f:
res = f.read()
result = json.loads(res)
boxes = result["elem"]
info = {
"lidar_path": lidar_path,
"cam_front_path": cam_path,
"filename": filename,
"token": int(index),
}
gt_locs_list = []
gt_dims_list = []
print("label file path:", label_path)
for box in boxes:
box_loc = box["position"]
box_size = box["size"]
box_loc_ = np.array([box_loc["x"],box_loc["y"], box_loc["z"]], dtype=np.float)
box_size_ = np.array([box_size["width"],box_size["depth"],box_size["height"]], dtype=np.float)
box_loc_ = box_loc_.reshape(-1, 3)
box_size_ = box_size_.reshape(-1, 3)
gt_locs_list.append(box_loc_)
gt_dims_list.append(box_size_)
locs = np.concatenate(gt_locs_list, axis=0)
dims = np.concatenate(gt_dims_list, axis=0)
rots = np.array([b["yaw"] for b in boxes]).reshape(-1, 1)
names = [b["class"] for b in boxes]
for i in range(len(names)):
if names[i] in UDIDataset.NameMapping:
names[i] = UDIDataset.NameMapping[names[i]]
names = np.array(names)
# we need to convert rot to SECOND format.
# change the rot format will break all checkpoint.
gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1)
info["gt_boxes"] = gt_boxes
info["gt_names"] = names
train_udi_infos.append(info)
return train_udi_infos
def create_udi_infos(root_path):
# root_path = Path(root_path)
root_path = str(root_path)
train_udi_infos = _fill_train_infos(root_path)
metadata = {
"version": "v0.1-train",
}
print(
f"train sample: {len(train_udi_infos)}"
)
data = {
"infos": train_udi_infos,
"metadata": metadata,
}
with open(root_path + "/infos_udi_train.pkl", 'wb') as f:
pickle.dump(data, f)
def get_box_mean(info_path, class_name="car"):
with open(info_path, 'rb') as f:
lyft_infos = pickle.load(f)["infos"]
gt_boxes_list = []
for info in lyft_infos:
gt_boxes = info["gt_boxes"]
gt_names = info["gt_names"]
mask = np.array([s == class_name for s in info["gt_names"]], dtype=np.bool_)
gt_names = gt_names[mask]
gt_boxes = gt_boxes[mask]
gt_boxes_list.append(gt_boxes.reshape(-1, 7))
gt_boxes_list = np.concatenate(gt_boxes_list, axis=0)
return {
"box3d": gt_boxes_list.mean(0).tolist(),
"detail": gt_boxes_list
}
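# Illustrative usage (hypothetical path): get_box_mean("infos_udi_train.pkl",
# "car") returns the mean [x, y, z, width, depth, height, yaw] over every
# 'car' box, the kind of statistic typically used to seed anchor sizes.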
def get_all_box_mean(info_path):
det_names = set()
for k, v in UDIDataset.NameMapping.items():
if v not in det_names:
det_names.add(v)
det_names = sorted(list(det_names))
res = {}
details = {}
for k in det_names:
result = get_box_mean(info_path, k)
details[k] = result["detail"]
res[k] = result["box3d"]
print(json.dumps(res, indent=2))
return details
if __name__ == "__main__":
fire.Fire()
|
[
"subprocess.check_output",
"json.loads",
"os.listdir",
"pickle.dump",
"fire.Fire",
"pathlib.Path",
"json.dumps",
"udi_eval.Box",
"pickle.load",
"numpy.array",
"numpy.concatenate",
"json.load",
"pyquaternion.Quaternion",
"json.dump",
"second.utils.progress_bar.progress_bar_iter"
] |
[((9955, 9982), 'os.listdir', 'os.listdir', (['lidar_root_path'], {}), '(lidar_root_path)\n', (9965, 9982), False, 'import os\n'), ((10004, 10023), 'second.utils.progress_bar.progress_bar_iter', 'prog_bar', (['filenames'], {}), '(filenames)\n', (10012, 10023), True, 'from second.utils.progress_bar import progress_bar_iter as prog_bar\n'), ((12921, 12958), 'numpy.concatenate', 'np.concatenate', (['gt_boxes_list'], {'axis': '(0)'}), '(gt_boxes_list, axis=0)\n', (12935, 12958), True, 'import numpy as np\n'), ((13521, 13532), 'fire.Fire', 'fire.Fire', ([], {}), '()\n', (13530, 13532), False, 'import fire\n'), ((1711, 1726), 'pathlib.Path', 'Path', (['root_path'], {}), '(root_path)\n', (1715, 1726), False, 'from pathlib import Path\n'), ((1753, 1768), 'pathlib.Path', 'Path', (['info_path'], {}), '(info_path)\n', (1757, 1768), False, 'from pathlib import Path\n'), ((2927, 2951), 'pathlib.Path', 'Path', (["info['lidar_path']"], {}), "(info['lidar_path'])\n", (2931, 2951), False, 'from pathlib import Path\n'), ((5764, 5804), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (5787, 5804), False, 'import subprocess\n'), ((7783, 7843), 'pyquaternion.Quaternion', 'pyquaternion.Quaternion', ([], {'axis': '[0, 0, 1]', 'radians': 'box3d[i, 6]'}), '(axis=[0, 0, 1], radians=box3d[i, 6])\n', (7806, 7843), False, 'import pyquaternion\n'), ((7982, 8077), 'udi_eval.Box', 'Box', (['box3d[i, :3]', 'box3d[i, 3:6]', 'quat'], {'label': 'labels[i]', 'score': 'scores[i]', 'velocity': 'velocity'}), '(box3d[i, :3], box3d[i, 3:6], quat, label=labels[i], score=scores[i],\n velocity=velocity)\n', (7985, 8077), False, 'from udi_eval import Box\n'), ((10466, 10481), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (10476, 10481), False, 'import json\n'), ((11287, 11323), 'numpy.concatenate', 'np.concatenate', (['gt_locs_list'], {'axis': '(0)'}), '(gt_locs_list, axis=0)\n', (11301, 11323), True, 'import numpy as np\n'), ((11339, 11375), 'numpy.concatenate', 'np.concatenate', (['gt_dims_list'], {'axis': '(0)'}), '(gt_dims_list, axis=0)\n', (11353, 11375), True, 'import numpy as np\n'), ((11650, 11665), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (11658, 11665), True, 'import numpy as np\n'), ((11795, 11850), 'numpy.concatenate', 'np.concatenate', (['[locs, dims, -rots - np.pi / 2]'], {'axis': '(1)'}), '([locs, dims, -rots - np.pi / 2], axis=1)\n', (11809, 11850), True, 'import numpy as np\n'), ((12420, 12440), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (12431, 12440), False, 'import pickle\n'), ((12709, 12780), 'numpy.array', 'np.array', (["[(s == class_name) for s in info['gt_names']]"], {'dtype': 'np.bool_'}), "([(s == class_name) for s in info['gt_names']], dtype=np.bool_)\n", (12717, 12780), True, 'import numpy as np\n'), ((13442, 13467), 'json.dumps', 'json.dumps', (['res'], {'indent': '(2)'}), '(res, indent=2)\n', (13452, 13467), False, 'import json\n'), ((1829, 1843), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1840, 1843), False, 'import pickle\n'), ((5111, 5127), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (5115, 5127), False, 'from pathlib import Path\n'), ((5200, 5230), 'json.dump', 'json.dump', (['nusc_submissions', 'f'], {}), '(nusc_submissions, f)\n', (5209, 5230), False, 'import json\n'), ((5899, 5911), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5908, 5911), False, 'import json\n'), ((10907, 10975), 'numpy.array', 'np.array', (["[box_loc['x'], box_loc['y'], box_loc['z']]"], 
{'dtype': 'np.float'}), "([box_loc['x'], box_loc['y'], box_loc['z']], dtype=np.float)\n", (10915, 10975), True, 'import numpy as np\n'), ((10999, 11088), 'numpy.array', 'np.array', (["[box_size['width'], box_size['depth'], box_size['height']]"], {'dtype': 'np.float'}), "([box_size['width'], box_size['depth'], box_size['height']], dtype=\n np.float)\n", (11007, 11088), True, 'import numpy as np\n'), ((12547, 12561), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (12558, 12561), False, 'import pickle\n'), ((4070, 4096), 'numpy.array', 'np.array', (['[*velocity, 0.0]'], {}), '([*velocity, 0.0])\n', (4078, 4096), True, 'import numpy as np\n'), ((10261, 10277), 'pathlib.Path', 'Path', (['lidar_path'], {}), '(lidar_path)\n', (10265, 10277), False, 'from pathlib import Path\n'), ((10302, 10316), 'pathlib.Path', 'Path', (['cam_path'], {}), '(cam_path)\n', (10306, 10316), False, 'from pathlib import Path\n'), ((10341, 10357), 'pathlib.Path', 'Path', (['label_path'], {}), '(label_path)\n', (10345, 10357), False, 'from pathlib import Path\n'), ((11391, 11426), 'numpy.array', 'np.array', (["[b['yaw'] for b in boxes]"], {}), "([b['yaw'] for b in boxes])\n", (11399, 11426), True, 'import numpy as np\n'), ((5823, 5839), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (5827, 5839), False, 'from pathlib import Path\n'), ((5256, 5270), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5260, 5270), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python
"""
Configure folder for Multicolor testing.
Hazen 01/18
"""
import argparse
import inspect
import numpy
import os
import pickle
import subprocess
import storm_analysis
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.sa_library.sa_h5py as saH5Py
import storm_analysis.simulator.background as background
import storm_analysis.simulator.camera as camera
import storm_analysis.simulator.drift as drift
import storm_analysis.simulator.photophysics as photophysics
import storm_analysis.simulator.psf as psf
import storm_analysis.simulator.simulate as simulate
import storm_analysis.sCMOS.scmos_analysis as scmos
import storm_analysis.diagnostics.multicolor.settings as settings
def testingParametersSCMOS():
"""
Create a sCMOS parameters object.
"""
params = parameters.ParametersSCMOS()
params.setAttr("max_frame", "int", -1)
params.setAttr("start_frame", "int", -1)
params.setAttr("background_sigma", "float", 8.0)
params.setAttr("camera_calibration", "filename", "calib.npy")
params.setAttr("find_max_radius", "int", 5)
params.setAttr("foreground_sigma", "float", 1.5)
params.setAttr("iterations", "int", settings.iterations)
params.setAttr("model", "string", "2dfixed")
params.setAttr("pixel_size", "float", settings.pixel_size)
params.setAttr("sigma", "float", 150.0/settings.pixel_size)
params.setAttr("threshold", "float", 6.0)
# Don't do tracking.
params.setAttr("descriptor", "string", "1")
params.setAttr("radius", "float", "0.0")
# Don't do drift-correction.
params.setAttr("d_scale", "int", 2)
params.setAttr("drift_correction", "int", 0)
params.setAttr("frame_step", "int", 500)
params.setAttr("z_correction", "int", 0)
return params
def testingParametersMC():
"""
Create a Multiplane parameters object.
"""
params = parameters.ParametersMultiplaneArb()
params.setAttr("max_frame", "int", -1)
params.setAttr("start_frame", "int", -1)
params.setAttr("background_sigma", "float", 8.0)
params.setAttr("find_max_radius", "int", 2)
params.setAttr("independent_heights", "int", settings.independent_heights)
params.setAttr("iterations", "int", settings.iterations)
params.setAttr("mapping", "filename", "map.map")
params.setAttr("no_fitting", "int", 0)
params.setAttr("pixel_size", "float", settings.pixel_size)
params.setAttr("sigma", "float", 1.5)
params.setAttr("threshold", "float", 6.0)
params.setAttr("weights", "filename", "weights.npy")
params.setAttr("z_value", "float-array", settings.z_value)
params.setAttr("channel0_cal", "filename", "calib.npy")
params.setAttr("channel1_cal", "filename", "calib.npy")
params.setAttr("channel2_cal", "filename", "calib.npy")
params.setAttr("channel3_cal", "filename", "calib.npy")
params.setAttr("channel0_ext", "string", "_c1.dax")
params.setAttr("channel1_ext", "string", "_c2.dax")
params.setAttr("channel2_ext", "string", "_c3.dax")
params.setAttr("channel3_ext", "string", "_c4.dax")
params.setAttr("channel0_offset", "int", 0)
params.setAttr("channel1_offset", "int", 0)
params.setAttr("channel2_offset", "int", 0)
params.setAttr("channel3_offset", "int", 0)
params.setAttr("spline0", "filename", "c1_psf.spline")
params.setAttr("spline1", "filename", "c2_psf.spline")
params.setAttr("spline2", "filename", "c3_psf.spline")
params.setAttr("spline3", "filename", "c4_psf.spline")
# Do tracking (localization color analysis depends on the tracks).
params.setAttr("descriptor", "string", "1")
params.setAttr("radius", "float", "1.0")
params.setAttr("max_z", "float", str(0.001 * settings.psf_z_range))
params.setAttr("min_z", "float", str(-0.001 * settings.psf_z_range))
# Don't do drift-correction.
params.setAttr("d_scale", "int", 2)
params.setAttr("drift_correction", "int", 0)
params.setAttr("frame_step", "int", 500)
params.setAttr("z_correction", "int", 0)
return params
def configure():
# Get relevant paths.
mm_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/micrometry/"
mp_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/multi_plane/"
sp_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/spliner/"
# Create analysis XML files.
#
print("Creating XML files.")
params = testingParametersSCMOS()
params.toXMLFile("scmos.xml")
params = testingParametersMC()
params.toXMLFile("multicolor.xml")
# Useful variables
aoi_size = int(settings.psf_size/2)+1
# Create sCMOS data and HDF5 files we'll need for the simulation.
#
if True:
# Create sCMOS camera calibration files.
#
numpy.save("calib.npy", [numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset,
numpy.ones((settings.y_size, settings.x_size)) * settings.camera_variance,
numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain,
1])
# Create localization on a grid file.
#
print("Creating gridded localizations.")
sim_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/simulator/"
subprocess.call(["python", sim_path + "emitters_on_grid.py",
"--bin", "grid_list.hdf5",
"--nx", str(settings.nx),
"--ny", str(settings.ny),
"--spacing", "20",
"--zrange", str(settings.test_z_range),
"--zoffset", str(settings.test_z_offset)])
# Create randomly located localizations file (for STORM movies).
#
print("Creating random localizations.")
subprocess.call(["python", sim_path + "emitters_uniform_random.py",
"--bin", "random_storm.hdf5",
"--density", "1.0",
"--margin", str(settings.margin),
"--sx", str(settings.x_size),
"--sy", str(settings.y_size),
"--zrange", str(settings.test_z_range)])
# Create randomly located localizations file (for mapping measurement).
#
print("Creating random localizations.")
subprocess.call(["python", sim_path + "emitters_uniform_random.py",
"--bin", "random_map.hdf5",
"--density", "0.0003",
"--margin", str(settings.margin),
"--sx", str(settings.x_size),
"--sy", str(settings.y_size)])
# Create sparser grid for PSF measurement.
#
print("Creating data for PSF measurement.")
sim_path = os.path.dirname(inspect.getfile(storm_analysis)) + "/simulator/"
subprocess.call(["python", sim_path + "emitters_on_grid.py",
"--bin", "psf_list.hdf5",
"--nx", "6",
"--ny", "3",
"--spacing", "40"])
## This part makes / tests measuring the mapping.
##
if True:
print("Measuring mapping.")
# Make localization files for simulations.
#
locs = saH5Py.loadLocalizations("random_map.hdf5")
locs["z"][:] = 1.0e-3 * settings.z_planes[0]
saH5Py.saveLocalizations("c1_random_map.hdf5", locs)
for i in range(1,4):
locs["x"] += settings.dx
locs["y"] += settings.dy
locs["z"][:] = settings.z_planes[i]
saH5Py.saveLocalizations("c" + str(i+1) + "_random_map.hdf5", locs)
# Make simulated mapping data.
#
bg_f = lambda s, x, y, h5 : background.UniformBackground(s, x, y, h5, photons = 10)
cam_f = lambda s, x, y, h5 : camera.SCMOS(s, x, y, h5, "calib.npy")
pp_f = lambda s, x, y, h5 : photophysics.AlwaysOn(s, x, y, h5, 20000.0)
psf_f = lambda s, x, y, i3 : psf.GaussianPSF(s, x, y, i3, settings.pixel_size)
sim = simulate.Simulate(background_factory = bg_f,
camera_factory = cam_f,
photophysics_factory = pp_f,
psf_factory = psf_f,
x_size = settings.x_size,
y_size = settings.y_size)
for i in range(4):
sim.simulate("c" + str(i+1) + "_map.dax", "c" + str(i+1) + "_random_map.hdf5", 1)
# Analyze simulated mapping data
#
for i in range(4):
scmos.analyze("c" + str(i+1) + "_map.dax", "c" + str(i+1) + "_map.hdf5", "scmos.xml")
# Measure mapping.
#
for i in range(3):
subprocess.call(["python", mm_path + "micrometry.py",
"--locs1", "c1_map.hdf5",
"--locs2", "c" + str(i+2) + "_map.hdf5",
"--results", "c1_c" + str(i+2) + "_map.map",
"--no_plots"])
# Merge mapping.
#
subprocess.call(["python", mm_path + "merge_maps.py",
"--results", "map.map",
"--maps", "c1_c2_map.map", "c1_c3_map.map", "c1_c4_map.map"])
# Print mapping.
#
if True:
print("Mapping is:")
subprocess.call(["python", mp_path + "print_mapping.py",
"--mapping", "map.map"])
print("")
# Check that mapping is close to what we expect (within 5%).
#
with open("map.map", 'rb') as fp:
mappings = pickle.load(fp)
for i in range(3):
if not numpy.allclose(mappings["0_" + str(i+1) + "_x"], numpy.array([settings.dx*(i+1), 1.0, 0.0]), rtol = 0.05, atol = 0.05):
print("X mapping difference for channel", i+1)
if not numpy.allclose(mappings["0_" + str(i+1) + "_y"], numpy.array([settings.dy*(i+1), 0.0, 1.0]), rtol = 0.05, atol = 0.05):
print("Y mapping difference for channel", i+1)
## This part measures / test the PSF measurement.
##
if True:
# Create drift file, this is used to displace the localizations in the
# PSF measurement movie.
#
dz = numpy.arange(-settings.psf_z_range, settings.psf_z_range + 0.05, 0.01)
drift_data = numpy.zeros((dz.size, 3))
drift_data[:,2] = dz
numpy.savetxt("drift.txt", drift_data)
# Also create the z-offset file.
#
z_offset = numpy.ones((dz.size, 2))
z_offset[:,1] = dz
numpy.savetxt("z_offset.txt", z_offset)
# Create simulated data for PSF measurements.
#
bg_f = lambda s, x, y, h5 : background.UniformBackground(s, x, y, h5, photons = 10)
cam_f = lambda s, x, y, h5 : camera.SCMOS(s, x, y, h5, "calib.npy")
drift_f = lambda s, x, y, h5 : drift.DriftFromFile(s, x, y, h5, "drift.txt")
pp_f = lambda s, x, y, h5 : photophysics.AlwaysOn(s, x, y, h5, 20000.0)
psf_f = lambda s, x, y, h5 : psf.PupilFunction(s, x, y, h5, settings.pixel_size, [])
sim = simulate.Simulate(background_factory = bg_f,
camera_factory = cam_f,
drift_factory = drift_f,
photophysics_factory = pp_f,
psf_factory = psf_f,
x_size = settings.x_size,
y_size = settings.y_size)
if True:
for i in range(4):
sim.simulate("c" + str(i+1) + "_zcal.dax",
"c" + str(i+1) + "_random_map.hdf5",
dz.size)
# Get localizations to use for PSF measurement.
#
subprocess.call(["python", mp_path + "psf_localizations.py",
"--bin", "c1_map_ref.hdf5",
"--map", "map.map",
"--aoi_size", str(aoi_size)])
# Create PSF z stacks.
#
for i in range(4):
subprocess.call(["python", mp_path + "psf_zstack.py",
"--movie", "c" + str(i+1) + "_zcal.dax",
"--bin", "c1_map_ref_c" + str(i+1) + "_psf.hdf5",
"--zstack", "c" + str(i+1) + "_zstack",
"--scmos_cal", "calib.npy",
"--aoi_size", str(aoi_size)])
# Measure PSF.
#
for i in range(4):
subprocess.call(["python", mp_path + "measure_psf.py",
"--zstack", "c" + str(i+1) + "_zstack.npy",
"--zoffsets", "z_offset.txt",
"--psf_name", "c" + str(i+1) + "_psf_normed.psf",
"--z_range", str(settings.psf_z_range),
"--normalize"])
## This part creates the splines.
##
if True:
print("Measuring Splines.")
for i in range(4):
subprocess.call(["python", sp_path + "psf_to_spline.py",
"--psf", "c" + str(i+1) + "_psf_normed.psf",
"--spline", "c" + str(i+1) + "_psf.spline",
"--spline_size", str(settings.psf_size)])
## This part measures the Cramer-Rao weights.
##
if True:
print("Calculating weights.")
subprocess.call(["python", mp_path + "plane_weighting.py",
"--background", str(settings.photons[0][0]),
"--photons", str(settings.photons[0][1]),
"--output", "weights.npy",
"--xml", "multicolor.xml",
"--no_plots"])
if (__name__ == "__main__"):
configure()
|
[
"storm_analysis.simulator.camera.SCMOS",
"storm_analysis.simulator.drift.DriftFromFile",
"numpy.array",
"numpy.arange",
"storm_analysis.simulator.simulate.Simulate",
"storm_analysis.sa_library.sa_h5py.loadLocalizations",
"storm_analysis.simulator.psf.GaussianPSF",
"inspect.getfile",
"storm_analysis.simulator.photophysics.AlwaysOn",
"subprocess.call",
"storm_analysis.simulator.psf.PupilFunction",
"numpy.ones",
"storm_analysis.sa_library.parameters.ParametersSCMOS",
"pickle.load",
"storm_analysis.simulator.background.UniformBackground",
"numpy.savetxt",
"storm_analysis.sa_library.sa_h5py.saveLocalizations",
"storm_analysis.sa_library.parameters.ParametersMultiplaneArb",
"numpy.zeros"
] |
[((832, 860), 'storm_analysis.sa_library.parameters.ParametersSCMOS', 'parameters.ParametersSCMOS', ([], {}), '()\n', (858, 860), True, 'import storm_analysis.sa_library.parameters as parameters\n'), ((1922, 1958), 'storm_analysis.sa_library.parameters.ParametersMultiplaneArb', 'parameters.ParametersMultiplaneArb', ([], {}), '()\n', (1956, 1958), True, 'import storm_analysis.sa_library.parameters as parameters\n'), ((7043, 7179), 'subprocess.call', 'subprocess.call', (["['python', sim_path + 'emitters_on_grid.py', '--bin', 'psf_list.hdf5',\n '--nx', '6', '--ny', '3', '--spacing', '40']"], {}), "(['python', sim_path + 'emitters_on_grid.py', '--bin',\n 'psf_list.hdf5', '--nx', '6', '--ny', '3', '--spacing', '40'])\n", (7058, 7179), False, 'import subprocess\n'), ((7469, 7512), 'storm_analysis.sa_library.sa_h5py.loadLocalizations', 'saH5Py.loadLocalizations', (['"""random_map.hdf5"""'], {}), "('random_map.hdf5')\n", (7493, 7512), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((7574, 7626), 'storm_analysis.sa_library.sa_h5py.saveLocalizations', 'saH5Py.saveLocalizations', (['"""c1_random_map.hdf5"""', 'locs'], {}), "('c1_random_map.hdf5', locs)\n", (7598, 7626), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((7935, 7978), 'storm_analysis.sa_library.sa_h5py.loadLocalizations', 'saH5Py.loadLocalizations', (['"""random_map.hdf5"""'], {}), "('random_map.hdf5')\n", (7959, 7978), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((8040, 8092), 'storm_analysis.sa_library.sa_h5py.saveLocalizations', 'saH5Py.saveLocalizations', (['"""c1_random_map.hdf5"""', 'locs'], {}), "('c1_random_map.hdf5', locs)\n", (8064, 8092), True, 'import storm_analysis.sa_library.sa_h5py as saH5Py\n'), ((8733, 8899), 'storm_analysis.simulator.simulate.Simulate', 'simulate.Simulate', ([], {'background_factory': 'bg_f', 'camera_factory': 'cam_f', 'photophysics_factory': 'pp_f', 'psf_factory': 'psf_f', 'x_size': 'settings.x_size', 'y_size': 'settings.y_size'}), '(background_factory=bg_f, camera_factory=cam_f,\n photophysics_factory=pp_f, psf_factory=psf_f, x_size=settings.x_size,\n y_size=settings.y_size)\n', (8750, 8899), True, 'import storm_analysis.simulator.simulate as simulate\n'), ((9785, 9928), 'subprocess.call', 'subprocess.call', (["['python', mm_path + 'merge_maps.py', '--results', 'map.map', '--maps',\n 'c1_c2_map.map', 'c1_c3_map.map', 'c1_c4_map.map']"], {}), "(['python', mm_path + 'merge_maps.py', '--results',\n 'map.map', '--maps', 'c1_c2_map.map', 'c1_c3_map.map', 'c1_c4_map.map'])\n", (9800, 9928), False, 'import subprocess\n'), ((11023, 11093), 'numpy.arange', 'numpy.arange', (['(-settings.psf_z_range)', '(settings.psf_z_range + 0.05)', '(0.01)'], {}), '(-settings.psf_z_range, settings.psf_z_range + 0.05, 0.01)\n', (11035, 11093), False, 'import numpy\n'), ((11115, 11140), 'numpy.zeros', 'numpy.zeros', (['(dz.size, 3)'], {}), '((dz.size, 3))\n', (11126, 11140), False, 'import numpy\n'), ((11178, 11216), 'numpy.savetxt', 'numpy.savetxt', (['"""drift.txt"""', 'drift_data'], {}), "('drift.txt', drift_data)\n", (11191, 11216), False, 'import numpy\n'), ((11288, 11312), 'numpy.ones', 'numpy.ones', (['(dz.size, 2)'], {}), '((dz.size, 2))\n', (11298, 11312), False, 'import numpy\n'), ((11348, 11387), 'numpy.savetxt', 'numpy.savetxt', (['"""z_offset.txt"""', 'z_offset'], {}), "('z_offset.txt', z_offset)\n", (11361, 11387), False, 'import numpy\n'), ((11894, 12083), 'storm_analysis.simulator.simulate.Simulate', 'simulate.Simulate', ([], {'background_factory': 'bg_f', 
'camera_factory': 'cam_f', 'drift_factory': 'drift_f', 'photophysics_factory': 'pp_f', 'psf_factory': 'psf_f', 'x_size': 'settings.x_size', 'y_size': 'settings.y_size'}), '(background_factory=bg_f, camera_factory=cam_f,\n drift_factory=drift_f, photophysics_factory=pp_f, psf_factory=psf_f,\n x_size=settings.x_size, y_size=settings.y_size)\n', (11911, 12083), True, 'import storm_analysis.simulator.simulate as simulate\n'), ((4186, 4217), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', (4201, 4217), False, 'import inspect\n'), ((4266, 4297), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', (4281, 4297), False, 'import inspect\n'), ((4347, 4378), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', (4362, 4378), False, 'import inspect\n'), ((8419, 8472), 'storm_analysis.simulator.background.UniformBackground', 'background.UniformBackground', (['s', 'x', 'y', 'h5'], {'photons': '(10)'}), '(s, x, y, h5, photons=10)\n', (8447, 8472), True, 'import storm_analysis.simulator.background as background\n'), ((8512, 8550), 'storm_analysis.simulator.camera.SCMOS', 'camera.SCMOS', (['s', 'x', 'y', 'h5', '"""calib.npy"""'], {}), "(s, x, y, h5, 'calib.npy')\n", (8524, 8550), True, 'import storm_analysis.simulator.camera as camera\n'), ((8587, 8630), 'storm_analysis.simulator.photophysics.AlwaysOn', 'photophysics.AlwaysOn', (['s', 'x', 'y', 'h5', '(20000.0)'], {}), '(s, x, y, h5, 20000.0)\n', (8608, 8630), True, 'import storm_analysis.simulator.photophysics as photophysics\n'), ((8668, 8717), 'storm_analysis.simulator.psf.GaussianPSF', 'psf.GaussianPSF', (['s', 'x', 'y', 'i3', 'settings.pixel_size'], {}), '(s, x, y, i3, settings.pixel_size)\n', (8683, 8717), True, 'import storm_analysis.simulator.psf as psf\n'), ((10081, 10166), 'subprocess.call', 'subprocess.call', (["['python', mp_path + 'print_mapping.py', '--mapping', 'map.map']"], {}), "(['python', mp_path + 'print_mapping.py', '--mapping',\n 'map.map'])\n", (10096, 10166), False, 'import subprocess\n'), ((10359, 10374), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (10370, 10374), False, 'import pickle\n'), ((11489, 11542), 'storm_analysis.simulator.background.UniformBackground', 'background.UniformBackground', (['s', 'x', 'y', 'h5'], {'photons': '(10)'}), '(s, x, y, h5, photons=10)\n', (11517, 11542), True, 'import storm_analysis.simulator.background as background\n'), ((11582, 11620), 'storm_analysis.simulator.camera.SCMOS', 'camera.SCMOS', (['s', 'x', 'y', 'h5', '"""calib.npy"""'], {}), "(s, x, y, h5, 'calib.npy')\n", (11594, 11620), True, 'import storm_analysis.simulator.camera as camera\n'), ((11660, 11705), 'storm_analysis.simulator.drift.DriftFromFile', 'drift.DriftFromFile', (['s', 'x', 'y', 'h5', '"""drift.txt"""'], {}), "(s, x, y, h5, 'drift.txt')\n", (11679, 11705), True, 'import storm_analysis.simulator.drift as drift\n'), ((11742, 11785), 'storm_analysis.simulator.photophysics.AlwaysOn', 'photophysics.AlwaysOn', (['s', 'x', 'y', 'h5', '(20000.0)'], {}), '(s, x, y, h5, 20000.0)\n', (11763, 11785), True, 'import storm_analysis.simulator.photophysics as photophysics\n'), ((11823, 11878), 'storm_analysis.simulator.psf.PupilFunction', 'psf.PupilFunction', (['s', 'x', 'y', 'h5', 'settings.pixel_size', '[]'], {}), '(s, x, y, h5, settings.pixel_size, [])\n', (11840, 11878), True, 'import storm_analysis.simulator.psf as psf\n'), ((5359, 5390), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', 
(5374, 5390), False, 'import inspect\n'), ((6986, 7017), 'inspect.getfile', 'inspect.getfile', (['storm_analysis'], {}), '(storm_analysis)\n', (7001, 7017), False, 'import inspect\n'), ((4891, 4938), 'numpy.zeros', 'numpy.zeros', (['(settings.y_size, settings.x_size)'], {}), '((settings.y_size, settings.x_size))\n', (4902, 4938), False, 'import numpy\n'), ((4998, 5044), 'numpy.ones', 'numpy.ones', (['(settings.y_size, settings.x_size)'], {}), '((settings.y_size, settings.x_size))\n', (5008, 5044), False, 'import numpy\n'), ((5106, 5152), 'numpy.ones', 'numpy.ones', (['(settings.y_size, settings.x_size)'], {}), '((settings.y_size, settings.x_size))\n', (5116, 5152), False, 'import numpy\n'), ((10471, 10517), 'numpy.array', 'numpy.array', (['[settings.dx * (i + 1), 1.0, 0.0]'], {}), '([settings.dx * (i + 1), 1.0, 0.0])\n', (10482, 10517), False, 'import numpy\n'), ((10673, 10719), 'numpy.array', 'numpy.array', (['[settings.dy * (i + 1), 0.0, 1.0]'], {}), '([settings.dy * (i + 1), 0.0, 1.0])\n', (10684, 10719), False, 'import numpy\n')]
|
import numpy as np
import scipy.spatial as spatial
def bilinear_interpolate(img, coords):
""" Interpolates over every image channel
http://en.wikipedia.org/wiki/Bilinear_interpolation
:param img: max 3 channel image
:param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords
:returns: array of interpolated pixels with same shape as coords
"""
int_coords = np.int32(coords)
x0, y0 = int_coords
dx, dy = coords - int_coords
# 4 Neighour pixels
q11 = img[y0, x0]
q21 = img[y0, x0+1]
q12 = img[y0+1, x0]
q22 = img[y0+1, x0+1]
btm = q21.T * dx + q11.T * (1 - dx)
top = q22.T * dx + q12.T * (1 - dx)
inter_pixel = top * dy + btm * (1 - dy)
return inter_pixel.T
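# A quick sanity check (illustrative, not part of the original module): on the
# 2x2 single-channel ramp [[0, 1], [2, 3]], interpolating at (x, y) = (0.5, 0.5)
# blends all four neighbour pixels equally and returns 1.5:
#   img = np.arange(4, dtype=float).reshape(2, 2, 1)
#   bilinear_interpolate(img, np.array([[0.5], [0.5]]))  # -> [[1.5]]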
def grid_coordinates(points):
""" x,y grid coordinates within the ROI of supplied points
:param points: points to generate grid coordinates
:returns: array of (x, y) coordinates
"""
xmin = np.min(points[:, 0])
xmax = np.max(points[:, 0]) + 1
ymin = np.min(points[:, 1])
ymax = np.max(points[:, 1]) + 1
return np.asarray([(x, y) for y in range(ymin, ymax)
for x in range(xmin, xmax)], np.uint32)
def process_warp(src_img, result_img, tri_affines, dst_points, delaunay):
"""
Warp each triangle from the src_image only within the
ROI of the destination image (points in dst_points).
"""
roi_coords = grid_coordinates(dst_points)
# indices to vertices. -1 if pixel is not in any triangle
roi_tri_indices = delaunay.find_simplex(roi_coords)
for simplex_index in range(len(delaunay.simplices)):
coords = roi_coords[roi_tri_indices == simplex_index]
num_coords = len(coords)
out_coords = np.dot(tri_affines[simplex_index],
np.vstack((coords.T, np.ones(num_coords))))
x, y = coords.T
result_img[y, x] = bilinear_interpolate(src_img, out_coords)
return None
def triangular_affine_matrices(vertices, src_points, dest_points):
"""
Calculate the affine transformation matrix for each
triangle (x,y) vertex from dest_points to src_points
:param vertices: array of triplet indices to corners of triangle
:param src_points: array of [x, y] points to landmarks for source image
:param dest_points: array of [x, y] points to landmarks for destination image
:returns: 2 x 3 affine matrix transformation for a triangle
"""
ones = [1, 1, 1]
for tri_indices in vertices:
src_tri = np.vstack((src_points[tri_indices, :].T, ones))
dst_tri = np.vstack((dest_points[tri_indices, :].T, ones))
mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :]
yield mat
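# In homogeneous coordinates the affine map A satisfies A @ dst_tri == src_tri
# for the three triangle corners, so A = src_tri @ inv(dst_tri); only the first
# two rows are kept because the last row of an affine matrix is always [0, 0, 1].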
def warp_image(src_img, src_points, dest_points, dest_shape, dtype=np.uint8):
# Resultant image will not have an alpha channel
num_chans = 3
src_img = src_img[:, :, :3]
rows, cols = dest_shape[:2]
result_img = np.zeros((rows, cols, num_chans), dtype)
delaunay = spatial.Delaunay(dest_points)
tri_affines = np.asarray(list(triangular_affine_matrices(
delaunay.simplices, src_points, dest_points)))
process_warp(src_img, result_img, tri_affines, dest_points, delaunay)
return result_img
def test_local():
from functools import partial
import cv2
import scipy.misc
import locator
import aligner
from matplotlib import pyplot as plt
# Load source image
face_points_func = partial(locator.face_points, '../data')
base_path = '../females/Screenshot 2015-03-04 17.11.12.png'
src_path = '../females/BlDmB5QCYAAY8iw.jpg'
src_img = cv2.imread(src_path)
# Define control points for warps
src_points = face_points_func(src_path)
base_img = cv2.imread(base_path)
base_points = face_points_func(base_path)
size = (600, 500)
src_img, src_points = aligner.resize_align(src_img, src_points, size)
base_img, base_points = aligner.resize_align(base_img, base_points, size)
result_points = locator.weighted_average_points(src_points, base_points, 0.2)
# Perform transform
dst_img1 = warp_image(src_img, src_points, result_points, size)
dst_img2 = warp_image(base_img, base_points, result_points, size)
import blender
ave = blender.weighted_average(dst_img1, dst_img2, 0.6)
mask = blender.mask_from_points(size, result_points)
blended_img = blender.poisson_blend(dst_img1, dst_img2, mask)
plt.subplot(2, 2, 1)
plt.imshow(ave)
plt.subplot(2, 2, 2)
plt.imshow(dst_img1)
plt.subplot(2, 2, 3)
plt.imshow(dst_img2)
plt.subplot(2, 2, 4)
plt.imshow(blended_img)
plt.show()
if __name__ == "__main__":
test_local()
|
[
"matplotlib.pyplot.imshow",
"numpy.ones",
"numpy.int32",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.subplot",
"numpy.zeros",
"numpy.linalg.inv",
"functools.partial",
"locator.weighted_average_points",
"blender.weighted_average",
"scipy.spatial.Delaunay",
"numpy.vstack",
"blender.mask_from_points",
"blender.poisson_blend",
"aligner.resize_align",
"cv2.imread",
"matplotlib.pyplot.show"
] |
[((381, 397), 'numpy.int32', 'np.int32', (['coords'], {}), '(coords)\n', (389, 397), True, 'import numpy as np\n'), ((906, 926), 'numpy.min', 'np.min', (['points[:, 0]'], {}), '(points[:, 0])\n', (912, 926), True, 'import numpy as np\n'), ((970, 990), 'numpy.min', 'np.min', (['points[:, 1]'], {}), '(points[:, 1])\n', (976, 990), True, 'import numpy as np\n'), ((2801, 2841), 'numpy.zeros', 'np.zeros', (['(rows, cols, num_chans)', 'dtype'], {}), '((rows, cols, num_chans), dtype)\n', (2809, 2841), True, 'import numpy as np\n'), ((2856, 2885), 'scipy.spatial.Delaunay', 'spatial.Delaunay', (['dest_points'], {}), '(dest_points)\n', (2872, 2885), True, 'import scipy.spatial as spatial\n'), ((3292, 3331), 'functools.partial', 'partial', (['locator.face_points', '"""../data"""'], {}), "(locator.face_points, '../data')\n", (3299, 3331), False, 'from functools import partial\n'), ((3452, 3472), 'cv2.imread', 'cv2.imread', (['src_path'], {}), '(src_path)\n', (3462, 3472), False, 'import cv2\n'), ((3565, 3586), 'cv2.imread', 'cv2.imread', (['base_path'], {}), '(base_path)\n', (3575, 3586), False, 'import cv2\n'), ((3676, 3723), 'aligner.resize_align', 'aligner.resize_align', (['src_img', 'src_points', 'size'], {}), '(src_img, src_points, size)\n', (3696, 3723), False, 'import aligner\n'), ((3750, 3799), 'aligner.resize_align', 'aligner.resize_align', (['base_img', 'base_points', 'size'], {}), '(base_img, base_points, size)\n', (3770, 3799), False, 'import aligner\n'), ((3818, 3879), 'locator.weighted_average_points', 'locator.weighted_average_points', (['src_points', 'base_points', '(0.2)'], {}), '(src_points, base_points, 0.2)\n', (3849, 3879), False, 'import locator\n'), ((4063, 4112), 'blender.weighted_average', 'blender.weighted_average', (['dst_img1', 'dst_img2', '(0.6)'], {}), '(dst_img1, dst_img2, 0.6)\n', (4087, 4112), False, 'import blender\n'), ((4122, 4167), 'blender.mask_from_points', 'blender.mask_from_points', (['size', 'result_points'], {}), '(size, result_points)\n', (4146, 4167), False, 'import blender\n'), ((4184, 4231), 'blender.poisson_blend', 'blender.poisson_blend', (['dst_img1', 'dst_img2', 'mask'], {}), '(dst_img1, dst_img2, mask)\n', (4205, 4231), False, 'import blender\n'), ((4235, 4255), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (4246, 4255), True, 'from matplotlib import pyplot as plt\n'), ((4258, 4273), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ave'], {}), '(ave)\n', (4268, 4273), True, 'from matplotlib import pyplot as plt\n'), ((4276, 4296), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (4287, 4296), True, 'from matplotlib import pyplot as plt\n'), ((4299, 4319), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dst_img1'], {}), '(dst_img1)\n', (4309, 4319), True, 'from matplotlib import pyplot as plt\n'), ((4322, 4342), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (4333, 4342), True, 'from matplotlib import pyplot as plt\n'), ((4345, 4365), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dst_img2'], {}), '(dst_img2)\n', (4355, 4365), True, 'from matplotlib import pyplot as plt\n'), ((4368, 4388), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (4379, 4388), True, 'from matplotlib import pyplot as plt\n'), ((4392, 4415), 'matplotlib.pyplot.imshow', 'plt.imshow', (['blended_img'], {}), '(blended_img)\n', (4402, 4415), True, 'from matplotlib import pyplot as plt\n'), ((4418, 4428), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (4426, 4428), True, 'from matplotlib import pyplot as plt\n'), ((936, 956), 'numpy.max', 'np.max', (['points[:, 0]'], {}), '(points[:, 0])\n', (942, 956), True, 'import numpy as np\n'), ((1000, 1020), 'numpy.max', 'np.max', (['points[:, 1]'], {}), '(points[:, 1])\n', (1006, 1020), True, 'import numpy as np\n'), ((2397, 2444), 'numpy.vstack', 'np.vstack', (['(src_points[tri_indices, :].T, ones)'], {}), '((src_points[tri_indices, :].T, ones))\n', (2406, 2444), True, 'import numpy as np\n'), ((2459, 2507), 'numpy.vstack', 'np.vstack', (['(dest_points[tri_indices, :].T, ones)'], {}), '((dest_points[tri_indices, :].T, ones))\n', (2468, 2507), True, 'import numpy as np\n'), ((2534, 2556), 'numpy.linalg.inv', 'np.linalg.inv', (['dst_tri'], {}), '(dst_tri)\n', (2547, 2556), True, 'import numpy as np\n'), ((1737, 1756), 'numpy.ones', 'np.ones', (['num_coords'], {}), '(num_coords)\n', (1744, 1756), True, 'import numpy as np\n')]
|
import face_embedding
import argparse
import cv2
import numpy as np
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--model', default='../models/model-r34-amf/model,0', help='path to load model.')
parser.add_argument('--gpu', default=None, type=int, help='gpu id')
parser.add_argument('--det', default=2, type=int, help='mtcnn option, 2 means using R+O, else using O')
parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
args = parser.parse_args()
if __name__ == '__main__':
model = face_embedding.FaceModel(args)
img = cv2.imread('/Users/aub3/1.jpg')
f1 = model.get_feature(img)
img = cv2.imread('/Users/aub3/2.jpg')
f2 = model.get_feature(img)
img = cv2.imread('/Users/aub3/3.jpg')
f3 = model.get_feature(img)
dist1 = np.sum(np.square(f1-f2))
dist2 = np.sum(np.square(f1-f3))
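    # Hedged note: these are squared L2 distances between embeddings; the
    # --threshold default of 1.24 above is the verification cutoff, so a pair
    # would presumably be judged the same identity when dist < threshold.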
    print(dist1, dist2)
|
[
"face_embedding.FaceModel",
"cv2.imread",
"argparse.ArgumentParser",
"numpy.square"
] |
[((78, 132), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""face model test"""'}), "(description='face model test')\n", (101, 132), False, 'import argparse\n'), ((718, 748), 'face_embedding.FaceModel', 'face_embedding.FaceModel', (['args'], {}), '(args)\n', (742, 748), False, 'import face_embedding\n'), ((759, 790), 'cv2.imread', 'cv2.imread', (['"""/Users/aub3/1.jpg"""'], {}), "('/Users/aub3/1.jpg')\n", (769, 790), False, 'import cv2\n'), ((833, 864), 'cv2.imread', 'cv2.imread', (['"""/Users/aub3/2.jpg"""'], {}), "('/Users/aub3/2.jpg')\n", (843, 864), False, 'import cv2\n'), ((907, 938), 'cv2.imread', 'cv2.imread', (['"""/Users/aub3/3.jpg"""'], {}), "('/Users/aub3/3.jpg')\n", (917, 938), False, 'import cv2\n'), ((990, 1008), 'numpy.square', 'np.square', (['(f1 - f2)'], {}), '(f1 - f2)\n', (999, 1008), True, 'import numpy as np\n'), ((1027, 1045), 'numpy.square', 'np.square', (['(f1 - f3)'], {}), '(f1 - f3)\n', (1036, 1045), True, 'import numpy as np\n')]
|
import os
import datetime
import h5py
import numpy as np
DEFAULT_DTYPE = np.dtype([
('datetime', np.int64),
('open', np.float),
('close', np.float),
('high', np.float),
('low', np.float),
('limit_up', np.float),
('limit_down', np.float),
('volume', np.float),
('total_turnover', np.float),
('settlement', np.float),
('prev_settlement', np.float),
])
class Kline2HDF5:
def __init__(self, fo_name):
self._timeformat = "%Y-%m-%d %H:%M:%S"
self._create_output_file(fo_name)
def _create_output_file(self, fo_name):
self._fo = h5py.File(fo_name, "w")
def finished(self):
self._fo.close()
def translate(self, fi_name, symbol=None):
print(fi_name, symbol)
fi = open(fi_name, 'r')
if not symbol:
symbol = os.path.basename(fi_name).split('.')[0]
res = []
lines = fi.readlines()
last_dt = None
for line in lines:
vars = line.strip('\n').split(',')
datetime_array = datetime.datetime.fromtimestamp(int(vars[0]))
if last_dt:
delta = datetime_array - last_dt
if delta.days >= 1 and 20 <= datetime_array.hour <= 24:
# datetime_array = datetime_array - datetime.timedelta(days=1)
datetime_array = datetime_array.replace(day=last_dt.day, month=last_dt.month)
if datetime_array <= last_dt:
print(line)
print(datetime_array)
print(datetime_array.second)
print(last_dt)
print(last_dt.hour)
assert False
datetime_str = datetime_array.strftime("%Y%m%d%H%M%S")
# t = int(vars[0])
o = float(vars[1])
h = float(vars[2])
l = float(vars[3])
c = float(vars[4])
v = float(vars[5])
res.append((datetime_str, o, c, h, l, o * 1.1, o * 0.9, v, -1, -1, -1))
last_dt = datetime_array
fi.close()
res_array = np.asarray(res, dtype=DEFAULT_DTYPE)
self._fo.create_dataset(symbol, data=res_array)
return True
if __name__ == '__main__':
rq2h5 = Kline2HDF5("futures_min_test.h5")
rq2h5.translate("/Users/zhifeng/rqalpha/data/rqdata/I88-4.csv", "I88")
rq2h5.finished()
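
# --- hedged round-trip sketch (added for illustration; not in the original file) ---
# Shows how a dataset written with a structured dtype like DEFAULT_DTYPE can be
# read back from HDF5 as a structured numpy array. np.float64 is used explicitly
# here because the np.float alias used above is deprecated in recent numpy releases.
# The file name "roundtrip_demo.h5" and the symbol "DEMO" are invented for this sketch.
def _roundtrip_demo():
    demo_dtype = np.dtype([('datetime', np.int64),
                           ('open', np.float64),
                           ('close', np.float64)])
    rows = np.asarray([(20200101090000, 100.0, 101.0)], dtype=demo_dtype)
    with h5py.File("roundtrip_demo.h5", "w") as fo:
        fo.create_dataset("DEMO", data=rows)
    with h5py.File("roundtrip_demo.h5", "r") as fi:
        back = fi["DEMO"][:]
    print(back['datetime'], back['open'], back['close'])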
|
[
"numpy.dtype",
"numpy.asarray",
"os.path.basename",
"h5py.File"
] |
[((74, 361), 'numpy.dtype', 'np.dtype', (["[('datetime', np.int64), ('open', np.float), ('close', np.float), ('high',\n np.float), ('low', np.float), ('limit_up', np.float), ('limit_down', np\n .float), ('volume', np.float), ('total_turnover', np.float), (\n 'settlement', np.float), ('prev_settlement', np.float)]"], {}), "([('datetime', np.int64), ('open', np.float), ('close', np.float),\n ('high', np.float), ('low', np.float), ('limit_up', np.float), (\n 'limit_down', np.float), ('volume', np.float), ('total_turnover', np.\n float), ('settlement', np.float), ('prev_settlement', np.float)])\n", (82, 361), True, 'import numpy as np\n'), ((601, 624), 'h5py.File', 'h5py.File', (['fo_name', '"""w"""'], {}), "(fo_name, 'w')\n", (610, 624), False, 'import h5py\n'), ((2106, 2142), 'numpy.asarray', 'np.asarray', (['res'], {'dtype': 'DEFAULT_DTYPE'}), '(res, dtype=DEFAULT_DTYPE)\n', (2116, 2142), True, 'import numpy as np\n'), ((830, 855), 'os.path.basename', 'os.path.basename', (['fi_name'], {}), '(fi_name)\n', (846, 855), False, 'import os\n')]
|
import sys, os
import time
import getopt
import pprint
try:
# doesn't exist on macos
from shmem import PyShmemClient
except:
pass
from psana import dgram
from psana.event import Event
from psana.detector import detectors
from psana.psexp.event_manager import TransitionId
import numpy as np
def dumpDict(dict,indent):
for k in sorted(dict.keys()):
if hasattr(dict[k],'__dict__'):
print(' '*indent,k)
dumpDict(dict[k].__dict__,indent+2)
else:
print(' '*indent,k,dict[k])
# method to dump dgrams to stdout. ideally this would move into dgram.cc
def dumpDgram(d):
dumpDict(d.__dict__,0)
FN_L = 200
# Warning: If XtcData::Dgram ever changes, this function will likely need to change
def _service(view):
iSvc = 2 # Index of service field, in units of uint32_t
return (np.array(view, copy=False).view(dtype=np.uint32)[iSvc] >> 24) & 0x0f
# Warning: If XtcData::Dgram ever changes, this function will likely need to change
def _dgSize(view):
iExt = 5 # Index of extent field, in units of uint32_t
txSize = 3 * 4 # sizeof(XtcData::TransitionBase)
return txSize + np.array(view, copy=False).view(dtype=np.uint32)[iExt]
class DgramManager(object):
def __init__(self, xtc_files, configs=[], fds=[],
tag=None, run=None, max_retries=0,
found_xtc2_callback=None):
""" Opens xtc_files and stores configs.
If file descriptors (fds) is given, reuse the given file descriptors.
"""
self.xtc_files = []
self.shmem_cli = None
self.shmem_kwargs = {'index':-1,'size':0,'cli_cptr':None}
self.configs = []
self._timestamps = [] # built when iterating
self._run = run
self.found_endrun = True
self.buffered_beginruns = []
self.max_retries = max_retries
self.chunk_ids = []
        # Add the ability for DgramManager to check if xtc2 files exist (in case
        # an .inprogress file is used).
if found_xtc2_callback:
setattr(self, 'found_xtc2', found_xtc2_callback)
if isinstance(xtc_files, (str)):
self.xtc_files = np.array([xtc_files], dtype='U%s'%FN_L)
elif isinstance(xtc_files, (list, np.ndarray)):
if len(xtc_files) > 0: # handles smalldata-only case
if xtc_files[0] == 'shmem':
self.shmem_cli = PyShmemClient()
#establish connection to available server - blocking
status = int(self.shmem_cli.connect(tag,0))
assert not status,'shmem connect failure %d' % status
#wait for first configure datagram - blocking
view = self.shmem_cli.get(self.shmem_kwargs)
assert view
# Release shmem buffer after copying Transition data
# cpo: copy L1Accepts too because some shmem
# applications like AMI's pickN can hold references
# to dgrams for a long time, consuming the shmem buffers
# and creating a deadlock situation. could revisit this
# later and only deep-copy arrays inside pickN, for example
# but would be more fragile.
barray = bytes(view[:_dgSize(view)])
self.shmem_cli.freeByIndex(self.shmem_kwargs['index'], self.shmem_kwargs['size'])
view = memoryview(barray)
d = dgram.Dgram(view=view)
self.configs += [d]
else:
self.xtc_files = np.asarray(xtc_files, dtype='U%s'%FN_L)
self.given_fds = True if len(fds) > 0 else False
if self.given_fds:
self.fds = np.asarray(fds, dtype=np.int32)
else:
self.fds = np.array([os.open(xtc_file, os.O_RDONLY) for xtc_file in self.xtc_files], dtype=np.int32)
self.fds_map = {}
for fd, xtc_file in zip(self.fds, self.xtc_files):
self.fds_map[fd] = xtc_file
given_configs = True if len(configs) > 0 else False
if given_configs:
self.configs = configs
elif xtc_files[0] != 'shmem':
self.configs = [dgram.Dgram(file_descriptor=fd, max_retries=self.max_retries) for fd in self.fds]
self.calibconst = {} # initialize to empty dict - will be populated by run class
self.n_files = len(self.xtc_files)
self.set_chunk_ids()
def set_chunk_ids(self):
if len(self.xtc_files) == 0: return
if self.xtc_files[0] == 'shmem': return
for xtc_file in self.xtc_files:
filename = os.path.basename(xtc_file)
found = filename.find('-c')
if found >= 0:
found_e = filename.find('.xtc2')
self.chunk_ids.append(int(filename[found+2:found_e]))
def get_chunk_id(self, ind):
if not self.chunk_ids: return None
return self.chunk_ids[ind]
def set_chunk_id(self, ind, new_chunk_id):
self.chunk_ids[ind] = new_chunk_id
def close(self):
if not self.given_fds:
for fd in self.fds:
os.close(fd)
def __iter__(self):
return self
def _check_missing_endrun(self, beginruns=None):
fake_endruns = None
if not self.found_endrun: # there's no previous EndRun
sec = (self._timestamps[-1] >> 32) & 0xffffffff
usec = int((self._timestamps[-1] & 0xffffffff) * 1e3 + 1)
if beginruns:
self.buffered_beginruns = [dgram.Dgram(config=config,
view=d, offset=0, size=d._size)
for d, config in zip(beginruns, self.configs)]
fake_endruns = [dgram.Dgram(config=config, fake_endrun=1, \
fake_endrun_sec=sec, fake_endrun_usec=usec) \
for config in self.configs]
self.found_endrun = True
else:
self.found_endrun = False
return fake_endruns
def __next__(self):
""" only support sequential read - no event building"""
if self.buffered_beginruns:
self.found_endrun = False
evt = Event(self.buffered_beginruns, run=self.run())
self._timestamps += [evt.timestamp]
self.buffered_beginruns = []
return evt
if self.shmem_cli:
view = self.shmem_cli.get(self.shmem_kwargs)
if view:
# Release shmem buffer after copying Transition data
# cpo: copy L1Accepts too because some shmem
# applications like AMI's pickN can hold references
# to dgrams for a long time, consuming the shmem buffers
# and creating a deadlock situation. could revisit this
# later and only deep-copy arrays inside pickN, for example
# but would be more fragile.
barray = bytes(view[:_dgSize(view)])
self.shmem_cli.freeByIndex(self.shmem_kwargs['index'], self.shmem_kwargs['size'])
view = memoryview(barray)
# use the most recent configure datagram
config = self.configs[len(self.configs)-1]
d = dgram.Dgram(config=config,view=view)
dgrams = [d]
else:
raise StopIteration
else:
try:
dgrams = [dgram.Dgram(config=config, max_retries=self.max_retries) for config in self.configs]
except StopIteration as err:
fake_endruns = self._check_missing_endrun()
if fake_endruns:
dgrams = fake_endruns
else:
print(err)
raise StopIteration
# Check BeginRun - EndRun pairing
service = dgrams[0].service()
if service == TransitionId.BeginRun:
fake_endruns = self._check_missing_endrun(beginruns=dgrams)
if fake_endruns:
dgrams = fake_endruns
if service == TransitionId.EndRun:
self.found_endrun = True
evt = Event(dgrams, run=self.get_run())
self._timestamps += [evt.timestamp]
return evt
def jumps(self, dgram_i, offset, size):
if offset == 0 and size == 0:
d = None
else:
try:
d = dgram.Dgram(file_descriptor=self.fds[dgram_i],
config=self.configs[dgram_i],
offset=offset,
size=size,
max_retries=self.max_retries)
except StopIteration:
d = None
return d
def jump(self, offsets, sizes):
""" Jumps to the offset and reads out dgram on each xtc file.
This is used in normal mode (multiple detectors with MPI).
"""
assert len(offsets) > 0 and len(sizes) > 0
dgrams = [self.jumps(dgram_i, offset, size) for dgram_i, (offset, size)
in enumerate(zip(offsets, sizes))]
evt = Event(dgrams, run=self._run)
return evt
def get_timestamps(self):
return np.asarray(self._timestamps, dtype=np.uint64) # return numpy array for easy search later
def set_run(self, run):
self._run = run
def get_run(self):
return self._run
def parse_command_line():
opts, args_proper = getopt.getopt(sys.argv[1:], 'hvd:f:')
xtcdata_filename="data.xtc"
for option, parameter in opts:
if option=='-h': usage_error()
if option=='-f': xtcdata_filename = parameter
if xtcdata_filename is None:
xtcdata_filename="data.xtc"
return (args_proper, xtcdata_filename)
def getMemUsage():
pid=os.getpid()
ppid=os.getppid()
cmd="/usr/bin/ps -q %d --no-headers -eo size" % pid
p=os.popen(cmd)
size=int(p.read())
return size
def main():
args_proper, xtcdata_filename = parse_command_line()
ds=DgramManager(xtcdata_filename)
print("vars(ds):")
for var_name in sorted(vars(ds)):
print(" %s:" % var_name)
e=getattr(ds, var_name)
if not isinstance(e, (tuple, list, int, float, str)):
for key in sorted(e.__dict__.keys()):
print("%s: %s" % (key, e.__dict__[key]))
print()
count=0
for evt in ds:
print("evt:", count)
for dgram in evt:
for var_name in sorted(vars(dgram)):
val=getattr(dgram, var_name)
print(" %s: %s" % (var_name, type(val)))
a=dgram.xpphsd.raw.array0Pgp
try:
a[0][0]=999
except ValueError:
print("The dgram.xpphsd.raw.array0Pgp is read-only, as it should be.")
else:
print("Warning: the evt.array0_pgp array is writable")
print()
count+=1
return
def usage_error():
s="usage: python %s" % os.path.basename(sys.argv[0])
sys.stdout.write("%s [-h]\n" % s)
sys.stdout.write("%s [-f xtcdata_filename]\n" % (" "*len(s)))
sys.exit(1)
if __name__=='__main__':
main()
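
# --- hedged illustration (added for this write-up; not part of psana) ---
# Demonstrates the raw header decoding done by _service() and _dgSize() above on
# a synthetic 6-word buffer: the service id sits in bits 24-27 of uint32 word 2,
# and the extent (payload size) sits in word 5, to which the 12-byte transition
# header size is added. The field values below are arbitrary test numbers.
def _header_decode_demo():
    words = np.zeros(6, dtype=np.uint32)
    words[2] = np.uint32(7 << 24)       # pretend service id 7
    words[5] = np.uint32(4096)          # pretend extent of 4096 bytes
    view = memoryview(words.tobytes())
    print("service:", _service(view))    # expected: 7
    print("dgram size:", _dgSize(view))  # expected: 12 + 4096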
|
[
"getopt.getopt",
"shmem.PyShmemClient",
"psana.dgram.Dgram",
"os.close",
"os.open",
"numpy.asarray",
"os.getppid",
"numpy.array",
"os.popen",
"os.path.basename",
"os.getpid",
"sys.exit",
"psana.event.Event",
"sys.stdout.write"
] |
[((9560, 9597), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""hvd:f:"""'], {}), "(sys.argv[1:], 'hvd:f:')\n", (9573, 9597), False, 'import getopt\n'), ((9898, 9909), 'os.getpid', 'os.getpid', ([], {}), '()\n', (9907, 9909), False, 'import sys, os\n'), ((9919, 9931), 'os.getppid', 'os.getppid', ([], {}), '()\n', (9929, 9931), False, 'import sys, os\n'), ((9994, 10007), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (10002, 10007), False, 'import sys, os\n'), ((11124, 11157), 'sys.stdout.write', 'sys.stdout.write', (["('%s [-h]\\n' % s)"], {}), "('%s [-h]\\n' % s)\n", (11140, 11157), False, 'import sys, os\n'), ((11228, 11239), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (11236, 11239), False, 'import sys, os\n'), ((9220, 9248), 'psana.event.Event', 'Event', (['dgrams'], {'run': 'self._run'}), '(dgrams, run=self._run)\n', (9225, 9248), False, 'from psana.event import Event\n'), ((9314, 9359), 'numpy.asarray', 'np.asarray', (['self._timestamps'], {'dtype': 'np.uint64'}), '(self._timestamps, dtype=np.uint64)\n', (9324, 9359), True, 'import numpy as np\n'), ((11090, 11119), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (11106, 11119), False, 'import sys, os\n'), ((2221, 2262), 'numpy.array', 'np.array', (['[xtc_files]'], {'dtype': "('U%s' % FN_L)"}), "([xtc_files], dtype='U%s' % FN_L)\n", (2229, 2262), True, 'import numpy as np\n'), ((3845, 3876), 'numpy.asarray', 'np.asarray', (['fds'], {'dtype': 'np.int32'}), '(fds, dtype=np.int32)\n', (3855, 3876), True, 'import numpy as np\n'), ((4755, 4781), 'os.path.basename', 'os.path.basename', (['xtc_file'], {}), '(xtc_file)\n', (4771, 4781), False, 'import sys, os\n'), ((5276, 5288), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (5284, 5288), False, 'import sys, os\n'), ((5866, 5955), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'config': 'config', 'fake_endrun': '(1)', 'fake_endrun_sec': 'sec', 'fake_endrun_usec': 'usec'}), '(config=config, fake_endrun=1, fake_endrun_sec=sec,\n fake_endrun_usec=usec)\n', (5877, 5955), False, 'from psana import dgram\n'), ((7380, 7417), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'config': 'config', 'view': 'view'}), '(config=config, view=view)\n', (7391, 7417), False, 'from psana import dgram\n'), ((8549, 8685), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'file_descriptor': 'self.fds[dgram_i]', 'config': 'self.configs[dgram_i]', 'offset': 'offset', 'size': 'size', 'max_retries': 'self.max_retries'}), '(file_descriptor=self.fds[dgram_i], config=self.configs[dgram_i],\n offset=offset, size=size, max_retries=self.max_retries)\n', (8560, 8685), False, 'from psana import dgram\n'), ((1205, 1231), 'numpy.array', 'np.array', (['view'], {'copy': '(False)'}), '(view, copy=False)\n', (1213, 1231), True, 'import numpy as np\n'), ((3924, 3954), 'os.open', 'os.open', (['xtc_file', 'os.O_RDONLY'], {}), '(xtc_file, os.O_RDONLY)\n', (3931, 3954), False, 'import sys, os\n'), ((4326, 4387), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'file_descriptor': 'fd', 'max_retries': 'self.max_retries'}), '(file_descriptor=fd, max_retries=self.max_retries)\n', (4337, 4387), False, 'from psana import dgram\n'), ((5678, 5736), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'config': 'config', 'view': 'd', 'offset': '(0)', 'size': 'd._size'}), '(config=config, view=d, offset=0, size=d._size)\n', (5689, 5736), False, 'from psana import dgram\n'), ((7557, 7613), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'config': 'config', 'max_retries': 'self.max_retries'}), '(config=config, max_retries=self.max_retries)\n', 
(7568, 7613), False, 'from psana import dgram\n'), ((868, 894), 'numpy.array', 'np.array', (['view'], {'copy': '(False)'}), '(view, copy=False)\n', (876, 894), True, 'import numpy as np\n'), ((2463, 2478), 'shmem.PyShmemClient', 'PyShmemClient', ([], {}), '()\n', (2476, 2478), False, 'from shmem import PyShmemClient\n'), ((3574, 3596), 'psana.dgram.Dgram', 'dgram.Dgram', ([], {'view': 'view'}), '(view=view)\n', (3585, 3596), False, 'from psana import dgram\n'), ((3696, 3737), 'numpy.asarray', 'np.asarray', (['xtc_files'], {'dtype': "('U%s' % FN_L)"}), "(xtc_files, dtype='U%s' % FN_L)\n", (3706, 3737), True, 'import numpy as np\n')]
|
# Copyright (c) 2020, <NAME>, University of Washington
# This file is part of rcwa_tf
# Written by <NAME> (Email: <EMAIL>)
import tensorflow as tf
import numpy as np
def convmat(A, P, Q):
'''
This function computes a convolution matrix for a real space matrix `A` that
represents either a relative permittivity or permeability distribution for a
set of pixels, layers, and batch.
Args:
A: A `tf.Tensor` of dtype `complex` and shape `(batchSize, pixelsX,
pixelsY, Nlayers, Nx, Ny)` specifying real space values on a Cartesian
grid.
P: A positive and odd `int` specifying the number of spatial harmonics
along `T1`.
Q: A positive and odd `int` specifying the number of spatial harmonics
along `T2`.
Returns:
A `tf.Tensor` of dtype `complex` and shape `(batchSize, pixelsX,
pixelsY, Nlayers, P * Q, P * Q)` representing a stack of convolution
matrices based on `A`.
'''
# Determine the shape of A.
batchSize, pixelsX, pixelsY, Nlayers, Nx, Ny = A.shape
# Compute indices of spatial harmonics.
NH = P * Q # total number of harmonics.
p_max = np.floor(P / 2.0)
  q_max = np.floor(Q / 2.0)  # use Q (not P) for the harmonics along T2
# Indices along T1 and T2.
p = np.linspace(-p_max, p_max, P)
q = np.linspace(-q_max, q_max, Q)
# Compute array indices of the center harmonic.
p0 = int(np.floor(Nx / 2))
q0 = int(np.floor(Ny / 2))
# Fourier transform the real space distributions.
A = tf.signal.fftshift(tf.signal.fft2d(A), axes = (4, 5)) / (Nx * Ny)
# Build the matrix.
firstCoeff = True
for qrow in range(Q):
for prow in range(P):
for qcol in range(Q):
for pcol in range(P):
pfft = int(p[prow] - p[pcol])
qfft = int(q[qrow] - q[qcol])
# Sequentially concatenate Fourier coefficients.
value = A[:, :, :, :, p0 + pfft, q0 + qfft]
value = value[:, :, :, :, tf.newaxis, tf.newaxis]
if firstCoeff:
firstCoeff = False
C = value
else:
C = tf.concat([C, value], axis = 5)
# Reshape the coefficients tensor into a stack of convolution matrices.
convMatrixShape = (batchSize, pixelsX, pixelsY, Nlayers, P * Q, P * Q)
matrixStack = tf.reshape(C, shape = convMatrixShape)
return matrixStack
def redheffer_star_product(SA, SB):
'''
This function computes the redheffer star product of two block matrices,
which is the result of combining the S-parameter of two systems.
Args:
SA: A `dict` of `tf.Tensor` values specifying the block matrix
corresponding to the S-parameters of a system. `SA` needs to have the
keys ('S11', 'S12', 'S21', 'S22'), where each key maps to a `tf.Tensor`
of shape `(batchSize, pixelsX, pixelsY, 2*NH, 2*NH)`, where NH is the
total number of spatial harmonics.
SB: A `dict` of `tf.Tensor` values specifying the block matrix
corresponding to the S-parameters of a second system. `SB` needs to have
the keys ('S11', 'S12', 'S21', 'S22'), where each key maps to a
`tf.Tensor` of shape `(batchSize, pixelsX, pixelsY, 2*NH, 2*NH)`, where
NH is the total number of spatial harmonics.
Returns:
A `dict` of `tf.Tensor` values specifying the block matrix
    corresponding to the S-parameters of the combined system. The returned
    `dict` has the keys ('S11', 'S12', 'S21', 'S22'), where each key maps to
    a `tf.Tensor` of shape `(batchSize, pixelsX, pixelsY, 2*NH, 2*NH)`,
    where NH is the total number of spatial harmonics.
'''
# Define the identity matrix.
batchSize, pixelsX, pixelsY, dim, _ = SA['S11'].shape
I = tf.eye(num_rows = dim, dtype = tf.complex64)
I = I[tf.newaxis, tf.newaxis, tf.newaxis, :, :]
I = tf.tile(I, multiples = (batchSize, pixelsX, pixelsY, 1, 1))
# Calculate S11.
S11 = tf.linalg.inv(I - tf.linalg.matmul(SB['S11'], SA['S22']))
S11 = tf.linalg.matmul(S11, SB['S11'])
S11 = tf.linalg.matmul(SA['S12'], S11)
S11 = SA['S11'] + tf.linalg.matmul(S11, SA['S21'])
# Calculate S12.
S12 = tf.linalg.inv(I - tf.linalg.matmul(SB['S11'], SA['S22']))
S12 = tf.linalg.matmul(S12, SB['S12'])
S12 = tf.linalg.matmul(SA['S12'], S12)
# Calculate S21.
S21 = tf.linalg.inv(I - tf.linalg.matmul(SA['S22'], SB['S11']))
S21 = tf.linalg.matmul(S21, SA['S21'])
S21 = tf.linalg.matmul(SB['S21'], S21)
# Calculate S22.
S22 = tf.linalg.inv(I - tf.linalg.matmul(SA['S22'], SB['S11']))
S22 = tf.linalg.matmul(S22, SA['S22'])
S22 = tf.linalg.matmul(SB['S21'], S22)
S22 = SB['S22'] + tf.linalg.matmul(S22, SB['S12'])
# Store S parameters in an output dictionary.
S = dict({})
S['S11'] = S11
S['S12'] = S12
S['S21'] = S21
S['S22'] = S22
return S
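
# --- hedged sanity-check sketch (added for illustration; not part of rcwa_tf) ---
# For a spatially uniform permittivity, the 2D FFT has a single non-zero (DC)
# coefficient, so convmat() should return approximately eps times the identity
# matrix of size (P*Q) x (P*Q). The grid and harmonic sizes below are arbitrary
# small test values chosen only for this check.
def _convmat_uniform_demo():
  eps = tf.constant(4.0 + 0.0j, dtype=tf.complex64)
  A = eps * tf.ones((1, 1, 1, 1, 64, 64), dtype=tf.complex64)
  C = convmat(A, P=3, Q=3)
  print(C.shape)                              # (1, 1, 1, 1, 9, 9)
  print(np.round(np.array(C[0, 0, 0, 0]), 3))  # ~ 4 * identity(9)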
|
[
"tensorflow.eye",
"tensorflow.tile",
"numpy.floor",
"tensorflow.concat",
"numpy.linspace",
"tensorflow.reshape",
"tensorflow.linalg.matmul",
"tensorflow.signal.fft2d"
] |
[((1189, 1206), 'numpy.floor', 'np.floor', (['(P / 2.0)'], {}), '(P / 2.0)\n', (1197, 1206), True, 'import numpy as np\n'), ((1219, 1236), 'numpy.floor', 'np.floor', (['(P / 2.0)'], {}), '(P / 2.0)\n', (1227, 1236), True, 'import numpy as np\n'), ((1277, 1306), 'numpy.linspace', 'np.linspace', (['(-p_max)', 'p_max', 'P'], {}), '(-p_max, p_max, P)\n', (1288, 1306), True, 'import numpy as np\n'), ((1315, 1344), 'numpy.linspace', 'np.linspace', (['(-q_max)', 'q_max', 'Q'], {}), '(-q_max, q_max, Q)\n', (1326, 1344), True, 'import numpy as np\n'), ((2470, 2506), 'tensorflow.reshape', 'tf.reshape', (['C'], {'shape': 'convMatrixShape'}), '(C, shape=convMatrixShape)\n', (2480, 2506), True, 'import tensorflow as tf\n'), ((3929, 3969), 'tensorflow.eye', 'tf.eye', ([], {'num_rows': 'dim', 'dtype': 'tf.complex64'}), '(num_rows=dim, dtype=tf.complex64)\n', (3935, 3969), True, 'import tensorflow as tf\n'), ((4034, 4091), 'tensorflow.tile', 'tf.tile', (['I'], {'multiples': '(batchSize, pixelsX, pixelsY, 1, 1)'}), '(I, multiples=(batchSize, pixelsX, pixelsY, 1, 1))\n', (4041, 4091), True, 'import tensorflow as tf\n'), ((4198, 4230), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S11', "SB['S11']"], {}), "(S11, SB['S11'])\n", (4214, 4230), True, 'import tensorflow as tf\n'), ((4241, 4273), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SA['S12']", 'S11'], {}), "(SA['S12'], S11)\n", (4257, 4273), True, 'import tensorflow as tf\n'), ((4433, 4465), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S12', "SB['S12']"], {}), "(S12, SB['S12'])\n", (4449, 4465), True, 'import tensorflow as tf\n'), ((4476, 4508), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SA['S12']", 'S12'], {}), "(SA['S12'], S12)\n", (4492, 4508), True, 'import tensorflow as tf\n'), ((4613, 4645), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S21', "SA['S21']"], {}), "(S21, SA['S21'])\n", (4629, 4645), True, 'import tensorflow as tf\n'), ((4656, 4688), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SB['S21']", 'S21'], {}), "(SB['S21'], S21)\n", (4672, 4688), True, 'import tensorflow as tf\n'), ((4793, 4825), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S22', "SA['S22']"], {}), "(S22, SA['S22'])\n", (4809, 4825), True, 'import tensorflow as tf\n'), ((4836, 4868), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SB['S21']", 'S22'], {}), "(SB['S21'], S22)\n", (4852, 4868), True, 'import tensorflow as tf\n'), ((1415, 1431), 'numpy.floor', 'np.floor', (['(Nx / 2)'], {}), '(Nx / 2)\n', (1423, 1431), True, 'import numpy as np\n'), ((1446, 1462), 'numpy.floor', 'np.floor', (['(Ny / 2)'], {}), '(Ny / 2)\n', (1454, 1462), True, 'import numpy as np\n'), ((4296, 4328), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S11', "SA['S21']"], {}), "(S11, SA['S21'])\n", (4312, 4328), True, 'import tensorflow as tf\n'), ((4891, 4923), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['S22', "SB['S12']"], {}), "(S22, SB['S12'])\n", (4907, 4923), True, 'import tensorflow as tf\n'), ((1546, 1564), 'tensorflow.signal.fft2d', 'tf.signal.fft2d', (['A'], {}), '(A)\n', (1561, 1564), True, 'import tensorflow as tf\n'), ((4148, 4186), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SB['S11']", "SA['S22']"], {}), "(SB['S11'], SA['S22'])\n", (4164, 4186), True, 'import tensorflow as tf\n'), ((4383, 4421), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SB['S11']", "SA['S22']"], {}), "(SB['S11'], SA['S22'])\n", (4399, 4421), True, 'import tensorflow as tf\n'), ((4563, 4601), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SA['S22']", 
"SB['S11']"], {}), "(SA['S22'], SB['S11'])\n", (4579, 4601), True, 'import tensorflow as tf\n'), ((4743, 4781), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (["SA['S22']", "SB['S11']"], {}), "(SA['S22'], SB['S11'])\n", (4759, 4781), True, 'import tensorflow as tf\n'), ((2238, 2267), 'tensorflow.concat', 'tf.concat', (['[C, value]'], {'axis': '(5)'}), '([C, value], axis=5)\n', (2247, 2267), True, 'import tensorflow as tf\n')]
|
import pandas as pd
import json
import os
import os.path as osp
import numpy as np
"""
python -m spinup.run hyper_search <files> -ae <start from which epoch>
make a file that can order the experiments in terms of their performance
use this to easily find good hyperparameters when doing hyperparameter search
upload this file when it's ready don't use it again lol
"""
DIV_LINE_WIDTH = 50
# Global vars for tracking and labeling data at load time.
exp_idx = 0
units = dict()
def compute_hyper(data, xaxis='Epoch', value="AverageEpRet", condition="Condition1", smooth=1, no_legend=False,
legend_loc='best', color=None, linestyle=None, font_scale=1.5,
label_font_size=24, xlabel=None, ylabel=None, after_epoch=0, no_order=False,
**kwargs):
if smooth > 1:
"""
smooth data with moving window average.
that is,
smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k])
where the "smooth" param is width of that window (2k+1)
"""
y = np.ones(smooth)
for datum in data:
x = np.asarray(datum[value])
z = np.ones(len(x))
smoothed_x = np.convolve(x, y, 'same') / np.convolve(z, y, 'same')
datum[value] = smoothed_x
if isinstance(data, list):
data = pd.concat(data, ignore_index=True)
# print("columns", data.columns)
unique_names = data[condition].unique() ## these are the experiment names
n_settings = len(unique_names)
score_list = np.zeros(n_settings)
std_list = np.zeros(n_settings)
print(score_list)
for i in range(n_settings):
un = unique_names[i]
print("\nunique name: ",un)
exp_data = data.loc[data[condition] == un] ## the data related to this experiment
# average_test_epret = exp_data['AverageTestEpRet'].values
# print(average_test_epret.shape)
# final performance data only concern the last few epoches
final_performance_data = exp_data.loc[exp_data['Epoch'] >= after_epoch]
average_test_epret_final = final_performance_data['AverageTestEpRet'].values
mean_score = average_test_epret_final.mean()
std_score = average_test_epret_final.std()
score_list[i] = mean_score
std_list[i] = std_score
epoch_reached = final_performance_data['Epoch'].max()
if np.isnan(mean_score):
print('n/a')
else:
print('total epoch: %d, score: %.2f' % (epoch_reached,mean_score))
"""
here we want to give an ordering of the hyper-settings, so that we can know
which ones are good hyper-parameters
"""
sorted_index =np.flip(np.argsort(score_list))
if no_order:
sorted_index = np.arange(len(sorted_index))
for i in range(n_settings):
setting_index = sorted_index[i]
print('%s\t%.1f\t%.1f' % (unique_names[setting_index], score_list[setting_index], std_list[setting_index]))
def get_datasets(logdir, condition=None):
"""
Recursively look through logdir for output files produced by
spinup.logx.Logger.
Assumes that any file "progress.txt" is a valid hit.
"""
global exp_idx
global units
datasets = []
for root, _, files in os.walk(logdir):
if 'progress.txt' in files:
exp_name = None
try:
config_path = open(os.path.join(root, 'config.json'))
config = json.load(config_path)
if 'exp_name' in config:
exp_name = config['exp_name']
except:
print('No file named config.json')
condition1 = condition or exp_name or 'exp'
condition2 = condition1 + '-' + str(exp_idx)
exp_idx += 1
if condition1 not in units:
units[condition1] = 0
unit = units[condition1]
units[condition1] += 1
try:
exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet'
exp_data.insert(len(exp_data.columns), 'Unit', unit)
exp_data.insert(len(exp_data.columns), 'Condition1', condition1)
exp_data.insert(len(exp_data.columns), 'Condition2', condition2)
exp_data.insert(len(exp_data.columns), 'Performance', exp_data[performance])
datasets.append(exp_data)
except Exception as e:
print(e)
return datasets
def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):
"""
For every entry in all_logdirs,
1) check if the entry is a real directory and if it is,
pull data from it;
2) if not, check to see if the entry is a prefix for a
real directory, and pull data from that.
"""
logdirs = []
for logdir in all_logdirs:
if osp.isdir(logdir) and logdir[-1] == '/':
logdirs += [logdir]
else:
basedir = osp.dirname(logdir)
fulldir = lambda x: osp.join(basedir, x)
prefix = logdir.split('/')[-1]
listdir = os.listdir(basedir)
logdirs += sorted([fulldir(x) for x in listdir if prefix in x])
"""
Enforce selection rules, which check logdirs for certain substrings.
Makes it easier to look at graphs from particular ablations, if you
launch many jobs at once with similar names.
"""
if select is not None:
logdirs = [log for log in logdirs if all(x in log for x in select)]
if exclude is not None:
logdirs = [log for log in logdirs if all(not (x in log) for x in exclude)]
# Verify logdirs
print('Plotting from...\n' + '=' * DIV_LINE_WIDTH + '\n')
for logdir in logdirs:
print(logdir)
print('\n' + '=' * DIV_LINE_WIDTH)
# Make sure the legend is compatible with the logdirs
assert not (legend) or (len(legend) == len(logdirs)), \
"Must give a legend title for each set of experiments."
# Load data from logdirs
data = []
if legend:
for log, leg in zip(logdirs, legend):
data += get_datasets(log, leg)
else:
for log in logdirs:
data += get_datasets(log)
return data
def compare_performance(all_logdirs, legend=None, xaxis=None, values=None, count=False,
font_scale=1.5, smooth=1, select=None, exclude=None, estimator='mean', no_legend=False,
legend_loc='best', after_epoch=0,
save_name=None, xlimit=-1, color=None, linestyle=None, label_font_size=24,
xlabel=None, ylabel=None,
no_order=False):
data = get_all_datasets(all_logdirs, legend, select, exclude)
values = values if isinstance(values, list) else [values]
condition = 'Condition2' if count else 'Condition1'
estimator = getattr(np, estimator) # choose what to show on main curve: mean? max? min?
for value in values:
compute_hyper(data, xaxis=xaxis, value=value, condition=condition, smooth=smooth, no_legend=no_legend,
legend_loc=legend_loc,
estimator=estimator, color=color, linestyle=linestyle, font_scale=font_scale,
label_font_size=label_font_size,
xlabel=xlabel, ylabel=ylabel, after_epoch=after_epoch, no_order=no_order)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('logdir', nargs='*')
parser.add_argument('--legend', '-l', nargs='*')
parser.add_argument('--xaxis', '-x', default='TotalEnvInteracts')
parser.add_argument('--value', '-y', default='Performance', nargs='*')
parser.add_argument('--count', action='store_true')
parser.add_argument('--smooth', '-s', type=int, default=1)
parser.add_argument('--select', nargs='*')
parser.add_argument('--exclude', nargs='*')
parser.add_argument('--est', default='mean')
parser.add_argument('--after-epoch', '-ae', type=int, default=0)
parser.add_argument('-no', '--no-order', action='store_true')
args = parser.parse_args()
"""
Args:
logdir (strings): As many log directories (or prefixes to log
directories, which the plotter will autocomplete internally) as
you'd like to plot from.
legend (strings): Optional way to specify legend for the plot. The
plotter legend will automatically use the ``exp_name`` from the
config.json file, unless you tell it otherwise through this flag.
This only works if you provide a name for each directory that
will get plotted. (Note: this may not be the same as the number
of logdir args you provide! Recall that the plotter looks for
autocompletes of the logdir args: there may be more than one
match for a given logdir prefix, and you will need to provide a
legend string for each one of those matches---unless you have
removed some of them as candidates via selection or exclusion
rules (below).)
xaxis (string): Pick what column from data is used for the x-axis.
Defaults to ``TotalEnvInteracts``.
value (strings): Pick what columns from data to graph on the y-axis.
Submitting multiple values will produce multiple graphs. Defaults
to ``Performance``, which is not an actual output of any algorithm.
Instead, ``Performance`` refers to either ``AverageEpRet``, the
correct performance measure for the on-policy algorithms, or
``AverageTestEpRet``, the correct performance measure for the
off-policy algorithms. The plotter will automatically figure out
which of ``AverageEpRet`` or ``AverageTestEpRet`` to report for
each separate logdir.
count: Optional flag. By default, the plotter shows y-values which
are averaged across all results that share an ``exp_name``,
which is typically a set of identical experiments that only vary
in random seed. But if you'd like to see all of those curves
separately, use the ``--count`` flag.
smooth (int): Smooth data by averaging it over a fixed window. This
parameter says how wide the averaging window will be.
select (strings): Optional selection rule: the plotter will only show
curves from logdirs that contain all of these substrings.
exclude (strings): Optional exclusion rule: plotter will only show
curves from logdirs that do not contain these substrings.
        after-epoch: if > 0, then when computing an algorithm's "score",
            we use the average of test returns after that epoch number
        no-order: pass this flag to print setting names without ordering them by performance
"""
compare_performance(args.logdir, args.legend, args.xaxis, args.value, args.count,
smooth=args.smooth, select=args.select, exclude=args.exclude,
estimator=args.est, after_epoch=args.after_epoch, no_order=args.no_order)
if __name__ == "__main__":
main()
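
# --- hedged mini-example (added for illustration; not in the original script) ---
# compute_hyper() ranks hyperparameter settings by sorting their mean scores in
# descending order with np.flip(np.argsort(...)). The toy names and scores below
# are invented just to show what that ordering produces.
def _ranking_demo():
    names = ['lr1e-3', 'lr1e-4', 'lr1e-2']
    scores = np.array([120.0, 95.0, 150.0])
    order = np.flip(np.argsort(scores))   # indices of settings, best first
    for idx in order:
        print('%s\t%.1f' % (names[idx], scores[idx]))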
|
[
"os.listdir",
"numpy.convolve",
"numpy.ones",
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.join",
"numpy.argsort",
"os.path.dirname",
"numpy.zeros",
"os.path.isdir",
"numpy.isnan",
"json.load",
"pandas.concat",
"os.walk"
] |
[((1541, 1561), 'numpy.zeros', 'np.zeros', (['n_settings'], {}), '(n_settings)\n', (1549, 1561), True, 'import numpy as np\n'), ((1577, 1597), 'numpy.zeros', 'np.zeros', (['n_settings'], {}), '(n_settings)\n', (1585, 1597), True, 'import numpy as np\n'), ((3264, 3279), 'os.walk', 'os.walk', (['logdir'], {}), '(logdir)\n', (3271, 3279), False, 'import os\n'), ((7539, 7564), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7562, 7564), False, 'import argparse\n'), ((1058, 1073), 'numpy.ones', 'np.ones', (['smooth'], {}), '(smooth)\n', (1065, 1073), True, 'import numpy as np\n'), ((1338, 1372), 'pandas.concat', 'pd.concat', (['data'], {'ignore_index': '(True)'}), '(data, ignore_index=True)\n', (1347, 1372), True, 'import pandas as pd\n'), ((2393, 2413), 'numpy.isnan', 'np.isnan', (['mean_score'], {}), '(mean_score)\n', (2401, 2413), True, 'import numpy as np\n'), ((2697, 2719), 'numpy.argsort', 'np.argsort', (['score_list'], {}), '(score_list)\n', (2707, 2719), True, 'import numpy as np\n'), ((1117, 1141), 'numpy.asarray', 'np.asarray', (['datum[value]'], {}), '(datum[value])\n', (1127, 1141), True, 'import numpy as np\n'), ((4972, 4989), 'os.path.isdir', 'osp.isdir', (['logdir'], {}), '(logdir)\n', (4981, 4989), True, 'import os.path as osp\n'), ((5081, 5100), 'os.path.dirname', 'osp.dirname', (['logdir'], {}), '(logdir)\n', (5092, 5100), True, 'import os.path as osp\n'), ((5219, 5238), 'os.listdir', 'os.listdir', (['basedir'], {}), '(basedir)\n', (5229, 5238), False, 'import os\n'), ((1199, 1224), 'numpy.convolve', 'np.convolve', (['x', 'y', '"""same"""'], {}), "(x, y, 'same')\n", (1210, 1224), True, 'import numpy as np\n'), ((1227, 1252), 'numpy.convolve', 'np.convolve', (['z', 'y', '"""same"""'], {}), "(z, y, 'same')\n", (1238, 1252), True, 'import numpy as np\n'), ((3457, 3479), 'json.load', 'json.load', (['config_path'], {}), '(config_path)\n', (3466, 3479), False, 'import json\n'), ((5133, 5153), 'os.path.join', 'osp.join', (['basedir', 'x'], {}), '(basedir, x)\n', (5141, 5153), True, 'import os.path as osp\n'), ((3397, 3430), 'os.path.join', 'os.path.join', (['root', '"""config.json"""'], {}), "(root, 'config.json')\n", (3409, 3430), False, 'import os\n'), ((3989, 4023), 'os.path.join', 'os.path.join', (['root', '"""progress.txt"""'], {}), "(root, 'progress.txt')\n", (4001, 4023), False, 'import os\n')]
|
import copy
import json
import numpy as np
import pandas as pd
import basicDeltaOperations as op
import calcIsotopologues as ci
import fragmentAndSimulate as fas
import solveSystem as ss
'''
This is a set of functions to quickly initialize methionine molecules based on input delta values and to simulate their fragmentation. See runAllTests for implementation.
'''
def initializeMethionine(deltas, fragSubset = ['full','133','104','102','88','74High','74Low','61','56'], printHeavy = True):
'''
Initializes methionine, returning a dataframe with basic information about the molecule as well as information about fragmentation.
Inputs:
deltas: A list of 13 M1 delta values, giving the delta values by site for the 13C, 17O, 15N, 33S, and 2H isotopes. The sites are defined in the IDList variable, below.
fragSubset: A list giving the subset of fragments to observe. If you are not observing all fragments, you may input only those you do observe.
printHeavy: The user manually specifies delta 17O, and delta 18O is set via mass scaling (see basicDeltaOperations). If True, this will print out delta 18O, 34S, & 36S.
Outputs:
molecularDataFrame: A dataframe containing basic information about the molecule.
expandedFrags: An ATOM depiction of each fragment, where an ATOM depiction has one entry for each atom (rather than for each site). See fragmentAndSimulate for details.
fragSubgeometryKeys: A list of strings, e.g. 133_01, 133_02, corresponding to each subgeometry of each fragment. A fragment will have multiple subgeometries if there are multiple fragmentation pathways to form it.
fragmentationDictionary: A dictionary like the allFragments variable, but only including the subset of fragments selected by fragSubset.
'''
##### INITIALIZE SITES #####
IDList = ['Cmethyl','Cgamma','Calphabeta','Ccarboxyl','Ocarboxyl','Ssulfur','Namine','Hmethyl','Hgamma',
'Halphabeta','Hamine','Hhydroxyl','Hprotonated']
elIDs = ['C','C','C','C','O','S','N','H','H','H','H','H','H']
numberAtSite = [1,1,2,1,2,1,1,3,2,3,2,1,1]
l = [elIDs, numberAtSite, deltas]
cols = ['IDS','Number','deltas']
condensedFrags =[]
fragKeys = []
#88 and both 74 are conjecture. 74 High has only one oxygen, so we generally do not use it.
allFragments = {'full':{'01':{'subgeometry':[1,1,1,1,1,1,1,1,1,1,1,1,1],'relCont':1}},
'133':{'01':{'subgeometry':[1,1,1,1,1,1,'x',1,1,1,'x',1,'x'],'relCont':1}},
'104':{'01':{'subgeometry':[1,1,1,'x','x',1,1,1,1,1,1,'x','x'],'relCont':1}},
'102':{'01':{'subgeometry':['x',1,1,1,1,'x',1,'x',1,1,1,1,'x'],'relCont':1}},
'88':{'01':{'subgeometry':[1,1,1,'x','x',1,'x',1,1,'x',1,'x','x'],'relCont':1}},
'74High':{'01':{'subgeometry':[1,'x',1,'x',1,'x',1,1,1,1,'x','x','x'],'relCont':1}},
'74Low':{'01':{'subgeometry':[1,1,'x','x',1,'x',1,'x',1,'x',1,'x','x'],'relCont':1}},
'61':{'01':{'subgeometry':[1,1,'x','x','x',1,'x',1,1,'x','x','x','x'],'relCont':1}},
'56':{'01':{'subgeometry':['x',1,1,'x','x','x',1,'x',1,1,'x',1,'x'],'relCont':1}}}
fragmentationDictionary = {key: value for key, value in allFragments.items() if key in fragSubset}
for fragKey, subFragDict in fragmentationDictionary.items():
for subFragNum, subFragInfo in subFragDict.items():
l.append(subFragInfo['subgeometry'])
cols.append(fragKey + '_' + subFragNum)
condensedFrags.append(subFragInfo['subgeometry'])
fragKeys.append(fragKey + '_' + subFragNum)
molecularDataFrame = pd.DataFrame(l, columns = IDList)
molecularDataFrame = molecularDataFrame.transpose()
molecularDataFrame.columns = cols
expandedFrags = [fas.expandFrag(x, numberAtSite) for x in condensedFrags]
if printHeavy:
SConc = op.deltaToConcentration('S',deltas[5])
del34 = op.ratioToDelta('34S',SConc[2]/SConc[0])
del36 = op.ratioToDelta('36S',SConc[3]/SConc[0])
OConc = op.deltaToConcentration('O',deltas[4])
del18 = op.ratioToDelta('18O',OConc[2]/OConc[0])
print("Delta 34S")
print(del34)
print("Delta 36S")
print(del36)
print("Delta 18O")
print(del18)
return molecularDataFrame, expandedFrags, fragKeys, fragmentationDictionary
def simulateMeasurement(molecularDataFrame, fragmentationDictionary, expandedFrags, fragKeys, abundanceThreshold = 0, UValueList = [],
massThreshold = 4, clumpD = {}, outputPath = None, disableProgress = False, calcFF = False, fractionationFactors = {}, omitMeasurements = {}, ffstd = 0.05, unresolvedDict = {}, outputFull = False):
'''
Simulates M+N measurements of a methionine molecule with input deltas specified by the input dataframe molecularDataFrame.
Inputs:
molecularDataFrame: A dataframe containing basic information about the molecule.
expandedFrags: An ATOM depiction of each fragment, where an ATOM depiction has one entry for each atom (rather than for each site). See fragmentAndSimulate for details.
fragSubgeometryKeys: A list of strings, e.g. 133_01, 133_02, corresponding to each subgeometry of each fragment. A fragment will have multiple subgeometries if there are multiple fragmentation pathways to form it.
    fragmentationDictionary: A dictionary like the allFragments variable from initializeMethionine, but only including the subset of fragments selected by fragSubset.
abundanceThreshold: A float; Does not include measurements below this M+N relative abundance, i.e. assuming they will not be measured due to low abundance.
UValueList: A list giving specific substitutions to calculate molecular average U values for ('13C', '15N', etc.)
massThreshold: An integer; will calculate M+N relative abundances for N <= massThreshold
clumpD: Specifies information about clumps to add; otherwise the isotome follows the stochastic assumption. Currently works only for mass 1 substitutions (e.g. 1717, 1317, etc.) See ci.introduceClump for details.
outputPath: A string, e.g. 'output', or None. If it is a string, outputs the simulated spectrum as a json.
disableProgress: Disables tqdm progress bars when True.
calcFF: When True, computes a new set of fractionation factors for this measurement.
fractionationFactors: A dictionary, specifying a fractionation factor to apply to each ion beam. This is used to apply fractionation factors calculated previously to this predicted measurement (e.g. for a sample/standard comparison with the same experimental fractionation)
    omitMeasurements: A dictionary, {}, specifying measurements which will not be observed. For example, omitMeasurements = {'M1':{'61':'D'}} would mean the D ion beam of the 61 fragment of the M+1 experiment is not observed, regardless of its abundance.
ffstd: A float; if new fractionation factors are calculated, they are pulled from a normal distribution centered around 1, with this standard deviation.
unresolvedDict: A dictionary, specifying which unresolved ion beams add to each other.
outputFull: A boolean. Typically False, in which case beams that are not observed are culled from the dictionary. If True, includes this information; this should only be used for debugging, and will likely break the solver routine.
Outputs:
predictedMeasurement: A dictionary giving information from the M+N measurements.
MN: A dictionary where keys are mass selections ("M1", "M2") and values are dictionaries giving information about the isotopologues of each mass selection.
fractionationFactors: The calculated fractionation factors for this measurement (empty unless calcFF == True)
'''
M1Only = False
if massThreshold == 1:
M1Only = True
byAtom = ci.inputToAtomDict(molecularDataFrame, disable = disableProgress, M1Only = M1Only)
#Introduce any clumps of interest with clumps
if clumpD == {}:
bySub = ci.calcSubDictionary(byAtom, molecularDataFrame, atomInput = True)
else:
print("Adding clumps")
stochD = copy.deepcopy(byAtom)
for clumpNumber, clumpInfo in clumpD.items():
byAtom = ci.introduceClump(byAtom, clumpInfo['Sites'], clumpInfo['Amount'], molecularDataFrame)
for clumpNumber, clumpInfo in clumpD.items():
ci.checkClumpDelta(clumpInfo['Sites'], molecularDataFrame, byAtom, stochD)
bySub = ci.calcSubDictionary(byAtom, molecularDataFrame, atomInput = True)
#Initialize Measurement output
if disableProgress == False:
print("Simulating Measurement")
allMeasurementInfo = {}
allMeasurementInfo = fas.UValueMeasurement(bySub, allMeasurementInfo, massThreshold = massThreshold,
subList = UValueList)
MN = ci.massSelections(byAtom, massThreshold = massThreshold)
MN = fas.trackMNFragments(MN, expandedFrags, fragKeys, molecularDataFrame, unresolvedDict = unresolvedDict)
predictedMeasurement, FF = fas.predictMNFragmentExpt(allMeasurementInfo, MN, expandedFrags, fragKeys, molecularDataFrame,
fragmentationDictionary,
abundanceThreshold = abundanceThreshold, calcFF = calcFF, ffstd = ffstd, fractionationFactors = fractionationFactors, omitMeasurements = omitMeasurements, unresolvedDict = unresolvedDict, outputFull = outputFull)
if outputPath != None:
output = json.dumps(predictedMeasurement)
f = open(outputPath + ".json","w")
f.write(output)
f.close()
return predictedMeasurement, MN, FF
def updateAbundanceCorrection(latestDeltas, fragSubset, fragmentationDictionary, expandedFrags,
fragSubgeometryKeys, processStandard, processSample, isotopologuesDict, UValuesSmp, molecularDataFrame,
NUpdates = 30, breakCondition = 1, perturbTheoryOAmt = 0.002,
experimentalOCorrectList = [],
abundanceThreshold = 0,
massThreshold = 1,
omitMeasurements = {},
unresolvedDict = {},
UMNSub = ['13C'],
N = 100,
setSpreadByExtreme = False,
oACorrectBounds = False):
'''
    A function for the iterated abundance correction. This function iterates up to NUpdates times; on each iteration, it:
1) takes the most recent set of deltas, recomputes the predicted measurement of methionine with them, and uses this to update the O value correction.
2) Defines a reasonable standard deviation to sample around this O value, based on the perturbTheoryOAmt parameter (e.g. sigma of 0.002 * O_correct)
3) Recalculates the site specific structure using the new correction factors.
4) Checks if the difference between the old deltas and new deltas is smaller than a break condition; if so, ends the routine.
It outputs the final set of results and thisODict, a data product storing information about the correction procedure.
Inputs:
latestDeltas: The input deltas to use for the first iteration of the procedure.
fragSubset: A list giving the subset of fragments to observe. If you are not observing all fragments, you may input only those you do observe.
    fragmentationDictionary: A dictionary like the allFragments variable from initializeMethionine, but only including the subset of fragments selected by fragSubset.
expandedFrags: An ATOM depiction of each fragment, where an ATOM depiction has one entry for each atom (rather than for each site). See fragmentAndSimulate for details.
fragSubgeometryKeys: A list of strings, e.g. 133_01, 133_02, corresponding to each subgeometry of each fragment. A fragment will have multiple subgeometries if there are multiple fragmentation pathways to form it.
processStandard: A dictionary containing data from several measurements, in the form: process[fileKey][MNKey][fragKey] = {'Observed Abundance':A list of floats,
'Subs':A list of strings
'Error':A list of floats
'predicted Abundance':A list of floats}
it should have information for each measurement of each observation. See runAllTests for implementation.
processSample: As processStandard, but the 'Predicted Abundance' terms will be an empty list.
    isotopologuesDict: A dictionary where the keys are "M0", "M1", etc. and the values are dataFrames giving the isotopologues with those substitutions.
    UValuesSmp: A dictionary specifying the molecular average U values and their errors, i.e. {'13C':{'Observed':float,'Error':float}}. See readInput.readComputedUValues
molecularDataFrame: A dataFrame containing information about the molecule.
NUpdates: The maximum number of iterations to perform.
    breakCondition: Each iteration, a residual is calculated as the sum of squared differences between the previous and updated delta values. If that sum falls below breakCondition, the routine ends.
perturbTheoryOAmt: Each O correction is given as a mean and a sigma. Then for each iteration of the Monte Carlo, we draw a new factor from this distribution. This parameter determines the relative width, e.g. sigma = mean * perturbTheoryOAmt
    N: The number of iterations for each MN Monte Carlo (default 100). E.g., if NUpdates is 30 and N is 100, we recalculate the methionine spectrum 30 times; on each of those iterations, we solve for site-specific values using a Monte Carlo routine with N = 100.
    UMNSub: Sets the specific substitutions whose molecular average U values are used to calculate UMN; otherwise all molecular average U values for that UMN are used. Recommended, because the procedure only works for substitutions that are totally solved for. For example, if one 13C 13C isotopologue is not solved for precisely in M+N relative abundance space, we should not use 13C13C in the UMN routine. The best candidates tend to be abundant substitutions: 36S, 18O, 13C, 34S, and so forth.
abundanceThreshold, massThreshold, omitMeasurements, unresolvedDict: See simulateMeasurement; set these parameters for each simulated dataset.
experimentalOCorrectList: A list, containing information about which peaks to use experimental correction for. See solveSystem.perturbSample.
Outputs:
M1Results: A dataframe giving the final results of the iterated correction process.
thisODict: A dictionary containing information about each correction (all except Histogram) and histograms of the sampled O values from every 10th iteration (as well as the final iteration).
'''
#Initialize dictionary to track output of iterated correction process.
thisODict = {'residual':[],
'delta':[],
'O':[],
'relDelta':[],
'relDeltaErr':[],
'Histogram':[]}
for i in range(NUpdates):
oldDeltas = latestDeltas
#Get new dataframe, simulate new measurement.
M1Df, expandedFrags, fragSubgeometryKeys, fragmentationDictionary = initializeMethionine(latestDeltas, fragSubset,
printHeavy = False)
predictedMeasurementUpdate, MNDictUpdate, FFUpdate = simulateMeasurement(M1Df, fragmentationDictionary,
expandedFrags,
fragSubgeometryKeys,
abundanceThreshold = abundanceThreshold,
massThreshold = massThreshold,
calcFF = False,
outputPath = None,
disableProgress = True,
fractionationFactors = {},
omitMeasurements = omitMeasurements,
unresolvedDict = unresolvedDict)
#Generate new O Corrections
OCorrectionUpdate = ss.OValueCorrectTheoretical(predictedMeasurementUpdate, processSample,
massThreshold = massThreshold)
#For each O correction, generate a normal distribution. The computed value is the mean, and the sigma is set by perturbTheoryOAmt.
#explicitOCorrect may optionally contain a "Bounds" entry, when using extreme values. For example, explicitOCorrect[MNKey][fragKey] = (Lower Bound, Upper Bound).
#This is not implemented in this routine.
explicitOCorrect = {}
for MNKey, MNData in OCorrectionUpdate.items():
if MNKey not in explicitOCorrect:
explicitOCorrect[MNKey] = {}
for fragKey, fragData in MNData.items():
if fragKey not in explicitOCorrect[MNKey]:
explicitOCorrect[MNKey][fragKey] = {}
explicitOCorrect[MNKey][fragKey]['Mu,Sigma'] = (fragData, fragData * perturbTheoryOAmt)
M1Results = ss.M1MonteCarlo(processStandard, processSample, OCorrectionUpdate, isotopologuesDict,
fragmentationDictionary, perturbTheoryOAmt = perturbTheoryOAmt,
experimentalOCorrectList = experimentalOCorrectList,
N = N, GJ = False, debugMatrix = False, disableProgress = True,
storePerturbedSamples = False, storeOCorrect = True,
explicitOCorrect = explicitOCorrect, perturbOverrideList = ['M1'])
processedResults = ss.processM1MCResults(M1Results, UValuesSmp, isotopologuesDict, molecularDataFrame, disableProgress = True,
UMNSub = UMNSub)
ss.updateSiteSpecificDfM1MC(processedResults, molecularDataFrame)
M1Df = molecularDataFrame.copy()
M1Df['deltas'] = M1Df['VPDB etc. Deltas']
thisODict['O'].append(copy.deepcopy(OCorrectionUpdate['M1']))
thisODict['delta'].append(list(M1Df['deltas']))
residual = ((np.array(M1Df['deltas']) - np.array(oldDeltas))**2).sum()
thisODict['residual'].append(residual)
latestDeltas = M1Df['deltas'].values
thisODict['relDelta'].append(M1Df['Relative Deltas'].values)
thisODict['relDeltaErr'].append(M1Df['Relative Deltas Error'].values)
print(residual)
if i % 10 == 0 or residual <= breakCondition:
correctVals = {'61':[],
'133':[],
'full':[]}
for res in M1Results['Extra Info']['O Correct']:
correctVals['full'].append(res['full'])
correctVals['133'].append(res['133'])
correctVals['61'].append(res['61'])
thisODict['Histogram'].append(copy.deepcopy(correctVals))
if residual <= breakCondition:
break
return M1Results, thisODict
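
# --- hedged usage sketch (added for illustration; not in the original file) ---
# Minimal end-to-end call of the two entry points above: build a methionine
# description from 13 per-site M1 delta values (all zero here, i.e. an
# unfractionated, purely stochastic molecule) and simulate an M+1 measurement.
# Runnable only where the companion modules imported at the top of this file
# (calcIsotopologues, fragmentAndSimulate, solveSystem, ...) are importable.
def _methionine_demo():
    deltas = [0.0] * 13   # one value per site, in the order of IDList
    molecularDataFrame, expandedFrags, fragKeys, fragmentationDictionary = \
        initializeMethionine(deltas, printHeavy=False)
    predictedMeasurement, MN, FF = simulateMeasurement(
        molecularDataFrame, fragmentationDictionary, expandedFrags, fragKeys,
        massThreshold=1, disableProgress=True)
    print(sorted(predictedMeasurement.keys()))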
|
[
"numpy.array",
"fragmentAndSimulate.predictMNFragmentExpt",
"copy.deepcopy",
"solveSystem.processM1MCResults",
"calcIsotopologues.inputToAtomDict",
"solveSystem.OValueCorrectTheoretical",
"solveSystem.updateSiteSpecificDfM1MC",
"json.dumps",
"solveSystem.M1MonteCarlo",
"pandas.DataFrame",
"basicDeltaOperations.ratioToDelta",
"calcIsotopologues.calcSubDictionary",
"calcIsotopologues.introduceClump",
"calcIsotopologues.checkClumpDelta",
"calcIsotopologues.massSelections",
"basicDeltaOperations.deltaToConcentration",
"fragmentAndSimulate.expandFrag",
"fragmentAndSimulate.trackMNFragments",
"fragmentAndSimulate.UValueMeasurement"
] |
[((3681, 3712), 'pandas.DataFrame', 'pd.DataFrame', (['l'], {'columns': 'IDList'}), '(l, columns=IDList)\n', (3693, 3712), True, 'import pandas as pd\n'), ((8014, 8092), 'calcIsotopologues.inputToAtomDict', 'ci.inputToAtomDict', (['molecularDataFrame'], {'disable': 'disableProgress', 'M1Only': 'M1Only'}), '(molecularDataFrame, disable=disableProgress, M1Only=M1Only)\n', (8032, 8092), True, 'import calcIsotopologues as ci\n'), ((8923, 9025), 'fragmentAndSimulate.UValueMeasurement', 'fas.UValueMeasurement', (['bySub', 'allMeasurementInfo'], {'massThreshold': 'massThreshold', 'subList': 'UValueList'}), '(bySub, allMeasurementInfo, massThreshold=\n massThreshold, subList=UValueList)\n', (8944, 9025), True, 'import fragmentAndSimulate as fas\n'), ((9081, 9135), 'calcIsotopologues.massSelections', 'ci.massSelections', (['byAtom'], {'massThreshold': 'massThreshold'}), '(byAtom, massThreshold=massThreshold)\n', (9098, 9135), True, 'import calcIsotopologues as ci\n'), ((9147, 9251), 'fragmentAndSimulate.trackMNFragments', 'fas.trackMNFragments', (['MN', 'expandedFrags', 'fragKeys', 'molecularDataFrame'], {'unresolvedDict': 'unresolvedDict'}), '(MN, expandedFrags, fragKeys, molecularDataFrame,\n unresolvedDict=unresolvedDict)\n', (9167, 9251), True, 'import fragmentAndSimulate as fas\n'), ((9290, 9627), 'fragmentAndSimulate.predictMNFragmentExpt', 'fas.predictMNFragmentExpt', (['allMeasurementInfo', 'MN', 'expandedFrags', 'fragKeys', 'molecularDataFrame', 'fragmentationDictionary'], {'abundanceThreshold': 'abundanceThreshold', 'calcFF': 'calcFF', 'ffstd': 'ffstd', 'fractionationFactors': 'fractionationFactors', 'omitMeasurements': 'omitMeasurements', 'unresolvedDict': 'unresolvedDict', 'outputFull': 'outputFull'}), '(allMeasurementInfo, MN, expandedFrags, fragKeys,\n molecularDataFrame, fragmentationDictionary, abundanceThreshold=\n abundanceThreshold, calcFF=calcFF, ffstd=ffstd, fractionationFactors=\n fractionationFactors, omitMeasurements=omitMeasurements, unresolvedDict\n =unresolvedDict, outputFull=outputFull)\n', (9315, 9627), True, 'import fragmentAndSimulate as fas\n'), ((3831, 3862), 'fragmentAndSimulate.expandFrag', 'fas.expandFrag', (['x', 'numberAtSite'], {}), '(x, numberAtSite)\n', (3845, 3862), True, 'import fragmentAndSimulate as fas\n'), ((3924, 3963), 'basicDeltaOperations.deltaToConcentration', 'op.deltaToConcentration', (['"""S"""', 'deltas[5]'], {}), "('S', deltas[5])\n", (3947, 3963), True, 'import basicDeltaOperations as op\n'), ((3979, 4022), 'basicDeltaOperations.ratioToDelta', 'op.ratioToDelta', (['"""34S"""', '(SConc[2] / SConc[0])'], {}), "('34S', SConc[2] / SConc[0])\n", (3994, 4022), True, 'import basicDeltaOperations as op\n'), ((4036, 4079), 'basicDeltaOperations.ratioToDelta', 'op.ratioToDelta', (['"""36S"""', '(SConc[3] / SConc[0])'], {}), "('36S', SConc[3] / SConc[0])\n", (4051, 4079), True, 'import basicDeltaOperations as op\n'), ((4094, 4133), 'basicDeltaOperations.deltaToConcentration', 'op.deltaToConcentration', (['"""O"""', 'deltas[4]'], {}), "('O', deltas[4])\n", (4117, 4133), True, 'import basicDeltaOperations as op\n'), ((4149, 4192), 'basicDeltaOperations.ratioToDelta', 'op.ratioToDelta', (['"""18O"""', '(OConc[2] / OConc[0])'], {}), "('18O', OConc[2] / OConc[0])\n", (4164, 4192), True, 'import basicDeltaOperations as op\n'), ((8189, 8253), 'calcIsotopologues.calcSubDictionary', 'ci.calcSubDictionary', (['byAtom', 'molecularDataFrame'], {'atomInput': '(True)'}), '(byAtom, molecularDataFrame, atomInput=True)\n', (8209, 8253), True, 'import calcIsotopologues 
as ci\n'), ((8314, 8335), 'copy.deepcopy', 'copy.deepcopy', (['byAtom'], {}), '(byAtom)\n', (8327, 8335), False, 'import copy\n'), ((8690, 8754), 'calcIsotopologues.calcSubDictionary', 'ci.calcSubDictionary', (['byAtom', 'molecularDataFrame'], {'atomInput': '(True)'}), '(byAtom, molecularDataFrame, atomInput=True)\n', (8710, 8754), True, 'import calcIsotopologues as ci\n'), ((9771, 9803), 'json.dumps', 'json.dumps', (['predictedMeasurement'], {}), '(predictedMeasurement)\n', (9781, 9803), False, 'import json\n'), ((17088, 17191), 'solveSystem.OValueCorrectTheoretical', 'ss.OValueCorrectTheoretical', (['predictedMeasurementUpdate', 'processSample'], {'massThreshold': 'massThreshold'}), '(predictedMeasurementUpdate, processSample,\n massThreshold=massThreshold)\n', (17115, 17191), True, 'import solveSystem as ss\n'), ((18103, 18492), 'solveSystem.M1MonteCarlo', 'ss.M1MonteCarlo', (['processStandard', 'processSample', 'OCorrectionUpdate', 'isotopologuesDict', 'fragmentationDictionary'], {'perturbTheoryOAmt': 'perturbTheoryOAmt', 'experimentalOCorrectList': 'experimentalOCorrectList', 'N': 'N', 'GJ': '(False)', 'debugMatrix': '(False)', 'disableProgress': '(True)', 'storePerturbedSamples': '(False)', 'storeOCorrect': '(True)', 'explicitOCorrect': 'explicitOCorrect', 'perturbOverrideList': "['M1']"}), "(processStandard, processSample, OCorrectionUpdate,\n isotopologuesDict, fragmentationDictionary, perturbTheoryOAmt=\n perturbTheoryOAmt, experimentalOCorrectList=experimentalOCorrectList, N\n =N, GJ=False, debugMatrix=False, disableProgress=True,\n storePerturbedSamples=False, storeOCorrect=True, explicitOCorrect=\n explicitOCorrect, perturbOverrideList=['M1'])\n", (18118, 18492), True, 'import solveSystem as ss\n'), ((18705, 18829), 'solveSystem.processM1MCResults', 'ss.processM1MCResults', (['M1Results', 'UValuesSmp', 'isotopologuesDict', 'molecularDataFrame'], {'disableProgress': '(True)', 'UMNSub': 'UMNSub'}), '(M1Results, UValuesSmp, isotopologuesDict,\n molecularDataFrame, disableProgress=True, UMNSub=UMNSub)\n', (18726, 18829), True, 'import solveSystem as ss\n'), ((18883, 18948), 'solveSystem.updateSiteSpecificDfM1MC', 'ss.updateSiteSpecificDfM1MC', (['processedResults', 'molecularDataFrame'], {}), '(processedResults, molecularDataFrame)\n', (18910, 18948), True, 'import solveSystem as ss\n'), ((8420, 8510), 'calcIsotopologues.introduceClump', 'ci.introduceClump', (['byAtom', "clumpInfo['Sites']", "clumpInfo['Amount']", 'molecularDataFrame'], {}), "(byAtom, clumpInfo['Sites'], clumpInfo['Amount'],\n molecularDataFrame)\n", (8437, 8510), True, 'import calcIsotopologues as ci\n'), ((8586, 8660), 'calcIsotopologues.checkClumpDelta', 'ci.checkClumpDelta', (["clumpInfo['Sites']", 'molecularDataFrame', 'byAtom', 'stochD'], {}), "(clumpInfo['Sites'], molecularDataFrame, byAtom, stochD)\n", (8604, 8660), True, 'import calcIsotopologues as ci\n'), ((19088, 19126), 'copy.deepcopy', 'copy.deepcopy', (["OCorrectionUpdate['M1']"], {}), "(OCorrectionUpdate['M1'])\n", (19101, 19126), False, 'import copy\n'), ((19986, 20012), 'copy.deepcopy', 'copy.deepcopy', (['correctVals'], {}), '(correctVals)\n', (19999, 20012), False, 'import copy\n'), ((19215, 19239), 'numpy.array', 'np.array', (["M1Df['deltas']"], {}), "(M1Df['deltas'])\n", (19223, 19239), True, 'import numpy as np\n'), ((19242, 19261), 'numpy.array', 'np.array', (['oldDeltas'], {}), '(oldDeltas)\n', (19250, 19261), True, 'import numpy as np\n')]
|
"""
Demonstrate the type 1 NUFFT using cuFINUFFT
"""
import numpy as np
import pycuda.autoinit
from pycuda.gpuarray import GPUArray, to_gpu
from cufinufft import cufinufft
# Set up parameters for problem.
N1, N2 = 59, 61 # Size of uniform grid
M = 100 # Number of nonuniform points
n_transf = 2 # Number of input arrays
eps = 1e-6 # Requested tolerance
dtype = np.float32 # Datatype (real)
complex_dtype = np.complex64 # Datatype (complex)
# Generate coordinates of non-uniform points.
kx = np.random.uniform(-np.pi, np.pi, size=M)
ky = np.random.uniform(-np.pi, np.pi, size=M)
# Generate source strengths.
c = (np.random.standard_normal((n_transf, M))
+ 1j * np.random.standard_normal((n_transf, M)))
# Cast to desired datatype.
kx = kx.astype(dtype)
ky = ky.astype(dtype)
c = c.astype(complex_dtype)
# Allocate memory for the uniform grid on the GPU.
fk_gpu = GPUArray((n_transf, N1, N2), dtype=complex_dtype)
# Initialize the plan and set the points.
plan = cufinufft(1, (N1, N2), n_transf, eps=eps, dtype=dtype)
plan.set_pts(to_gpu(kx), to_gpu(ky))
# Execute the plan, reading from the strengths array c and storing the
# result in fk_gpu.
plan.execute(to_gpu(c), fk_gpu)
# Retrieve the result from the GPU.
fk = fk_gpu.get()
# Check accuracy of the transform at position (nt1, nt2).
nt1 = int(0.37 * N1)
nt2 = int(0.26 * N2)
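# Check the library result against a direct evaluation of the type 1 sum
# f[k1, k2] = sum_j c[j] * exp(1j * (k1 * kx[j] + k2 * ky[j])) at a single mode.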
for i in range(n_transf):
# Calculate the true value of the type 1 transform at the uniform grid
# point (nt1, nt2), which corresponds to the coordinate nt1 - N1 // 2 and
# nt2 - N2 // 2.
x, y = nt1 - N1 // 2, nt2 - N2 // 2
fk_true = np.sum(c[i] * np.exp(1j * (x * kx + y * ky)))
# Calculate the absolute and relative error.
err = np.abs(fk[i, nt1, nt2] - fk_true)
rel_err = err / np.max(np.abs(fk[i]))
print(f"[{i}] Absolute error on mode [{nt1}, {nt2}] is {err:.3g}")
print(f"[{i}] Relative error on mode [{nt1}, {nt2}] is {rel_err:.3g}")
assert(rel_err < 10 * eps)
|
[
"numpy.random.standard_normal",
"numpy.abs",
"pycuda.gpuarray.GPUArray",
"numpy.exp",
"cufinufft.cufinufft",
"numpy.random.uniform",
"pycuda.gpuarray.to_gpu"
] |
[((592, 632), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi'], {'size': 'M'}), '(-np.pi, np.pi, size=M)\n', (609, 632), True, 'import numpy as np\n'), ((638, 678), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi'], {'size': 'M'}), '(-np.pi, np.pi, size=M)\n', (655, 678), True, 'import numpy as np\n'), ((971, 1020), 'pycuda.gpuarray.GPUArray', 'GPUArray', (['(n_transf, N1, N2)'], {'dtype': 'complex_dtype'}), '((n_transf, N1, N2), dtype=complex_dtype)\n', (979, 1020), False, 'from pycuda.gpuarray import GPUArray, to_gpu\n'), ((1071, 1125), 'cufinufft.cufinufft', 'cufinufft', (['(1)', '(N1, N2)', 'n_transf'], {'eps': 'eps', 'dtype': 'dtype'}), '(1, (N1, N2), n_transf, eps=eps, dtype=dtype)\n', (1080, 1125), False, 'from cufinufft import cufinufft\n'), ((714, 754), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(n_transf, M)'], {}), '((n_transf, M))\n', (739, 754), True, 'import numpy as np\n'), ((1139, 1149), 'pycuda.gpuarray.to_gpu', 'to_gpu', (['kx'], {}), '(kx)\n', (1145, 1149), False, 'from pycuda.gpuarray import GPUArray, to_gpu\n'), ((1151, 1161), 'pycuda.gpuarray.to_gpu', 'to_gpu', (['ky'], {}), '(ky)\n', (1157, 1161), False, 'from pycuda.gpuarray import GPUArray, to_gpu\n'), ((1268, 1277), 'pycuda.gpuarray.to_gpu', 'to_gpu', (['c'], {}), '(c)\n', (1274, 1277), False, 'from pycuda.gpuarray import GPUArray, to_gpu\n'), ((1804, 1837), 'numpy.abs', 'np.abs', (['(fk[i, nt1, nt2] - fk_true)'], {}), '(fk[i, nt1, nt2] - fk_true)\n', (1810, 1837), True, 'import numpy as np\n'), ((767, 807), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(n_transf, M)'], {}), '((n_transf, M))\n', (792, 807), True, 'import numpy as np\n'), ((1712, 1744), 'numpy.exp', 'np.exp', (['(1.0j * (x * kx + y * ky))'], {}), '(1.0j * (x * kx + y * ky))\n', (1718, 1744), True, 'import numpy as np\n'), ((1865, 1878), 'numpy.abs', 'np.abs', (['fk[i]'], {}), '(fk[i])\n', (1871, 1878), True, 'import numpy as np\n')]
|
from openff.toolkit.typing.engines.smirnoff.forcefield import ForceField
from simtk import openmm, unit
from scipy.stats import distributions
import copy
import numpy as np
import os
from smt.sampling_methods import LHS
def vary_parameters_lhc(filename, num_samples, output_directory):
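    """
    Perturb the vdW parameters of a SMIRNOFF force field via Latin hypercube sampling.

    For each of num_samples LHS draws, the epsilon and rmin_half values of the vdW
    types listed in smirks_types_to_change are scaled by factors drawn from
    [0.75, 1.25], and the modified force field is written to
    <output_directory>/<sample index>/force-field.offxml.
    """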
forcefield = ForceField(filename, allow_cosmetic_attributes=True)
lj_params = forcefield.get_parameter_handler('vdW', allow_cosmetic_attributes=True)
smirks_types_to_change = ['[#6X4:1]', '[#1:1]-[#6X4]', '[#8X2H1+0:1]', '[#1:1]-[#8]']
param_range = np.asarray([0.75, 1.25])
n_dim = len(smirks_types_to_change) * 2
lj_sample_ranges = []
for i in range(n_dim):
lj_sample_ranges.append(param_range)
lj_sample_ranges = np.asarray(lj_sample_ranges)
sampling = LHS(xlimits=lj_sample_ranges)
values = sampling(num_samples)
os.makedirs(output_directory,exist_ok=True)
for i, value in enumerate(values):
reshape_values = value.reshape((int(n_dim/2), 2))
counter = 0
for lj in lj_params:
if lj.smirks in smirks_types_to_change:
lj.epsilon *= reshape_values[counter, 0]
lj.rmin_half *= reshape_values[counter, 1]
counter += 1
os.makedirs(os.path.join(output_directory,str(i+1)))
ff_name = 'force-field.offxml'
forcefield.to_file(os.path.join(output_directory, str(i+1),ff_name))
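# Example usage (illustrative only; the input .offxml path is an assumption, not part of this module):
# vary_parameters_lhc("openff-1.0.0.offxml", num_samples=10, output_directory="lhs_forcefields")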
|
[
"smt.sampling_methods.LHS",
"numpy.asarray",
"os.makedirs",
"openff.toolkit.typing.engines.smirnoff.forcefield.ForceField"
] |
[((305, 357), 'openff.toolkit.typing.engines.smirnoff.forcefield.ForceField', 'ForceField', (['filename'], {'allow_cosmetic_attributes': '(True)'}), '(filename, allow_cosmetic_attributes=True)\n', (315, 357), False, 'from openff.toolkit.typing.engines.smirnoff.forcefield import ForceField\n'), ((556, 580), 'numpy.asarray', 'np.asarray', (['[0.75, 1.25]'], {}), '([0.75, 1.25])\n', (566, 580), True, 'import numpy as np\n'), ((746, 774), 'numpy.asarray', 'np.asarray', (['lj_sample_ranges'], {}), '(lj_sample_ranges)\n', (756, 774), True, 'import numpy as np\n'), ((790, 819), 'smt.sampling_methods.LHS', 'LHS', ([], {'xlimits': 'lj_sample_ranges'}), '(xlimits=lj_sample_ranges)\n', (793, 819), False, 'from smt.sampling_methods import LHS\n'), ((859, 903), 'os.makedirs', 'os.makedirs', (['output_directory'], {'exist_ok': '(True)'}), '(output_directory, exist_ok=True)\n', (870, 903), False, 'import os\n')]
|
import argparse
from collections import defaultdict
import pickle
import re
import lightgbm as lgb
import pandas as pd
import numpy as np
import xgboost as xgb
from ..data_utils import SEG_FP, get_encoded_classes
from ..utils import print_metrics
from ..metric import get_metrics
from .blend import (
score_predictions_by_image_id, submission_from_predictions_by_image_id)
def main():
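    # Per fold: load the detailed predictions and candidate features, train (or load)
    # LightGBM and/or XGBoost models on the candidate features, average their scores,
    # keep the best-scoring candidate per item, and either report metrics or write a
    # blended submission.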
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('detailed_then_features', nargs='+',
help='detailed dataframes and the features in the same order')
arg('--use-xgb', type=int, default=1)
arg('--use-lgb', type=int, default=1)
arg('--num-boost-round', type=int, default=400)
arg('--lr', type=float, default=0.05, help='for lightgbm')
arg('--eta', type=float, default=0.15, help='for xgboost')
arg('--save-model')
arg('--load-model')
arg('--output')
arg('--n-folds', type=int, default=5)
arg('--seg-fp-adjust', type=float)
args = parser.parse_args()
if len(args.detailed_then_features) % 2 != 0:
parser.error('number of detailed and features must be equal')
n = len(args.detailed_then_features) // 2
detailed_paths, feature_paths = (args.detailed_then_features[:n],
args.detailed_then_features[n:])
if args.output:
if not args.load_model:
parser.error('--output needs --load-model')
elif len(feature_paths) == 1:
parser.error('need more than one feature df for train/valid split')
print('\n'.join(
f'{f} | {d}' for f, d in zip(detailed_paths, feature_paths)))
detailed_dfs = [pd.read_csv(path) for path in detailed_paths]
feature_dfs = [pd.read_csv(path) for path in feature_paths]
valid_df = feature_dfs[0]
assert valid_df.columns[0] == 'item'
assert valid_df.columns[-1] == 'y'
feature_cols = [
col for col in valid_df.columns[1:-1] if col not in {
'width', 'height', 'aspect',
'candidate_count', 'candidate_count_on_page',
'candidate_freq_on_page',
}]
    top_cls_re = re.compile(r'^top_\d+_cls')
def build_features(df):
df = df[feature_cols].copy()
for col in feature_cols:
if top_cls_re.match(col):
df[f'{col}_is_candidate'] = df[col] == df['candidate_cls']
# del df[col]
print(' '.join(df.columns))
return df
classes = get_encoded_classes()
cls_by_idx = {idx: cls for cls, idx in classes.items()}
cls_by_idx[-1] = SEG_FP
y_preds = []
all_metrics = []
for fold_num in range(args.n_folds):
print(f'fold {fold_num}')
detailed = (detailed_dfs[fold_num if len(detailed_dfs) != 1 else 0]
.copy())
valid_df = feature_dfs[fold_num if len(feature_dfs) != 1 else 0].copy()
valid_features = build_features(valid_df)
xgb_valid_data = xgb.DMatrix(valid_features, label=valid_df['y'])
fold_path = lambda path, kind: f'{path}.{kind}.fold{fold_num}'
if args.load_model:
lgb_load_path = (fold_path(args.load_model, 'lgb')
if args.use_lgb else None)
xgb_load_path = (fold_path(args.load_model, 'xgb')
if args.use_xgb else None)
print(f'loading from {lgb_load_path}, {xgb_load_path}')
if lgb_load_path:
lgb_model = lgb.Booster(model_file=lgb_load_path)
if xgb_load_path:
with open(xgb_load_path, 'rb') as f:
xgb_model = pickle.load(f)
else:
train_df = pd.concat([df for i, df in enumerate(feature_dfs)
if i != fold_num])
train_features = build_features(train_df)
if args.use_lgb:
lgb_model = train_lgb(
train_features, train_df['y'],
valid_features, valid_df['y'],
lr=args.lr,
num_boost_round=args.num_boost_round)
if args.use_xgb:
xgb_model = train_xgb(
train_features, train_df['y'],
valid_features, valid_df['y'],
eta=args.eta,
num_boost_round=args.num_boost_round)
if args.save_model:
lgb_save_path = (fold_path(args.save_model, 'lgb')
if args.use_lgb else None)
xgb_save_path = (fold_path(args.save_model, 'xgb')
if args.use_xgb else None)
print(f'saving to {lgb_save_path}, {xgb_save_path}')
if lgb_save_path:
lgb_model.save_model(
lgb_save_path, num_iteration=lgb_model.best_iteration)
if xgb_save_path:
with open(xgb_save_path, 'wb') as f:
pickle.dump(xgb_model, f)
print('prediction')
predictions = []
if args.use_lgb:
predictions.append(lgb_model.predict(
valid_features, num_iteration=lgb_model.best_iteration))
if args.use_xgb:
predictions.append(xgb_model.predict(
xgb_valid_data, ntree_limit=xgb_model.best_ntree_limit))
valid_df['y_pred'] = np.mean(predictions, axis=0)
if args.seg_fp_adjust:
valid_df.loc[valid_df['candidate_cls'] == -1, 'y_pred'] += \
args.seg_fp_adjust
y_preds.append(valid_df['y_pred'].values)
max_by_item = get_max_by_item(valid_df)
print('scoring')
detailed['pred'] = \
max_by_item['candidate_cls'].apply(cls_by_idx.__getitem__)
print(f'SEG_FP ratio: {(detailed["pred"] == SEG_FP).mean():.5f}')
predictions_by_image_id = get_predictions_by_image_id(detailed)
if not args.output:
metrics = {
'accuracy': (detailed["pred"] == detailed["true"]).mean(),
}
metrics.update(
score_predictions_by_image_id(predictions_by_image_id))
print_metrics(metrics)
all_metrics.append(metrics)
if args.output:
valid_df['y_pred'] = np.mean(y_preds, axis=0)
max_by_item = get_max_by_item(valid_df)
detailed['pred'] = \
max_by_item['candidate_cls'].apply(cls_by_idx.__getitem__)
predictions_by_image_id = get_predictions_by_image_id(detailed)
submission = submission_from_predictions_by_image_id(
predictions_by_image_id)
submission.to_csv(args.output, index=False)
else:
print('\nAll folds:')
print_metrics(get_metrics(all_metrics))
def train_lgb(train_features, train_y, valid_features, valid_y, *,
lr, num_boost_round):
train_data = lgb.Dataset(train_features, train_y)
valid_data = lgb.Dataset(valid_features, valid_y, reference=train_data)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'learning_rate': lr,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'feature_fraction': 0.9,
'min_data_in_leaf': 20,
'num_leaves': 41,
'scale_pos_weight': 1.2,
'lambda_l2': 1,
}
print(params)
return lgb.train(
params=params,
train_set=train_data,
num_boost_round=num_boost_round,
early_stopping_rounds=20,
valid_sets=[valid_data],
verbose_eval=10,
)
def train_xgb(train_features, train_y, valid_features, valid_y, *,
eta, num_boost_round):
train_data = xgb.DMatrix(train_features, label=train_y)
valid_data = xgb.DMatrix(valid_features, label=valid_y)
params = {
'eta': eta,
'objective': 'binary:logistic',
'gamma': 0.01,
'max_depth': 8,
}
print(params)
eval_list = [(valid_data, 'eval')]
return xgb.train(
params, train_data, num_boost_round, eval_list,
early_stopping_rounds=20,
verbose_eval=10,
)
def get_max_by_item(df):
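    # For every 'item', keep the candidate row with the highest predicted score 'y_pred'.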
return (df.iloc[df.groupby('item')['y_pred'].idxmax()]
.reset_index(drop=True))
def get_predictions_by_image_id(detailed):
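    # Group predictions by image id, keeping the class and box-center coordinates and
    # skipping candidates classified as segmentation false positives (SEG_FP).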
predictions_by_image_id = defaultdict(list)
for item in detailed.itertuples():
if item.pred != SEG_FP:
predictions_by_image_id[item.image_id].append({
'cls': item.pred,
'center': (item.x + item.w / 2, item.y + item.h / 2),
})
return predictions_by_image_id
if __name__ == '__main__':
main()
|
[
"numpy.mean",
"pickle.dump",
"argparse.ArgumentParser",
"re.compile",
"xgboost.train",
"pandas.read_csv",
"lightgbm.train",
"lightgbm.Booster",
"pickle.load",
"lightgbm.Dataset",
"collections.defaultdict",
"xgboost.DMatrix"
] |
[((406, 431), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (429, 431), False, 'import argparse\n'), ((2124, 2151), 're.compile', 're.compile', (['"""^top_\\\\d+_cls"""'], {}), "('^top_\\\\d+_cls')\n", (2134, 2151), False, 'import re\n'), ((6836, 6872), 'lightgbm.Dataset', 'lgb.Dataset', (['train_features', 'train_y'], {}), '(train_features, train_y)\n', (6847, 6872), True, 'import lightgbm as lgb\n'), ((6890, 6948), 'lightgbm.Dataset', 'lgb.Dataset', (['valid_features', 'valid_y'], {'reference': 'train_data'}), '(valid_features, valid_y, reference=train_data)\n', (6901, 6948), True, 'import lightgbm as lgb\n'), ((7303, 7459), 'lightgbm.train', 'lgb.train', ([], {'params': 'params', 'train_set': 'train_data', 'num_boost_round': 'num_boost_round', 'early_stopping_rounds': '(20)', 'valid_sets': '[valid_data]', 'verbose_eval': '(10)'}), '(params=params, train_set=train_data, num_boost_round=\n num_boost_round, early_stopping_rounds=20, valid_sets=[valid_data],\n verbose_eval=10)\n', (7312, 7459), True, 'import lightgbm as lgb\n'), ((7629, 7671), 'xgboost.DMatrix', 'xgb.DMatrix', (['train_features'], {'label': 'train_y'}), '(train_features, label=train_y)\n', (7640, 7671), True, 'import xgboost as xgb\n'), ((7689, 7731), 'xgboost.DMatrix', 'xgb.DMatrix', (['valid_features'], {'label': 'valid_y'}), '(valid_features, label=valid_y)\n', (7700, 7731), True, 'import xgboost as xgb\n'), ((7928, 8032), 'xgboost.train', 'xgb.train', (['params', 'train_data', 'num_boost_round', 'eval_list'], {'early_stopping_rounds': '(20)', 'verbose_eval': '(10)'}), '(params, train_data, num_boost_round, eval_list,\n early_stopping_rounds=20, verbose_eval=10)\n', (7937, 8032), True, 'import xgboost as xgb\n'), ((8258, 8275), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8269, 8275), False, 'from collections import defaultdict\n'), ((1656, 1673), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1667, 1673), True, 'import pandas as pd\n'), ((1721, 1738), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1732, 1738), True, 'import pandas as pd\n'), ((2946, 2994), 'xgboost.DMatrix', 'xgb.DMatrix', (['valid_features'], {'label': "valid_df['y']"}), "(valid_features, label=valid_df['y'])\n", (2957, 2994), True, 'import xgboost as xgb\n'), ((5325, 5353), 'numpy.mean', 'np.mean', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (5332, 5353), True, 'import numpy as np\n'), ((6230, 6254), 'numpy.mean', 'np.mean', (['y_preds'], {'axis': '(0)'}), '(y_preds, axis=0)\n', (6237, 6254), True, 'import numpy as np\n'), ((3459, 3496), 'lightgbm.Booster', 'lgb.Booster', ([], {'model_file': 'lgb_load_path'}), '(model_file=lgb_load_path)\n', (3470, 3496), True, 'import lightgbm as lgb\n'), ((3612, 3626), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3623, 3626), False, 'import pickle\n'), ((4920, 4945), 'pickle.dump', 'pickle.dump', (['xgb_model', 'f'], {}), '(xgb_model, f)\n', (4931, 4945), False, 'import pickle\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 21:55:56 2015
@author: aidanrocke
"""
import numpy as np
def compressSequenceNFast(posture_seq, newStart, nMax):
"""
# COMPRESSSEQUENCE Recursively finds the most compressive subsequence in
# posture_seq and creates and replaces it with a new number. This replacement
# creates a new rule in the grammar. Replacements are made until there are
# none left that lead to further compression. See the following paper
# for more details: <NAME> Witten (2000) On-Line and Off-Line
# Heuristics for Inferring Hierarchies of Repetitions in Sequences.
# Proceedings of the IEEE 88:1745.
#
# Input
# posture_seq - a list of posture sequences to be compressed
# newStart - this is the number that will be used to label the first new
# rule in the grammar. It must be greater than the maximum
    #            value in posture_seq. If 0 is passed, then max(posture_seq) + 1 is used.
# nMax - the maximum length n-gram to check for compression
#
# Output
# grammar - a number of rules by 2 cell array. The first column has the
# left hand side of each replacement rule while the second
# column has the right hand side (so the first column lists
# all non-terminals in the grammar).
# compVec - the vector that has been compressed using grammar. posture_seq
# can be recovered by applying the grammar rules in reverse.
# totSavings - the total space saving achieved during the compression,
# taking into account the size of the created grammar rules"""
# check posture_seq
if len(np.shape(posture_seq)) > 1:
raise ValueError('posture_seq must be a row vector.')
# define newStart if left empty
if newStart == 0:
newStart = max(posture_seq) + 1
# check that newStart is large enough
if newStart <= max(posture_seq):
raise ValueError('newStart must be greater than max(posture_seq).')
# initialise grammar
grammar = [[0,[0,0]]]
# initialise compVec and make a suffix array
compVec = posture_seq
totSavings = 0
# compress segments until none are found that lead to compression
sequence = [np.nan]
newInd = newStart
while len(sequence) > 0:
# find the most compressive sequence in posture_seq
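        # NOTE: compressiveNFast is not defined or imported in this file; it is assumed
        # to be provided elsewhere and to return (sequence, locations, savings).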
[sequence, locations, savings] = compressiveNFast(compVec, nMax)
# update the total savings (i.e. compression)
totSavings = totSavings + savings
# add the rule to grammar
grammar.append([newInd,sequence])
# make the replacements. Note: strrep does not work here. For example
# if sequence is [44 68 44] and compVec has a subsequence that is
# [44 68 44 68 44 68 44 448], strrep will give [68 480 480 480 448]
# which is wrong.
for j in range(len(locations)):
compVec[locations[j]:locations[j] + len(sequence) - 1] = [newInd]+[np.nan]*(len(sequence)-1)
while compVec.count(np.nan) > 0:
compVec.remove(np.nan)
newInd += 1
# check that compressed lengths, savings, and grammar size are
# consistent
if len(sequence) > 0: # on last iteration last grammar entry is empty
if len(compVec) + totSavings + len(grammar) + np.sum(len(grammar[i][1]) for i in range(len(grammar))) != len(posture_seq):
raise ValueError(['Calculated savings not consistent with original and compressed lengths and grammar size.'])
else:
if len(compVec) + totSavings + len(grammar)-1 + np.sum(len(grammar[i][1]) for i in range(len(grammar))) != len(posture_seq):
            raise ValueError('Calculated savings not consistent with original and compressed lengths and grammar size.')
    # drop the initial placeholder rule and the last (empty) rule from the grammar
return grammar[1:-1]
|
[
"numpy.shape"
] |
[((1731, 1752), 'numpy.shape', 'np.shape', (['posture_seq'], {}), '(posture_seq)\n', (1739, 1752), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
### block that grows in fiber direction triggered by fiber stretch and remodels to softer material
# TODO: Somehow, this does not converge quadratically at the end (seems irrespective of remodeling,
# but likely to be attributed to the growth in fiber direction) ---> check linearization terms!
# only one hex element in this testcase - cannot be run on multiple cores!
import ambit
import sys, traceback
import numpy as np
from pathlib import Path
import results_check
def main():
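    # One-element block test: growth triggered by fiber stretch ('fibstretch'), with
    # remodeling toward a softer neo-Hookean material; solved statically over 20 steps
    # and checked against reference nodal displacements below.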
basepath = str(Path(__file__).parent.absolute())
IO_PARAMS = {'problem_type' : 'solid',
'mesh_domain' : ''+basepath+'/input/blockhex_domain.xdmf',
'mesh_boundary' : ''+basepath+'/input/blockhex_boundary.xdmf',
'fiber_data' : {'nodal' : [''+basepath+'/input/fib1_blockhex.txt',''+basepath+'/input/fib2_blockhex.txt']},
'write_results_every' : -999,
'output_path' : ''+basepath+'/tmp/',
'results_to_write' : ['displacement','theta','fiberstretch','fiberstretch_e','phi_remod'],
'simname' : 'solid_growthremodeling_fiberstretch'}
SOLVER_PARAMS_SOLID = {'solve_type' : 'direct',
'tol_res' : 1.0e-8,
'tol_inc' : 1.0e-8}
TIME_PARAMS_SOLID = {'maxtime' : 1.0,
'numstep' : 20,
'timint' : 'static'}
FEM_PARAMS = {'order_disp' : 1,
'order_pres' : 1,
'quad_degree' : 3,
'incompressible_2field' : False}
MATERIALS = {'MAT1' : {'neohooke_dev' : {'mu' : 10.},
'ogden_vol' : {'kappa' : 10./(1.-2.*0.49)},
'growth' : {'growth_dir' : 'isotropic', # isotropic, fiber, crossfiber, radial
'growth_trig' : 'fibstretch', # fibstretch, volstress, prescribed
'growth_thres' : 1.15,
'thetamax' : 3.0,
'thetamin' : 1.0,
'tau_gr' : 1.0,
'gamma_gr' : 1.72,
'tau_gr_rev' : 10000.0,
'gamma_gr_rev' : 1.0,
'remodeling_mat' : {'neohooke_dev' : {'mu' : 3.},
'ogden_vol' : {'kappa' : 3./(1.-2.*0.49)}}}}}
# define your load curves here (syntax: tcX refers to curve X, to be used in BC_DICT key 'curve' : [X,0,0], or 'curve' : X)
class time_curves():
def tc1(self, t):
pmax = 10.0
return pmax*t/TIME_PARAMS_SOLID['maxtime']
BC_DICT = { 'dirichlet' : [{'id' : [1], 'dir' : 'x', 'val' : 0.},
{'id' : [2], 'dir' : 'y', 'val' : 0.},
{'id' : [3], 'dir' : 'z', 'val' : 0.}],
'neumann' : [{'type' : 'pk1', 'id' : [4], 'dir' : 'xyz', 'curve' : [1,0,0]}] }
# problem setup
problem = ambit.Ambit(IO_PARAMS, TIME_PARAMS_SOLID, SOLVER_PARAMS_SOLID, FEM_PARAMS, MATERIALS, BC_DICT, time_curves=time_curves())
# solve time-dependent problem
problem.solve_problem()
# --- results check
tol = 1.0e-6
check_node = []
check_node.append(np.array([1.0, 1.0, 1.0]))
u_corr = np.zeros(3*len(check_node))
## correct results
u_corr[0] = 1.0812823521095760E+00 # x
u_corr[1] = -1.4360291810029382E-01 # y
u_corr[2] = -1.4360291810029457E-01 # z
check1 = results_check.results_check_node(problem.mp.u, check_node, u_corr, problem.mp.V_u, problem.mp.comm, tol=tol, nm='u')
success = results_check.success_check([check1], problem.mp.comm)
return success
if __name__ == "__main__":
success = False
try:
success = main()
except:
print(traceback.format_exc())
if success:
sys.exit(0)
else:
sys.exit(1)
|
[
"traceback.format_exc",
"pathlib.Path",
"results_check.results_check_node",
"numpy.array",
"sys.exit",
"results_check.success_check"
] |
[((4321, 4442), 'results_check.results_check_node', 'results_check.results_check_node', (['problem.mp.u', 'check_node', 'u_corr', 'problem.mp.V_u', 'problem.mp.comm'], {'tol': 'tol', 'nm': '"""u"""'}), "(problem.mp.u, check_node, u_corr, problem.\n mp.V_u, problem.mp.comm, tol=tol, nm='u')\n", (4353, 4442), False, 'import results_check\n'), ((4452, 4506), 'results_check.success_check', 'results_check.success_check', (['[check1]', 'problem.mp.comm'], {}), '([check1], problem.mp.comm)\n', (4479, 4506), False, 'import results_check\n'), ((4079, 4104), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (4087, 4104), True, 'import numpy as np\n'), ((4704, 4715), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4712, 4715), False, 'import sys, traceback\n'), ((4734, 4745), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4742, 4745), False, 'import sys, traceback\n'), ((4651, 4673), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4671, 4673), False, 'import sys, traceback\n'), ((536, 550), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (540, 550), False, 'from pathlib import Path\n')]
|
import mne
import numpy as np
import pandas as pd
from mne.beamformer import make_dics, apply_dics_csd
from config import dics_settings, fname, args
from megset.config import fname as megset_fname
from megset.config import freq_range
subject = args.subject
print(f'Running analysis for subject {subject}')
mne.set_log_level(False) # Shhh
###############################################################################
# Load the data
###############################################################################
epochs = mne.read_epochs(megset_fname.epochs_long(subject=subject))
fwd = mne.read_forward_solution(megset_fname.fwd(subject=subject))
dip = mne.read_dipole(megset_fname.ecd(subject=subject))
###############################################################################
# Sensor-level analysis for beamformer
###############################################################################
epochs_grad = epochs.copy().pick_types(meg='grad')
epochs_mag = epochs.copy().pick_types(meg='mag')
epochs_joint = epochs.copy().pick_types(meg=True)
# Make csd matrices
freqs = np.arange(*freq_range[subject])
csd = mne.time_frequency.csd_morlet(epochs, freqs, tmin=-0.8, tmax=1.0, decim=5)
csd_baseline = mne.time_frequency.csd_morlet(epochs, freqs, tmin=-0.8, tmax=0, decim=5)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = mne.time_frequency.csd_morlet(epochs, freqs, tmin=0.2, tmax=1.0, decim=5)
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()
###############################################################################
# Compute dics solution and plot stc at dipole location
###############################################################################
dists = []
focs = []
ori_errors = []
for setting in dics_settings:
reg, sensor_type, pick_ori, inversion, weight_norm, normalize_fwd, real_filter, use_noise_cov, reduce_rank = setting
try:
if sensor_type == 'grad':
info = epochs_grad.info
elif sensor_type == 'mag':
info = epochs_mag.info
elif sensor_type == 'joint':
info = epochs_joint.info
else:
raise ValueError('Invalid sensor type: %s', sensor_type)
info_eq, fwd_eq, csd_eq = mne.channels.equalize_channels([info, fwd, csd])
filters = make_dics(info_eq, fwd_eq, csd_eq, reg=reg, pick_ori=pick_ori,
inversion=inversion, weight_norm=weight_norm,
noise_csd=csd_baseline if use_noise_cov else None,
normalize_fwd=normalize_fwd,
real_filter=real_filter, reduce_rank=reduce_rank)
# Compute source power
stc_baseline, _ = apply_dics_csd(csd_baseline, filters)
stc_power, _ = apply_dics_csd(csd_ers, filters)
# Normalize with baseline power.
stc_power /= stc_baseline
stc_power.data = np.log(stc_power.data)
peak_vertex, _ = stc_power.get_peak(vert_as_index=True)
# Compute distance between true and estimated source locations
pos = fwd['source_rr'][peak_vertex]
dist = np.linalg.norm(dip.pos - pos)
# Ratio between estimated peak activity and all estimated activity.
focality_score = stc_power.data[peak_vertex, 0] / stc_power.data.sum()
if pick_ori == 'max-power':
estimated_ori = filters['max_power_oris'][0][peak_vertex]
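            # Orientation error is the angle between the estimated and true dipole
            # orientations, folded into [0, 90] degrees since the sign is arbitrary.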
ori_error = np.rad2deg(np.arccos(estimated_ori @ dip.ori[0]))
if ori_error > 90:
ori_error = 180 - ori_error
else:
ori_error = np.nan
except Exception as e:
print(e)
dist = np.nan
focality_score = np.nan
ori_error = np.nan
print(setting, dist, focality_score, ori_error)
dists.append(dist)
focs.append(focality_score)
ori_errors.append(ori_error)
###############################################################################
# Save everything to a pandas dataframe
###############################################################################
df = pd.DataFrame(dics_settings,
columns=['reg', 'sensor_type', 'pick_ori', 'inversion',
'weight_norm', 'normalize_fwd', 'real_filter',
'use_noise_cov', 'reduce_rank'])
df['dist'] = dists
df['focality'] = focs
df['ori_error'] = ori_errors
df.to_csv(fname.dics_megset_results(subject=subject))
print('OK!')
|
[
"numpy.arccos",
"mne.set_log_level",
"numpy.arange",
"megset.config.fname.fwd",
"megset.config.fname.ecd",
"mne.beamformer.apply_dics_csd",
"numpy.log",
"mne.channels.equalize_channels",
"numpy.linalg.norm",
"mne.time_frequency.csd_morlet",
"config.fname.dics_megset_results",
"pandas.DataFrame",
"mne.beamformer.make_dics",
"megset.config.fname.epochs_long"
] |
[((308, 332), 'mne.set_log_level', 'mne.set_log_level', (['(False)'], {}), '(False)\n', (325, 332), False, 'import mne\n'), ((1091, 1122), 'numpy.arange', 'np.arange', (['*freq_range[subject]'], {}), '(*freq_range[subject])\n', (1100, 1122), True, 'import numpy as np\n'), ((1129, 1203), 'mne.time_frequency.csd_morlet', 'mne.time_frequency.csd_morlet', (['epochs', 'freqs'], {'tmin': '(-0.8)', 'tmax': '(1.0)', 'decim': '(5)'}), '(epochs, freqs, tmin=-0.8, tmax=1.0, decim=5)\n', (1158, 1203), False, 'import mne\n'), ((1219, 1291), 'mne.time_frequency.csd_morlet', 'mne.time_frequency.csd_morlet', (['epochs', 'freqs'], {'tmin': '(-0.8)', 'tmax': '(0)', 'decim': '(5)'}), '(epochs, freqs, tmin=-0.8, tmax=0, decim=5)\n', (1248, 1291), False, 'import mne\n'), ((1360, 1433), 'mne.time_frequency.csd_morlet', 'mne.time_frequency.csd_morlet', (['epochs', 'freqs'], {'tmin': '(0.2)', 'tmax': '(1.0)', 'decim': '(5)'}), '(epochs, freqs, tmin=0.2, tmax=1.0, decim=5)\n', (1389, 1433), False, 'import mne\n'), ((4110, 4281), 'pandas.DataFrame', 'pd.DataFrame', (['dics_settings'], {'columns': "['reg', 'sensor_type', 'pick_ori', 'inversion', 'weight_norm',\n 'normalize_fwd', 'real_filter', 'use_noise_cov', 'reduce_rank']"}), "(dics_settings, columns=['reg', 'sensor_type', 'pick_ori',\n 'inversion', 'weight_norm', 'normalize_fwd', 'real_filter',\n 'use_noise_cov', 'reduce_rank'])\n", (4122, 4281), True, 'import pandas as pd\n'), ((544, 585), 'megset.config.fname.epochs_long', 'megset_fname.epochs_long', ([], {'subject': 'subject'}), '(subject=subject)\n', (568, 585), True, 'from megset.config import fname as megset_fname\n'), ((619, 652), 'megset.config.fname.fwd', 'megset_fname.fwd', ([], {'subject': 'subject'}), '(subject=subject)\n', (635, 652), True, 'from megset.config import fname as megset_fname\n'), ((676, 709), 'megset.config.fname.ecd', 'megset_fname.ecd', ([], {'subject': 'subject'}), '(subject=subject)\n', (692, 709), True, 'from megset.config import fname as megset_fname\n'), ((4427, 4469), 'config.fname.dics_megset_results', 'fname.dics_megset_results', ([], {'subject': 'subject'}), '(subject=subject)\n', (4452, 4469), False, 'from config import dics_settings, fname, args\n'), ((2260, 2308), 'mne.channels.equalize_channels', 'mne.channels.equalize_channels', (['[info, fwd, csd]'], {}), '([info, fwd, csd])\n', (2290, 2308), False, 'import mne\n'), ((2327, 2579), 'mne.beamformer.make_dics', 'make_dics', (['info_eq', 'fwd_eq', 'csd_eq'], {'reg': 'reg', 'pick_ori': 'pick_ori', 'inversion': 'inversion', 'weight_norm': 'weight_norm', 'noise_csd': '(csd_baseline if use_noise_cov else None)', 'normalize_fwd': 'normalize_fwd', 'real_filter': 'real_filter', 'reduce_rank': 'reduce_rank'}), '(info_eq, fwd_eq, csd_eq, reg=reg, pick_ori=pick_ori, inversion=\n inversion, weight_norm=weight_norm, noise_csd=csd_baseline if\n use_noise_cov else None, normalize_fwd=normalize_fwd, real_filter=\n real_filter, reduce_rank=reduce_rank)\n', (2336, 2579), False, 'from mne.beamformer import make_dics, apply_dics_csd\n'), ((2736, 2773), 'mne.beamformer.apply_dics_csd', 'apply_dics_csd', (['csd_baseline', 'filters'], {}), '(csd_baseline, filters)\n', (2750, 2773), False, 'from mne.beamformer import make_dics, apply_dics_csd\n'), ((2797, 2829), 'mne.beamformer.apply_dics_csd', 'apply_dics_csd', (['csd_ers', 'filters'], {}), '(csd_ers, filters)\n', (2811, 2829), False, 'from mne.beamformer import make_dics, apply_dics_csd\n'), ((2931, 2953), 'numpy.log', 'np.log', (['stc_power.data'], {}), '(stc_power.data)\n', (2937, 2953), 
True, 'import numpy as np\n'), ((3150, 3179), 'numpy.linalg.norm', 'np.linalg.norm', (['(dip.pos - pos)'], {}), '(dip.pos - pos)\n', (3164, 3179), True, 'import numpy as np\n'), ((3478, 3515), 'numpy.arccos', 'np.arccos', (['(estimated_ori @ dip.ori[0])'], {}), '(estimated_ori @ dip.ori[0])\n', (3487, 3515), True, 'import numpy as np\n')]
|
import csv
from sklearn.cluster import MiniBatchKMeans
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pickle
import numpy as np
import sklearn.metrics as metrics
from yellowbrick.cluster import InterclusterDistance
from scipy.optimize import curve_fit
import umap.umap_ as umap
from colorsys import hls_to_rgb
from pylab import *
from datetime import datetime
import os
import sys  # sys.maxsize is used below; import explicitly rather than relying on pylab's wildcard import
import argparse
import scipy.stats as scist
from docx import Document
from feature_extraction import LemmaStemmerTokenizer
# Allow for larger CSV files
maxInt = sys.maxsize
while True:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
try:
csv.field_size_limit(maxInt)
break
except OverflowError:
maxInt = int(maxInt/10)
def get_clusters(selected_k, data_file, processed_file, centers, years, save_folder="", save=True):
"""
Parameters
----------
selected_k : selected number of clusters
data_file : pickle with raw data as list of dictionaries
processed_file : pickle with transformed data as array
centers : array. initial centroids from LDA. Can be initialized as 'k-means++'
years : list of strings. years for intracluster analysis
save_folder : string. directory to save result, the default is "".
save : boolean
Returns
-------
output : dictionary. Keys:
"yr_avg_cost": List of lists. Average funding by year for each cluster.
"yr_total_cost": List of lists. Total funding by year for each cluster.
"size": List. Size of each cluster.
"data_by_cluster": List of lists of dictionaries. Points in each cluster: [ [{Cluster1pt1}, {Cluster1pt2},...], [{Cluster2pt1}, {Cluster2pt2},...], ...]
"centroids": 10 x K array of cluster centroids,
"score": List. Silhouette score by cluster
"model": MiniBatchKMeans model
"labels": Cluster labels of data points (ordered)
"""
# Load data as list of dictionaries
data = pickle.load(open(data_file,"rb"))
# Transformed data
X_transformed = pickle.load(open(processed_file,"rb"))
# Perform mini batch k means
km = MiniBatchKMeans(n_clusters=selected_k, init=centers, verbose=0, max_no_improvement=None)
clusters = km.fit_predict(X_transformed)
scores = metrics.silhouette_samples(X_transformed, clusters)
# Output data
cluster_all = []
costs = []
yoy = []
size = []
mechanisms = []
for i in range(6): # initialization
mechanisms.append([])
MECH_NAMES = "R01", "U01", "R44", "U24", "R21", "U54"
for i in range(0,selected_k):
# indices of cluster k
cluster = [idx for idx, element in enumerate(clusters) if element == i]
# get points
cluster_data = [data[ind] for ind in cluster]
cluster_scores = [scores[ind] for ind in cluster]
        for idx in range(len(cluster_data)):  # separate name to avoid shadowing the cluster index i
            cluster_data[idx]["score"] = cluster_scores[idx]
cluster_all.append(cluster_data)
# calculate average cost and std
try:
average_cost = sum([item["award_amount"] for item in cluster_data])/len(cluster_data)
except:
average_cost = 0
costs.append(average_cost)
cost_trend = []
for year in years:
year_data = [data[ind]["award_amount"] for ind in cluster if data[ind]["year"] == year]
if len(year_data) == 0:
cost_trend.append(0)
else:
year_cost = sum(year_data) # /len(year_data)
cost_trend.append(year_cost)
yoy.append(cost_trend)
size.append(len(cluster))
# get number of awards per mechanism
if len(cluster_data) != 0:
for j in range(len(mechanisms)):
mech = len([ind for ind in cluster if data[ind]["mechanism"] == MECH_NAMES[j]])/len(cluster_data)
mechanisms[j].append(mech)
else:
for j in range(len(mechanisms)):
mechanisms[j].append(0)
# Get centroids
# Identify the top terms for each cluster, using the TF-IDF terms with the highest values in the centroid
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
vectorizer = pickle.load(open("data/vectorizer.pkl","rb"))
terms = vectorizer.get_feature_names_out()
centroids = []
for i in range(selected_k):
centroid_list = []
for ind in order_centroids[i, :15]:
centroid_list.append(terms[ind])
centroids.append(centroid_list)
# Save centroids
if save:
centroid_file = open("{}/centroids".format(save_folder), "w", encoding='utf8')
for i in range(selected_k):
centroid_file.write("Cluster %d:" % i)
for ind in order_centroids[i, :15]:
centroid_file.write(" %s" % terms[ind])
centroid_file.write("\n")
centroid_file.close()
# get scores
score = metrics.silhouette_score(X_transformed, km.labels_)
output = {
"yr_avg_cost": costs, # Average award size by year by cluster
"yr_total_cost": yoy, # Total award size by year by cluster
"size": size, # Number of awards in each cluster
"data_by_cluster": cluster_all,
"centroids": centroids,
"score": score, # Silhouette score for
"model": km, # K-means model
"labels": clusters, # Ordered list of cluster number labels for each award
"mechanisms": mechanisms # List of lists: [r01, u01, r44, u24, r21, u54]. Each internal list has number of awards per mechanism by cluster
}
return output
def umap_visualization(X_transformed, cluster_labels, silhouette_scores, sizes, save_folder=""):
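    # Embed the TF-IDF matrix in 2-D with UMAP (Hellinger metric), color the nine
    # clusters with the highest silhouette scores, gray out the rest, and save umap.png.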
#outlier_scores = sklearn.neighbors.LocalOutlierFactor(contamination=0.1).fit_predict(X_transformed)
#X_transformed = X_transformed[outlier_scores != -1]
#cluster_labels = cluster_labels[outlier_scores != -1]
# product = [silhouette_scores[i]*sizes[i] for i in range(len(sizes))]
top_clusters = sorted(range(len(silhouette_scores)), key=lambda i: silhouette_scores[i], reverse=True)[:9]
n_subset = len(cluster_labels)
selected_cells = np.random.choice(np.arange(X_transformed.shape[0]), size = n_subset, replace = False)
mapper = umap.UMAP(metric='hellinger', random_state=42).fit(X_transformed[selected_cells,:])
embedding = mapper.transform(X_transformed[selected_cells,:])
# Colors
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:olive', 'tab:cyan']
selected_colors = []
for point in selected_cells:
if cluster_labels[point] in top_clusters:
selected_colors.append(colors[top_clusters.index(cluster_labels[point])])
else:
selected_colors.append('tab:gray')
# Plot Clusters on UMAP
plt.figure()
plt.grid(visible=None)
plt.scatter(embedding[:, 0], embedding[:, 1], cmap='Spectral', s=5, c=selected_colors)
plt.gca().set_aspect('equal', 'datalim')
num_clust = len(np.unique(cluster_labels[selected_cells]))
#plt.colorbar(boundaries=np.arange(num_clust+1)-0.5).set_ticks(np.arange(num_clust))
plt.title('UMAP Projection of Awards, TF-IDF', fontsize=14)
plt.xlabel("UMAP 1")
plt.ylabel("UMAP 2")
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
plt.savefig('{}/umap.png'.format(save_folder))
def rainbow_color_stops(n=10, end=1, shade=0.9):
return [ hls_to_rgb(end * i/(n-1)*shade, 0.5*shade, 1*shade) for i in range(n) ]
def get_funding_projections(data):
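    # Fit an exponential a*exp(b*t) to each cluster's yearly total funding, then return
    # the fitted value at the end of the evaluation grid, the growth rate b, and a 95% CI for b.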
# 1. Determine dimensions for plot
k = len(data["size"])
factors = []
for i in range(1, k+1):
if k / i == i:
factors.extend([i,i])
elif k % i == 0:
factors.append(i)
dim1, dim2 = factors[int(len(factors)/2)], factors[int(len(factors)/2-1)]
# 2. Create plot
fig, axs = plt.subplots(dim1, dim2, sharex='all', sharey='all')
# 3. Create hidden frame for shared labels
fig.add_subplot(111, frameon=False)
plt.grid(visible=None)
plt.tick_params(labelcolor='none', which='both', top=False, bottom=False, left=False, right=False)
plt.xlabel("Years from 1985")
plt.ylabel("Funding ($100 millions)")
# 4. Get projections
years_int = list(range(0,36))
projection = []
growth = []
bounds = []
for i in range(len(data["yr_total_cost"])):
popt, pcov = curve_fit(lambda t,a,b: a*np.exp(b*t), years_int, data["yr_total_cost"][i], p0=(4000, 0.1))
std = np.sqrt(np.diagonal(pcov))
x = np.linspace(0,21,400)
# upper0 = popt[0]+1.96*std[0]
# lower0 = popt[0]-1.96*std[0]
upper1 = popt[1]+1.96*std[1]
lower1 = popt[1]-1.96*std[1]
ypred = [popt[0]*np.exp(popt[1]*point) for point in x] #-popt[0]
projection.append(ypred[-1])
growth.append(popt[1])
bounds.append([lower1, upper1])
# projection.append(0)
# growth.append(0)
# bounds.append([0,0])
# 5. Return 2021 projections and growth rate
return projection, growth, bounds
def viz_centroids(data):
model = data["model"]
X_transformed = pickle.load(open("data/processed-data.pkl","rb"))
plt.figure()
visualizer = InterclusterDistance(model, random_state=0)
visualizer.fit(X_transformed) # Fit the data to the visualizer
visualizer.show() # Finalize and render the figure
def predict_clusters(test_data, selected_k, model):
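    # Vectorize the held-out awards with the saved TF-IDF vectorizer, assign them to the
    # existing clusters with the trained k-means model, and return the awards grouped by
    # cluster along with the cluster sizes.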
test_data = pickle.load(open(test_data,"rb"))
vectorizer = pickle.load(open("data/vectorizer.pkl","rb"))
input_text = [item["text"] for item in test_data]
if len(input_text) == 0:
return [0 for i in range(0,selected_k)], 0
test_transformed = vectorizer.transform(input_text)
years = [str(i) for i in range(1985,2021)]
labels = model.predict(test_transformed)
# Output data
cluster_all = []
costs = []
yoy = []
size = []
for i in range(0,selected_k):
# indices of cluster k
cluster = [idx for idx, element in enumerate(labels) if element == i]
# get points
cluster_data = [test_data[ind] for ind in cluster]
cluster_all.append(cluster_data)
# calculate average cost and std
try:
average_cost = sum([item["award_amount"] for item in cluster_data])/len(cluster_data)
except:
average_cost = 0
costs.append(average_cost)
cost_trend = []
for year in years:
year_data = [test_data[ind]["award_amount"] for ind in cluster if test_data[ind]["year"] == year]
if len(year_data) == 0:
cost_trend.append(0)
else:
year_cost = sum(year_data)
cost_trend.append(year_cost)
yoy.append(cost_trend)
size.append(len(cluster))
return cluster_all, size
def get_best_cluster(selected_k, num_trials, centers, years, save_folder="", save=True):
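    # Run the clustering num_trials times and keep the trial with the highest overall
    # silhouette score; returns that clustering and the scores from every trial.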
scores = []
results = {}
print("Optimizing model...")
for i in range(num_trials):
# Generate clusters for a selected k
data = get_clusters(selected_k, "data/data.pkl", "data/processed-data.pkl", 'k-means++', years, save_folder, save=save)
j = 0
for thing in data["data_by_cluster"]:
for item in thing:
try:
                    results[item["id"]].append(data["centroids"][j])
                except KeyError:
results[item["id"]] = [item["id"],item["title"],item["award_amount"],data["centroids"][j]]
j+=1
print("Trial {}: Score = {:.3f}".format(str(i+1), data["score"]))
scores.append(data["score"])
if data["score"] >= max(scores):
chosen = data
return chosen, scores
def get_citations(clusters):
"""
Parameters
----------
clusters : nested lists of dictionaries representing each award in a cluster.
Returns
-------
total_citations : list of total citations by cluster
total_papers : list of total papers by cluster
apts: average APT [0.9, ...]
lower: lower bound of 95% CI of average APT: "APT (lower - upper)" [0.85,...]
upper: upper bound of 95% CI of average APT [0.95,...] - "0.9 (0.85-0.95)"
"""
# Get clusters by project number
clusters_by_project = []
for cluster in clusters:
cluster = [item["project_number"] for item in cluster]
cluster = list(set(cluster)) # Remove duplicates
clusters_by_project.append(cluster)
# Get number of citations, apt, and publication year by paper
output = {}
with open("data/citations.csv", newline='', encoding='utf8') as csvfile:
raw_data = list(csv.reader(csvfile))
for i in range(1,len(raw_data)): # "rcr": float(raw_data[i][6]),
output[raw_data[i][0]] = {
"citations": int(raw_data[i][13]),
"apt": float(raw_data[i][11]),
"year": int(raw_data[i][1])}
# Get project number and year by paper
with open("data/publications.csv", newline='', encoding='utf8') as csvfile:
raw_data = list(csv.reader(csvfile))
for i in range(1,len(raw_data)):
if raw_data[i][1] in output.keys():
output[raw_data[i][1]]["project"] = raw_data[i][0]
# Calculate total number of citations, total number of papers, average RCR, average APT for each cluster
total_citations = []
total_papers = []
apts = []
apts_95 = []
lower = []
upper = []
total_availability = []
# rcrs = []
for cluster in clusters_by_project:
cluster_citations = []
# cluster_rcr = []
cluster_apt = []
num_papers = 0
availability = []
for idd in cluster:
papers = [output[key]["citations"] for key in output if output[key]["project"]==idd] # list of all papers associated with cluster by citation count
# rcr = [output[key]["rcr"] for key in output if output[key]["project"]==idd]
apt = [output[key]["apt"] for key in output if output[key]["project"]==idd]
avail_years = [max(0, 2021-output[key]["year"]) for key in output if output[key]["project"]==idd]
# cluster_rcr.extend(rcr)
cluster_apt.extend(apt)
num_papers += len(papers)
cluster_citations.append(sum(papers))
availability.append(sum(avail_years))
total_citations.append(sum(cluster_citations))
total_papers.append(num_papers)
apts_95.append(sum([1 for i in cluster_apt if i==0.95])/len(cluster_apt))
apts.append(np.mean(cluster_apt))
#create 95% confidence interval for population mean weight
apts_interval = scist.norm.interval(alpha=0.95, loc=np.mean(cluster_apt), scale=scist.sem(cluster_apt))
lower.append(apts_interval[0])
upper.append(apts_interval[1])
# rcrs.append(sum(cluster_apt)/len(cluster_apt))
total_availability.append(int(sum(availability)))
return total_citations, total_papers, apts_95, apts, lower, upper, total_availability
def get_rep_clusters(result):
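    # For each cluster CSV, pick the five unique award titles with the highest silhouette
    # scores (keeping the most recent year when a title repeats) and write them to a
    # Word document (supp_info.docx).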
path, dirs, files = next(os.walk('{}/clusters'.format(result)))
file_count = len(files)
if file_count == 0:
return
document = Document()
for i in range(file_count):
unique_awards = {}
# open file
with open('{}/clusters/cluster-{}.csv'.format(result, str(i)), newline='', encoding='utf8') as csvfile:
raw_data = list(csv.reader(csvfile))
for j in range(1,len(raw_data)):
title = raw_data[j][1]
organization = raw_data[j][6]
mechanism = raw_data[j][7]
year = int(raw_data[j][8])
score = float(raw_data[j][11])
# If this is a new title
if title not in unique_awards:
unique_awards[title] = {
"organization": organization,
"activity": mechanism,
"year": year,
"score": score,
}
# If the title is already there
else:
current_year = unique_awards[title]["year"]
# Use the most recent one
if year > current_year:
unique_awards[title] = {
"organization": organization,
"activity": mechanism,
"year": year,
"score": score,
}
unique_awards_sorted = dict(sorted(unique_awards.items(), key = lambda item: -item[1]["score"]))
unique_awards_list = list(unique_awards_sorted.items())[0:5]
p = document.add_paragraph()
p.add_run('Cluster {}:'.format(str(i))).bold = True
table = document.add_table(rows=6, cols=5)
hdr_cells = table.rows[0].cells
hdr_cells[0].text = 'Title'
hdr_cells[1].text = 'Awardee'
hdr_cells[2].text = 'Award Activity'
hdr_cells[3].text = 'Year'
hdr_cells[4].text = 'Sample Silhouette Score'
for i in range(len(unique_awards_list)):
table.cell(i+1,0).text = unique_awards_list[i][0] # Title
table.cell(i+1,1).text = unique_awards_list[i][1]['organization'] # Awardee
table.cell(i+1,2).text = unique_awards_list[i][1]['activity'] # Award Activity
table.cell(i+1,3).text = str(unique_awards_list[i][1]['year']) # Year
table.cell(i+1,4).text = "{:.2g}".format(unique_awards_list[i][1]['score']) # Sample Silhouette Score
document.add_page_break()
document.save('{}/supp_info.docx'.format(result))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--k',
type=int,
required=True,
help='number of clusters',
default=30,
)
parser.add_argument(
'--trials',
type=int,
required=True,
help='number of trials',
default=50,
)
FLAGS, unparsed = parser.parse_known_args()
years = [str(i) for i in range(1985,2021)]
selected_k = FLAGS.k
num_trials = FLAGS.trials
centers = 'k-means++'
# Create folder to save results
now = datetime.now()
save_folder = "results/"+now.strftime("%m-%d-%Y--%H%M%S")
os.mkdir(save_folder)
# Get best clustering
data, scores = get_best_cluster(selected_k, num_trials, centers, years, save_folder)
with open("{}/model_clustering.pkl".format(save_folder), 'wb') as handle:
pickle.dump(data, handle)
# Final cluster files
num = 0
os.mkdir(save_folder+"/clusters")
for cluster in data["data_by_cluster"]:
if cluster == []:
continue
keys = cluster[0].keys()
with open('{}/clusters/cluster-{}.csv'.format(save_folder,str(num)), 'w', newline='', encoding='utf8') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(cluster)
num+=1
# Silhouette score by cluster
print("")
print("------Silhouette scores------")
X_transformed = pickle.load(open("data/processed-data.pkl","rb"))
scores = metrics.silhouette_samples(X_transformed, data["labels"])
tabulated = []
pairs = [(scores[i],data["labels"][i]) for i in range(len(scores))]
for i in range(selected_k):
avg_score = np.mean([j[0] for j in pairs if j[1] == i])
print("Cluster {}: {}".format(str(i), str(avg_score)))
tabulated.append(avg_score)
print("----------------------------")
print("")
# Final centroids
order_centroids = data["model"].cluster_centers_.argsort()[:, ::-1]
vectorizer = pickle.load(open("data/vectorizer.pkl","rb"))
terms = vectorizer.get_feature_names_out()
centroids = []
centroid_file = open("{}/centroids".format(save_folder), "w", encoding='utf8')
for i in range(selected_k):
centroid_file.write("Cluster %d:" % i)
centroid_list = []
for ind in order_centroids[i, :15]:
centroid_file.write(" %s," % terms[ind])
centroid_list.append(terms[ind])
centroids.append(centroid_list)
centroid_file.write("\n")
centroid_file.close()
# UMAP Visualization
X_transformed = pickle.load(open("data/processed-data.pkl","rb"))
umap_visualization(X_transformed, data["labels"], tabulated, data["size"], save_folder)
# Get 2021 projections, projected growth rates, and confidence bounds on growth rates by cluster
projection, growth, bounds = get_funding_projections(data) # 2021 prediction
# Get 2021 clusters
model = data["model"]
clusters_test, size_test = predict_clusters("data/test-data.pkl", selected_k, model)
x = np.arange(selected_k)
if size_test == 0:
cluster_cost_2021 = [0 for i in range(0, selected_k)]
else:
cluster_cost_2021 = [(sum([item["award_amount"] for item in group]) if len(group) > 0 else 0) for group in clusters_test]
# Save 2021 clusters
num = 0
os.mkdir("{}/clusters_test".format(save_folder))
for cluster in clusters_test:
try:
keys = cluster[0].keys()
except:
num+=1
continue
with open('{}/clusters_test/cluster-{}.csv'.format(save_folder,str(num)), 'w', newline='', encoding='utf8') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(cluster)
num+=1
# Citations and papers
citations, papers, apt_pct, apt, lower, upper, availability = get_citations(data["data_by_cluster"])
# Total funding
total_cluster_funding = [sum([item["award_amount"] for item in group]) for group in data["data_by_cluster"]]
# Get representative clusters for supp info
get_rep_clusters(save_folder)
# All data - note blank columns for description, category
output = [["Cluster", "Size", "Total", "Citations", "APT % over 95%", "Avg. APT", "95%CI L", "95%CI U", "Papers", "Citations per $1mil funding", "Years of Availability", "Citations per thousand dollars of funding per year", "Projected 2021 Award", "Actual 2021 Award To Date", "Growth Rate", "95%CI L", "95%CI U", "Score", "Description", "Category", "Clinical/Technical", "Centroids", "%R01", "%U01", "%R44", "%U24", "%R21", "%U54"]]
for i in range(selected_k):
output.append([i, data["size"][i], total_cluster_funding[i], citations[i], apt_pct[i], apt[i], lower[i], upper[i], papers[i], citations[i]/total_cluster_funding[i]*1e6, availability[i], citations[i]/total_cluster_funding[i]*1e3/availability[i], projection[i], cluster_cost_2021[i], growth[i], bounds[i][0], bounds[i][1], tabulated[i], " ", " ", " ", centroids[i], data["mechanisms"][0][i], data["mechanisms"][1][i], data["mechanisms"][2][i], data["mechanisms"][3][i], data["mechanisms"][4][i], data["mechanisms"][5][i]])
with open('{}/final_data.csv'.format(save_folder), 'w', newline='', encoding='utf8') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(output)
print("Complete.")
|
[
"csv.field_size_limit",
"csv.DictWriter",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"yellowbrick.cluster.InterclusterDistance",
"sklearn.metrics.silhouette_samples",
"scipy.stats.sem",
"numpy.arange",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"numpy.linspace",
"os.mkdir",
"matplotlib.pyplot.scatter",
"csv.reader",
"numpy.diagonal",
"matplotlib.use",
"matplotlib.pyplot.gca",
"sklearn.cluster.MiniBatchKMeans",
"matplotlib.pyplot.tick_params",
"csv.writer",
"matplotlib.pyplot.title",
"pickle.dump",
"numpy.unique",
"colorsys.hls_to_rgb",
"umap.umap_.UMAP",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.get_current_fig_manager",
"sklearn.metrics.silhouette_score",
"matplotlib.pyplot.subplots",
"docx.Document"
] |
[((73, 96), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (87, 96), False, 'import matplotlib\n'), ((2181, 2273), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'selected_k', 'init': 'centers', 'verbose': '(0)', 'max_no_improvement': 'None'}), '(n_clusters=selected_k, init=centers, verbose=0,\n max_no_improvement=None)\n', (2196, 2273), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((2328, 2379), 'sklearn.metrics.silhouette_samples', 'metrics.silhouette_samples', (['X_transformed', 'clusters'], {}), '(X_transformed, clusters)\n', (2354, 2379), True, 'import sklearn.metrics as metrics\n'), ((4983, 5034), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['X_transformed', 'km.labels_'], {}), '(X_transformed, km.labels_)\n', (5007, 5034), True, 'import sklearn.metrics as metrics\n'), ((6910, 6922), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6920, 6922), True, 'import matplotlib.pyplot as plt\n'), ((6927, 6949), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': 'None'}), '(visible=None)\n', (6935, 6949), True, 'import matplotlib.pyplot as plt\n'), ((6954, 7045), 'matplotlib.pyplot.scatter', 'plt.scatter', (['embedding[:, 0]', 'embedding[:, 1]'], {'cmap': '"""Spectral"""', 's': '(5)', 'c': 'selected_colors'}), "(embedding[:, 0], embedding[:, 1], cmap='Spectral', s=5, c=\n selected_colors)\n", (6965, 7045), True, 'import matplotlib.pyplot as plt\n'), ((7242, 7301), 'matplotlib.pyplot.title', 'plt.title', (['"""UMAP Projection of Awards, TF-IDF"""'], {'fontsize': '(14)'}), "('UMAP Projection of Awards, TF-IDF', fontsize=14)\n", (7251, 7301), True, 'import matplotlib.pyplot as plt\n'), ((7306, 7326), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""UMAP 1"""'], {}), "('UMAP 1')\n", (7316, 7326), True, 'import matplotlib.pyplot as plt\n'), ((7331, 7351), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""UMAP 2"""'], {}), "('UMAP 2')\n", (7341, 7351), True, 'import matplotlib.pyplot as plt\n'), ((7367, 7396), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (7394, 7396), True, 'import matplotlib.pyplot as plt\n'), ((8003, 8055), 'matplotlib.pyplot.subplots', 'plt.subplots', (['dim1', 'dim2'], {'sharex': '"""all"""', 'sharey': '"""all"""'}), "(dim1, dim2, sharex='all', sharey='all')\n", (8015, 8055), True, 'import matplotlib.pyplot as plt\n'), ((8148, 8170), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': 'None'}), '(visible=None)\n', (8156, 8170), True, 'import matplotlib.pyplot as plt\n'), ((8175, 8277), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelcolor': '"""none"""', 'which': '"""both"""', 'top': '(False)', 'bottom': '(False)', 'left': '(False)', 'right': '(False)'}), "(labelcolor='none', which='both', top=False, bottom=False,\n left=False, right=False)\n", (8190, 8277), True, 'import matplotlib.pyplot as plt\n'), ((8278, 8307), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Years from 1985"""'], {}), "('Years from 1985')\n", (8288, 8307), True, 'import matplotlib.pyplot as plt\n'), ((8312, 8349), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Funding ($100 millions)"""'], {}), "('Funding ($100 millions)')\n", (8322, 8349), True, 'import matplotlib.pyplot as plt\n'), ((9347, 9359), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9357, 9359), True, 'import matplotlib.pyplot as plt\n'), ((9377, 9420), 'yellowbrick.cluster.InterclusterDistance', 'InterclusterDistance', (['model'], {'random_state': '(0)'}), '(model, \n random_state=0)\n', (9397, 9420), False, 'from yellowbrick.cluster import InterclusterDistance\n'), ((15427, 15437), 'docx.Document', 'Document', ([], {}), '()\n', (15435, 15437), False, 'from docx import Document\n'), ((17959, 17984), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17982, 17984), False, 'import argparse\n'), ((18505, 18519), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (18517, 18519), False, 'from datetime import datetime\n'), ((18586, 18607), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (18594, 18607), False, 'import os\n'), ((18879, 18914), 'os.mkdir', 'os.mkdir', (["(save_folder + '/clusters')"], {}), "(save_folder + '/clusters')\n", (18887, 18914), False, 'import os\n'), ((19496, 19553), 'sklearn.metrics.silhouette_samples', 'metrics.silhouette_samples', (['X_transformed', "data['labels']"], {}), "(X_transformed, data['labels'])\n", (19522, 19553), True, 'import sklearn.metrics as metrics\n'), ((21070, 21091), 'numpy.arange', 'np.arange', (['selected_k'], {}), '(selected_k)\n', (21079, 21091), True, 'import numpy as np\n'), ((696, 724), 'csv.field_size_limit', 'csv.field_size_limit', (['maxInt'], {}), '(maxInt)\n', (716, 724), False, 'import csv\n'), ((6243, 6276), 'numpy.arange', 'np.arange', (['X_transformed.shape[0]'], {}), '(X_transformed.shape[0])\n', (6252, 6276), True, 'import numpy as np\n'), ((7106, 7147), 'numpy.unique', 'np.unique', (['cluster_labels[selected_cells]'], {}), '(cluster_labels[selected_cells])\n', (7115, 7147), True, 'import numpy as np\n'), ((7557, 7618), 'colorsys.hls_to_rgb', 'hls_to_rgb', (['(end * i / (n - 1) * shade)', '(0.5 * shade)', '(1 * shade)'], {}), '(end * i / (n - 1) * shade, 0.5 * shade, 1 * shade)\n', (7567, 7618), False, 'from colorsys import hls_to_rgb\n'), ((8679, 8702), 'numpy.linspace', 'np.linspace', (['(0)', '(21)', '(400)'], {}), '(0, 21, 400)\n', (8690, 8702), True, 'import numpy as np\n'), ((18810, 18835), 'pickle.dump', 'pickle.dump', (['data', 'handle'], {}), '(data, handle)\n', (18821, 18835), False, 'import pickle\n'), ((19697, 19740), 'numpy.mean', 'np.mean', (['[j[0] for j in pairs if j[1] == i]'], {}), '([j[0] for j in pairs if j[1] == i])\n', (19704, 19740), True, 'import numpy as np\n'), ((23367, 23386), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (23377, 23386), False, 'import csv\n'), ((6325, 6371), 'umap.umap_.UMAP', 'umap.UMAP', ([], {'metric': '"""hellinger"""', 'random_state': '(42)'}), "(metric='hellinger', random_state=42)\n", (6334, 6371), True, 'import umap.umap_ as umap\n'), ((7045, 7054), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7052, 7054), True, 'import matplotlib.pyplot as plt\n'), ((8648, 8665), 'numpy.diagonal', 'np.diagonal', (['pcov'], {}), '(pcov)\n', (8659, 8665), True, 'import numpy as np\n'), ((12843, 12862), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (12853, 12862), False, 'import csv\n'), ((13262, 13281), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (13272, 13281), False, 'import csv\n'), ((14759, 14779), 'numpy.mean', 'np.mean', (['cluster_apt'], {}), '(cluster_apt)\n', (14766, 14779), True, 'import numpy as np\n'), ((19191, 19224), 'csv.DictWriter', 'csv.DictWriter', (['output_file', 'keys'], {}), '(output_file, keys)\n', (19205, 19224), False, 'import csv\n'), ((21707, 21740), 'csv.DictWriter', 'csv.DictWriter', (['output_file', 'keys'], {}), '(output_file, keys)\n', (21721, 21740), False, 'import csv\n'), ((8879, 8902), 'numpy.exp', 'np.exp', (['(popt[1] \n * point)'], {}), '(popt[1] * point)\n', (8885, 8902), True, 'import numpy as np\n'), ((14909, 14929), 'numpy.mean', 'np.mean', (['cluster_apt'], {}), '(cluster_apt)\n', (14916, 14929), True, 'import numpy as np\n'), ((14937, 14959), 'scipy.stats.sem', 'scist.sem', (['cluster_apt'], {}), '(cluster_apt)\n', (14946, 14959), True, 'import scipy.stats as scist\n'), ((15659, 15678), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (15669, 15678), False, 'import csv\n'), ((8557, 8570), 'numpy.exp', 'np.exp', (['(b * t)'], {}), '(b * t)\n', (8563, 8570), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
class LaneIdentifier:
    """
    Identifies left and right lane lines in a perspective-warped binary
    image: a sliding-window search collects lane pixels, second-order
    polynomials are fitted to them, and the fits are smoothed over recent
    frames with an outlier filter against the previous curves.
    """
    def __init__(self, smooth_factor, filter):
        self.left_lane_inds = []
        self.right_lane_inds = []
        self.lane_gap = []
        self.binary_warped = None
        self.window_height = None
        self.leftx_current = 0
        self.rightx_current = 0
        self.nonzeroy = None
        self.nonzerox = None
        self.left_fit = None
        self.right_fit = None
        self.margin = 100   # half-width of each search window, in pixels
        self.nwindows = 9   # number of vertically stacked search windows
        self.minpix = 50    # minimum pixels found before recentering a window
        self.leftx = []
        self.lefty = []
        self.rightx = []
        self.righty = []
        self.smooth_factor = smooth_factor  # how many recent lane pixels to keep for fitting
        self.filter = filter  # max allowed pixel distance from the previous fit
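    # Per-frame entry point: runs a blind histogram/sliding-window search on
    # the first frame (or whenever no fit exists yet), otherwise searches a
    # margin around the previous polynomial fits.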
def identify_lanes(self, binary):
self.binary_warped = binary
        self.window_height = int(self.binary_warped.shape[0] // self.nwindows)  # np.int was removed in NumPy 1.24
nonzero = binary.nonzero()
self.nonzeroy = np.array(nonzero[0])
self.nonzerox = np.array(nonzero[1])
if self.left_fit is None or self.right_fit is None:
self.blind_sliding_window_search()
else:
self.selective_window_search()
ret = self.extract_lane_lines()
if ret is False:
return False, None, None
return True, self.left_fit, self.right_fit
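    # Blind search: a column histogram of the lower half of the warped image
    # seeds the left/right window positions; nwindows stacked windows then
    # track each lane upward, recentering on the pixels they capture.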
def blind_sliding_window_search(self):
histogram = np.sum(self.binary_warped[self.binary_warped.shape[0] // 2:, :], axis=0)
        midpoint = int(histogram.shape[0] // 2)
leftx_current = np.argmax(histogram[:midpoint])
rightx_current = np.argmax(histogram[midpoint:]) + midpoint
l_lane_inds = []
r_lane_inds = []
for window in range(self.nwindows):
win_y_low = self.binary_warped.shape[0] - (window + 1) * self.window_height
win_y_high = self.binary_warped.shape[0] - window * self.window_height
win_xleft_low = leftx_current - self.margin
win_xleft_high = leftx_current + self.margin
win_xright_low = rightx_current - self.margin
win_xright_high = rightx_current + self.margin
good_left_inds = ((self.nonzeroy >= win_y_low) &
(self.nonzeroy < win_y_high) &
(self.nonzerox >= win_xleft_low) &
(self.nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((self.nonzeroy >= win_y_low) &
(self.nonzeroy < win_y_high) &
(self.nonzerox >= win_xright_low) &
(self.nonzerox < win_xright_high)).nonzero()[0]
l_lane_inds.append(good_left_inds)
r_lane_inds.append(good_right_inds)
            # Recenter the next window on the mean x-position of the pixels
            # just captured, when enough were found.
            if len(good_left_inds) > self.minpix:
                leftx_current = int(np.mean(self.nonzerox[good_left_inds]))
            if len(good_right_inds) > self.minpix:
                rightx_current = int(np.mean(self.nonzerox[good_right_inds]))
self.left_lane_inds = np.concatenate(l_lane_inds)
self.right_lane_inds = np.concatenate(r_lane_inds)
return
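    # Targeted search: with fits already available, keep only the nonzero
    # pixels that fall within +/- margin of each previous lane polynomial.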
def selective_window_search(self):
self.left_lane_inds = ((self.nonzerox >
(self.left_fit[0]*(self.nonzeroy**2) + self.left_fit[1]*self.nonzeroy +
self.left_fit[2] - self.margin)) &
(self.nonzerox <
(self.left_fit[0] * (self.nonzeroy ** 2) + self.left_fit[1]*self.nonzeroy +
self.left_fit[2] + self.margin)))
self.right_lane_inds = ((self.nonzerox >
(self.right_fit[0] * (self.nonzeroy ** 2) + self.right_fit[1] * self.nonzeroy +
self.right_fit[2] - self.margin)) &
(self.nonzerox <
(self.right_fit[0] * (self.nonzeroy ** 2) + self.right_fit[1] * self.nonzeroy +
self.right_fit[2] + self.margin)))
return
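    # Pixel extraction and fitting: filter outliers against the previous
    # fits, pool pixels over the last smooth_factor samples, and refit both
    # second-order polynomials x = a*y^2 + b*y + c.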
def extract_lane_lines(self):
# Extract left and right line pixel positions
leftx = self.nonzerox[self.left_lane_inds]
lefty = self.nonzeroy[self.left_lane_inds]
rightx = self.nonzerox[self.right_lane_inds]
righty = self.nonzeroy[self.right_lane_inds]
if leftx.size == 0 or rightx.size == 0:
if self.left_fit is None or self.right_fit is None:
return False
        # Outlier filter: drop pixels that lie farther than self.filter
        # from the previously recognized lane curve.
        if self.left_fit is not None:
            leftx_trend = self.left_fit[0]*lefty*lefty + self.left_fit[1]*lefty + self.left_fit[2]
            residual = abs(leftx - leftx_trend)  # renamed from 'range' to avoid shadowing the built-in
            indices = (residual > self.filter).nonzero()[0]
            leftx = np.delete(leftx, indices)
            lefty = np.delete(lefty, indices)
        if self.right_fit is not None:
            rightx_trend = self.right_fit[0]*righty*righty + self.right_fit[1]*righty + self.right_fit[2]
            residual = abs(rightx - rightx_trend)
            indices = (residual > self.filter).nonzero()[0]
            rightx = np.delete(rightx, indices)
            righty = np.delete(righty, indices)
        # Pool pixels from recent frames into the 2nd-order polynomial fit
        # to damp frame-to-frame oscillation of the lane lines.
self.leftx = np.append(self.leftx, leftx)
self.lefty = np.append(self.lefty, lefty)
self.rightx = np.append(self.rightx, rightx)
self.righty = np.append(self.righty, righty)
self.leftx = self.leftx[-self.smooth_factor:]
self.lefty = self.lefty[-self.smooth_factor:]
self.rightx = self.rightx[-self.smooth_factor:]
self.righty = self.righty[-self.smooth_factor:]
# Fit a second order polynomial to each
self.left_fit = np.polyfit(self.lefty, self.leftx, 2)
self.right_fit = np.polyfit(self.righty, self.rightx, 2)
return True
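    # Debug view: the left panel shows the detected lane pixels, the right
    # panel the search-window corridors (drawn on a blank canvas) with the
    # fitted curves; axis limits assume a 1280x720 warped image.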
def visualization(self):
# Generate x and y values for plotting
ploty = np.linspace(0, self.binary_warped.shape[0] - 1, self.binary_warped.shape[0])
left_fitx = self.left_fit[0] * ploty ** 2 + self.left_fit[1] * ploty + self.left_fit[2]
right_fitx = self.right_fit[0] * ploty ** 2 + self.right_fit[1] * ploty + self.right_fit[2]
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((self.binary_warped, self.binary_warped, self.binary_warped)) * 255
fit_img = np.zeros_like(out_img)
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[self.nonzeroy[self.left_lane_inds], self.nonzerox[self.left_lane_inds]] = [255, 0, 0]
out_img[self.nonzeroy[self.right_lane_inds], self.nonzerox[self.right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - self.margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + self.margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - self.margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + self.margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
result = cv2.addWeighted(fit_img, 1, window_img, 0.3, 0)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(out_img)
ax1.set_title('Detected Lane Points', fontsize=30)
ax2.imshow(result)
ax2.set_title('Lane Lines', fontsize=30)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
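# --- Minimal usage sketch (not part of the original module) ---
# Everything below is illustrative: 'warped_binary.png' is a hypothetical
# perspective-warped binary lane mask, and the smooth_factor/filter values
# are plausible but unverified choices.
if __name__ == '__main__':
    mask = cv2.imread('warped_binary.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input file
    if mask is not None:
        binary = (mask > 0).astype(np.uint8)  # binarize the grayscale mask
        lanes = LaneIdentifier(smooth_factor=5000, filter=100)
        ok, left_fit, right_fit = lanes.identify_lanes(binary)
        if ok:
            print('left fit (a, b, c):', left_fit)
            print('right fit (a, b, c):', right_fit)
            lanes.visualization()
            plt.show()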
|
[
"numpy.polyfit",
"numpy.hstack",
"numpy.array",
"numpy.mean",
"numpy.delete",
"matplotlib.pyplot.plot",
"cv2.addWeighted",
"numpy.linspace",
"numpy.vstack",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"numpy.argmax",
"numpy.int_",
"matplotlib.pyplot.xlim",
"numpy.int",
"numpy.dstack",
"numpy.append",
"numpy.sum",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] |
[((841, 893), 'numpy.int', 'np.int', (['(self.binary_warped.shape[0] // self.nwindows)'], {}), '(self.binary_warped.shape[0] // self.nwindows)\n', (847, 893), True, 'import numpy as np\n'), ((954, 974), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (962, 974), True, 'import numpy as np\n'), ((999, 1019), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (1007, 1019), True, 'import numpy as np\n'), ((1405, 1477), 'numpy.sum', 'np.sum', (['self.binary_warped[self.binary_warped.shape[0] // 2:, :]'], {'axis': '(0)'}), '(self.binary_warped[self.binary_warped.shape[0] // 2:, :], axis=0)\n', (1411, 1477), True, 'import numpy as np\n'), ((1497, 1528), 'numpy.int', 'np.int', (['(histogram.shape[0] // 2)'], {}), '(histogram.shape[0] // 2)\n', (1503, 1528), True, 'import numpy as np\n'), ((1553, 1584), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (1562, 1584), True, 'import numpy as np\n'), ((3070, 3097), 'numpy.concatenate', 'np.concatenate', (['l_lane_inds'], {}), '(l_lane_inds)\n', (3084, 3097), True, 'import numpy as np\n'), ((3129, 3156), 'numpy.concatenate', 'np.concatenate', (['r_lane_inds'], {}), '(r_lane_inds)\n', (3143, 3156), True, 'import numpy as np\n'), ((5491, 5519), 'numpy.append', 'np.append', (['self.leftx', 'leftx'], {}), '(self.leftx, leftx)\n', (5500, 5519), True, 'import numpy as np\n'), ((5541, 5569), 'numpy.append', 'np.append', (['self.lefty', 'lefty'], {}), '(self.lefty, lefty)\n', (5550, 5569), True, 'import numpy as np\n'), ((5592, 5622), 'numpy.append', 'np.append', (['self.rightx', 'rightx'], {}), '(self.rightx, rightx)\n', (5601, 5622), True, 'import numpy as np\n'), ((5645, 5675), 'numpy.append', 'np.append', (['self.righty', 'righty'], {}), '(self.righty, righty)\n', (5654, 5675), True, 'import numpy as np\n'), ((5970, 6007), 'numpy.polyfit', 'np.polyfit', (['self.lefty', 'self.leftx', '(2)'], {}), '(self.lefty, self.leftx, 2)\n', (5980, 6007), True, 'import numpy as np\n'), ((6033, 6072), 'numpy.polyfit', 'np.polyfit', (['self.righty', 'self.rightx', '(2)'], {}), '(self.righty, self.rightx, 2)\n', (6043, 6072), True, 'import numpy as np\n'), ((6188, 6264), 'numpy.linspace', 'np.linspace', (['(0)', '(self.binary_warped.shape[0] - 1)', 'self.binary_warped.shape[0]'], {}), '(0, self.binary_warped.shape[0] - 1, self.binary_warped.shape[0])\n', (6199, 6264), True, 'import numpy as np\n'), ((6655, 6677), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (6668, 6677), True, 'import numpy as np\n'), ((6699, 6721), 'numpy.zeros_like', 'np.zeros_like', (['out_img'], {}), '(out_img)\n', (6712, 6721), True, 'import numpy as np\n'), ((7422, 7471), 'numpy.hstack', 'np.hstack', (['(left_line_window1, left_line_window2)'], {}), '((left_line_window1, left_line_window2))\n', (7431, 7471), True, 'import numpy as np\n'), ((7781, 7832), 'numpy.hstack', 'np.hstack', (['(right_line_window1, right_line_window2)'], {}), '((right_line_window1, right_line_window2))\n', (7790, 7832), True, 'import numpy as np\n'), ((8048, 8095), 'cv2.addWeighted', 'cv2.addWeighted', (['fit_img', '(1)', 'window_img', '(0.3)', '(0)'], {}), '(fit_img, 1, window_img, 0.3, 0)\n', (8063, 8095), False, 'import cv2\n'), ((8121, 8157), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 10)'}), '(1, 2, figsize=(20, 10))\n', (8133, 8157), True, 'import matplotlib.pyplot as plt\n'), ((8331, 8373), 'matplotlib.pyplot.plot', 'plt.plot', (['left_fitx', 'ploty'], {'color': '"""yellow"""'}), "(left_fitx, ploty, color='yellow')\n", (8339, 8373), True, 'import matplotlib.pyplot as plt\n'), ((8382, 8425), 'matplotlib.pyplot.plot', 'plt.plot', (['right_fitx', 'ploty'], {'color': '"""yellow"""'}), "(right_fitx, ploty, color='yellow')\n", (8390, 8425), True, 'import matplotlib.pyplot as plt\n'), ((8434, 8451), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1280)'], {}), '(0, 1280)\n', (8442, 8451), True, 'import matplotlib.pyplot as plt\n'), ((8460, 8476), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(720)', '(0)'], {}), '(720, 0)\n', (8468, 8476), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1641), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (1619, 1641), True, 'import numpy as np\n'), ((4928, 4953), 'numpy.delete', 'np.delete', (['leftx', 'indices'], {}), '(leftx, indices)\n', (4937, 4953), True, 'import numpy as np\n'), ((4974, 4999), 'numpy.delete', 'np.delete', (['lefty', 'indices'], {}), '(lefty, indices)\n', (4983, 4999), True, 'import numpy as np\n'), ((5268, 5294), 'numpy.delete', 'np.delete', (['rightx', 'indices'], {}), '(rightx, indices)\n', (5277, 5294), True, 'import numpy as np\n'), ((5316, 5342), 'numpy.delete', 'np.delete', (['righty', 'indices'], {}), '(righty, indices)\n', (5325, 5342), True, 'import numpy as np\n'), ((6559, 6630), 'numpy.dstack', 'np.dstack', (['(self.binary_warped, self.binary_warped, self.binary_warped)'], {}), '((self.binary_warped, self.binary_warped, self.binary_warped))\n', (6568, 6630), True, 'import numpy as np\n'), ((7919, 7943), 'numpy.int_', 'np.int_', (['[left_line_pts]'], {}), '([left_line_pts])\n', (7926, 7943), True, 'import numpy as np\n'), ((7991, 8016), 'numpy.int_', 'np.int_', (['[right_line_pts]'], {}), '([right_line_pts])\n', (7998, 8016), True, 'import numpy as np\n'), ((2867, 2905), 'numpy.mean', 'np.mean', (['self.nonzerox[good_left_inds]'], {}), '(self.nonzerox[good_left_inds])\n', (2874, 2905), True, 'import numpy as np\n'), ((2998, 3037), 'numpy.mean', 'np.mean', (['self.nonzerox[good_right_inds]'], {}), '(self.nonzerox[good_right_inds])\n', (3005, 3037), True, 'import numpy as np\n'), ((7170, 7213), 'numpy.vstack', 'np.vstack', (['[left_fitx - self.margin, ploty]'], {}), '([left_fitx - self.margin, ploty])\n', (7179, 7213), True, 'import numpy as np\n'), ((7524, 7568), 'numpy.vstack', 'np.vstack', (['[right_fitx - self.margin, ploty]'], {}), '([right_fitx - self.margin, ploty])\n', (7533, 7568), True, 'import numpy as np\n'), ((7278, 7321), 'numpy.vstack', 'np.vstack', (['[left_fitx + self.margin, ploty]'], {}), '([left_fitx + self.margin, ploty])\n', (7287, 7321), True, 'import numpy as np\n'), ((7634, 7678), 'numpy.vstack', 'np.vstack', (['[right_fitx + self.margin, ploty]'], {}), '([right_fitx + self.margin, ploty])\n', (7643, 7678), True, 'import numpy as np\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.