code (string, lengths 31 to 1.05M) | apis (list) | extract_api (string, lengths 97 to 1.91M) |
---|---|---|
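Each row below pairs a Python source file (code) with the fully qualified APIs it uses (apis) and a list of per-call records (extract_api). Each record appears to start with a pair of character offsets into the original source, followed by the resolved API name, the call's arguments, the normalized call text, and the import statement it relies on. Below is a minimal sketch of how such a row could be decoded, assuming the offsets are character spans into the unmodified source and that extract_api arrives as a Python-literal string; the helper name calls_from_row is illustrative, not part of any dataset API.

import ast

def calls_from_row(code: str, extract_api: str):
    # extract_api is a Python-literal list of records; literal_eval parses it safely.
    records = ast.literal_eval(extract_api)
    for record in records:
        (start, end), api_name = record[0], record[1]
        # The offset pair appears to delimit the call expression in the original source,
        # e.g. ((273, 293), 'numpy.random.seed', ...) recovers 'np.random.seed(1234)'.
        yield api_name, code[start:end]

For the first row, for instance, the span (273, 293) is exactly the length of np.random.seed(1234), which suggests the offsets were computed on the original, indented file rather than on this dump.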
import unittest
from sys import argv
import numpy as np
import torch
from objective.logistic import Logistic_Gradient
from .utils import Container, assert_all_close, assert_all_close_dict


class TestObj_Logistic_Gradient(unittest.TestCase):

    def setUp(self):
        np.random.seed(1234)
        torch.manual_seed(1234)
        n_features = 3
        n_samples = 5
        n_classes = 7
        mu = 0.02
        self.hparams = Container(n_classes=n_classes,
                                 n_features=n_features,
                                 n_samples=n_samples,
                                 mu=mu)
        self.w = torch.randn(n_features, n_classes, requires_grad=True)
        self.x = torch.randn(n_samples, n_features)
        self.y = torch.randn(n_samples).long()
        self.obj = Logistic_Gradient(self.hparams)

    def test_error(self):
        error_test = self.obj.task_error(self.w, self.x, self.y)
        error_ref = torch.tensor(2.9248)
        assert_all_close(error_test, error_ref, "task_error returned value")

    def test_oracle(self):
        oracle_info_test = self.obj.oracle(self.w, self.x, self.y)
        oracle_info_ref = {
            'dw': torch.tensor([[ 0.2578, -0.1417,  0.0046, -0.1236, -0.0180,  0.0249, -0.0273],
                                [-0.3585,  0.1889, -0.0937,  0.0522,  0.0100,  0.1239,  0.0620],
                                [-0.2921,  0.2251, -0.1870,  0.1791,  0.0171,  0.0109, -0.0156]]),
            'obj': torch.tensor(3.1189)}
        assert_all_close_dict(oracle_info_ref, oracle_info_test, "oracle returned info")


if __name__ == '__main__':
    unittest.main(argv=argv)
|
[
"torch.manual_seed",
"objective.logistic.Logistic_Gradient",
"torch.tensor",
"numpy.random.seed",
"unittest.main",
"torch.randn"
] |
[((1628, 1652), 'unittest.main', 'unittest.main', ([], {'argv': 'argv'}), '(argv=argv)\n', (1641, 1652), False, 'import unittest\n'), ((273, 293), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (287, 293), True, 'import numpy as np\n'), ((302, 325), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (319, 325), False, 'import torch\n'), ((634, 688), 'torch.randn', 'torch.randn', (['n_features', 'n_classes'], {'requires_grad': '(True)'}), '(n_features, n_classes, requires_grad=True)\n', (645, 688), False, 'import torch\n'), ((706, 740), 'torch.randn', 'torch.randn', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (717, 740), False, 'import torch\n'), ((807, 838), 'objective.logistic.Logistic_Gradient', 'Logistic_Gradient', (['self.hparams'], {}), '(self.hparams)\n', (824, 838), False, 'from objective.logistic import Logistic_Gradient\n'), ((951, 971), 'torch.tensor', 'torch.tensor', (['(2.9248)'], {}), '(2.9248)\n', (963, 971), False, 'import torch\n'), ((1190, 1391), 'torch.tensor', 'torch.tensor', (['[[0.2578, -0.1417, 0.0046, -0.1236, -0.018, 0.0249, -0.0273], [-0.3585, \n 0.1889, -0.0937, 0.0522, 0.01, 0.1239, 0.062], [-0.2921, 0.2251, -0.187,\n 0.1791, 0.0171, 0.0109, -0.0156]]'], {}), '([[0.2578, -0.1417, 0.0046, -0.1236, -0.018, 0.0249, -0.0273],\n [-0.3585, 0.1889, -0.0937, 0.0522, 0.01, 0.1239, 0.062], [-0.2921, \n 0.2251, -0.187, 0.1791, 0.0171, 0.0109, -0.0156]])\n', (1202, 1391), False, 'import torch\n'), ((1484, 1504), 'torch.tensor', 'torch.tensor', (['(3.1189)'], {}), '(3.1189)\n', (1496, 1504), False, 'import torch\n'), ((758, 780), 'torch.randn', 'torch.randn', (['n_samples'], {}), '(n_samples)\n', (769, 780), False, 'import torch\n')]
|
from datasets.dataset_processors import ExtendedDataset
from models.model import IdentificationModel, ResNet50
from models.siamese import SiameseNet, MssNet
from base import BaseExecutor
from utils.utilities import type_error_msg, value_error_msg, timer, load_model
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from configparser import ConfigParser
from os import makedirs
from os.path import join, exists, dirname
from scipy.io import savemat
import numpy as np


class Tester(BaseExecutor):
    """A general tester for all.

    Args:
        config (ConfigParser): The ConfigParser which reads setting files.
        name (str): A name defined in base.py.
        dataset (str): A dataset defined in base.py.
        model (str): A model defined in base.py.
        epoch (int): The epoch of the saved trained model for testing.
        scene (str): A scene defined in base.py.

    Attributes:
        test_path (str): Path to save features/labels/cams.
    """
    DEFAULT_BATCH_SIZE = 64
    DEFAULT_NUM_WORKER = 8

    def __init__(self, config, name, dataset, model, epoch: int, scene):
        if not isinstance(name, Tester.Name):
            if isinstance(name, str):
                if not name.islower():
                    name = name.lower()
                if name not in Tester.NAME_LIST:
                    raise ValueError(value_error_msg('name', name, Tester.NAME_LIST))
                name = Tester.Name(name)
            else:
                raise TypeError(type_error_msg('name', name, [Tester.Name, str]))
        if not isinstance(dataset, Tester.Dataset):
            if isinstance(dataset, str):
                if not dataset.islower():
                    dataset = dataset.lower()
                if dataset not in Tester.DATASET_LIST:
                    raise ValueError(value_error_msg('dataset', dataset, Tester.DATASET_LIST))
                dataset = Tester.Dataset(dataset)
            else:
                raise TypeError(type_error_msg('dataset', dataset, [Tester.Dataset, str]))
        if not isinstance(model, Tester.Model):
            if isinstance(model, str):
                if not model.islower():
                    model = model.lower()
                if model not in Tester.MODEL_LIST:
                    raise ValueError(value_error_msg('model', model, Tester.MODEL_LIST))
                model = Tester.Model(model)
            else:
                raise TypeError(type_error_msg('model', model, [Tester.MODEL_LIST, str]))
        if not isinstance(scene, Tester.Scene):
            if isinstance(scene, str):
                if not scene.islower():
                    scene = scene.lower()
                if scene not in Tester.SCENE_LIST:
                    raise ValueError(value_error_msg('scene', scene, Tester.SCENE_LIST))
                scene = Tester.Scene(scene)
            else:
                raise TypeError(type_error_msg('scene', scene, [Tester.SCENE_LIST, str]))
        if not isinstance(epoch, int):
            raise TypeError(type_error_msg('epoch', epoch, [int]))
        if not epoch >= 0:
            raise ValueError(value_error_msg('epoch', epoch, 'epoch >= 0'))
        self.name = name
        self.dataset = dataset
        self.model = model
        self.scene = scene
        self.config = config
        self.train_class = config.getint(self.name.value, 'train_class')
        # initialize model
        model_name = self.model.value
        if self.model == Tester.Model.MSSNET:
            self.model = MssNet(self.config)
        elif self.model == Tester.Model.RESNET50:
            self.model = ResNet50(self.config, self.train_class, False)
        # else:
        #     raise ValueError(value_error_msg('model', model, Tester.MODEL_LIST))
        transform_list = []
        if self.name == Tester.Name.MARKET1501:
            transform_list = [
                # transforms.Resize((160, 64)),
                # transforms.Pad(10),
                # transforms.RandomCrop((160, 64)),
                # transforms.RandomHorizontalFlip(),
                # transforms.ToTensor()
                transforms.Resize((256, 128), interpolation=3),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]
        self.dataset_type = ['gallery', 'query']
        if self.scene == Tester.Scene.MULTI_SHOT:
            self.dataset_type.append('multi_query')
        # prepare datasets
        if self.dataset == Tester.Dataset.EXTENDED:
            self.dataset = {}
            for item in self.dataset_type:
                self.dataset[item] = ExtendedDataset(self.name.value,
                                                  join(self.config[self.name.value]['dataset_dir'], item),
                                                  transforms.Compose(transform_list))
        else:
            raise ValueError(value_error_msg('dataset', dataset, Tester.Dataset.EXTENDED))
        # load weights
        load_model(self.model, self.config[self.name.value]['model_format'] % (model_name, epoch))
        if isinstance(self.model, IdentificationModel):
            self.model.set_to_test()
        self.test_path = self.config[self.name.value]['test_path'] % self.scene.value

    @timer
    def run(self):
        """
        Reads: A pth file of model's state dict.
        Processes: Computes the features of gallery and query imgs.
        Writes: A mat file of saved gallery and query info.
        """
        Tester.run_info(self.__class__.__name__, self.scene.value)
        self.model.eval()  # for batch norm
        test_dict = {}
        dataloader = {}
        with torch.no_grad():
            if self.scene == Tester.Scene.SINGLE_SHOT:
                for item in self.dataset_type:
                    dataloader[item] = DataLoader(self.dataset[item],
                                                  num_workers=Tester.DEFAULT_NUM_WORKER,
                                                  batch_size=Tester.DEFAULT_BATCH_SIZE)
                    test_dict[item + '_feature'] = Tester.normalize(self.extract_feature(dataloader[item]))
                    test_dict[item + '_label'] = self.dataset[item].ids
                    test_dict[item + '_cam'] = self.dataset[item].cams
            elif self.scene == Tester.Scene.MULTI_SHOT:
                item = 'gallery'
                dataloader[item] = DataLoader(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,
                                              batch_size=Tester.DEFAULT_BATCH_SIZE)
                test_dict[item + '_feature'] = Tester.normalize(self.extract_feature(dataloader[item]))
                test_dict[item + '_label'] = self.dataset[item].ids
                test_dict[item + '_cam'] = self.dataset[item].cams
                item = 'multi_query'  # no need to save multi_query features into dict
                dataloader[item] = DataLoader(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,
                                              batch_size=Tester.DEFAULT_BATCH_SIZE)
                multi_query_feature = self.extract_feature(dataloader[item])  # no normalization
                multi_query_label = self.dataset[item].ids
                multi_query_cam = self.dataset[item].cams
                item = 'query'
                test_dict[item + '_label'] = self.dataset[item].ids
                test_dict[item + '_cam'] = self.dataset[item].cams
                test_dict[item + '_feature'] = Tester.normalize_numpy(Tester.mean_feature(multi_query_feature,
                                                                       np.asarray(multi_query_label),
                                                                       np.asarray(multi_query_cam),
                                                                       np.asarray(test_dict['query_label']),
                                                                       np.asarray(test_dict['query_cam']),
                                                                       test_dict))
        test_dir = dirname(self.test_path)
        if not exists(test_dir):
            makedirs(test_dir)
        # WARNING: saving test.mat will overwrite an existing test.mat
        savemat(self.test_path, test_dict)

    @staticmethod
    def mean_feature(mquery_feature, mquery_label, mquery_cam, query_label, query_cam, dictionary):
        """Averages multi query features to get the (mean) query feature.

        Args:
            mquery_feature (np.ndarray): The feature of multi query imgs, shape(#multi_query, embedding_dim).
            mquery_label (np.ndarray): The people labels of multi query imgs, a 1d int array, shape(#multi_query).
            mquery_cam (np.ndarray): The camera labels of multi query imgs, a 1d int array, shape(#multi_query).
            query_label (np.ndarray): The people labels of query imgs, a 1d int array, shape(#query).
            query_cam (np.ndarray): The camera labels of query imgs, a 1d int array, shape(#query).
            dictionary (dict): A mutable dictionary for adding
                {'multi_index': [index_array1, index_array2, ...]} (implicit return).

        Returns:
            query_feature (ndarray): The mean feature of mquery_feature.
        """
        query_feature = []
        multi_index = []
        for i in range(len(query_label)):
            label_mask = mquery_label == query_label[i]
            cam_mask = mquery_cam == query_cam[i]
            index = np.flatnonzero(label_mask & cam_mask)
            multi_index.append(index)
            query_feature.append(np.mean(mquery_feature[index, :], axis=0))
        dictionary['multi_index'] = multi_index
        return np.asarray(query_feature)

    @staticmethod
    def flip_lr(img: torch.Tensor):
        """Flips an image tensor horizontally.

        Args:
            img (torch.Tensor): The original image tensor.

        Returns:
            img_flip (torch.Tensor): The flipped image tensor.
        """
        inv_idx = torch.arange(img.size(3) - 1, -1, -1)  # N x C x H x W
        img_flip = img.index_select(3, inv_idx)
        return img_flip

    @staticmethod
    def normalize(x: torch.Tensor):
        """Normalizes the 2d torch tensor.

        Args:
            x (torch.Tensor): in 2d.

        Returns:
            normalized_x (torch.Tensor): in 2d.
        """
        xnorm = torch.norm(x, p=2, dim=1, keepdim=True)
        return x.div(xnorm.expand_as(x))

    @staticmethod
    def normalize_numpy(x: np.ndarray):  # 25% faster than normalize with torch above
        """Normalizes the 2d numpy array.

        Args:
            x (np.ndarray): in 2d.

        Returns:
            normalized_x (np.ndarray): in 2d.
        """
        xnorm = np.linalg.norm(x, axis=1, keepdims=True)
        return x / np.repeat(xnorm, x.shape[1]).reshape(x.shape)

    def extract_feature(self, dataloader):
        """Extracts features in batches.

        Args:
            dataloader (torch.utils.data.DataLoader): Initialized dataloader.

        Returns:
            feature (np.ndarray): shape(#gallery/query/multi_query, embedding_dim).
        """
        feature = []
        if isinstance(self.model, SiameseNet):
            for i, data in enumerate(dataloader):
                batch_feature = self.model.forward_once(data)
                feature.append(batch_feature)
        elif isinstance(self.model, IdentificationModel):
            for i, data in enumerate(dataloader):
                print(i * Tester.DEFAULT_BATCH_SIZE)
                batch_feature = self.model.forward(data)
                data = Tester.flip_lr(data)
                batch_feature += self.model.forward(data)
                feature.append(batch_feature)
        return torch.cat(feature, 0).numpy()
|
[
"scipy.io.savemat",
"models.siamese.MssNet",
"numpy.linalg.norm",
"utils.utilities.load_model",
"os.path.exists",
"numpy.mean",
"numpy.repeat",
"numpy.flatnonzero",
"numpy.asarray",
"torchvision.transforms.ToTensor",
"os.path.dirname",
"torch.norm",
"models.model.ResNet50",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"utils.utilities.type_error_msg",
"torchvision.transforms.Compose",
"torch.cat",
"os.makedirs",
"utils.utilities.value_error_msg",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.no_grad"
] |
[((5047, 5142), 'utils.utilities.load_model', 'load_model', (['self.model', "(self.config[self.name.value]['model_format'] % (model_name, epoch))"], {}), "(self.model, self.config[self.name.value]['model_format'] % (\n model_name, epoch))\n", (5057, 5142), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((8145, 8168), 'os.path.dirname', 'dirname', (['self.test_path'], {}), '(self.test_path)\n', (8152, 8168), False, 'from os.path import join, exists, dirname\n'), ((8327, 8361), 'scipy.io.savemat', 'savemat', (['self.test_path', 'test_dict'], {}), '(self.test_path, test_dict)\n', (8334, 8361), False, 'from scipy.io import savemat\n'), ((9813, 9838), 'numpy.asarray', 'np.asarray', (['query_feature'], {}), '(query_feature)\n', (9823, 9838), True, 'import numpy as np\n'), ((10496, 10535), 'torch.norm', 'torch.norm', (['x'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(x, p=2, dim=1, keepdim=True)\n', (10506, 10535), False, 'import torch\n'), ((10867, 10907), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (10881, 10907), True, 'import numpy as np\n'), ((3554, 3573), 'models.siamese.MssNet', 'MssNet', (['self.config'], {}), '(self.config)\n', (3560, 3573), False, 'from models.siamese import SiameseNet, MssNet\n'), ((5727, 5742), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5740, 5742), False, 'import torch\n'), ((8184, 8200), 'os.path.exists', 'exists', (['test_dir'], {}), '(test_dir)\n', (8190, 8200), False, 'from os.path import join, exists, dirname\n'), ((8214, 8232), 'os.makedirs', 'makedirs', (['test_dir'], {}), '(test_dir)\n', (8222, 8232), False, 'from os import makedirs\n'), ((9598, 9635), 'numpy.flatnonzero', 'np.flatnonzero', (['(label_mask & cam_mask)'], {}), '(label_mask & cam_mask)\n', (9612, 9635), True, 'import numpy as np\n'), ((3061, 3098), 'utils.utilities.type_error_msg', 'type_error_msg', (['"""epoch"""', 'epoch', '[int]'], {}), "('epoch', epoch, [int])\n", (3075, 3098), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((3156, 3201), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""epoch"""', 'epoch', '"""epoch >= 0"""'], {}), "('epoch', epoch, 'epoch >= 0')\n", (3171, 3201), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((3649, 3695), 'models.model.ResNet50', 'ResNet50', (['self.config', 'self.train_class', '(False)'], {}), '(self.config, self.train_class, False)\n', (3657, 3695), False, 'from models.model import IdentificationModel, ResNet50\n'), ((4152, 4198), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 128)'], {'interpolation': '(3)'}), '((256, 128), interpolation=3)\n', (4169, 4198), False, 'from torchvision import transforms\n'), ((4216, 4237), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4235, 4237), False, 'from torchvision import transforms\n'), ((4255, 4321), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (4275, 4321), False, 'from torchvision import transforms\n'), ((4953, 5013), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""dataset"""', 'dataset', 'Tester.Dataset.EXTENDED'], {}), "('dataset', dataset, Tester.Dataset.EXTENDED)\n", (4968, 5013), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((9707, 9748), 
'numpy.mean', 'np.mean', (['mquery_feature[index, :]'], {'axis': '(0)'}), '(mquery_feature[index, :], axis=0)\n', (9714, 9748), True, 'import numpy as np\n'), ((11872, 11893), 'torch.cat', 'torch.cat', (['feature', '(0)'], {}), '(feature, 0)\n', (11881, 11893), False, 'import torch\n'), ((1528, 1576), 'utils.utilities.type_error_msg', 'type_error_msg', (['"""name"""', 'name', '[Tester.Name, str]'], {}), "('name', name, [Tester.Name, str])\n", (1542, 1576), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2010, 2067), 'utils.utilities.type_error_msg', 'type_error_msg', (['"""dataset"""', 'dataset', '[Tester.Dataset, str]'], {}), "('dataset', dataset, [Tester.Dataset, str])\n", (2024, 2067), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2473, 2529), 'utils.utilities.type_error_msg', 'type_error_msg', (['"""model"""', 'model', '[Tester.MODEL_LIST, str]'], {}), "('model', model, [Tester.MODEL_LIST, str])\n", (2487, 2529), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2935, 2991), 'utils.utilities.type_error_msg', 'type_error_msg', (['"""scene"""', 'scene', '[Tester.SCENE_LIST, str]'], {}), "('scene', scene, [Tester.SCENE_LIST, str])\n", (2949, 2991), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((4764, 4819), 'os.path.join', 'join', (["self.config[self.name.value]['dataset_dir']", 'item'], {}), "(self.config[self.name.value]['dataset_dir'], item)\n", (4768, 4819), False, 'from os.path import join, exists, dirname\n'), ((4874, 4908), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (4892, 4908), False, 'from torchvision import transforms\n'), ((5885, 5996), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset[item]'], {'num_workers': 'Tester.DEFAULT_NUM_WORKER', 'batch_size': 'Tester.DEFAULT_BATCH_SIZE'}), '(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,\n batch_size=Tester.DEFAULT_BATCH_SIZE)\n', (5895, 5996), False, 'from torch.utils.data import DataLoader\n'), ((6468, 6579), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset[item]'], {'num_workers': 'Tester.DEFAULT_NUM_WORKER', 'batch_size': 'Tester.DEFAULT_BATCH_SIZE'}), '(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,\n batch_size=Tester.DEFAULT_BATCH_SIZE)\n', (6478, 6579), False, 'from torch.utils.data import DataLoader\n'), ((6984, 7095), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset[item]'], {'num_workers': 'Tester.DEFAULT_NUM_WORKER', 'batch_size': 'Tester.DEFAULT_BATCH_SIZE'}), '(self.dataset[item], num_workers=Tester.DEFAULT_NUM_WORKER,\n batch_size=Tester.DEFAULT_BATCH_SIZE)\n', (6994, 7095), False, 'from torch.utils.data import DataLoader\n'), ((10927, 10955), 'numpy.repeat', 'np.repeat', (['xnorm', 'x.shape[1]'], {}), '(xnorm, x.shape[1])\n', (10936, 10955), True, 'import numpy as np\n'), ((1388, 1435), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""name"""', 'name', 'Tester.NAME_LIST'], {}), "('name', name, Tester.NAME_LIST)\n", (1403, 1435), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((1852, 1908), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""dataset"""', 'dataset', 'Tester.DATASET_LIST'], {}), "('dataset', dataset, Tester.DATASET_LIST)\n", (1867, 1908), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2327, 
2377), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""model"""', 'model', 'Tester.MODEL_LIST'], {}), "('model', model, Tester.MODEL_LIST)\n", (2342, 2377), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((2789, 2839), 'utils.utilities.value_error_msg', 'value_error_msg', (['"""scene"""', 'scene', 'Tester.SCENE_LIST'], {}), "('scene', scene, Tester.SCENE_LIST)\n", (2804, 2839), False, 'from utils.utilities import type_error_msg, value_error_msg, timer, load_model\n'), ((7700, 7729), 'numpy.asarray', 'np.asarray', (['multi_query_label'], {}), '(multi_query_label)\n', (7710, 7729), True, 'import numpy as np\n'), ((7801, 7828), 'numpy.asarray', 'np.asarray', (['multi_query_cam'], {}), '(multi_query_cam)\n', (7811, 7828), True, 'import numpy as np\n'), ((7900, 7936), 'numpy.asarray', 'np.asarray', (["test_dict['query_label']"], {}), "(test_dict['query_label'])\n", (7910, 7936), True, 'import numpy as np\n'), ((8008, 8042), 'numpy.asarray', 'np.asarray', (["test_dict['query_cam']"], {}), "(test_dict['query_cam'])\n", (8018, 8042), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 29 20:53:21 2020

@author: asherhensley
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import yulesimon as ys
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
import numpy as np
import dash_table

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

colors = {
    'background': '#000000',
    'text': '#4ae2ed'
}

fig1 = make_subplots()
# fig1.update_layout(
#     autosize=False,
#     height=400,
#     width=600,
#     showlegend=False,
#     # margin=dict(l=0, r=0, b=50, t=50),
# )
fig2 = make_subplots()
# fig2.update_layout(
#     autosize=False,
#     height=400,
#     width=600,
#     showlegend=False,
#     # margin=dict(l=0, r=0, b=50, t=50),
# )
fig3 = make_subplots()

colors = {
    'background': '#000000',
    'text': '#7FDBFF'
}

df = pd.DataFrame(data={
    "Key Statistics": [6],
    "Values": [4]})

app.layout = html.Div(children=[
    html.H1(children='CIRCLON-8', style={'textAlign': 'left'}),
    html.Div(children=[
        'Ticker: ',
        dcc.Input(id='Ticker', value='MSFT', type='text', size='50'),
        html.Button('Search', id='Search', n_clicks=0)]
    ),
    html.Br(),
    html.H6(id='Status', children='Ready', style={'textAlign': 'left'}),
    # dash_table.DataTable(
    #     id='table',
    #     columns=[{"name": "Key Statistics", "id": "Key Statistics"},
    #              {"name": "Values", "id": "Values"}],
    #     data=df.to_dict('records')
    # ),
    dcc.Tabs(id="tabs", value='tab-1', children=[
        dcc.Tab(label='Prices/Returns',
                children=[dcc.Graph(id='Figure1', figure=fig1)]),
        dcc.Tab(label='Volatility Profile',
                children=[dcc.Graph(id='Figure2', figure=fig2)]),
        dcc.Tab(label='Modeling Analysis',
                children=[dcc.Graph(id='Figure3', figure=fig2)]),
    ]),
    html.Div(id='tabs-content')
])


@app.callback(
    Output(component_id='Status', component_property='children'),
    Input(component_id='Search', component_property='n_clicks')
)
def set_status(n_clicks):
    status = 'Searching...'
    if n_clicks == 0:
        status = 'Initializing...'
    return status


@app.callback(
    Output(component_id='Figure1', component_property='figure'),
    Output(component_id='Figure2', component_property='figure'),
    Output(component_id='Figure3', component_property='figure'),
    Output(component_id='Status', component_property='children'),
    Input(component_id='Ticker', component_property='value'),
    Input(component_id='Search', component_property='n_clicks')
)
def update_figure(ticker_in, n_clicks):
    ctx = dash.callback_context
    if not ctx.triggered:
        ticker = 'MSFT'
    else:
        callback_id = ctx.triggered[0]['prop_id'].split('.')[0]
        if callback_id == 'Search':
            ticker = ticker_in
        else:
            ticker = None
    if ticker is None:
        raise PreventUpdate
    else:
        # Run Model
        closing_prices, log_returns, dates = ys.GetYahooFeed(ticker, 5)
        Chain = ys.TimeSeries(log_returns)
        nsteps = 200
        burnin = nsteps / 2.0
        downsample = 2
        history = Chain.step(nsteps)
        sigma, sample_size = ys.ExpectedValue(history.std_deviation, burnin, downsample)
        mu, sample_size = ys.ExpectedValue(history.mean, burnin, downsample)
        z = np.arange(-0.2, 0.2, 0.001)
        yulesimon_PDF = ys.MixtureModel(z, mu / 100, sigma / 100)
        H, b = np.histogram(log_returns, 200)
        delta = b[1] - b[0]
        bctr = b[1:] - delta / 2.0
        empirical_PDF = H / (sum(H) * delta)
        gaussian_PDF = ys.Gaussian(z, np.mean(log_returns), 1 / np.var(log_returns))

        # Update Prices/Returns
        fig1 = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05)
        fig1.add_trace(go.Scatter(x=dates[1:], y=closing_prices[1:],
                                  fill='tozeroy',
                                  line_color='#0000ff',
                                  fillcolor='#7474f7'), row=1, col=1)
        fig1.add_trace(go.Scatter(x=dates[1:], y=mu / 100 + 2 * sigma / 100,
                                  fill='tozeroy',
                                  fillcolor='#ffb0b0',
                                  mode='none'), row=2, col=1)
        fig1.add_trace(go.Scatter(x=dates[1:], y=mu / 100 - 2 * sigma / 100,
                                  fill='tozeroy',
                                  fillcolor='#ffb0b0',
                                  mode='none'), row=2, col=1)
        fig1.add_trace(go.Scatter(x=dates[1:], y=log_returns,
                                  line_color='#ff0000'), row=2, col=1)
        fig1.add_trace(go.Scatter(x=dates[1:], y=mu,
                                  line_color='#000000'), row=2, col=1)
        # fig1.add_trace(go.Scatter(x=dates[1:], y=mu*0, line=dict(dash='dash'),
        #                           line_color='#000000'), row=2, col=1)
        fig1.update_layout(
            showlegend=False,
            height=700
        )
        fig1.update_yaxes(title_text='Daily Close', row=1, col=1)
        fig1.update_yaxes(title_text='Daily Log-Return', row=2, col=1)

        # Update Volatility Profile
        fig2 = make_subplots(rows=1, cols=2,
                             shared_xaxes=True,
                             subplot_titles=("Linear Scale", "Log Scale"))
        fig2.add_trace(go.Scatter(x=bctr, y=empirical_PDF, mode='markers', marker_color='#ff0000'), row=1, col=1)
        # fig2.add_trace(go.Scatter(x=z, y=gaussian_PDF, line_color='#edc24a'), row=1, col=1)
        fig2.add_trace(go.Scatter(x=z, y=yulesimon_PDF, line_color='#0000ff'), row=1, col=1)
        fig2.add_trace(go.Scatter(x=bctr, y=empirical_PDF, mode='markers', marker_color='#ff0000'), row=1, col=2)
        # fig2.add_trace(go.Scatter(x=z, y=gaussian_PDF, line_color='#edc24a'), row=1, col=2)
        fig2.add_trace(go.Scatter(x=z, y=yulesimon_PDF, line_color='#0000ff'), row=1, col=2)
        fig2.update_xaxes(title_text='Log Returns', row=1, col=1)
        fig2.update_yaxes(title_text='Probability Density', row=1, col=1)
        fig2.update_xaxes(title_text='Log Returns', row=1, col=2)
        fig2.update_yaxes(title_text='Probability Density', type="log", row=1, col=2)
        fig2.update_layout(showlegend=False)

        # Update Modeling Analysis Tab
        fig3 = make_subplots(rows=1, cols=2)
        fig3.add_trace(go.Scatter(y=history.log_likelihood, line_color='#0000ff'), row=1, col=1)
        fig3.add_trace(go.Scatter(y=history.pvalue, line_color='#ff0000'), row=1, col=2)
        fig3.update_xaxes(title_text='Iteration', row=1, col=1)
        fig3.update_yaxes(title_text='Log-Likelihood', row=1, col=1)
        fig3.update_xaxes(title_text='Iteration', row=1, col=2)
        fig3.update_yaxes(title_text='p-Value', type="log", row=1, col=2)
        fig3.update_layout(showlegend=False)
    return fig1, fig2, fig3, 'Ready'


if __name__ == '__main__':
    app.run_server(debug=True)
|
[
"dash_html_components.Button",
"dash.dependencies.Input",
"numpy.arange",
"dash_html_components.Div",
"yulesimon.TimeSeries",
"numpy.mean",
"numpy.histogram",
"dash.Dash",
"dash.dependencies.Output",
"dash_html_components.Br",
"plotly.graph_objects.Scatter",
"yulesimon.GetYahooFeed",
"dash_html_components.H6",
"pandas.DataFrame",
"plotly.subplots.make_subplots",
"dash_html_components.H1",
"yulesimon.MixtureModel",
"yulesimon.ExpectedValue",
"dash_core_components.Graph",
"dash_core_components.Input",
"numpy.var"
] |
[((545, 607), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': 'external_stylesheets'}), '(__name__, external_stylesheets=external_stylesheets)\n', (554, 607), False, 'import dash\n'), ((681, 696), 'plotly.subplots.make_subplots', 'make_subplots', ([], {}), '()\n', (694, 696), False, 'from plotly.subplots import make_subplots\n'), ((855, 870), 'plotly.subplots.make_subplots', 'make_subplots', ([], {}), '()\n', (868, 870), False, 'from plotly.subplots import make_subplots\n'), ((1029, 1044), 'plotly.subplots.make_subplots', 'make_subplots', ([], {}), '()\n', (1042, 1044), False, 'from plotly.subplots import make_subplots\n'), ((1116, 1173), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Key Statistics': [6], 'Values': [4]}"}), "(data={'Key Statistics': [6], 'Values': [4]})\n", (1128, 1173), True, 'import pandas as pd\n'), ((2248, 2308), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""Status"""', 'component_property': '"""children"""'}), "(component_id='Status', component_property='children')\n", (2254, 2308), False, 'from dash.dependencies import Input, Output\n'), ((2314, 2373), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""Search"""', 'component_property': '"""n_clicks"""'}), "(component_id='Search', component_property='n_clicks')\n", (2319, 2373), False, 'from dash.dependencies import Input, Output\n'), ((2527, 2586), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""Figure1"""', 'component_property': '"""figure"""'}), "(component_id='Figure1', component_property='figure')\n", (2533, 2586), False, 'from dash.dependencies import Input, Output\n'), ((2592, 2651), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""Figure2"""', 'component_property': '"""figure"""'}), "(component_id='Figure2', component_property='figure')\n", (2598, 2651), False, 'from dash.dependencies import Input, Output\n'), ((2657, 2716), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""Figure3"""', 'component_property': '"""figure"""'}), "(component_id='Figure3', component_property='figure')\n", (2663, 2716), False, 'from dash.dependencies import Input, Output\n'), ((2722, 2782), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""Status"""', 'component_property': '"""children"""'}), "(component_id='Status', component_property='children')\n", (2728, 2782), False, 'from dash.dependencies import Input, Output\n'), ((2788, 2844), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""Ticker"""', 'component_property': '"""value"""'}), "(component_id='Ticker', component_property='value')\n", (2793, 2844), False, 'from dash.dependencies import Input, Output\n'), ((2850, 2909), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""Search"""', 'component_property': '"""n_clicks"""'}), "(component_id='Search', component_property='n_clicks')\n", (2855, 2909), False, 'from dash.dependencies import Input, Output\n'), ((3373, 3399), 'yulesimon.GetYahooFeed', 'ys.GetYahooFeed', (['ticker', '(5)'], {}), '(ticker, 5)\n', (3388, 3399), True, 'import yulesimon as ys\n'), ((3415, 3441), 'yulesimon.TimeSeries', 'ys.TimeSeries', (['log_returns'], {}), '(log_returns)\n', (3428, 3441), True, 'import yulesimon as ys\n'), ((3580, 3639), 'yulesimon.ExpectedValue', 'ys.ExpectedValue', (['history.std_deviation', 'burnin', 'downsample'], {}), '(history.std_deviation, burnin, downsample)\n', (3596, 3639), True, 'import yulesimon as ys\n'), ((3666, 3716), 'yulesimon.ExpectedValue', 'ys.ExpectedValue', (['history.mean', 'burnin', 
'downsample'], {}), '(history.mean, burnin, downsample)\n', (3682, 3716), True, 'import yulesimon as ys\n'), ((3747, 3774), 'numpy.arange', 'np.arange', (['(-0.2)', '(0.2)', '(0.001)'], {}), '(-0.2, 0.2, 0.001)\n', (3756, 3774), True, 'import numpy as np\n'), ((3797, 3838), 'yulesimon.MixtureModel', 'ys.MixtureModel', (['z', '(mu / 100)', '(sigma / 100)'], {}), '(z, mu / 100, sigma / 100)\n', (3812, 3838), True, 'import yulesimon as ys\n'), ((3847, 3877), 'numpy.histogram', 'np.histogram', (['log_returns', '(200)'], {}), '(log_returns, 200)\n', (3859, 3877), True, 'import numpy as np\n'), ((4112, 4183), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'shared_xaxes': '(True)', 'vertical_spacing': '(0.05)'}), '(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.05)\n', (4125, 4183), False, 'from plotly.subplots import make_subplots\n'), ((5600, 5699), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)', 'shared_xaxes': '(True)', 'subplot_titles': "('Linear Scale', 'Log Scale')"}), "(rows=1, cols=2, shared_xaxes=True, subplot_titles=(\n 'Linear Scale', 'Log Scale'))\n", (5613, 5699), False, 'from plotly.subplots import make_subplots\n'), ((6739, 6768), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(1)', 'cols': '(2)'}), '(rows=1, cols=2)\n', (6752, 6768), False, 'from plotly.subplots import make_subplots\n'), ((1232, 1290), 'dash_html_components.H1', 'html.H1', ([], {'children': '"""CIRCLON-8"""', 'style': "{'textAlign': 'left'}"}), "(children='CIRCLON-8', style={'textAlign': 'left'})\n", (1239, 1290), True, 'import dash_html_components as html\n'), ((1482, 1491), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (1489, 1491), True, 'import dash_html_components as html\n'), ((1502, 1569), 'dash_html_components.H6', 'html.H6', ([], {'id': '"""Status"""', 'children': '"""Ready"""', 'style': "{'textAlign': 'left'}"}), "(id='Status', children='Ready', style={'textAlign': 'left'})\n", (1509, 1569), True, 'import dash_html_components as html\n'), ((2192, 2219), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""tabs-content"""'}), "(id='tabs-content')\n", (2200, 2219), True, 'import dash_html_components as html\n'), ((4012, 4032), 'numpy.mean', 'np.mean', (['log_returns'], {}), '(log_returns)\n', (4019, 4032), True, 'import numpy as np\n'), ((4204, 4313), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'dates[1:]', 'y': 'closing_prices[1:]', 'fill': '"""tozeroy"""', 'line_color': '"""#0000ff"""', 'fillcolor': '"""#7474f7"""'}), "(x=dates[1:], y=closing_prices[1:], fill='tozeroy', line_color=\n '#0000ff', fillcolor='#7474f7')\n", (4214, 4313), True, 'import plotly.graph_objects as go\n'), ((4447, 4554), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'dates[1:]', 'y': '(mu / 100 + 2 * sigma / 100)', 'fill': '"""tozeroy"""', 'fillcolor': '"""#ffb0b0"""', 'mode': '"""none"""'}), "(x=dates[1:], y=mu / 100 + 2 * sigma / 100, fill='tozeroy',\n fillcolor='#ffb0b0', mode='none')\n", (4457, 4554), True, 'import plotly.graph_objects as go\n'), ((4681, 4788), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'dates[1:]', 'y': '(mu / 100 - 2 * sigma / 100)', 'fill': '"""tozeroy"""', 'fillcolor': '"""#ffb0b0"""', 'mode': '"""none"""'}), "(x=dates[1:], y=mu / 100 - 2 * sigma / 100, fill='tozeroy',\n fillcolor='#ffb0b0', mode='none')\n", (4691, 4788), True, 'import plotly.graph_objects as go\n'), ((4915, 4975), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'dates[1:]', 'y': 
'log_returns', 'line_color': '"""#ff0000"""'}), "(x=dates[1:], y=log_returns, line_color='#ff0000')\n", (4925, 4975), True, 'import plotly.graph_objects as go\n'), ((5046, 5097), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'dates[1:]', 'y': 'mu', 'line_color': '"""#000000"""'}), "(x=dates[1:], y=mu, line_color='#000000')\n", (5056, 5097), True, 'import plotly.graph_objects as go\n'), ((5783, 5858), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'bctr', 'y': 'empirical_PDF', 'mode': '"""markers"""', 'marker_color': '"""#ff0000"""'}), "(x=bctr, y=empirical_PDF, mode='markers', marker_color='#ff0000')\n", (5793, 5858), True, 'import plotly.graph_objects as go\n'), ((5982, 6036), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'z', 'y': 'yulesimon_PDF', 'line_color': '"""#0000ff"""'}), "(x=z, y=yulesimon_PDF, line_color='#0000ff')\n", (5992, 6036), True, 'import plotly.graph_objects as go\n'), ((6072, 6147), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'bctr', 'y': 'empirical_PDF', 'mode': '"""markers"""', 'marker_color': '"""#ff0000"""'}), "(x=bctr, y=empirical_PDF, mode='markers', marker_color='#ff0000')\n", (6082, 6147), True, 'import plotly.graph_objects as go\n'), ((6271, 6325), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'z', 'y': 'yulesimon_PDF', 'line_color': '"""#0000ff"""'}), "(x=z, y=yulesimon_PDF, line_color='#0000ff')\n", (6281, 6325), True, 'import plotly.graph_objects as go\n'), ((6791, 6849), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'y': 'history.log_likelihood', 'line_color': '"""#0000ff"""'}), "(y=history.log_likelihood, line_color='#0000ff')\n", (6801, 6849), True, 'import plotly.graph_objects as go\n'), ((6886, 6936), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'y': 'history.pvalue', 'line_color': '"""#ff0000"""'}), "(y=history.pvalue, line_color='#ff0000')\n", (6896, 6936), True, 'import plotly.graph_objects as go\n'), ((4035, 4054), 'numpy.var', 'np.var', (['log_returns'], {}), '(log_returns)\n', (4041, 4054), True, 'import numpy as np\n'), ((1348, 1408), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""Ticker"""', 'value': '"""MSFT"""', 'type': '"""text"""', 'size': '"""50"""'}), "(id='Ticker', value='MSFT', type='text', size='50')\n", (1357, 1408), True, 'import dash_core_components as dcc\n'), ((1416, 1462), 'dash_html_components.Button', 'html.Button', (['"""Search"""'], {'id': '"""Search"""', 'n_clicks': '(0)'}), "('Search', id='Search', n_clicks=0)\n", (1427, 1462), True, 'import dash_html_components as html\n'), ((1922, 1958), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""Figure1"""', 'figure': 'fig1'}), "(id='Figure1', figure=fig1)\n", (1931, 1958), True, 'import dash_core_components as dcc\n'), ((2032, 2068), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""Figure2"""', 'figure': 'fig2'}), "(id='Figure2', figure=fig2)\n", (2041, 2068), True, 'import dash_core_components as dcc\n'), ((2141, 2177), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""Figure3"""', 'figure': 'fig2'}), "(id='Figure3', figure=fig2)\n", (2150, 2177), True, 'import dash_core_components as dcc\n')]
|
import argparse
import os
import sys
import time
import warnings
from ast import literal_eval

warnings.filterwarnings("ignore")

import IPython
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch

import context
from context import utils
import utils.plotting as plot
import utils.db as db
import utils.filesystem as fs
from utils.misc import get_equal_dicts, length_of_longest
from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories


def load_data(checkpoint_directories, old_mtimes=None, old_states=None, old_stats=None, best=False):
    # TODO This is a mess
    # Parse inputs
    if best:
        filename = 'state-dict-best-algorithm.pkl'
    else:
        filename = 'state-dict-algorithm.pkl'
    n_tot_files = len(checkpoint_directories)
    if old_mtimes is not None:
        assert old_states is not None, "If given modification times, must also get old data to overwrite"
        assert old_stats is not None, "If given modification times, must also get old stats to overwrite"
        # Find files that have been modified
        mtimes = fs.get_modified_times(checkpoint_directories, filename)
        if len(mtimes) - len(old_mtimes) > 0:
            old_mtimes = np.pad(old_mtimes, (0, len(mtimes) - len(old_mtimes)), mode='constant', constant_values=0)
        elif len(old_mtimes) - len(mtimes) > 0:
            mtimes = np.pad(mtimes, (0, len(old_mtimes) - len(mtimes)), mode='constant', constant_values=0)
        is_changed = ~np.equal(old_mtimes, mtimes)
        checkpoint_directories = [d for i, d in enumerate(checkpoint_directories) if is_changed[i]]
        n_files = len(checkpoint_directories)
        idxs = np.where(is_changed)[0]
        algorithm_states_list = old_states
        stats_list = old_stats
        print("Loading " + str(n_files) + " modified files of " + str(n_tot_files) + " total files...")
    else:
        n_files = len(checkpoint_directories)
        print("Loading " + str(n_files) + " files...")
        algorithm_states_list = [None] * len(checkpoint_directories)
        stats_list = [None] * len(checkpoint_directories)
        idxs = range(0, len(checkpoint_directories))
    # Strings and constants
    n_chars = len(str(n_files))
    f = ' {:' + str(n_chars) + 'd}/{:' + str(n_chars) + 'd} files loaded'
    text = ""
    # Loop over files to load (all or only changed ones)
    i_file = -1
    for i, chkpt_dir in zip(idxs, checkpoint_directories):
        try:
            algorithm_states_list[i] = torch.load(os.path.join(chkpt_dir, filename))
            stats_list[i] = pd.read_csv(os.path.join(chkpt_dir, 'stats.csv'))
            i_file += 1
            if i_file + 1 != n_files:
                print(f.format(i_file + 1, n_files), end='\r')
        except Exception:
            text += " Required files not (yet) present in: " + chkpt_dir + "\n"
    # Remove any None
    algorithm_states_list = [s for s in algorithm_states_list if s is not None]
    stats_list = [s for s in stats_list if s is not None]
    # Evaluate any strings as literal types
    for s in stats_list:
        for k in s.keys()[s.dtypes == object]:
            s[k] = s[k].apply(literal_eval)
    print(f.format(i_file + 1, n_files), end='\n')
    if text:
        print(text[:-2])
    return algorithm_states_list, stats_list


def sub_into_lists(stats_list, keys_to_monitor):
    for s in stats_list:
        for k in keys_to_monitor:
            if k in s and type(s[k][0]) is list:
                s[k] = [vals_group[0] for vals_group in s[k]]
                if 'lr' in k and 'lr' not in s.keys():
                    s['lr'] = s[k][0]


def create_plots(args, stats_list, keys_to_monitor, groups):
    unique_groups = set(groups)
    n_keys = len(keys_to_monitor)
    n_chars = len(str(n_keys))
    f = ' {:' + str(n_chars) + 'd}/{:' + str(n_chars) + 'd} monitored keys plotted'
    for i_key, k in enumerate(keys_to_monitor):
        list_of_series = [s[k].tolist() for s in stats_list if k in s]
        list_of_genera = [range(len(s)) for s in stats_list if k in s]
        plot.timeseries(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-series.pdf'), bbox_inches='tight')
        plt.close()
        plot.timeseries_distribution(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-distribution.pdf'), bbox_inches='tight')
        plt.close()
        plot.timeseries_median(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-median.pdf'), bbox_inches='tight')
        plt.close()
        plot.timeseries_final_distribution(list_of_series, label=k, ybins=len(list_of_series) * 10)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-final-distribution.pdf'), bbox_inches='tight')
        plt.close()
        # Subset only those series that are done (or the one that is the longest)
        l = length_of_longest(list_of_series)
        indices = [i for i, series in enumerate(list_of_series) if len(series) == l]
        list_of_longest_series = [list_of_series[i] for i in indices]
        list_of_longest_genera = [list_of_genera[i] for i in indices]
        groups_longest_series = groups[indices]
        plot.timeseries_mean_grouped(list_of_longest_genera, list_of_longest_series, groups_longest_series, xlabel='generations', ylabel=k)
        plt.savefig(os.path.join(args.monitor_dir, k + '-all-series-mean-sd' + '.pdf'), bbox_inches='tight')
        plt.close()
        if len(unique_groups) > 1:
            for g in unique_groups:
                gstr = '{0:02d}'.format(g)
                g_indices = np.where(groups == g)[0]
                group_stats = [stats_list[i] for i in g_indices]
                list_of_series = [s[k].tolist() for s in group_stats if k in s]
                list_of_genera = [range(len(s)) for s in group_stats if k in s]
                if list_of_genera and list_of_series:
                    plot.timeseries(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
                    plt.savefig(os.path.join(args.monitor_dir, k + '-group-' + gstr + '-series.pdf'), bbox_inches='tight')
                    plt.close()
                    plot.timeseries_distribution(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
                    plt.savefig(os.path.join(args.monitor_dir, k + '-group-' + gstr + '-distribution.pdf'), bbox_inches='tight')
                    plt.close()
                    plot.timeseries_median(list_of_genera, list_of_series, xlabel='generations', ylabel=k)
                    plt.savefig(os.path.join(args.monitor_dir, k + '-group-' + gstr + '-median.pdf'), bbox_inches='tight')
                    plt.close()
                    plot.timeseries_final_distribution(list_of_series, label=k, ybins=len(list_of_series) * 10)
                    plt.savefig(os.path.join(args.monitor_dir, k + '-group-' + gstr + '-final-distribution.pdf'), bbox_inches='tight')
                    plt.close()
        if i_key + 1 == n_keys:
            print(f.format(i_key + 1, n_keys), end='\n')
        else:
            print(f.format(i_key + 1, n_keys), end='\r')


def wait_for_updates(args, last_refresh, max_chkpt_int, mtimes_last):
    """Wait for updates to the checkpoint directories.

    If no updates are seen after waiting more than the maximum checkpoint
    interval, returns True (monitoring should stop). Otherwise returns False.
    """
    print("Waiting 'max checkpoint interval' x 2 = " + str(int(max_chkpt_int * 2)) + " seconds before checking for updates...")
    count_down(count_down_started_at=last_refresh, wait=max_chkpt_int * 2)
    checkpoint_directories = get_checkpoint_directories(args.d)
    mtimes = fs.get_modified_times(checkpoint_directories, 'state-dict-algorithm.pkl')
    if mtimes == mtimes_last:
        print("Monitoring stopped since loaded data did not change for " + str(int(max_chkpt_int * 2)) + " seconds.")
        return True
    return False


def get_keys_to_monitor(stats_list):
    keys_to_monitor = {'return_unp', 'accuracy_unp', 'sigma'}
    for s in stats_list:
        for c in s.columns:
            addkeys = set()
            for k in keys_to_monitor:
                if k in c:
                    addkeys.add(c)
            if addkeys: keys_to_monitor.add(*addkeys)
    return keys_to_monitor


def get_data(old_mtimes=None, old_states=None, old_stats=None, timeout=30 * 60, checkevery=30):
    checkpoint_directories = get_checkpoint_directories(args.d)
    algorithm_states, stats_list = load_data(checkpoint_directories, old_mtimes=old_mtimes, old_states=old_states, old_stats=old_stats)
    # Check if any data found
    if not algorithm_states:
        print("No data found.")
        print("Rechecking directory for files every " + str(checkevery) + " seconds for " + str(int(timeout / 60)) + " minutes.")
        for i in range(0, timeout, checkevery):
            count_down(wait=checkevery, info_interval=1)
            checkpoint_directories = get_checkpoint_directories(args.d)
            algorithm_states, stats_list = load_data(checkpoint_directories, old_mtimes=old_mtimes, old_states=old_states, old_stats=old_stats)
            if algorithm_states:
                return algorithm_states, stats_list
            print("{:2.2f} minutes remaining".format((timeout - i - checkevery) / 60))
        print("No data found to monitor after checking for " + str(int(timeout / 60)) + " minutes.")
    return algorithm_states, stats_list


def count_down(wait=60, count_down_started_at=None, info_interval=5):
    if count_down_started_at is not None:
        seconds_remaining = int(wait - (time.time() - count_down_started_at))
    else:
        seconds_remaining = wait
    for i in range(0, seconds_remaining, info_interval):
        print("Updating in {:s} seconds".format(str(seconds_remaining - i)), end="\r")
        time.sleep(info_interval)
    print("Updating... ", end='\n')
    return time.time()


def monitor(args):
    this_file_dir_local = os.path.dirname(os.path.abspath(__file__))
    # Get the root of the package locally and where monitored (may be the same)
    package_root_this_file = fs.get_parent(this_file_dir_local, 'es-rl')
    # Get directory to monitor
    if not args.d:
        args.d = os.path.join(package_root_this_file, 'experiments', 'checkpoints', args.i)
    elif not os.path.isabs(args.d):
        args.d = os.path.join(package_root_this_file, 'experiments', 'checkpoints', args.d)
    if not os.path.exists(args.d):
        os.mkdir(args.d)
    package_root_monitored_directory = fs.get_parent(args.d, 'es-rl')
    print("Monitoring: " + args.d)
    # Load data
    last_refresh = time.time()
    checkpoint_directories = get_checkpoint_directories(args.d)
    mtimes = fs.get_modified_times(checkpoint_directories, 'state-dict-algorithm.pkl')
    algorithm_states, stats_list = get_data(timeout=args.t)
    if not algorithm_states:
        print("Monitoring stopped. No data available after " + str(args.t) + " minutes.")
        return
    # Create directory for monitoring plots
    monitor_dir = os.path.join(args.d, 'monitoring')
    if not os.path.exists(monitor_dir):
        os.mkdir(monitor_dir)
    args.monitor_dir = monitor_dir
    # Set up dropbox
    if args.c:
        package_parent_folder_monitored_directory = os.path.join(os.sep, *package_root_monitored_directory.split(os.sep)[:-1])
        # args.dbx_dir = os.sep + os.path.relpath(args.monitor_dir, package_parent_folder_monitored_directory)
        args.dbx_dir = os.sep + os.path.relpath(args.d, package_parent_folder_monitored_directory)
        token_file = os.path.join(this_file_dir_local, 'dropboxtoken.tok')
        assert os.path.exists(token_file)
        dbx = db.get_dropbox_client(token_file)
    ignored_keys = ['chkpt_dir', 'sensitivities', 'sens_inputs', '_weight_update_scale']
    ignored_keys.extend([k for k in algorithm_states[0].keys() if k[0] == '_'])
    ignored_keys = set(ignored_keys)
    for s in algorithm_states:
        if s['optimize_sigma']:
            ignored_keys.add('sigma')
            break
    # Monitoring loop
    while True:
        # Prepare data
        print("Preparing data...")
        keys_to_monitor = get_keys_to_monitor(stats_list)
        invert_signs(stats_list, keys_to_monitor)
        sub_into_lists(stats_list, keys_to_monitor)
        # Find groups of algorithms
        groups = get_equal_dicts(algorithm_states, ignored_keys=ignored_keys)
        print_group_info(algorithm_states, groups, directory=args.monitor_dir)
        # Plot
        print("Creating and saving plots...")
        # try:
        create_plots(args, stats_list, keys_to_monitor, groups)
        # except:
        #     pass
        # Upload results to dropbox
        if args.c:
            # db.upload_directory(dbx, args.monitor_dir, args.dbx_dir)
            db.upload_directory(dbx, args.d, args.dbx_dir, upload_older_files=False)
        # Break condition
        if wait_for_updates(args, last_refresh, get_max_chkpt_int(algorithm_states), mtimes):
            return
        # Load data
        print()
        last_refresh = time.time()
        algorithm_states, stats_list = get_data(timeout=args.t, old_mtimes=mtimes, old_states=algorithm_states, old_stats=stats_list)
        checkpoint_directories = get_checkpoint_directories(args.d)
        mtimes = fs.get_modified_times(checkpoint_directories, 'state-dict-algorithm.pkl')


if __name__ == '__main__':
    # Parse inputs
    parser = argparse.ArgumentParser(description='Monitorer')
    parser.add_argument('-d', type=str, metavar='--directory', help='The directory of checkpoints to monitor.')
    parser.add_argument('-i', type=str, metavar='--identifier', help='The identifier of the checkpoints to monitor.')
    parser.add_argument('-t', type=int, metavar='--timeout', default=4000, help='If no files are modified during a period of timeout minutes, monitoring is stopped.')
    parser.add_argument('-c', action='store_true', help='Copying of monitor directory to dropbox.')
    parser.add_argument('-s', action='store_true', help='Silent mode.')
    args = parser.parse_args()
    if args.s:
        sys.stdout = open(os.devnull, 'w')
    assert args.d or args.i, "Must specify directory or identifier of checkpoints to monitor"
    # Colormap
    # plt.rcParams['image.cmap'] = 'magma'
    # plt.rcParams['image.cmap'] = 'inferno'
    # plt.rcParams['image.cmap'] = 'plasma'
    plt.rcParams['image.cmap'] = 'viridis'
    try:
        monitor(args)
    except KeyboardInterrupt:
        print("\nMonitoring halted by user KeyboardInterrupt")

"""
SSHFS
sshfs s<EMAIL>@login.<EMAIL>:/zhome/c2/b/86488 ~/mnt

LINUX
python monitor.py -d ~/mnt/Documents/es-rl/experiments/checkpoints/E001-SM/ -c

MAC
python monitor.py -d /Users/Jakob/mnt/Documents/es-rl/experiments/checkpoints/E001-SM/ -c
python monitor.py -d sftp://[email protected]/zhome/c2/b/86488/Documents/es-rl/experiments/checkpoints/E001-SM
"""
|
[
"utils.db.upload_directory",
"time.sleep",
"numpy.equal",
"utils.plotting.timeseries_median",
"utils.misc.get_equal_dicts",
"os.path.exists",
"utils.filesystem.get_parent",
"argparse.ArgumentParser",
"utils.plotting.timeseries_mean_grouped",
"numpy.where",
"data_analysis.invert_signs",
"matplotlib.pyplot.close",
"os.mkdir",
"os.path.relpath",
"data_analysis.get_checkpoint_directories",
"utils.misc.length_of_longest",
"data_analysis.print_group_info",
"os.path.isabs",
"matplotlib.use",
"utils.plotting.timeseries_distribution",
"utils.db.get_dropbox_client",
"time.time",
"utils.plotting.timeseries",
"warnings.filterwarnings",
"data_analysis.get_max_chkpt_int",
"os.path.join",
"utils.filesystem.get_modified_times",
"os.path.abspath"
] |
[((94, 127), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (117, 127), False, 'import warnings\n'), ((169, 183), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (176, 183), True, 'import matplotlib as mpl\n'), ((7860, 7894), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (7886, 7894), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((7908, 7981), 'utils.filesystem.get_modified_times', 'fs.get_modified_times', (['checkpoint_directories', '"""state-dict-algorithm.pkl"""'], {}), "(checkpoint_directories, 'state-dict-algorithm.pkl')\n", (7929, 7981), True, 'import utils.filesystem as fs\n'), ((8653, 8687), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (8679, 8687), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((10143, 10154), 'time.time', 'time.time', ([], {}), '()\n', (10152, 10154), False, 'import time\n'), ((10354, 10397), 'utils.filesystem.get_parent', 'fs.get_parent', (['this_file_dir_local', '"""es-rl"""'], {}), "(this_file_dir_local, 'es-rl')\n", (10367, 10397), True, 'import utils.filesystem as fs\n'), ((10768, 10798), 'utils.filesystem.get_parent', 'fs.get_parent', (['args.d', '"""es-rl"""'], {}), "(args.d, 'es-rl')\n", (10781, 10798), True, 'import utils.filesystem as fs\n'), ((10870, 10881), 'time.time', 'time.time', ([], {}), '()\n', (10879, 10881), False, 'import time\n'), ((10911, 10945), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (10937, 10945), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((10959, 11032), 'utils.filesystem.get_modified_times', 'fs.get_modified_times', (['checkpoint_directories', '"""state-dict-algorithm.pkl"""'], {}), "(checkpoint_directories, 'state-dict-algorithm.pkl')\n", (10980, 11032), True, 'import utils.filesystem as fs\n'), ((11290, 11324), 'os.path.join', 'os.path.join', (['args.d', '"""monitoring"""'], {}), "(args.d, 'monitoring')\n", (11302, 11324), False, 'import os\n'), ((13698, 13746), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Monitorer"""'}), "(description='Monitorer')\n", (13721, 13746), False, 'import argparse\n'), ((1183, 1238), 'utils.filesystem.get_modified_times', 'fs.get_modified_times', (['checkpoint_directories', 'filename'], {}), '(checkpoint_directories, filename)\n', (1204, 1238), True, 'import utils.filesystem as fs\n'), ((4158, 4237), 'utils.plotting.timeseries', 'plot.timeseries', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel='generations', ylabel=k)\n", (4173, 4237), True, 'import utils.plotting as plot\n'), ((4342, 4353), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4351, 4353), True, 'import matplotlib.pyplot as plt\n'), ((4363, 4460), 'utils.plotting.timeseries_distribution', 'plot.timeseries_distribution', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel=\n 'generations', ylabel=k)\n", (4391, 4460), True, 'import utils.plotting as plot\n'), ((4566, 4577), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4575, 4577), True, 
'import matplotlib.pyplot as plt\n'), ((4587, 4677), 'utils.plotting.timeseries_median', 'plot.timeseries_median', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel='generations',\n ylabel=k)\n", (4609, 4677), True, 'import utils.plotting as plot\n'), ((4778, 4789), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4787, 4789), True, 'import matplotlib.pyplot as plt\n'), ((5005, 5016), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5014, 5016), True, 'import matplotlib.pyplot as plt\n'), ((5112, 5145), 'utils.misc.length_of_longest', 'length_of_longest', (['list_of_series'], {}), '(list_of_series)\n', (5129, 5145), False, 'from utils.misc import get_equal_dicts, length_of_longest\n'), ((5427, 5562), 'utils.plotting.timeseries_mean_grouped', 'plot.timeseries_mean_grouped', (['list_of_longest_genera', 'list_of_longest_series', 'groups_longest_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_longest_genera, list_of_longest_series,\n groups_longest_series, xlabel='generations', ylabel=k)\n", (5455, 5562), True, 'import utils.plotting as plot\n'), ((5676, 5687), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5685, 5687), True, 'import matplotlib.pyplot as plt\n'), ((10057, 10082), 'time.sleep', 'time.sleep', (['info_interval'], {}), '(info_interval)\n', (10067, 10082), False, 'import time\n'), ((10218, 10243), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (10233, 10243), False, 'import os\n'), ((10466, 10540), 'os.path.join', 'os.path.join', (['package_root_this_file', '"""experiments"""', '"""checkpoints"""', 'args.i'], {}), "(package_root_this_file, 'experiments', 'checkpoints', args.i)\n", (10478, 10540), False, 'import os\n'), ((10680, 10702), 'os.path.exists', 'os.path.exists', (['args.d'], {}), '(args.d)\n', (10694, 10702), False, 'import os\n'), ((10712, 10728), 'os.mkdir', 'os.mkdir', (['args.d'], {}), '(args.d)\n', (10720, 10728), False, 'import os\n'), ((11336, 11363), 'os.path.exists', 'os.path.exists', (['monitor_dir'], {}), '(monitor_dir)\n', (11350, 11363), False, 'import os\n'), ((11373, 11394), 'os.mkdir', 'os.mkdir', (['monitor_dir'], {}), '(monitor_dir)\n', (11381, 11394), False, 'import os\n'), ((11823, 11876), 'os.path.join', 'os.path.join', (['this_file_dir_local', '"""dropboxtoken.tok"""'], {}), "(this_file_dir_local, 'dropboxtoken.tok')\n", (11835, 11876), False, 'import os\n'), ((11892, 11918), 'os.path.exists', 'os.path.exists', (['token_file'], {}), '(token_file)\n', (11906, 11918), False, 'import os\n'), ((11933, 11966), 'utils.db.get_dropbox_client', 'db.get_dropbox_client', (['token_file'], {}), '(token_file)\n', (11954, 11966), True, 'import utils.db as db\n'), ((12456, 12497), 'data_analysis.invert_signs', 'invert_signs', (['stats_list', 'keys_to_monitor'], {}), '(stats_list, keys_to_monitor)\n', (12468, 12497), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((12603, 12663), 'utils.misc.get_equal_dicts', 'get_equal_dicts', (['algorithm_states'], {'ignored_keys': 'ignored_keys'}), '(algorithm_states, ignored_keys=ignored_keys)\n', (12618, 12663), False, 'from utils.misc import get_equal_dicts, length_of_longest\n'), ((12672, 12742), 'data_analysis.print_group_info', 'print_group_info', (['algorithm_states', 'groups'], {'directory': 'args.monitor_dir'}), '(algorithm_states, groups, directory=args.monitor_dir)\n', (12688, 12742), 
False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((13332, 13343), 'time.time', 'time.time', ([], {}), '()\n', (13341, 13343), False, 'import time\n'), ((13511, 13545), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (13537, 13545), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((13563, 13636), 'utils.filesystem.get_modified_times', 'fs.get_modified_times', (['checkpoint_directories', '"""state-dict-algorithm.pkl"""'], {}), "(checkpoint_directories, 'state-dict-algorithm.pkl')\n", (13584, 13636), True, 'import utils.filesystem as fs\n'), ((1571, 1599), 'numpy.equal', 'np.equal', (['old_mtimes', 'mtimes'], {}), '(old_mtimes, mtimes)\n', (1579, 1599), True, 'import numpy as np\n'), ((1761, 1781), 'numpy.where', 'np.where', (['is_changed'], {}), '(is_changed)\n', (1769, 1781), True, 'import numpy as np\n'), ((4258, 4311), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-series.pdf')"], {}), "(args.monitor_dir, k + '-all-series.pdf')\n", (4270, 4311), False, 'import os\n'), ((4476, 4535), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-distribution.pdf')"], {}), "(args.monitor_dir, k + '-all-distribution.pdf')\n", (4488, 4535), False, 'import os\n'), ((4694, 4747), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-median.pdf')"], {}), "(args.monitor_dir, k + '-all-median.pdf')\n", (4706, 4747), False, 'import os\n'), ((4909, 4974), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-final-distribution.pdf')"], {}), "(args.monitor_dir, k + '-all-final-distribution.pdf')\n", (4921, 4974), False, 'import os\n'), ((5579, 5645), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-all-series-mean-sd' + '.pdf')"], {}), "(args.monitor_dir, k + '-all-series-mean-sd' + '.pdf')\n", (5591, 5645), False, 'import os\n'), ((9185, 9219), 'data_analysis.get_checkpoint_directories', 'get_checkpoint_directories', (['args.d'], {}), '(args.d)\n', (9211, 9219), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((10554, 10575), 'os.path.isabs', 'os.path.isabs', (['args.d'], {}), '(args.d)\n', (10567, 10575), False, 'import os\n'), ((10594, 10668), 'os.path.join', 'os.path.join', (['package_root_this_file', '"""experiments"""', '"""checkpoints"""', 'args.d'], {}), "(package_root_this_file, 'experiments', 'checkpoints', args.d)\n", (10606, 10668), False, 'import os\n'), ((11735, 11801), 'os.path.relpath', 'os.path.relpath', (['args.d', 'package_parent_folder_monitored_directory'], {}), '(args.d, package_parent_folder_monitored_directory)\n', (11750, 11801), False, 'import os\n'), ((13060, 13132), 'utils.db.upload_directory', 'db.upload_directory', (['dbx', 'args.d', 'args.dbx_dir'], {'upload_older_files': '(False)'}), '(dbx, args.d, args.dbx_dir, upload_older_files=False)\n', (13079, 13132), True, 'import utils.db as db\n'), ((13208, 13243), 'data_analysis.get_max_chkpt_int', 'get_max_chkpt_int', (['algorithm_states'], {}), '(algorithm_states)\n', (13225, 13243), False, 'from data_analysis import print_group_info, get_best, get_max_chkpt_int, invert_signs, get_checkpoint_directories\n'), ((2596, 2629), 'os.path.join', 'os.path.join', (['chkpt_dir', 'filename'], {}), '(chkpt_dir, filename)\n', (2608, 2629), False, 'import os\n'), ((2671, 2707), 
'os.path.join', 'os.path.join', (['chkpt_dir', '"""stats.csv"""'], {}), "(chkpt_dir, 'stats.csv')\n", (2683, 2707), False, 'import os\n'), ((5831, 5852), 'numpy.where', 'np.where', (['(groups == g)'], {}), '(groups == g)\n', (5839, 5852), True, 'import numpy as np\n'), ((6156, 6235), 'utils.plotting.timeseries', 'plot.timeseries', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel='generations', ylabel=k)\n", (6171, 6235), True, 'import utils.plotting as plot\n'), ((6379, 6390), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6388, 6390), True, 'import matplotlib.pyplot as plt\n'), ((6412, 6509), 'utils.plotting.timeseries_distribution', 'plot.timeseries_distribution', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel=\n 'generations', ylabel=k)\n", (6440, 6509), True, 'import utils.plotting as plot\n'), ((6654, 6665), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6663, 6665), True, 'import matplotlib.pyplot as plt\n'), ((6687, 6777), 'utils.plotting.timeseries_median', 'plot.timeseries_median', (['list_of_genera', 'list_of_series'], {'xlabel': '"""generations"""', 'ylabel': 'k'}), "(list_of_genera, list_of_series, xlabel='generations',\n ylabel=k)\n", (6709, 6777), True, 'import utils.plotting as plot\n'), ((6917, 6928), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6926, 6928), True, 'import matplotlib.pyplot as plt\n'), ((7195, 7206), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7204, 7206), True, 'import matplotlib.pyplot as plt\n'), ((9826, 9837), 'time.time', 'time.time', ([], {}), '()\n', (9835, 9837), False, 'import time\n'), ((6268, 6336), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-group-' + gstr + '-series.pdf')"], {}), "(args.monitor_dir, k + '-group-' + gstr + '-series.pdf')\n", (6280, 6336), False, 'import os\n'), ((6537, 6611), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-group-' + gstr + '-distribution.pdf')"], {}), "(args.monitor_dir, k + '-group-' + gstr + '-distribution.pdf')\n", (6549, 6611), False, 'import os\n'), ((6806, 6874), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-group-' + gstr + '-median.pdf')"], {}), "(args.monitor_dir, k + '-group-' + gstr + '-median.pdf')\n", (6818, 6874), False, 'import os\n'), ((7072, 7157), 'os.path.join', 'os.path.join', (['args.monitor_dir', "(k + '-group-' + gstr + '-final-distribution.pdf')"], {}), "(args.monitor_dir, k + '-group-' + gstr + '-final-distribution.pdf'\n )\n", (7084, 7157), False, 'import os\n')]
|
# analyzing each point forecast and selecting the best, day by day, saving forecasts and making final forecast
import os
import sys
import datetime
import logging
import logging.handlers as handlers
import json
import itertools as it
import pandas as pd
import numpy as np
# open local settings
with open('./settings.json') as local_json_file:
local_submodule_settings = json.loads(local_json_file.read())
local_json_file.close()
# log setup
current_script_name = os.path.basename(__file__).split('.')[0]
log_path_filename = ''.join([local_submodule_settings['log_path'], current_script_name, '.log'])
logging.basicConfig(filename=log_path_filename, level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
logHandler = handlers.RotatingFileHandler(log_path_filename, maxBytes=10485760, backupCount=5)
logger.addHandler(logHandler)
# load custom libraries
sys.path.insert(1, local_submodule_settings['custom_library_path'])
from save_forecast_and_make_submission import save_forecast_and_submission
from stochastic_model_obtain_results import stochastic_simulation_results_analysis
class explore_day_by_day_results_and_generate_submission:
def run(self, submission_name, local_ergs_settings):
try:
print('\nstarting the granular day_by_day ts_by_ts point forecast selection approach')
            # first check the stage: if we are in the evaluation stage, no real MSE values are available, so warn about this
            if local_ergs_settings['competition_stage'] != 'submitting_after_June_1th_using_1913days':
                print('settings indicate that the final stage is now in progress')
                print('so no real MSE values are available for comparison')
                print('the last saved data will be used, allowing the process to continue..')
print(''.join(['\x1b[0;2;41m',
'but be careful with this submission and consider other way to make the final submit',
'\x1b[0m']))
# loading the forecasts
first_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
'first_model_forecast_data.npy']))
second_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
'second_model_forecast_data.npy']))
third_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
'third_model_forecast_data.npy']))
fourth_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
'fourth_model_forecast_data.npy']))
            # this forecast has shape (30490, 28)
fifth_model_forecast_30490_28 = np.load(''.join([local_ergs_settings['train_data_path'],
'fifth_model_forecast_data.npy']))
fifth_model_forecast = np.zeros(shape=(60980, 28), dtype=np.dtype('float32'))
fifth_model_forecast[0: 30490, :] = fifth_model_forecast_30490_28
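            # editor's note (assumption): the other forecast arrays use 60980 rows, i.e. twice 30490,
            # which matches the M5 submission layout of validation plus evaluation rows; the fifth
            # model only produced the first 30490 rows, so the remaining rows are left as zeros here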
sixth_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
'sixth_model_forecast_data.npy']))
seventh_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
'seventh_model_forecast_data.npy']))
eighth_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
'eighth_model_nearest_neighbor_forecast_data.npy']))
ninth_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
'ninth_model_random_average_simulation_forecast_data.npy']))
best_mse_model_forecast = np.load(''.join([local_ergs_settings['train_data_path'],
'mse_based_best_ts_forecast.npy']))
# day by day comparison
with open(''.join([local_ergs_settings['hyperparameters_path'],
'organic_in_block_time_serie_based_model_hyperparameters.json'])) \
as local_r_json_file:
local_model_ergs_hyperparameters = json.loads(local_r_json_file.read())
local_r_json_file.close()
nof_ts = local_ergs_settings['number_of_time_series']
local_forecast_horizon_days = local_ergs_settings['forecast_horizon_days']
best_lower_error_ts_day_by_day_y_pred = np.zeros(shape=(nof_ts, local_forecast_horizon_days),
dtype=np.dtype('float32'))
count_best_first_model, count_best_second_model, count_best_third_model, count_best_fourth_model,\
count_best_fifth_model, count_best_sixth_model, count_best_seventh_model, count_best_eighth_model,\
count_best_ninth_model, count_best_mse_model = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
ts_model_mse = []
# accessing ground_truth data and rechecking stage of competition
local_ergs_raw_data_filename = 'sales_train_evaluation.csv'
local_ergs_raw_unit_sales = pd.read_csv(''.join([local_ergs_settings['raw_data_path'],
local_ergs_raw_data_filename]))
print('raw sales data accessed (day_by_day_approach_best_lower_error_model results evaluation)')
# extract data and check dimensions
local_ergs_raw_unit_sales = local_ergs_raw_unit_sales.iloc[:, 6:].values
local_max_selling_time = np.shape(local_ergs_raw_unit_sales)[1]
local_settings_max_selling_time = local_ergs_settings['max_selling_time']
if local_settings_max_selling_time + 28 <= local_max_selling_time:
local_ergs_raw_unit_sales_ground_truth = local_ergs_raw_unit_sales
print('ground_truth data obtained')
print('length raw data ground truth:', local_ergs_raw_unit_sales_ground_truth.shape[1])
local_ergs_raw_unit_sales = local_ergs_raw_unit_sales[:, :local_settings_max_selling_time]
print('length raw data for training:', local_ergs_raw_unit_sales.shape[1])
elif local_max_selling_time != local_settings_max_selling_time:
                print("settings do not match data dimensions; they must be rechecked before continuing "
                      "(_day_by_day_best_lower_error_model_module)")
                logger.info(''.join(['\n', datetime.datetime.now().strftime("%d.%b %Y %H:%M:%S"),
                                     ' data dimensions do not match settings']))
return False
else:
if local_ergs_settings['competition_stage'] != 'submitting_after_June_1th_using_1941days':
print(''.join(['\x1b[0;2;41m', 'Warning', '\x1b[0m']))
print('please check: forecast horizon days will be included within training data')
print('It was expected that the last 28 days were not included..')
print('to avoid overfitting')
elif local_ergs_settings['competition_stage'] == 'submitting_after_June_1th_using_1941days':
print(''.join(['\x1b[0;2;41m', 'Straight end of the competition', '\x1b[0m']))
print('settings indicate that this is the last stage!')
                    print('caution: take into consideration that evaluations at this point are not useful, '
                          'because they will be made using the last data (the same data used in training)')
# will only use the last data available
local_ergs_raw_unit_sales_ground_truth = \
local_ergs_raw_unit_sales_ground_truth[:, -local_forecast_horizon_days:]
# very granular approach
# iterating in each point_forecast, calculating error and selecting best lower error model forecast
for time_serie_index, day_index in it.product(range(nof_ts), range(local_forecast_horizon_days)):
# acquiring day_by_day data
ground_truth_ts_day = local_ergs_raw_unit_sales_ground_truth[time_serie_index, day_index]
first_model_ts_day = first_model_forecast[time_serie_index, day_index]
second_model_ts_day = second_model_forecast[time_serie_index, day_index]
third_model_ts_day = third_model_forecast[time_serie_index, day_index]
fourth_model_ts_day = fourth_model_forecast[time_serie_index, day_index]
fifth_model_ts_day = fifth_model_forecast[time_serie_index, day_index]
sixth_model_ts_day = sixth_model_forecast[time_serie_index, day_index]
seventh_model_ts_day = seventh_model_forecast[time_serie_index, day_index]
eighth_model_ts_day = eighth_model_forecast[time_serie_index, day_index].astype(np.dtype('float32'))
ninth_model_ts_day = ninth_model_forecast[time_serie_index, day_index]
best_mse_model_ts_day = best_mse_model_forecast[time_serie_index, day_index]
# calculating error
first_model_ts_day_error = np.abs(ground_truth_ts_day - first_model_ts_day)
second_model_ts_day_error = np.abs(ground_truth_ts_day - second_model_ts_day)
third_model_ts_day_error = np.abs(ground_truth_ts_day - third_model_ts_day)
fourth_model_ts_day_error = np.abs(ground_truth_ts_day - fourth_model_ts_day)
fifth_model_ts_day_error = np.abs(ground_truth_ts_day - fifth_model_ts_day)
sixth_model_ts_day_error = np.abs(ground_truth_ts_day - sixth_model_ts_day)
seventh_model_ts_day_error = np.abs(ground_truth_ts_day - seventh_model_ts_day)
eighth_model_ts_day_error = np.abs(ground_truth_ts_day - eighth_model_ts_day)
ninth_model_ts_day_error = np.abs(ground_truth_ts_day - ninth_model_ts_day)
best_mse_model_ts_day_error = np.abs(ground_truth_ts_day - best_mse_model_ts_day)
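                # editor's note: the if/elif cascade below simply keeps the forecast with the lowest
                # absolute error for this (time_serie, day) point; a compact equivalent (assuming the
                # same model ordering and first-match tie-breaking) would be something like:
                #     errors = [first_model_ts_day_error, second_model_ts_day_error, ...]
                #     best_idx = int(np.argmin(errors))
                # the explicit chain is kept here because individual branches (fourth, best_mse)
                # are deliberately commented in or out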
# selecting best point ts_day forecast
if first_model_ts_day_error <= second_model_ts_day_error and \
first_model_ts_day_error <= third_model_ts_day_error \
and first_model_ts_day_error <= fourth_model_ts_day_error \
and first_model_ts_day_error <= fifth_model_ts_day_error\
and first_model_ts_day_error <= sixth_model_ts_day_error \
and first_model_ts_day_error <= seventh_model_ts_day_error\
and first_model_ts_day_error <= eighth_model_ts_day_error \
and first_model_ts_day_error <= ninth_model_ts_day_error \
and first_model_ts_day_error <= best_mse_model_ts_day_error:
best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = first_model_ts_day
count_best_first_model += 1
ts_model_mse.append([time_serie_index, int(1), first_model_ts_day_error])
# elif best_mse_model_ts_day_error <= first_model_ts_day_error \
# and best_mse_model_ts_day_error <= second_model_ts_day_error \
# and best_mse_model_ts_day_error <= third_model_ts_day_error \
# and best_mse_model_ts_day_error <= fourth_model_ts_day_error\
# and best_mse_model_ts_day_error <= fifth_model_ts_day_error \
# and best_mse_model_ts_day_error <= sixth_model_ts_day_error\
# and best_mse_model_ts_day_error <= seventh_model_ts_day_error \
# and best_mse_model_ts_day_error <= eighth_model_ts_day_error\
# and best_mse_model_ts_day_error <= ninth_model_ts_day_error:
# best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = best_mse_model_ts_day
# count_best_mse_model += 1
# ts_model_mse.append([time_serie_index, int(10), best_mse_model_ts_day_error])
elif second_model_ts_day_error <= first_model_ts_day_error \
and second_model_ts_day_error <= third_model_ts_day_error \
and second_model_ts_day_error <= fourth_model_ts_day_error \
and second_model_ts_day_error <= fifth_model_ts_day_error\
and second_model_ts_day_error <= sixth_model_ts_day_error \
and second_model_ts_day_error <= seventh_model_ts_day_error\
and second_model_ts_day_error <= eighth_model_ts_day_error \
and second_model_ts_day_error <= ninth_model_ts_day_error\
and second_model_ts_day_error <= best_mse_model_ts_day_error:
best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = second_model_ts_day
count_best_second_model += 1
ts_model_mse.append([time_serie_index, int(2), second_model_ts_day_error])
elif third_model_ts_day_error <= first_model_ts_day_error \
and third_model_ts_day_error <= second_model_ts_day_error \
and third_model_ts_day_error <= fourth_model_ts_day_error \
and third_model_ts_day_error <= fifth_model_ts_day_error\
and third_model_ts_day_error <= sixth_model_ts_day_error \
and third_model_ts_day_error <= seventh_model_ts_day_error\
and third_model_ts_day_error <= eighth_model_ts_day_error \
and third_model_ts_day_error <= ninth_model_ts_day_error\
and third_model_ts_day_error <= best_mse_model_ts_day_error:
best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = third_model_ts_day
count_best_third_model += 1
ts_model_mse.append([time_serie_index, int(3), third_model_ts_day_error])
# elif fourth_model_ts_day_error <= first_model_ts_day_error \
# and fourth_model_ts_day_error <= second_model_ts_day_error \
# and fourth_model_ts_day_error <= third_model_ts_day_error \
# and fourth_model_ts_day_error <= fifth_model_ts_day_error\
# and fourth_model_ts_day_error <= sixth_model_ts_day_error \
# and fourth_model_ts_day_error <= seventh_model_ts_day_error\
# and fourth_model_ts_day_error <= eighth_model_ts_day_error \
# and fourth_model_ts_day_error <= ninth_model_ts_day_error\
# and fourth_model_ts_day_error <= best_mse_model_ts_day_error:
# best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = fourth_model_ts_day
# count_best_fourth_model += 1
# ts_model_mse.append([time_serie_index, int(4), fourth_model_ts_day_error])
elif fifth_model_ts_day_error <= first_model_ts_day_error \
and fifth_model_ts_day_error <= second_model_ts_day_error \
and fifth_model_ts_day_error <= third_model_ts_day_error \
and fifth_model_ts_day_error <= fourth_model_ts_day_error\
and fifth_model_ts_day_error <= sixth_model_ts_day_error \
and fifth_model_ts_day_error <= seventh_model_ts_day_error\
and fifth_model_ts_day_error <= eighth_model_ts_day_error \
and fifth_model_ts_day_error <= ninth_model_ts_day_error\
and fifth_model_ts_day_error <= best_mse_model_ts_day_error:
best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = fifth_model_ts_day
count_best_fifth_model += 1
ts_model_mse.append([time_serie_index, int(5), fifth_model_ts_day_error])
elif sixth_model_ts_day_error <= first_model_ts_day_error \
and sixth_model_ts_day_error <= second_model_ts_day_error \
and sixth_model_ts_day_error <= third_model_ts_day_error \
and sixth_model_ts_day_error <= fourth_model_ts_day_error\
and sixth_model_ts_day_error <= fifth_model_ts_day_error \
and sixth_model_ts_day_error <= seventh_model_ts_day_error\
and sixth_model_ts_day_error <= eighth_model_ts_day_error \
and sixth_model_ts_day_error <= ninth_model_ts_day_error\
and sixth_model_ts_day_error <= best_mse_model_ts_day_error:
best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = sixth_model_ts_day
count_best_sixth_model += 1
ts_model_mse.append([time_serie_index, int(6), sixth_model_ts_day_error])
elif seventh_model_ts_day_error <= first_model_ts_day_error \
and seventh_model_ts_day_error <= second_model_ts_day_error \
and seventh_model_ts_day_error <= third_model_ts_day_error \
and seventh_model_ts_day_error <= fourth_model_ts_day_error\
and seventh_model_ts_day_error <= fifth_model_ts_day_error \
and seventh_model_ts_day_error <= sixth_model_ts_day_error\
and seventh_model_ts_day_error <= eighth_model_ts_day_error \
and seventh_model_ts_day_error <= ninth_model_ts_day_error\
and seventh_model_ts_day_error <= best_mse_model_ts_day_error:
best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = seventh_model_ts_day
count_best_seventh_model += 1
ts_model_mse.append([time_serie_index, int(7), seventh_model_ts_day_error])
elif ninth_model_ts_day_error <= first_model_ts_day_error \
and ninth_model_ts_day_error <= second_model_ts_day_error \
and ninth_model_ts_day_error <= third_model_ts_day_error \
and ninth_model_ts_day_error <= fourth_model_ts_day_error\
and ninth_model_ts_day_error <= fifth_model_ts_day_error \
and ninth_model_ts_day_error <= sixth_model_ts_day_error\
and ninth_model_ts_day_error <= seventh_model_ts_day_error \
and ninth_model_ts_day_error <= eighth_model_ts_day_error\
and ninth_model_ts_day_error <= best_mse_model_ts_day_error:
best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = ninth_model_ts_day
count_best_ninth_model += 1
ts_model_mse.append([time_serie_index, int(9), ninth_model_ts_day_error])
else:
best_lower_error_ts_day_by_day_y_pred[time_serie_index, day_index] = eighth_model_ts_day
count_best_eighth_model += 1
ts_model_mse.append([time_serie_index, int(8), eighth_model_ts_day_error])
# finally reporting the results
            print('used ', count_best_first_model, ' ts day_by_day forecasts from the first model')
            print('used ', count_best_second_model, ' ts day_by_day forecasts from the second model')
            print('used ', count_best_third_model, ' ts day_by_day forecasts from the third model')
            print('used ', count_best_fourth_model, ' ts day_by_day forecasts from the fourth model')
            print('used ', count_best_fifth_model, ' ts day_by_day forecasts from the fifth model')
            print('used ', count_best_sixth_model, ' ts day_by_day forecasts from the sixth model')
            print('used ', count_best_seventh_model, ' ts day_by_day forecasts from the seventh model')
            print('used ', count_best_eighth_model, ' ts day_by_day forecasts from the eighth model')
            print('used ', count_best_ninth_model, ' ts day_by_day forecasts from the ninth model')
            print('used ', count_best_mse_model, ' ts day_by_day forecasts from the best_mse (tenth) model')
            # saving the selected best forecast among the different models and making the submission
store_and_submit_best_model_forecast = save_forecast_and_submission()
point_error_based_best_model_save_review = \
store_and_submit_best_model_forecast.store_and_submit(submission_name, local_ergs_settings,
best_lower_error_ts_day_by_day_y_pred)
if point_error_based_best_model_save_review:
print('best low point forecast error and generate_submission data and submission done')
else:
print('error at storing best_low_point_forecast_error data and generate_submission or submission')
# evaluating the best_lower_error criteria granular_model forecast
local_ergs_forecasts_name = 'day_by_day_best_low_error_criteria_model_forecast'
zeros_as_forecast = stochastic_simulation_results_analysis()
zeros_as_forecast_review = \
zeros_as_forecast.evaluate_stochastic_simulation(local_ergs_settings,
local_model_ergs_hyperparameters,
local_ergs_raw_unit_sales,
local_ergs_raw_unit_sales_ground_truth,
local_ergs_forecasts_name)
# saving errors by time_serie and storing the estimated best model
ts_model_mse = np.array(ts_model_mse)
np.save(''.join([local_ergs_settings['models_evaluation_path'],
'best_low_point_forecast_error_ts_model_mse']), ts_model_mse)
np.savetxt(''.join([local_ergs_settings['models_evaluation_path'],
'best_low_point_forecast_error_ts_model_mse.csv']),
ts_model_mse, fmt='%10.15f', delimiter=',', newline='\n')
except Exception as submodule_error:
print('best low point forecast error and generate_submission submodule_error: ', submodule_error)
logger.info('error in best low point forecast error and generate_submission submodule')
logger.error(str(submodule_error), exc_info=True)
return False
return True
|
[
"logging.basicConfig",
"logging.getLogger",
"numpy.shape",
"sys.path.insert",
"numpy.abs",
"numpy.dtype",
"logging.handlers.RotatingFileHandler",
"stochastic_model_obtain_results.stochastic_simulation_results_analysis",
"numpy.array",
"datetime.datetime.now",
"os.path.basename",
"save_forecast_and_make_submission.save_forecast_and_submission"
] |
[((612, 742), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_path_filename', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)s %(name)s %(message)s"""'}), "(filename=log_path_filename, level=logging.DEBUG, format\n ='%(asctime)s %(levelname)s %(name)s %(message)s')\n", (631, 742), False, 'import logging\n'), ((767, 794), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (784, 794), False, 'import logging\n'), ((808, 893), 'logging.handlers.RotatingFileHandler', 'handlers.RotatingFileHandler', (['log_path_filename'], {'maxBytes': '(10485760)', 'backupCount': '(5)'}), '(log_path_filename, maxBytes=10485760,\n backupCount=5)\n', (836, 893), True, 'import logging.handlers as handlers\n'), ((945, 1012), 'sys.path.insert', 'sys.path.insert', (['(1)', "local_submodule_settings['custom_library_path']"], {}), "(1, local_submodule_settings['custom_library_path'])\n", (960, 1012), False, 'import sys\n'), ((474, 500), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (490, 500), False, 'import os\n'), ((21052, 21082), 'save_forecast_and_make_submission.save_forecast_and_submission', 'save_forecast_and_submission', ([], {}), '()\n', (21080, 21082), False, 'from save_forecast_and_make_submission import save_forecast_and_submission\n'), ((21855, 21895), 'stochastic_model_obtain_results.stochastic_simulation_results_analysis', 'stochastic_simulation_results_analysis', ([], {}), '()\n', (21893, 21895), False, 'from stochastic_model_obtain_results import stochastic_simulation_results_analysis\n'), ((22518, 22540), 'numpy.array', 'np.array', (['ts_model_mse'], {}), '(ts_model_mse)\n', (22526, 22540), True, 'import numpy as np\n'), ((5904, 5939), 'numpy.shape', 'np.shape', (['local_ergs_raw_unit_sales'], {}), '(local_ergs_raw_unit_sales)\n', (5912, 5939), True, 'import numpy as np\n'), ((9531, 9579), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - first_model_ts_day)'], {}), '(ground_truth_ts_day - first_model_ts_day)\n', (9537, 9579), True, 'import numpy as np\n'), ((9624, 9673), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - second_model_ts_day)'], {}), '(ground_truth_ts_day - second_model_ts_day)\n', (9630, 9673), True, 'import numpy as np\n'), ((9717, 9765), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - third_model_ts_day)'], {}), '(ground_truth_ts_day - third_model_ts_day)\n', (9723, 9765), True, 'import numpy as np\n'), ((9810, 9859), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - fourth_model_ts_day)'], {}), '(ground_truth_ts_day - fourth_model_ts_day)\n', (9816, 9859), True, 'import numpy as np\n'), ((9903, 9951), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - fifth_model_ts_day)'], {}), '(ground_truth_ts_day - fifth_model_ts_day)\n', (9909, 9951), True, 'import numpy as np\n'), ((9995, 10043), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - sixth_model_ts_day)'], {}), '(ground_truth_ts_day - sixth_model_ts_day)\n', (10001, 10043), True, 'import numpy as np\n'), ((10089, 10139), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - seventh_model_ts_day)'], {}), '(ground_truth_ts_day - seventh_model_ts_day)\n', (10095, 10139), True, 'import numpy as np\n'), ((10184, 10233), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - eighth_model_ts_day)'], {}), '(ground_truth_ts_day - eighth_model_ts_day)\n', (10190, 10233), True, 'import numpy as np\n'), ((10277, 10325), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - ninth_model_ts_day)'], {}), '(ground_truth_ts_day - ninth_model_ts_day)\n', (10283, 10325), True, 
'import numpy as np\n'), ((10372, 10423), 'numpy.abs', 'np.abs', (['(ground_truth_ts_day - best_mse_model_ts_day)'], {}), '(ground_truth_ts_day - best_mse_model_ts_day)\n', (10378, 10423), True, 'import numpy as np\n'), ((3138, 3157), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (3146, 3157), True, 'import numpy as np\n'), ((4911, 4930), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (4919, 4930), True, 'import numpy as np\n'), ((9250, 9269), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (9258, 9269), True, 'import numpy as np\n'), ((6834, 6857), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6855, 6857), False, 'import datetime\n')]
|
"""
Utility functions for running NEB calculations
"""
import numpy as np
from aiida.orm import StructureData
from aiida.engine import calcfunction
from ase.neb import NEB
@calcfunction
def neb_interpolate(init_structure, final_structure, nimages):
    """
    Interpolate NEB frames using the starting and the final structures.
    Get around the PBC wrapping problem by calculating the MIC displacements
    from the initial to the final structure.
    """
    ainit = init_structure.get_ase()
    afinal = final_structure.get_ase()
disps = []
# Find distances
acombined = ainit.copy()
acombined.extend(afinal)
# Get piece-wise MIC distances
for i in range(len(ainit)):
dist = acombined.get_distance(i, i + len(ainit), vector=True, mic=True)
disps.append(dist.tolist())
disps = np.asarray(disps)
ainit.wrap(eps=1e-1)
afinal = ainit.copy()
# Displace the atoms according to MIC distances
afinal.positions += disps
neb = NEB([ainit.copy() for i in range(int(nimages) + 1)] + [afinal.copy()])
neb.interpolate()
out_init = StructureData(ase=neb.images[0])
out_init.label = init_structure.label + ' INIT'
out_final = StructureData(ase=neb.images[-1])
out_final.label = init_structure.label + ' FINAL'
outputs = {'image_init': out_init}
for i, out in enumerate(neb.images[1:-1]):
outputs[f'image_{i+1:02d}'] = StructureData(ase=out)
outputs[f'image_{i+1:02d}'].label = init_structure.label + f' FRAME {i+1:02d}'
outputs['image_final'] = out_final
return outputs
@calcfunction
def fix_atom_order(reference, to_fix):
"""
    Fix atom order by finding NN distances between two frames. This resolves
    the issue where two closely matching structures have different atomic orders.
    Note that the two frames must be close enough for this to work.
"""
aref = reference.get_ase()
afix = to_fix.get_ase()
# Index of the reference atom in the second structure
new_indices = np.zeros(len(aref), dtype=int)
# Find distances
acombined = aref.copy()
acombined.extend(afix)
# Get piece-wise MIC distances
for i in range(len(aref)):
dists = []
for j in range(len(aref)):
dist = acombined.get_distance(i, j + len(aref), mic=True)
dists.append(dist)
min_idx = np.argmin(dists)
min_dist = min(dists)
if min_dist > 0.5:
            print(f'Large displacement found - moving atom {min_idx} to {i} - please check if this is correct!')
new_indices[i] = min_idx
afixed = afix[new_indices]
fixed_structure = StructureData(ase=afixed)
fixed_structure.label = to_fix.label + ' UPDATED ORDER'
return fixed_structure
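# minimal usage sketch (editor's note; assumes a loaded AiiDA profile and that `initial`
# and `final` are StructureData nodes for the two endpoint geometries):
#     from aiida.orm import Int
#     final_fixed = fix_atom_order(initial, final)        # align atom order before interpolating
#     frames = neb_interpolate(initial, final_fixed, Int(5))   # image_init, image_01..image_05, image_final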
|
[
"numpy.argmin",
"aiida.orm.StructureData",
"numpy.asarray"
] |
[((828, 845), 'numpy.asarray', 'np.asarray', (['disps'], {}), '(disps)\n', (838, 845), True, 'import numpy as np\n'), ((1098, 1130), 'aiida.orm.StructureData', 'StructureData', ([], {'ase': 'neb.images[0]'}), '(ase=neb.images[0])\n', (1111, 1130), False, 'from aiida.orm import StructureData\n'), ((1199, 1232), 'aiida.orm.StructureData', 'StructureData', ([], {'ase': 'neb.images[-1]'}), '(ase=neb.images[-1])\n', (1212, 1232), False, 'from aiida.orm import StructureData\n'), ((2633, 2658), 'aiida.orm.StructureData', 'StructureData', ([], {'ase': 'afixed'}), '(ase=afixed)\n', (2646, 2658), False, 'from aiida.orm import StructureData\n'), ((1412, 1434), 'aiida.orm.StructureData', 'StructureData', ([], {'ase': 'out'}), '(ase=out)\n', (1425, 1434), False, 'from aiida.orm import StructureData\n'), ((2365, 2381), 'numpy.argmin', 'np.argmin', (['dists'], {}), '(dists)\n', (2374, 2381), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Author: TD21forever
# @Date: 2019-05-26 12:14:07
# @Last Modified by: TD21forever
# @Last Modified time: 2019-06-17 23:11:15
import numpy as np
'''
dp[item][cap] means: the maximum value obtainable by picking from the first `item` items and putting them into a knapsack of capacity `cap`
'''
def solution(num,waste,value,capacity):
dp = np.zeros([num+5,capacity+2])
for item in range(1,num+1):
for cap in range(1,capacity+1):
            if waste[item] > cap:  # item is too heavy to fit in the remaining capacity
                dp[item][cap] = dp[item-1][cap]
            else:
                situation1 = dp[item-1][cap]  # do not take item
                situation2 = dp[item-1][cap-waste[item]] + value[item]  # take item
if situation1 > situation2:
dp[item][cap] = situation1
else:
dp[item][cap] = situation2
return dp
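# editor's note: the table has (num + 1) x (capacity + 1) meaningful cells, so time and
# memory are O(num * capacity); the overall answer is dp[num][capacity], e.g. 12.0 for the
# sample input below (all three items fit, since 2 + 3 + 4 = 9 <= 10 and 3 + 4 + 5 = 12)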
if __name__ == '__main__':
waste = [0, 2, 3, 4]
value = [0, 3, 4, 5]
num = 3
capacity = 10
res = solution(num,waste,value,capacity)
print(res)
# print(res[5][20])
|
[
"numpy.zeros"
] |
[((295, 328), 'numpy.zeros', 'np.zeros', (['[num + 5, capacity + 2]'], {}), '([num + 5, capacity + 2])\n', (303, 328), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to visualize the unified scores.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pathlib
import matplotlib
matplotlib.use("Agg") # Set headless-friendly backend.
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import pandas as pd
import seaborn as sns
def heat_square(matrix, output_dir, name, xlabel, ylabel, max_val=None,
factor_names=None):
"""Plot values of a matrix.
Each entry is represented as a square of increasing size and different color.
Args:
matrix: Matrix of values to plot. Values should be in range [0, max_val].
output_dir: Where to save the image.
name: File name.
xlabel: Name of the x axis of the matrix.
ylabel: Name of the y axis of the matrix.
max_val: Maximum value acceptable in the matrix. If None, the max_val will
be set as the maximum value in the matrix.
factor_names: Names of the factors of variation.
"""
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2})
sns.set_style("whitegrid")
fig, _ = plt.subplots()
plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=1.2)
ax = plt.subplot(plot_grid[:, :-1])
if max_val is None:
max_val = np.max(matrix)
if max_val == 0:
max_val = 1.
else:
if max_val < np.max(matrix):
raise ValueError("The matrix has maximum value larger than max_val")
palette = sns.color_palette("Blues", 256)
# Estimates the area of the squares: the length of the edge is
# roughly: length of the grid in inches * how many points per inch - space for
# the axis names times * 14/15 as the last 1/15 part of the figure is occupied
# by the colorbar legend.
size_scale = ((((ax.get_position().xmax - ax.get_position().xmin) *
fig.get_size_inches()[0] * fig.get_dpi() - 40) * 14 / 15 * 0.8) /
(matrix.shape[0])) ** 2
plot_matrix_squares(matrix, max_val, palette, size_scale, ax)
plt.xticks(range(matrix.shape[0]))
if factor_names is not None:
plt.yticks(range(matrix.shape[1]), factor_names)
else:
plt.yticks(range(matrix.shape[1]))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# Add color legend on the right side of the plot.
ax = plt.subplot(plot_grid[:, -1])
plot_bar_palette(palette, max_val, ax)
if not os.path.isdir(output_dir):
pathlib.Path(output_dir).mkdir(parents=True)
output_path = os.path.join(output_dir, "{}.png".format(name))
with open(output_path, "wb") as path:
fig.savefig(path, bbox_inches="tight")
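# minimal usage sketch (editor's note; the matrix and paths are illustrative):
#     scores = np.random.uniform(0.0, 1.0, size=(5, 5))
#     heat_square(scores, "/tmp/unified_scores", "example", "latent codes", "factors")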
def plot_matrix_squares(matrix, max_val, palette, size_scale, ax):
"""Grid of squares where the size is proportional to the matrix values.
Args:
matrix: Matrix of values to plot.
max_val: Maximum value that is allowed in the matrix.
palette: Color palette.
size_scale: Maximum size of the squares.
ax: Axis of the subplot.
"""
tmp = pd.melt(pd.DataFrame(matrix).reset_index(), id_vars="index")
# The columns of the dataframe are: index, variable and value.
def to_color(val):
ind = int(val / max_val * 255)
return palette[ind]
ax.scatter(x=tmp["index"], y=tmp["variable"],
s=size_scale * tmp["value"] / max_val, marker="s",
c=tmp["value"].apply(to_color))
ax.set_xticks([v + 0.5 for v in range(matrix.shape[0])], minor=True)
ax.set_yticks([v + 0.5 for v in range(matrix.shape[1])], minor=True)
ax.grid(False, "major")
ax.grid(True, "minor")
ax.set_xlim([-0.5, matrix.shape[0] - 0.5])
ax.set_ylim([-0.5, matrix.shape[1] - 0.5])
ax.tick_params(right=False, top=False, left=False, bottom=False)
ax.set_aspect(aspect=1.)
def plot_bar_palette(palette, max_val, ax):
"""Plot color bar legend."""
col_x = [0] * len(palette)
    bar_y = np.linspace(0, max_val, 256)
bar_height = bar_y[1] - bar_y[0]
ax.barh(bar_y, np.array([5] * len(palette)), height=bar_height, left=col_x,
align="center", color=palette, linewidth=0)
ax.set_xlim(1, 2)
ax.set_ylim(0, max_val)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks(np.linspace(0, max_val, 3))
ax.yaxis.tick_right()
def plot_recovery_vs_independent(matrix, output_dir, name):
"""Plot how many factors are recovered and in how many independent groups.
Plot how many factors of variation are independently captured in a
representation at different thresholds. It takes as input a matrix
relating factors of variation and latent dimensions, sort the elements and
then plot for each threshold (1) how many factors are discovered and (2)
how many factors are encoded independently in the representation.
Args:
matrix: Contains statistical relations between factors of variation and
latent codes.
output_dir: Output directory where to save the plot.
name: Filename of the plot.
"""
thresholds = np.sort(matrix.flatten())[::-1]
precisions = [precision(matrix, x) for x in thresholds]
recalls = [recall(matrix, x) for x in thresholds]
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2})
sns.set_style("whitegrid")
fig, ax = plt.subplots()
palette = sns.color_palette()
plt.plot(range(thresholds.shape[0]), precisions, label="Independent groups",
color=palette[0], linewidth=3)
plt.plot(range(thresholds.shape[0]), recalls, "--", label="Discovered",
color=palette[1], linewidth=3)
thresholds_ids = range(0, thresholds.shape[0], 10)
plt.xticks(thresholds_ids, np.around(thresholds[thresholds_ids], 2))
ax.set_ylim([0, matrix.shape[0] * 1.1])
ax.tick_params(right=False, top=False, left=False, bottom=False)
ax.set_yticks(np.linspace(0, matrix.shape[0], matrix.shape[0] + 1))
plt.legend(loc="upper center", bbox_to_anchor=(0.5, 1.25), ncol=2)
plt.xlabel("Threshold")
plt.ylabel("Number of Factors")
if not os.path.isdir(output_dir):
pathlib.Path(output_dir).mkdir(parents=True)
output_path = os.path.join(output_dir, name + ".png")
with open(output_path, "wb") as path:
fig.savefig(path, bbox_inches="tight")
def precision(matrix, th):
"""How many independent components are discovered for a given threshold.
Args:
matrix: Adjacency matrix of shape (num_codes, num_factors) encoding the
statistical relations between factors and codes.
th: Eliminate all edges smaller than this threshold.
Returns:
Number of connected components.
"""
tmp = matrix.copy()
tmp[tmp < th] = 0
factors = np.zeros(tmp.shape[0])
codes = np.zeros(tmp.shape[1])
cc = 0
for i in range(len(factors)):
if factors[i] == 0:
to_visit = [(i, 0)]
factors, codes, size = bfs(tmp, to_visit, factors, codes, 1)
if size > 1:
cc += 1
return cc
def recall(matrix, th):
"""How many factors are discovered for a given threshold.
Counts as many factors of variation are captured in the representation.
First, we remove all edges in the adjacency matrix with weight smaller than
the threshold. Then, we count how many factors are connected to some codes.
Args:
matrix: Adjacency matrix for the graph.
th: Eliminate all edges smaller than this threshold.
Returns:
Number of discovered factors of variation for the given threshold.
"""
tmp = matrix.copy()
tmp[tmp < th] = 0
return np.sum(np.sum(tmp, axis=1) != 0)
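# toy example (editor's note): for matrix = np.array([[0.9, 0.0], [0.0, 0.8]]) and th = 0.5,
# each factor is connected to exactly one code, so precision(matrix, th) == 2 (two independent
# groups) and recall(matrix, th) == 2 (both factors discovered)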
def bfs(matrix, to_visit, factors, codes, size):
"""Traverse the matrix across connected components.
Implements breadth first search on an adjacency matrix. In our case, the
adjacency matrix encodes the statistical relations between factors of
variation and codes. This is used to traverse the adjacency matrix and
discover whether a factor is captured in multiple codes and whether there is a
path in the graph connecting two factors.
Args:
matrix: Adjacency matrix for the graph.
to_visit: Queue with the nodes to visit. We index the factors and codes in
the adjacency matrix and implement the queue with an array containing the
nodes that need to be visited.
factors: Array of shape (num_factors, ) with flags marking whether factors
of variation are visited.
codes: Array of shape (num_codes, ) with flags marking whether codes are
visited.
size: Count how many node are in the same connected component.
Returns:
factors: Array of shape (num_factors, ) with flags marking whether factors
of variation are visited.
codes: Array of shape (num_codes, ) with flags marking whether codes are
visited.
size: How many nodes were visited.
"""
(current_node, flag) = to_visit.pop()
if flag == 0:
factors[current_node] = 1
for i in range(len(matrix[current_node, :])):
if matrix[current_node, i] != 0:
if codes[i] == 0:
to_visit.append((i, 1))
size += 1
factors, codes, size = bfs(matrix, to_visit, factors, codes, size)
else:
codes[current_node] = 1
for i in range(len(matrix[:, current_node])):
if matrix[i, current_node] != 0:
if factors[i] == 0:
to_visit.append((i, 0))
size += 1
factors, codes, size = bfs(matrix, to_visit, factors, codes, size)
return factors, codes, size
|
[
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"matplotlib.pyplot.GridSpec",
"seaborn.color_palette",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.linspace",
"os.path.isdir",
"pandas.DataFrame",
"matplotlib.use",
"seaborn.set_context",
"numpy.around",
"matplotlib.pyplot.legend",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots"
] |
[((838, 859), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (852, 859), False, 'import matplotlib\n'), ((1703, 1773), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.5)', 'rc': "{'lines.linewidth': 2}"}), "('notebook', font_scale=1.5, rc={'lines.linewidth': 2})\n", (1718, 1773), True, 'import seaborn as sns\n'), ((1778, 1804), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (1791, 1804), True, 'import seaborn as sns\n'), ((1818, 1832), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1830, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1892), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(1)', '(15)'], {'hspace': '(0.2)', 'wspace': '(1.2)'}), '(1, 15, hspace=0.2, wspace=1.2)\n', (1861, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1932), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot_grid[:, :-1]'], {}), '(plot_grid[:, :-1])\n', (1913, 1932), True, 'import matplotlib.pyplot as plt\n'), ((2182, 2213), 'seaborn.color_palette', 'sns.color_palette', (['"""Blues"""', '(256)'], {}), "('Blues', 256)\n", (2199, 2213), True, 'import seaborn as sns\n'), ((2929, 2947), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (2939, 2947), True, 'import matplotlib.pyplot as plt\n'), ((2952, 2970), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (2962, 2970), True, 'import matplotlib.pyplot as plt\n'), ((3034, 3063), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot_grid[:, -1]'], {}), '(plot_grid[:, -1])\n', (3045, 3063), True, 'import matplotlib.pyplot as plt\n'), ((4635, 4667), 'numpy.linspace', 'np.linspace', (['(0)', 'max_val', '(256)', 'ax'], {}), '(0, max_val, 256, ax)\n', (4646, 4667), True, 'import numpy as np\n'), ((5895, 5965), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1.5)', 'rc': "{'lines.linewidth': 2}"}), "('notebook', font_scale=1.5, rc={'lines.linewidth': 2})\n", (5910, 5965), True, 'import seaborn as sns\n'), ((5970, 5996), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (5983, 5996), True, 'import seaborn as sns\n'), ((6011, 6025), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6023, 6025), True, 'import matplotlib.pyplot as plt\n'), ((6040, 6059), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (6057, 6059), True, 'import seaborn as sns\n'), ((6622, 6688), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, 1.25)', 'ncol': '(2)'}), "(loc='upper center', bbox_to_anchor=(0.5, 1.25), ncol=2)\n", (6632, 6688), True, 'import matplotlib.pyplot as plt\n'), ((6693, 6716), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Threshold"""'], {}), "('Threshold')\n", (6703, 6716), True, 'import matplotlib.pyplot as plt\n'), ((6721, 6752), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Factors"""'], {}), "('Number of Factors')\n", (6731, 6752), True, 'import matplotlib.pyplot as plt\n'), ((6862, 6901), 'os.path.join', 'os.path.join', (['output_dir', "(name + '.png')"], {}), "(output_dir, name + '.png')\n", (6874, 6901), False, 'import os\n'), ((7423, 7445), 'numpy.zeros', 'np.zeros', (['tmp.shape[0]'], {}), '(tmp.shape[0])\n', (7431, 7445), True, 'import numpy as np\n'), ((7458, 7480), 'numpy.zeros', 'np.zeros', (['tmp.shape[1]'], {}), '(tmp.shape[1])\n', (7466, 7480), True, 'import numpy as np\n'), ((1975, 1989), 'numpy.max', 'np.max', (['matrix'], 
{}), '(matrix)\n', (1981, 1989), True, 'import numpy as np\n'), ((3119, 3144), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (3132, 3144), False, 'import os\n'), ((4952, 4978), 'numpy.linspace', 'np.linspace', (['(0)', 'max_val', '(3)'], {}), '(0, max_val, 3)\n', (4963, 4978), True, 'import numpy as np\n'), ((6391, 6431), 'numpy.around', 'np.around', (['thresholds[thresholds_ids]', '(2)'], {}), '(thresholds[thresholds_ids], 2)\n', (6400, 6431), True, 'import numpy as np\n'), ((6564, 6616), 'numpy.linspace', 'np.linspace', (['(0)', 'matrix.shape[0]', '(matrix.shape[0] + 1)'], {}), '(0, matrix.shape[0], matrix.shape[0] + 1)\n', (6575, 6616), True, 'import numpy as np\n'), ((6764, 6789), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (6777, 6789), False, 'import os\n'), ((2071, 2085), 'numpy.max', 'np.max', (['matrix'], {}), '(matrix)\n', (2077, 2085), True, 'import numpy as np\n'), ((8323, 8342), 'numpy.sum', 'np.sum', (['tmp'], {'axis': '(1)'}), '(tmp, axis=1)\n', (8329, 8342), True, 'import numpy as np\n'), ((3154, 3178), 'pathlib.Path', 'pathlib.Path', (['output_dir'], {}), '(output_dir)\n', (3166, 3178), False, 'import pathlib\n'), ((3744, 3764), 'pandas.DataFrame', 'pd.DataFrame', (['matrix'], {}), '(matrix)\n', (3756, 3764), True, 'import pandas as pd\n'), ((6799, 6823), 'pathlib.Path', 'pathlib.Path', (['output_dir'], {}), '(output_dir)\n', (6811, 6823), False, 'import pathlib\n')]
|
import argparse
import cv2
import numpy as np
from inference import Network
from openvino.inference_engine import IENetwork, IECore
import pylab as plt
import math
import matplotlib
from scipy.ndimage.filters import gaussian_filter
INPUT_STREAM = "emotion.mp4"
CPU_EXTENSION = "C:\\Program Files (x86)\\IntelSWTools\\openvino\\deployment_tools\\inference_engine\\bin\\intel64\\Release\\cpu_extension_avx2.dll"
MODEL = "C:/Users/gremi/Documents/Julien/udacity_intel/models/intel/emotions-recognition-retail-0003/INT8/emotions-recognition-retail-0003.xml"
# if linux : /opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
COLORS = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
EMOTIONS = ['neutral', 'happy', 'sad', 'surprise', 'anger']
def get_args():
'''
Gets the arguments from the command line.
'''
parser = argparse.ArgumentParser("Run inference on an input video")
# -- Create the descriptions for the commands
i_desc = "The location of the input file"
d_desc = "The device name, if not 'CPU'"
### Add additional arguments and descriptions for:
### 1) Different confidence thresholds used to draw bounding boxes
t_desc = "The confidence thresholds used to draw bounding boxes"
### 2) The user choosing the color of the bounding boxes
c_desc = "The color name of the bounding boxes"
# -- Add required and optional groups
parser._action_groups.pop()
optional = parser.add_argument_group('optional arguments')
# -- Create the arguments
optional.add_argument("-i", help=i_desc, default=INPUT_STREAM)
optional.add_argument("-d", help=d_desc, default='CPU')
optional.add_argument("-t", help=t_desc, default=0.2)
optional.add_argument("-c", help=c_desc, default="green")
args = parser.parse_args()
return args
def preprocessing(input_image, height, width):
'''
Given an input image, height and width:
- Resize to width and height
- Transpose the final "channel" dimension to be first
- Reshape the image to add a "batch" of 1 at the start
'''
image = cv2.resize(input_image, (width, height))
image = image.transpose((2,0,1))
#image = image.reshape(1, 3, height, width)
    #print("in preprocessing", *image.shape) # same thing : in preprocessing 3 384 672
image = image.reshape(1, *image.shape)
return image
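# shape example (editor's note): the emotions model expects NCHW input, so a 720x1280 BGR
# frame becomes an array of shape (1, 3, net_height, net_width) after preprocessing,
# matching net.get_input_shape()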
def get_mask(processed_output):
'''
Given an input image size and processed output for a semantic mask,
returns a masks able to be combined with the original image.
'''
# Create an empty array for other color channels of mask
empty = np.zeros(processed_output.shape)
# Stack to make a Green mask where text detected
mask = np.dstack((empty, processed_output, empty))
return mask
def create_output_image(image, output):
'''
creates an output image showing the result of inference.
'''
# Remove final part of output not used for heatmaps
output = output[:-1]
# Get only pose detections above 0.5 confidence, set to 255
#for c in range(len(output)):
# output[c] = np.where(output[c]>0.5, 255, 0)
# Sum along the "class" axis
output = np.sum(output, axis=0)
# Get semantic mask
pose_mask = get_mask(output)
# Combine with original image
image = image + pose_mask
#return image.astype('uint8')
return pose_mask.astype('uint8')
def infer_on_video(args):
'''
Performs inference on video - main method
'''
### Load the network model into the IE
print("Load the network model into the IE")
net = Network()
net.load_model(MODEL, "CPU", CPU_EXTENSION)
# Get and open video capture
cap = cv2.VideoCapture(args.i)
cap.open(args.i)
# Grab the shape of the input
width = int(cap.get(3))
height = int(cap.get(4))
# Create a video writer for the output video
# The second argument should be `cv2.VideoWriter_fourcc('M','J','P','G')`
# on Mac, and `0x00000021` on Linux
out = cv2.VideoWriter('out-' + INPUT_STREAM, 0x00000021, 30, (width,height))
# Process frames until the video ends, or process is exited
    frame_count = 0
while cap.isOpened():
# Read the next frame
flag, frame = cap.read()
if not flag:
break
key_pressed = cv2.waitKey(60)
preprocessed_frame = preprocessing(frame, net.get_input_shape()[2], net.get_input_shape()[3])
#print("Perform inference on the frame")
net.async_inference(preprocessed_frame)
if net.wait() == 0:
# Get the output of inference
output_blobs = net.extract_output()
probs = output_blobs['prob_emotion'][0]
index_of_maximum = np.argmax(probs)
emotion = EMOTIONS[index_of_maximum]
if index_of_maximum == 0:
probs[0] = 0
emotion = emotion + " (" + EMOTIONS[np.argmax(probs)] + ")"
print("emotion=", emotion)
# Scale the output text by the image shape
scaler = max(int(frame.shape[0] / 1000), 1)
# Write the text of color and type onto the image
frame = cv2.putText(frame,
"Detected: {}".format(emotion),
(750 * scaler, 50 * scaler), cv2.FONT_HERSHEY_SIMPLEX,
scaler, (0, 0, 0), 3 * scaler)
# Write a frame here for debug purpose
#cv2.imwrite("frame" + str(frame_count) + ".png", frame)
# Write out the frame in the video
out.write(frame)
# frame count
frame_count = frame_count + 1
# Break if escape key pressed
if key_pressed == 27:
break
# Release the out writer, capture, and destroy any OpenCV windows
out.release()
cap.release()
cv2.destroyAllWindows()
def main():
print("Starting")
args = get_args()
infer_on_video(args)
if __name__ == "__main__":
main()
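# example invocation (editor's note; the script name is illustrative, and the model and CPU
# extension paths above are hard-coded, so only the input video and device are overridden):
#     python <this_script.py> -i emotion.mp4 -d CPU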
|
[
"numpy.dstack",
"argparse.ArgumentParser",
"numpy.argmax",
"cv2.VideoWriter",
"numpy.sum",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"inference.Network",
"cv2.resize",
"cv2.waitKey"
] |
[((1115, 1173), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Run inference on an input video"""'], {}), "('Run inference on an input video')\n", (1138, 1173), False, 'import argparse\n'), ((2370, 2410), 'cv2.resize', 'cv2.resize', (['input_image', '(width, height)'], {}), '(input_image, (width, height))\n', (2380, 2410), False, 'import cv2\n'), ((2907, 2939), 'numpy.zeros', 'np.zeros', (['processed_output.shape'], {}), '(processed_output.shape)\n', (2915, 2939), True, 'import numpy as np\n'), ((3004, 3047), 'numpy.dstack', 'np.dstack', (['(empty, processed_output, empty)'], {}), '((empty, processed_output, empty))\n', (3013, 3047), True, 'import numpy as np\n'), ((3461, 3483), 'numpy.sum', 'np.sum', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (3467, 3483), True, 'import numpy as np\n'), ((3866, 3875), 'inference.Network', 'Network', ([], {}), '()\n', (3873, 3875), False, 'from inference import Network\n'), ((3968, 3992), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.i'], {}), '(args.i)\n', (3984, 3992), False, 'import cv2\n'), ((4285, 4348), 'cv2.VideoWriter', 'cv2.VideoWriter', (["('out-' + INPUT_STREAM)", '(33)', '(30)', '(width, height)'], {}), "('out-' + INPUT_STREAM, 33, 30, (width, height))\n", (4300, 4348), False, 'import cv2\n'), ((6152, 6175), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6173, 6175), False, 'import cv2\n'), ((4605, 4620), 'cv2.waitKey', 'cv2.waitKey', (['(60)'], {}), '(60)\n', (4616, 4620), False, 'import cv2\n'), ((5022, 5038), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (5031, 5038), True, 'import numpy as np\n'), ((5207, 5223), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (5216, 5223), True, 'import numpy as np\n')]
|
# Copyright FMR LLC <<EMAIL>>
# SPDX-License-Identifier: Apache-2.0
"""
The script generates variations for the parameters using configuration file and stores them in respective named tuple
"""
import math
import random
from collections import namedtuple
import numpy as np
# configuration parameters
scene_options = [
"aspect_ratio",
"color_mode",
"exposure_value",
"contrast",
"crop_min_x",
"crop_max_x",
"crop_min_y",
"crop_max_y",
"resolution_x",
"resolution_y",
"resolution_percentage",
"render_engine",
]
Scene_tuple = namedtuple(
"SceneParameters", scene_options, defaults=[None] * len(scene_options)
)
light_options = [
"light_energies",
"light_x_location",
"light_y_location",
"light_z_location",
"color_hue",
"color_saturation",
"color_value",
"light_type",
]
Light_tuple = namedtuple(
"LightParameters", light_options, defaults=[None] * len(light_options)
)
camera_options = [
"camera_x_location",
"camera_y_location",
"camera_z_location",
"camera_x_rotation",
"camera_y_rotation",
"camera_z_rotation",
"camera_focal_length",
]
Camera_tuple = namedtuple(
"CameraParameters", camera_options, defaults=[None] * len(camera_options)
)
image_options = [
"image_x_scale",
"image_y_scale",
"image_z_scale",
"image_x_rotation",
"image_y_rotation",
"image_z_rotation",
"image_bbs",
"background_image_name",
"image_name",
]
Image_tuple = namedtuple(
"ImageParameters", image_options, defaults=[None] * len(image_options)
)
other_options = ["render_device_type"]
other_parameter_tuple = namedtuple(
"OtherBlenderParameters", other_options, defaults=[None] * len(other_options)
)
def random_range(configs, variable, variations):
"""
    Generate random values for the variable on a continuous scale
"""
random_values = np.random.uniform(
configs[variable]["range"][0], configs[variable]["range"][1], variations
)
return random_values
def random_categorical_values(configs, variable, variations):
"""
    Generate random values for the categorical variable (e.g. aspect ratio)
    If weight values are not given, the function assigns equal weight to all the values
"""
try:
weight_values = configs[variable]["weights"]
    except KeyError:
weight_values = [1.0] * len(configs[variable]["range"])
random_values = random.choices(
configs[variable]["range"], k=variations, weights=weight_values
)
return random_values
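# --- Illustrative sketch (assumptions, not part of the original script) ---
# The two samplers above only rely on each config entry exposing a "range" key and,
# for categorical variables, an optional "weights" key. A hypothetical config entry
# and its use could look like this:
# example_configs = {
#     "exposure": {"range": [-1.0, 1.0]},                                        # continuous variable
#     "aspect_ratio": {"range": [1.0, 1.33, 1.78], "weights": [0.2, 0.3, 0.5]},  # categorical variable
# }
# random_range(example_configs, "exposure", 5)                   # 5 uniform draws in [-1, 1]
# random_categorical_values(example_configs, "aspect_ratio", 5)  # 5 weighted choices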
def get_image_parameters(
n_variations: int, image_configs: dict, image_files: list, bg_list: list
):
"""
    Generate image variations based on random values in the config file and create a named tuple for each variation
"""
# sampling background images from background image files
if len(bg_list) == 0:
bg_images = [""] * len(image_files)
else:
bg_images = [random.choice(bg_list) for i in range(len(image_files))]
image_parameters_list = [Image_tuple for i in range(n_variations)]
image_scale_x_values = random_range(image_configs, "image_x_scale", n_variations)
image_scale_y_values = random_range(image_configs, "image_y_scale", n_variations)
image_scale_z_values = random_range(image_configs, "image_z_scale", n_variations)
image_rotation_x_values = random_range(
image_configs, "image_x_rotation", n_variations
)
image_rotation_y_values = random_range(
image_configs, "image_y_rotation", n_variations
)
image_rotation_z_values = random_range(
image_configs, "image_z_rotation", n_variations
)
for index, _ in enumerate(image_parameters_list):
image_parameters_list[index] = image_parameters_list[index](
image_x_scale=image_scale_x_values[index],
image_y_scale=image_scale_y_values[index],
image_z_scale=image_scale_z_values[index],
image_x_rotation=image_rotation_x_values[index],
image_y_rotation=image_rotation_y_values[index],
image_z_rotation=image_rotation_z_values[index],
image_bbs=[],
image_name=image_files[index],
background_image_name=bg_images[index],
)
return image_parameters_list
def get_other_blender_parameters(other_parameters: dict):
    """
    Wrap the remaining Blender settings (e.g. render device type) in a named tuple
    """
    other_parameter_tuple_value = other_parameter_tuple(
render_device_type=other_parameters["render_device_type"]
)
return other_parameter_tuple_value
def get_camera_parameters(n_variations: int, camera_configs: dict):
"""
    Generate camera variations based on random values in the config file and create a named tuple for each variation
"""
camera_parameters_list = [Camera_tuple for i in range(n_variations)]
camera_focal_length_values = random_range(
camera_configs, "camera_focal_length", n_variations
)
camera_x_location_values = random_range(
camera_configs, "camera_x_location", n_variations
)
camera_y_location_values = random_range(
camera_configs, "camera_y_location", n_variations
)
camera_z_location_values = random_range(
camera_configs, "camera_z_location", n_variations
)
camera_x_rotation_values = random_range(
camera_configs, "camera_x_rotation", n_variations
)
camera_y_rotation_values = random_range(
camera_configs, "camera_y_rotation", n_variations
)
camera_z_rotation_values = random_range(
camera_configs, "camera_z_rotation", n_variations
)
for index, _ in enumerate(camera_parameters_list):
camera_parameters_list[index] = camera_parameters_list[index](
camera_x_location=camera_x_location_values[index],
camera_y_location=camera_y_location_values[index],
camera_z_location=camera_z_location_values[index],
camera_focal_length=camera_focal_length_values[index],
camera_x_rotation=math.radians(camera_x_rotation_values[index]),
camera_y_rotation=math.radians(camera_y_rotation_values[index]),
camera_z_rotation=math.radians(camera_z_rotation_values[index]),
)
return camera_parameters_list
def get_light_parameters(n_variations: int, light_configs: dict):
"""
    Generate light variations based on random values in the config file and create a named tuple for each variation
"""
light_parameters_list = [Light_tuple for i in range(n_variations)]
light_energies = random_range(light_configs, "light_energy", n_variations)
light_type_values = random_categorical_values(
light_configs, "light_types", n_variations
)
hue = random_range(light_configs, "hue", n_variations)
saturation = random_range(light_configs, "saturation", n_variations)
value = random_range(light_configs, "value", n_variations)
light_x_values = random_range(light_configs, "light_x_location", n_variations)
    light_y_values = random_range(light_configs, "light_y_location", n_variations)
    light_z_values = random_range(light_configs, "light_z_location", n_variations)
for index, _ in enumerate(light_parameters_list):
light_parameters_list[index] = light_parameters_list[index](
light_energies=light_energies[index],
light_x_location=light_x_values[index],
light_y_location=light_y_values[index],
light_z_location=light_z_values[index],
color_hue=hue[index],
color_saturation=saturation[index],
color_value=value[index],
light_type=light_type_values[index],
)
return light_parameters_list
def get_scene_parameters(n_variations: int, scene_config: dict):
"""
    Generate scene variations based on random values in the config file and create a named tuple for each variation
"""
scene_parameters_list = [Scene_tuple for i in range(n_variations)]
aspect_ratio_values = random_categorical_values(
scene_config, "aspect_ratio", n_variations
)
color_mode_values = random_categorical_values(
scene_config, "color_modes", n_variations
)
resolution_values = random_categorical_values(
scene_config, "resolution", n_variations
)
contrast_values = random_categorical_values(scene_config, "contrast", n_variations)
render_engine_values = random_categorical_values(
scene_config, "render_engine", n_variations
)
exposure_value_values = random_range(scene_config, "exposure", n_variations)
crop_min_x_values = random_range(scene_config, "crop_min_x", n_variations)
crop_max_x_values = random_range(scene_config, "crop_max_x", n_variations)
crop_min_y_values = random_range(scene_config, "crop_min_y", n_variations)
crop_max_y_values = random_range(scene_config, "crop_max_y", n_variations)
resolution_percentage_values = random_range(
scene_config, "resolution_percentage", n_variations
)
for index, _ in enumerate(scene_parameters_list):
scene_parameters_list[index] = scene_parameters_list[index](
aspect_ratio=aspect_ratio_values[index],
color_mode=color_mode_values[index],
exposure_value=exposure_value_values[index],
contrast=contrast_values[index],
crop_min_x=crop_min_x_values[index],
crop_max_x=crop_max_x_values[index],
crop_min_y=crop_min_y_values[index],
crop_max_y=crop_max_y_values[index],
resolution_x=resolution_values[index][0],
resolution_y=resolution_values[index][1],
resolution_percentage=resolution_percentage_values[index],
render_engine=render_engine_values[index],
)
return scene_parameters_list
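# --- Illustrative end-to-end sketch (hypothetical config dicts, not part of the original script) ---
# Assuming config dicts with the keys referenced by the generators above, one render
# variation per index could be assembled like this:
# n = 10
# scenes = get_scene_parameters(n, scene_config)
# lights = get_light_parameters(n, light_configs)
# cameras = get_camera_parameters(n, camera_configs)
# images = get_image_parameters(n, image_configs, image_files, bg_list)
# variation_0 = (scenes[0], lights[0], cameras[0], images[0])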
|
[
"random.choices",
"random.choice",
"math.radians",
"numpy.random.uniform"
] |
[((1896, 1992), 'numpy.random.uniform', 'np.random.uniform', (["configs[variable]['range'][0]", "configs[variable]['range'][1]", 'variations'], {}), "(configs[variable]['range'][0], configs[variable]['range']\n [1], variations)\n", (1913, 1992), True, 'import numpy as np\n'), ((2421, 2500), 'random.choices', 'random.choices', (["configs[variable]['range']"], {'k': 'variations', 'weights': 'weight_values'}), "(configs[variable]['range'], k=variations, weights=weight_values)\n", (2435, 2500), False, 'import random\n'), ((2939, 2961), 'random.choice', 'random.choice', (['bg_list'], {}), '(bg_list)\n', (2952, 2961), False, 'import random\n'), ((5959, 6004), 'math.radians', 'math.radians', (['camera_x_rotation_values[index]'], {}), '(camera_x_rotation_values[index])\n', (5971, 6004), False, 'import math\n'), ((6036, 6081), 'math.radians', 'math.radians', (['camera_y_rotation_values[index]'], {}), '(camera_y_rotation_values[index])\n', (6048, 6081), False, 'import math\n'), ((6113, 6158), 'math.radians', 'math.radians', (['camera_z_rotation_values[index]'], {}), '(camera_z_rotation_values[index])\n', (6125, 6158), False, 'import math\n')]
|
import torch
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms
# ================================================================== #
#                            Table of Contents                       #
# ================================================================== #
# 1. Basic autograd example 1               (Line 25 to 39)
# 2. Basic autograd example 2               (Line 46 to 83)
# 3. Loading data from numpy                (Line 90 to 97)
# 4. Input pipeline                         (Line 104 to 129)
# 5. Input pipeline for custom dataset      (Line 136 to 156)
# 6. Pretrained model                       (Line 163 to 176)
# 7. Save and load model                    (Line 183 to 189)
# ================================================================== #
#                       1. Basic autograd example 1                  #
# ================================================================== #
# Create tensors.
x = torch.tensor(1., requires_grad=True)
w = torch.tensor(2., requires_grad=True)
b = torch.tensor(3., requires_grad=True)
# Build a computational graph.
y = w * x + b # y = 2 * x + 3
# Compute gradients.
y.backward()
# Print out the gradients.
print(x.grad)
print(w.grad)
print(b.grad)
'''
x.grad = tensor(2.)
w.grad = tensor(1.)
b.grad = tensor(1.)
'''
# ================================================================== #
#                       2. Basic autograd example 2                  #
# ================================================================== #
# Create random tensors of shape (10, 3) and (10, 2).
x = torch.randn(10, 3)
y = torch.randn(10, 2)
# Build a fully connected layer.
linear = nn.Linear(3, 2)
print('w: ', linear.weight)
print('b: ', linear.bias)
'''
w: Parameter containing:
tensor([[-0.0707, 0.2341, 0.4827],
[-0.5092, -0.1537, 0.2582]], requires_grad=True)
b: Parameter containing:
tensor([ 0.5335, -0.2167], requires_grad=True)
'''
# Build the loss function and optimizer.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
# Forward pass.
pred = linear(x)
# Compute the loss.
loss = criterion(pred, y)
print('loss: ', loss.item())
'''
loss: 1.831163763999939
'''
# Backward pass.
loss.backward()
# Print out the gradients.
print('dL/dw: ', linear.weight.grad)
print('dL/db: ', linear.bias.grad)
'''
dL/dw: tensor([[ 0.5340, 0.4947, 0.1947],
[-0.1455, 0.5270, 0.6877]])
dL/db: tensor([ 0.5586, -0.8556])
'''
# 1-step gradient descent.
optimizer.step()
# You can also perform gradient descent at the low level.
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)
# Print out the loss after 1-step gradient descent.
pred = linear(x)
loss = criterion(pred, y)
print('loss after 1 step optimization: ', loss.item())
'''
loss after 1 step optimization:  1.631872534751892
'''
# ================================================================== #
#                        3. Loading data from numpy                  #
# ================================================================== #
# Create a numpy array.
x = np.array([[1, 2], [3, 4]])
# Convert the numpy array to a torch tensor.
y = torch.from_numpy(x)
# Convert the torch tensor to a numpy array.
z = y.numpy()
# ================================================================== #
#                            4. Input pipeline                       #
# ================================================================== #
# Download and construct the CIFAR-10 dataset.
train_dataset = torchvision.datasets.CIFAR10(root='./data/',
train=True,
transform=transforms.ToTensor(),
download=True)
# Fetch one data pair (read data from disk).
image, label = train_dataset[0]
print(image.size())
print(label)
'''
torch.Size([3, 32, 32])
6
'''
# Data loader (this provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=64,
shuffle=True)
# When iteration starts, queues and threads start to load data.
data_iter = iter(train_loader)
# Mini-batch images and labels.
images, labels = next(data_iter)
# Actual usage of the data loader is as below.
for images, labels in train_loader:
    # Training code should be written here.
pass
# ================================================================== #
#                  5. Input pipeline for custom dataset              #
# ================================================================== #
# Build a custom dataset.
class CustomDataset(torch.utils.data.Dataset):
def __init__(self):
# TODO
        # 1. Initialize file paths or a list of file names.
pass
def __getitem__(self, index):
# TODO
        # 1. Read one data sample from file (e.g. numpy.fromfile, PIL.Image.open).
        # 2. Preprocess the data (e.g. torchvision.Transform).
        # 3. Return a data pair (e.g. image and label).
pass
def __len__(self):
        # Return the total size of the dataset.
return 0
# Use the prebuilt data loader.
# custom_dataset = CustomDataset()
# train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
# batch_size=64,
# shuffle=True)
# ================================================================== #
#                           6. Pretrained model                      #
# ================================================================== #
# Download and load the pretrained ResNet-18 model.
resnet = torchvision.models.resnet18(pretrained=True)
# If you want to finetune only the top layer of the model, set as below.
for param in resnet.parameters():
param.requires_grad = False
# Replace the top layer for finetuning.
resnet.fc = nn.Linear(resnet.fc.in_features, 100)
# Forward pass.
images = torch.randn(64, 3, 224, 224)
outputs = resnet(images)
print(outputs.size())
'''
64x3x224x224->64x100
torch.Size([64, 100])
'''
# ================================================================== #
#                        7. Save and load the model                  #
# ================================================================== #
# Save and load the entire model.
torch.save(resnet, 'model.ckpt')
model = torch.load('model.ckpt')
# Save and load only the model parameters (recommended).
torch.save(resnet.state_dict(), 'params.ckpt')
resnet.load_state_dict(torch.load('params.ckpt'))
|
[
"torch.load",
"torchvision.models.resnet18",
"torch.from_numpy",
"numpy.array",
"torch.tensor",
"torch.nn.MSELoss",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.save",
"torchvision.transforms.ToTensor",
"torch.randn"
] |
[((953, 990), 'torch.tensor', 'torch.tensor', (['(1.0)'], {'requires_grad': '(True)'}), '(1.0, requires_grad=True)\n', (965, 990), False, 'import torch\n'), ((994, 1031), 'torch.tensor', 'torch.tensor', (['(2.0)'], {'requires_grad': '(True)'}), '(2.0, requires_grad=True)\n', (1006, 1031), False, 'import torch\n'), ((1035, 1072), 'torch.tensor', 'torch.tensor', (['(3.0)'], {'requires_grad': '(True)'}), '(3.0, requires_grad=True)\n', (1047, 1072), False, 'import torch\n'), ((1490, 1508), 'torch.randn', 'torch.randn', (['(10)', '(3)'], {}), '(10, 3)\n', (1501, 1508), False, 'import torch\n'), ((1513, 1531), 'torch.randn', 'torch.randn', (['(10)', '(2)'], {}), '(10, 2)\n', (1524, 1531), False, 'import torch\n'), ((1553, 1568), 'torch.nn.Linear', 'nn.Linear', (['(3)', '(2)'], {}), '(3, 2)\n', (1562, 1568), True, 'import torch.nn as nn\n'), ((1852, 1864), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1862, 1864), True, 'import torch.nn as nn\n'), ((2833, 2859), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (2841, 2859), True, 'import numpy as np\n'), ((2881, 2900), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2897, 2900), False, 'import torch\n'), ((3575, 3654), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': '(64)', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=64, shuffle=True)\n', (3602, 3654), False, 'import torch\n'), ((5011, 5055), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (5038, 5055), False, 'import torchvision\n'), ((5172, 5209), 'torch.nn.Linear', 'nn.Linear', (['resnet.fc.in_features', '(100)'], {}), '(resnet.fc.in_features, 100)\n', (5181, 5209), True, 'import torch.nn as nn\n'), ((5227, 5255), 'torch.randn', 'torch.randn', (['(64)', '(3)', '(224)', '(224)'], {}), '(64, 3, 224, 224)\n', (5238, 5255), False, 'import torch\n'), ((5575, 5607), 'torch.save', 'torch.save', (['resnet', '"""model.ckpt"""'], {}), "(resnet, 'model.ckpt')\n", (5585, 5607), False, 'import torch\n'), ((5616, 5640), 'torch.load', 'torch.load', (['"""model.ckpt"""'], {}), "('model.ckpt')\n", (5626, 5640), False, 'import torch\n'), ((5729, 5754), 'torch.load', 'torch.load', (['"""params.ckpt"""'], {}), "('params.ckpt')\n", (5739, 5754), False, 'import torch\n'), ((3338, 3359), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3357, 3359), True, 'import torchvision.transforms as transforms\n')]
|
import astropy.units as u
import numpy as np
from ..utils import cone_solid_angle
#: Unit of the background rate IRF
BACKGROUND_UNIT = u.Unit('s-1 TeV-1 sr-1')
def background_2d(events, reco_energy_bins, fov_offset_bins, t_obs):
"""
Calculate background rates in radially symmetric bins in the field of view.
GADF documentation here:
https://gamma-astro-data-formats.readthedocs.io/en/latest/irfs/full_enclosure/bkg/index.html#bkg-2d
Parameters
----------
events: astropy.table.QTable
DL2 events table of the selected background events.
Needed columns for this function: `reco_source_fov_offset`, `reco_energy`, `weight`
    reco_energy_bins: astropy.units.Quantity[energy]
The bins in reconstructed energy to be used for the IRF
fov_offset_bins: astropy.units.Quantity[angle]
The bins in the field of view offset to be used for the IRF
t_obs: astropy.units.Quantity[time]
Observation time. This must match with how the individual event
weights are calculated.
Returns
-------
bg_rate: astropy.units.Quantity
The background rate as particles per energy, time and solid angle
in the specified bins.
Shape: (len(reco_energy_bins) - 1, len(fov_offset_bins) - 1)
"""
hist, _, _ = np.histogram2d(
events["reco_energy"].to_value(u.TeV),
events["reco_source_fov_offset"].to_value(u.deg),
bins=[
reco_energy_bins.to_value(u.TeV),
fov_offset_bins.to_value(u.deg),
],
weights=events['weight'],
)
# divide all energy bins by their width
# hist has shape (n_energy, n_fov_offset) so we need to transpose and then back
bin_width_energy = np.diff(reco_energy_bins)
per_energy = (hist.T / bin_width_energy).T
# divide by solid angle in each fov bin and the observation time
bin_solid_angle = np.diff(cone_solid_angle(fov_offset_bins))
bg_rate = per_energy / t_obs / bin_solid_angle
return bg_rate.to(BACKGROUND_UNIT)
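# --- Illustrative usage sketch (hypothetical bin edges, not part of the original module) ---
# Assuming `events` is an astropy QTable with the columns listed in the docstring,
# a call could look like this (`u` and `np` are already imported above):
# reco_energy_bins = np.geomspace(0.01, 100, 21) * u.TeV
# fov_offset_bins = np.linspace(0, 3, 4) * u.deg
# rate = background_2d(events, reco_energy_bins, fov_offset_bins, t_obs=50 * u.hour)
# # rate has shape (20, 3) and unit BACKGROUND_UNIT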
|
[
"numpy.diff",
"astropy.units.Unit"
] |
[((137, 161), 'astropy.units.Unit', 'u.Unit', (['"""s-1 TeV-1 sr-1"""'], {}), "('s-1 TeV-1 sr-1')\n", (143, 161), True, 'import astropy.units as u\n'), ((1738, 1763), 'numpy.diff', 'np.diff', (['reco_energy_bins'], {}), '(reco_energy_bins)\n', (1745, 1763), True, 'import numpy as np\n')]
|
#!/usr/bin/env pythonw
import numpy as np
import matplotlib.pyplot as plt
def flip_coins(flips = 1000000, bins=100):
# Uninformative prior
prior = np.ones(bins, dtype='float')/bins
likelihood_heads = np.arange(bins)/float(bins)
likelihood_tails = 1-likelihood_heads
    coin_flips = np.random.choice(a=[True, False], size=flips, p=[0.75, 0.25])
    for coin in coin_flips:
if coin: # Heads
posterior = prior * likelihood_heads
else: # Tails
posterior = prior * likelihood_tails
# Normalize
posterior /= np.sum(posterior)
# The posterior is now the new prior
prior = posterior
return posterior
plt.plot(np.arange(100)/float(100), flip_coins(10))
plt.plot(np.arange(100)/float(100), flip_coins(100))
plt.plot(np.arange(100)/float(100), flip_coins(1000))
plt.plot(np.arange(100)/float(100), flip_coins(10000))
plt.plot(np.arange(100)/float(100), flip_coins(100000))
plt.legend([10, 100, 1000, 10000, 100000])
plt.show()
|
[
"numpy.ones",
"numpy.random.choice",
"matplotlib.pyplot.legend",
"numpy.sum",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((954, 996), 'matplotlib.pyplot.legend', 'plt.legend', (['[10, 100, 1000, 10000, 100000]'], {}), '([10, 100, 1000, 10000, 100000])\n', (964, 996), True, 'import matplotlib.pyplot as plt\n'), ((997, 1007), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1005, 1007), True, 'import matplotlib.pyplot as plt\n'), ((296, 357), 'numpy.random.choice', 'np.random.choice', ([], {'a': '[True, False]', 'size': 'flips', 'p': '[0.75, 0.25]'}), '(a=[True, False], size=flips, p=[0.75, 0.25])\n', (312, 357), True, 'import numpy as np\n'), ((157, 185), 'numpy.ones', 'np.ones', (['bins'], {'dtype': '"""float"""'}), "(bins, dtype='float')\n", (164, 185), True, 'import numpy as np\n'), ((214, 229), 'numpy.arange', 'np.arange', (['bins'], {}), '(bins)\n', (223, 229), True, 'import numpy as np\n'), ((571, 588), 'numpy.sum', 'np.sum', (['posterior'], {}), '(posterior)\n', (577, 588), True, 'import numpy as np\n'), ((693, 707), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (702, 707), True, 'import numpy as np\n'), ((745, 759), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (754, 759), True, 'import numpy as np\n'), ((798, 812), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (807, 812), True, 'import numpy as np\n'), ((852, 866), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (861, 866), True, 'import numpy as np\n'), ((907, 921), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (916, 921), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
def batch_df2batch(df, evaluate_ids=(), n_obs=-1, tform=np.eye(3), is_vehicles_evaluated=False):
"""
Convert dataframe to SGAN input
    :param df: dataframe with columns 't', 'agent_id', 'agent_type', 'x', 'y'
    :param evaluate_ids: ids of the agents to evaluate
    :param n_obs: number of timesteps observed
    :param tform: (3, 3) transformation matrix applied to the input positions
    :param is_vehicles_evaluated: if False, only pedestrians (agent_type == 0) are kept
    :return: (abs_xy, rel_xy, seq_start_end) in SGAN input format
"""
if is_vehicles_evaluated:
agent_ids = np.unique(df['agent_id'])
else:
agent_ids = np.unique(df[df['agent_type'] == 0]['agent_id']) # peds only
# input transform
df = tform_df(df, tform)
# assume min t is the start
t_inds = np.unique(np.sort(df['t']))
t0 = t_inds[0]
skip = t_inds[1] - t_inds[0]
abs_xy = np.zeros((n_obs, agent_ids.size, 2), dtype=np.float32)
rel_xy = np.zeros_like(abs_xy)
for i, agent_id in enumerate(agent_ids):
for step, t in enumerate(range(t0, t0+n_obs*skip, skip)):
xy = df[(df['agent_id'] == agent_id) & (df['t'] == t)][['x', 'y']]
if xy.size > 0:
abs_xy[step, i, :] = xy.values[0]
else:
abs_xy[step, i, :] = np.nan
# for relative, 1st entry is 0,0, rest are the differences
rel_xy[1:, i, :] = abs_xy[1:, i, :] - abs_xy[:-1, i, :]
# handle observations w/zeros
abs_xy[np.isnan(abs_xy)] = 0.
rel_xy[np.isnan(rel_xy)] = 0.
seq_start_end = [(0, agent_ids.size)]
return abs_xy, rel_xy, seq_start_end
def raw_pred2df(pred_list, evaluate_ids, evaluate_inds, tform=np.eye(3)):
"""
:param pred_list: [i] = n_preds, n_peds, 2 | list of sampled predictions
- n_preds = number of timesteps predicted into future
:param evaluate_ids: list of agent ids
:param evaluate_inds: [i] = index of agent_id=evaluate_ids[i] in prediction
:param tform: (3,3) | transformation matrix
:return:
"""
merged_peds = np.stack(pred_list, axis=-1) # (n_preds, n_peds, 2, n_samples)
n_preds = merged_peds.shape[0]
n_samples = merged_peds.shape[3]
cols = ['t', 'agent_id', 'x', 'y', 'sample_id', 'p']
INT_COLUMNS = [cols[i] for i in [0, 1, -2]]
data = []
for ind, id in zip(evaluate_inds, evaluate_ids):
for t in range(n_preds):
z = np.zeros((n_samples, 1))
agent_t_info = np.hstack([
t + z,
id + z,
merged_peds[t, ind, :, :].T,
np.arange(n_samples).reshape((n_samples, 1)),
1./n_samples + z,
])
data.append(agent_t_info)
df = pd.DataFrame(np.vstack(data), columns=cols)
df[['x', 'y']] = tform_2d_mat(df[['x', 'y']].values, tform)
    df[INT_COLUMNS] = df[INT_COLUMNS].astype(int)
return df
def tform_df(df, tform):
xy = df[['x', 'y']]
ret_df = df.copy()
ret_df[['x', 'y']] = tform_2d_mat(xy, tform)
return ret_df
def tform_2d_mat(xy, tform):
xy1 = np.hstack([xy, np.ones((xy.shape[0], 1))])
xy1_p = (tform.dot(xy1.T)).T
return xy1_p[:, :2]
|
[
"numpy.eye",
"numpy.unique",
"numpy.ones",
"numpy.sort",
"numpy.stack",
"numpy.zeros",
"numpy.isnan",
"numpy.vstack",
"numpy.zeros_like",
"numpy.arange"
] |
[((97, 106), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (103, 106), True, 'import numpy as np\n'), ((707, 761), 'numpy.zeros', 'np.zeros', (['(n_obs, agent_ids.size, 2)'], {'dtype': 'np.float32'}), '((n_obs, agent_ids.size, 2), dtype=np.float32)\n', (715, 761), True, 'import numpy as np\n'), ((775, 796), 'numpy.zeros_like', 'np.zeros_like', (['abs_xy'], {}), '(abs_xy)\n', (788, 796), True, 'import numpy as np\n'), ((1507, 1516), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1513, 1516), True, 'import numpy as np\n'), ((1882, 1910), 'numpy.stack', 'np.stack', (['pred_list'], {'axis': '(-1)'}), '(pred_list, axis=-1)\n', (1890, 1910), True, 'import numpy as np\n'), ((397, 422), 'numpy.unique', 'np.unique', (["df['agent_id']"], {}), "(df['agent_id'])\n", (406, 422), True, 'import numpy as np\n'), ((453, 501), 'numpy.unique', 'np.unique', (["df[df['agent_type'] == 0]['agent_id']"], {}), "(df[df['agent_type'] == 0]['agent_id'])\n", (462, 501), True, 'import numpy as np\n'), ((623, 639), 'numpy.sort', 'np.sort', (["df['t']"], {}), "(df['t'])\n", (630, 639), True, 'import numpy as np\n'), ((1303, 1319), 'numpy.isnan', 'np.isnan', (['abs_xy'], {}), '(abs_xy)\n', (1311, 1319), True, 'import numpy as np\n'), ((1337, 1353), 'numpy.isnan', 'np.isnan', (['rel_xy'], {}), '(rel_xy)\n', (1345, 1353), True, 'import numpy as np\n'), ((2566, 2581), 'numpy.vstack', 'np.vstack', (['data'], {}), '(data)\n', (2575, 2581), True, 'import numpy as np\n'), ((2239, 2263), 'numpy.zeros', 'np.zeros', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (2247, 2263), True, 'import numpy as np\n'), ((2925, 2950), 'numpy.ones', 'np.ones', (['(xy.shape[0], 1)'], {}), '((xy.shape[0], 1))\n', (2932, 2950), True, 'import numpy as np\n'), ((2411, 2431), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (2420, 2431), True, 'import numpy as np\n')]
|
import numpy as npy
def convert(num):
    # Scale the value by 1024, round to an integer and return it as a
    # 4-digit hexadecimal string (negative values are offset by 65535).
    if num < 0:
# num = -num
num *= 1024
# num += 32768
num = int(num - 0.5)
num = 65535 + num
n_str = str(hex(num))[2:]
if len(n_str) == 1:
n_str = 'fff' + n_str
elif len(n_str) == 2:
n_str = 'ff' + n_str
elif len(n_str) == 3:
n_str = 'f' + n_str
return n_str
else:
num *= 1024
num = int(num + 0.5)
n_str = str(hex(num))[2:]
if len(n_str) == 1:
n_str = '000' + n_str
elif len(n_str) == 2:
n_str = '00' + n_str
elif len(n_str) == 3:
n_str = '0' + n_str
return n_str
file = open('16bit.coe', 'w')
file_str = "memory_initialization_radix=16;\nmemory_initialization_vector=\n"
# fc1 params
fc1_w = npy.load('dense_kernel_0.npy')
for r in range(128):
cnt_128 = 0
unit_128_8 = ''
for c in range(1024):
unit_128_8 += convert(fc1_w[c][r])
if cnt_128 < 127:
cnt_128 += 1
else:
cnt_128 = 0
file_str += unit_128_8 + ',\n'
unit_128_8 = ''
fc1_b = npy.load('dense_bias_0.npy')
unit_128_8 = ''
for i in range(128):
unit_128_8 += convert(fc1_b[i])
file_str += unit_128_8 + ',\n'
# fc2 params
fc2_w = npy.load('dense_1_kernel_0.npy')
for r in range(128):
unit_128_8 = ''
for c in range(128):
unit_128_8 += convert(fc2_w[c][r])
unit_128_8 += ',\n'
file_str += unit_128_8
fc2_b = npy.load('dense_1_bias_0.npy')
unit_128_8 = ''
for i in range(128):
unit_128_8 += convert(fc2_b[i])
file_str += unit_128_8 + ',\n'
# fc3 params
fc3_w = npy.load('dense_2_kernel_0.npy')
for r in range(10):
unit_128_8 = ''
for c in range(128):
unit_128_8 += convert(fc3_w[c][r])
unit_128_8 += ',\n'
file_str += unit_128_8
fc3_b = npy.load('dense_2_bias_0.npy')
unit_128_8 = ''
for i in range(128):
if i < 10:
unit_128_8 += convert(fc3_b[i])
else:
unit_128_8 += '0000'
file_str += unit_128_8 + ';'
file.write(file_str)
|
[
"numpy.load"
] |
[((849, 879), 'numpy.load', 'npy.load', (['"""dense_kernel_0.npy"""'], {}), "('dense_kernel_0.npy')\n", (857, 879), True, 'import numpy as npy\n'), ((1175, 1203), 'numpy.load', 'npy.load', (['"""dense_bias_0.npy"""'], {}), "('dense_bias_0.npy')\n", (1183, 1203), True, 'import numpy as npy\n'), ((1330, 1362), 'numpy.load', 'npy.load', (['"""dense_1_kernel_0.npy"""'], {}), "('dense_1_kernel_0.npy')\n", (1338, 1362), True, 'import numpy as npy\n'), ((1532, 1562), 'numpy.load', 'npy.load', (['"""dense_1_bias_0.npy"""'], {}), "('dense_1_bias_0.npy')\n", (1540, 1562), True, 'import numpy as npy\n'), ((1689, 1721), 'numpy.load', 'npy.load', (['"""dense_2_kernel_0.npy"""'], {}), "('dense_2_kernel_0.npy')\n", (1697, 1721), True, 'import numpy as npy\n'), ((1890, 1920), 'numpy.load', 'npy.load', (['"""dense_2_bias_0.npy"""'], {}), "('dense_2_bias_0.npy')\n", (1898, 1920), True, 'import numpy as npy\n')]
|
import numpy as np
import queue
import cv2
import os
import datetime
SIZE = 32
SCALE = 0.007874015748031496
def quantized_np(array,scale,data_width=8):
quantized_array= np.round(array/scale)
quantized_array = np.maximum(quantized_array, -2**(data_width-1))
quantized_array = np.minimum(quantized_array, 2**(data_width-1)-1)
return quantized_array
def get_x_y_cuts(data, n_lines=1):
w, h = data.shape
visited = set()
q = queue.Queue()
offset = [(-1, -1), (0, -1), (1, -1), (-1, 0),
(1, 0), (-1, 1), (0, 1), (1, 1)]
cuts = []
for y in range(h):
for x in range(w):
x_axis = []
y_axis = []
if data[x][y] < 200 and (x, y) not in visited:
q.put((x, y))
visited.add((x, y))
while not q.empty():
x_p, y_p = q.get()
for x_offset, y_offset in offset:
x_c, y_c = x_p + x_offset, y_p + y_offset
if (x_c, y_c) in visited:
continue
visited.add((x_c, y_c))
try:
if data[x_c][y_c] < 200:
q.put((x_c, y_c))
x_axis.append(x_c)
y_axis.append(y_c)
except:
pass
if x_axis:
min_x, max_x = min(x_axis), max(x_axis)
min_y, max_y = min(y_axis), max(y_axis)
if max_x - min_x > 3 and max_y - min_y > 3:
cuts.append([min_x, max_x + 1, min_y, max_y + 1])
if n_lines == 1:
cuts = sorted(cuts, key=lambda x: x[2])
pr_item = cuts[0]
count = 1
len_cuts = len(cuts)
new_cuts = [cuts[0]]
pr_k = 0
for i in range(1, len_cuts):
pr_item = new_cuts[pr_k]
now_item = cuts[i]
if not (now_item[2] > pr_item[3]):
new_cuts[pr_k][0] = min(pr_item[0], now_item[0])
new_cuts[pr_k][1] = max(pr_item[1], now_item[1])
new_cuts[pr_k][2] = min(pr_item[2], now_item[2])
new_cuts[pr_k][3] = max(pr_item[3], now_item[3])
else:
new_cuts.append(now_item)
pr_k += 1
cuts = new_cuts
return cuts
def get_image_cuts(image, dir=None, is_data=False, n_lines=1, data_needed=False, count=0, QUAN=False):
if is_data:
data = image
else:
data = cv2.imread(image, 2)
cuts = get_x_y_cuts(data, n_lines=n_lines)
image_cuts = None
for i, item in enumerate(cuts):
count += 1
max_dim = max(item[1] - item[0], item[3] - item[2])
new_data = np.ones((int(1.4 * max_dim), int(1.4 * max_dim))) * 255
x_min, x_max = (
max_dim - item[1] + item[0]) // 2, (max_dim - item[1] + item[0]) // 2 + item[1] - item[0]
y_min, y_max = (
max_dim - item[3] + item[2]) // 2, (max_dim - item[3] + item[2]) // 2 + item[3] - item[2]
new_data[int(0.2 * max_dim) + x_min:int(0.2 * max_dim) + x_max, int(0.2 * max_dim) +
y_min:int(0.2 * max_dim) + y_max] = data[item[0]:item[1], item[2]:item[3]]
standard_data = cv2.resize(new_data, (SIZE, SIZE))
if not data_needed:
cv2.imwrite(dir + str(count) + ".jpg", standard_data)
if data_needed:
data_flat = np.reshape(standard_data, (1, SIZE*SIZE))
data_flat = (255 - data_flat) / 255
            if QUAN:
                data_flat = quantized_np(data_flat, SCALE, data_width=8)
if image_cuts is None:
image_cuts = data_flat
else:
image_cuts = np.r_[image_cuts, data_flat]
if data_needed:
return image_cuts
return count
def main(img_dir):
for file in os.listdir(img_dir):
if file.endswith('jpeg'):
path = os.path.join(img_dir, file)
oldtime = datetime.datetime.now()
#count = process.get_image_cuts(path, dir='./dataset/'+file.split('.')[0]+'_cut',count=0)
image_cuts = get_image_cuts(
path, dir = img_dir + file.split('.')[0]+'_cut', count=0, data_needed=True)
newtime = datetime.datetime.now()
            Totaltime = (newtime - oldtime).total_seconds()
            print("image cut time (s): ", Totaltime)
print(np.size(image_cuts, 0))
if __name__ == '__main__':
img_dir = './dataset'
main(img_dir)
|
[
"os.listdir",
"numpy.reshape",
"numpy.minimum",
"cv2.resize",
"numpy.size",
"os.path.join",
"queue.Queue",
"datetime.datetime.now",
"numpy.maximum",
"cv2.imread",
"numpy.round"
] |
[((175, 198), 'numpy.round', 'np.round', (['(array / scale)'], {}), '(array / scale)\n', (183, 198), True, 'import numpy as np\n'), ((219, 270), 'numpy.maximum', 'np.maximum', (['quantized_array', '(-2 ** (data_width - 1))'], {}), '(quantized_array, -2 ** (data_width - 1))\n', (229, 270), True, 'import numpy as np\n'), ((289, 343), 'numpy.minimum', 'np.minimum', (['quantized_array', '(2 ** (data_width - 1) - 1)'], {}), '(quantized_array, 2 ** (data_width - 1) - 1)\n', (299, 343), True, 'import numpy as np\n'), ((451, 464), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (462, 464), False, 'import queue\n'), ((3947, 3966), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (3957, 3966), False, 'import os\n'), ((2533, 2553), 'cv2.imread', 'cv2.imread', (['image', '(2)'], {}), '(image, 2)\n', (2543, 2553), False, 'import cv2\n'), ((3277, 3311), 'cv2.resize', 'cv2.resize', (['new_data', '(SIZE, SIZE)'], {}), '(new_data, (SIZE, SIZE))\n', (3287, 3311), False, 'import cv2\n'), ((3454, 3497), 'numpy.reshape', 'np.reshape', (['standard_data', '(1, SIZE * SIZE)'], {}), '(standard_data, (1, SIZE * SIZE))\n', (3464, 3497), True, 'import numpy as np\n'), ((4021, 4048), 'os.path.join', 'os.path.join', (['img_dir', 'file'], {}), '(img_dir, file)\n', (4033, 4048), False, 'import os\n'), ((4071, 4094), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4092, 4094), False, 'import datetime\n'), ((4352, 4375), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4373, 4375), False, 'import datetime\n'), ((4498, 4520), 'numpy.size', 'np.size', (['image_cuts', '(0)'], {}), '(image_cuts, 0)\n', (4505, 4520), True, 'import numpy as np\n')]
|
import numpy as np
import time
def max_subsequence_sum(sequence):
    # Brute-force O(n^3) maximum contiguous subsequence sum.
    max_sum = 0
for i in range(0, len(sequence)):
for j in range(i, len(sequence)):
this_sum = 0
for k in range(i, j+1):
this_sum += sequence[k]
if this_sum > max_sum:
max_sum = this_sum
return max_sum
seq = np.random.randint(-100000,100000,size=1000)
start = time.time()
result = max_subsequence_sum(seq)
print(time.time() - start)
|
[
"numpy.random.randint",
"time.time"
] |
[((372, 417), 'numpy.random.randint', 'np.random.randint', (['(-100000)', '(100000)'], {'size': '(1000)'}), '(-100000, 100000, size=1000)\n', (389, 417), True, 'import numpy as np\n'), ((424, 435), 'time.time', 'time.time', ([], {}), '()\n', (433, 435), False, 'import time\n'), ((476, 487), 'time.time', 'time.time', ([], {}), '()\n', (485, 487), False, 'import time\n')]
|
import numpy as np
from pyFAI.multi_geometry import MultiGeometry
from pyFAI.ext import splitBBox
def inpaint_saxs(imgs, ais, masks):
"""
Inpaint the 2D image collected by the pixel detector to remove artifacts in later data reduction
Parameters:
-----------
:param imgs: List of 2D image in pixel
:type imgs: ndarray
:param ais: List of AzimuthalIntegrator/Transform generated using pyGIX/pyFAI which contain the information about the experiment geometry
:type ais: list of AzimuthalIntegrator / TransformIntegrator
:param masks: List of 2D image (same dimension as imgs)
:type masks: ndarray
"""
inpaints, mask_inpaints = [], []
for i, (img, ai, mask) in enumerate(zip(imgs, ais, masks)):
inpaints.append(ai.inpainting(img.copy(order='C'),
mask))
mask_inpaints.append(np.logical_not(np.ones_like(mask)))
return inpaints, mask_inpaints
def cake_saxs(inpaints, ais, masks, radial_range=(0, 60), azimuth_range=(-90, 90), npt_rad=250, npt_azim=250):
"""
    Unwrap the stitched image from q-space to 2theta-Chi space (Radial-Azimuthal angle)
Parameters:
-----------
:param inpaints: List of 2D inpainted images
:type inpaints: List of ndarray
:param ais: List of AzimuthalIntegrator/Transform generated using pyGIX/pyFAI which contain the information about the experiment geometry
:type ais: list of AzimuthalIntegrator / TransformIntegrator
:param masks: List of 2D image (same dimension as inpaints)
:type masks: List of ndarray
    :param radial_range: minimum and maximum of the radial (q) range
    :type radial_range: Tuple
    :param azimuth_range: minimum and maximum of the azimuthal range in degree
    :type azimuth_range: Tuple
    :param npt_rad: number of points in the radial range
    :type npt_rad: int
    :param npt_azim: number of points in the azimuthal range
:type npt_azim: int
"""
mg = MultiGeometry(ais,
unit='q_A^-1',
radial_range=radial_range,
azimuth_range=azimuth_range,
wavelength=None,
empty=0.0,
chi_disc=180)
cake, q, chi = mg.integrate2d(lst_data=inpaints,
npt_rad=npt_rad,
npt_azim=npt_azim,
correctSolidAngle=True,
lst_mask=masks)
return cake, q, chi[::-1]
def integrate_rad_saxs(inpaints, ais, masks, radial_range=(0, 40), azimuth_range=(0, 90), npt=2000):
"""
Radial integration of transmission data using the pyFAI multigeometry module
Parameters:
-----------
:param inpaints: List of 2D inpainted images
:type inpaints: List of ndarray
:param ais: List of AzimuthalIntegrator/Transform generated using pyGIX/pyFAI which contain the information about the experiment geometry
:type ais: list of AzimuthalIntegrator / TransformIntegrator
:param masks: List of 2D image (same dimension as inpaints)
:type masks: List of ndarray
    :param radial_range: minimum and maximum of the radial (q) range
    :type radial_range: Tuple
    :param azimuth_range: minimum and maximum of the azimuthal range in degree
    :type azimuth_range: Tuple
    :param npt: number of points in the final 1D profile
:type npt: int
"""
mg = MultiGeometry(ais,
unit='q_A^-1',
radial_range=radial_range,
azimuth_range=azimuth_range,
wavelength=None,
empty=-1,
chi_disc=180)
q, i_rad = mg.integrate1d(lst_data=inpaints,
npt=npt,
correctSolidAngle=True,
lst_mask=masks)
return q, i_rad
def integrate_azi_saxs(cake, q_array, chi_array, radial_range=(0, 10), azimuth_range=(-90, 0)):
"""
    Azimuthal integration of transmission data using a masked array on a caked image (image in q-chi space)
    Parameters:
    -----------
    :param cake: 2D array unwrapped in q-chi space
    :type cake: ndarray of shape (len(chi_array), len(q_array))
    :param q_array: 1D array containing the q value of each radial bin of the cake
    :type q_array: ndarray
    :param chi_array: 1D array containing the chi angle of each azimuthal bin of the cake
    :type chi_array: ndarray
    :param radial_range: minimum and maximum of the radial (q) range over which to average
    :type radial_range: Tuple
    :param azimuth_range: minimum and maximum of the azimuthal range in degree
    :type azimuth_range: Tuple
"""
q_mesh, chi_mesh = np.meshgrid(q_array, chi_array)
cake_mask = np.ma.masked_array(cake)
cake_mask = np.ma.masked_where(q_mesh < radial_range[0], cake_mask)
cake_mask = np.ma.masked_where(q_mesh > radial_range[1], cake_mask)
cake_mask = np.ma.masked_where(azimuth_range[0] > chi_mesh, cake_mask)
cake_mask = np.ma.masked_where(azimuth_range[1] < chi_mesh, cake_mask)
i_azi = cake_mask.mean(axis=1)
return chi_array, i_azi
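# --- Illustrative usage sketch (not part of the original module) ---
# The cake returned by cake_saxs (shape (npt_azim, npt_rad)) together with its 1D q and
# chi axes can be fed straight back in to get an azimuthal profile over a chosen q band:
# cake, q, chi = cake_saxs(inpaints, ais, masks, radial_range=(0, 3), npt_rad=500, npt_azim=360)
# chi_axis, i_azi = integrate_azi_saxs(cake, q, chi, radial_range=(0.1, 0.5), azimuth_range=(-90, 0))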
def integrate_rad_gisaxs(img, q_par, q_per, bins=1000, radial_range=None, azimuth_range=None):
"""
    Radial integration of grazing-incidence data in q-space
    Parameters:
    -----------
    :param q_par: minimum and maximum q_par (in A-1) of the input image
    :type q_par: Tuple
    :param q_per: minimum and maximum of q_per in A-1
    :type q_per: Tuple
    :param bins: number of points in the final 1D profile
    :type bins: int
    :param img: 2D array containing the stitched intensity
    :type img: ndarray
    :param radial_range: q range (in A-1) over which the integration will be done
    :type radial_range: Tuple
    :param azimuth_range: azimuthal (chi) range over which the integration will be done
:type azimuth_range: Tuple
"""
# recalculate the q-range of the input array
q_h = np.linspace(q_par[0], q_par[-1], np.shape(img)[1])
q_v = np.linspace(q_per[0], q_per[-1], np.shape(img)[0])[::-1]
if radial_range is None:
radial_range = (0, q_h.max())
if azimuth_range is None:
azimuth_range = (0, q_v.max())
q_h_te, q_v_te = np.meshgrid(q_h, q_v)
tth_array = np.sqrt(q_h_te ** 2 + q_v_te ** 2)
chi_array = np.rad2deg(np.arctan2(q_h_te, q_v_te))
# Mask the remeshed array
img_mask = np.ma.masked_array(img, mask=img == 0)
img_mask = np.ma.masked_where(img < 1E-5, img_mask)
img_mask = np.ma.masked_where(tth_array < radial_range[0], img_mask)
img_mask = np.ma.masked_where(tth_array > radial_range[1], img_mask)
img_mask = np.ma.masked_where(chi_array < np.min(azimuth_range), img_mask)
img_mask = np.ma.masked_where(chi_array > np.max(azimuth_range), img_mask)
q_rad, i_rad, _, _ = splitBBox.histoBBox1d(img_mask,
pos0=tth_array,
delta_pos0=np.ones_like(img_mask) * (q_par[1] - q_par[0])/np.shape(
img_mask)[1],
pos1=q_v_te,
delta_pos1=np.ones_like(img_mask) * (q_per[1] - q_per[0])/np.shape(
img_mask)[0],
bins=bins,
pos0Range=np.array([np.min(tth_array), np.max(tth_array)]),
pos1Range=q_per,
dummy=None,
delta_dummy=None,
mask=img_mask.mask
)
return q_rad, i_rad
def integrate_qpar(img, q_par, q_per, q_par_range=None, q_per_range=None):
"""
Horizontal integration of a 2D array using masked array
Parameters:
-----------
:param q_par: minimum and maximum q_par (in A-1) of the input image
:type q_par: Tuple
    :param q_per: minimum and maximum of q_per in A-1
    :type q_per: Tuple
    :param img: 2D array containing intensity
    :type img: ndarray
    :param q_par_range: q_par range (in A-1) over which the integration will be done
    :type q_par_range: Tuple
    :param q_per_range: q_per range (in A-1) over which the integration will be done
:type q_per_range: Tuple
"""
if q_par_range is None:
q_par_range = (np.asarray(q_par).min(), np.asarray(q_par).max())
if q_per_range is None:
q_per_range = (np.asarray(q_per).min(), np.asarray(q_per).max())
q_par = np.linspace(q_par[0], q_par[1], np.shape(img)[1])
q_per = np.linspace(q_per[0], q_per[1], np.shape(img)[0])[::-1]
qpar_mesh, qper_mesh = np.meshgrid(q_par, q_per)
img_mask = np.ma.masked_array(img, mask=img == 0)
img_mask = np.ma.masked_where(qper_mesh < q_per_range[0], img_mask)
img_mask = np.ma.masked_where(qper_mesh > q_per_range[1], img_mask)
img_mask = np.ma.masked_where(q_par_range[0] > qpar_mesh, img_mask)
img_mask = np.ma.masked_where(q_par_range[1] < qpar_mesh, img_mask)
i_par = np.mean(img_mask, axis=0)
return q_par, i_par
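# --- Illustrative usage sketch (hypothetical array, not part of the original module) ---
# Horizontal cut through a remeshed GI image spanning q_par in [-2, 2] A-1 and
# q_per in [0, 3] A-1, restricted to a thin q_per band:
# img_2d = np.random.rand(300, 400)
# qp, i_par = integrate_qpar(img_2d, q_par=(-2, 2), q_per=(0, 3),
#                            q_par_range=(-1, 1), q_per_range=(0.1, 0.3))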
def integrate_qper(img, q_par, q_per, q_par_range=None, q_per_range=None):
"""
Vertical integration of a 2D array using masked array
Parameters:
-----------
:param q_par: minimum and maximum q_par (in A-1) of the input image
:type q_par: Tuple
    :param q_per: minimum and maximum of q_per in A-1
    :type q_per: Tuple
    :param img: 2D array containing intensity
    :type img: ndarray
    :param q_par_range: q_par range (in A-1) over which the integration will be done
    :type q_par_range: Tuple
    :param q_per_range: q_per range (in A-1) over which the integration will be done
:type q_per_range: Tuple
"""
if q_par_range is None:
q_par_range = (np.asarray(q_par).min(), np.asarray(q_par).max())
if q_per_range is None:
q_per_range = (np.asarray(q_per).min(), np.asarray(q_per).max())
q_par = np.linspace(q_par[0], q_par[1], np.shape(img)[1])
q_per = np.linspace(q_per[0], q_per[1], np.shape(img)[0])[::-1]
q_par_mesh, q_per_mesh = np.meshgrid(q_par, q_per)
img_mask = np.ma.masked_array(img, mask=img == 0)
img_mask = np.ma.masked_where(q_per_mesh < q_per_range[0], img_mask)
img_mask = np.ma.masked_where(q_per_mesh > q_per_range[1], img_mask)
img_mask = np.ma.masked_where(q_par_mesh < q_par_range[0], img_mask)
img_mask = np.ma.masked_where(q_par_mesh > q_par_range[1], img_mask)
i_per = np.mean(img_mask, axis=1)
return q_per, i_per
# TODO: Implement azimuthal integration for GI
def cake_gisaxs(img, q_par, q_per, bins=None, radial_range=None, azimuth_range=None):
"""
Unwrap the stitched image from q-space to 2theta-Chi space (Radial-Azimuthal angle)
Parameters:
-----------
    :param img: 2D image to unwrap
    :type img: ndarray
    :param q_par: minimum and maximum q_par (in A-1) of the input image
    :type q_par: Tuple
    :param q_per: minimum and maximum of q_per in A-1
    :type q_per: Tuple
    :param bins: number of points in both the x and y direction of the final cake
    :type bins: Tuple
    :param radial_range: minimum and maximum of the radial (q) range
    :type radial_range: Tuple
    :param azimuth_range: minimum and maximum of the azimuthal range in degree
:type azimuth_range: Tuple
"""
if bins is None:
bins = tuple(reversed(img.shape))
if radial_range is None:
radial_range = (0, q_par[-1])
if azimuth_range is None:
azimuth_range = (-180, 180)
azimuth_range = np.deg2rad(azimuth_range)
# recalculate the q-range of the input array
q_h = np.linspace(q_par[0], q_par[-1], bins[0])
q_v = np.linspace(q_per[0], q_per[-1], bins[1])[::-1]
q_h_te, q_v_te = np.meshgrid(q_h, q_v)
tth_array = np.sqrt(q_h_te**2 + q_v_te**2)
chi_array = -np.arctan2(q_h_te, q_v_te)
# Mask the remeshed array
img_mask = np.ma.masked_array(img, mask=img == 0)
img_mask = np.ma.masked_where(tth_array < radial_range[0], img_mask)
img_mask = np.ma.masked_where(tth_array > radial_range[1], img_mask)
img_mask = np.ma.masked_where(chi_array < np.min(azimuth_range), img_mask)
img_mask = np.ma.masked_where(chi_array > np.max(azimuth_range), img_mask)
cake, q, chi, _, _ = splitBBox.histoBBox2d(weights=img_mask,
pos0=tth_array,
delta_pos0=np.ones_like(img_mask) * (q_par[1] - q_par[0])/bins[1],
pos1=chi_array,
delta_pos1=np.ones_like(img_mask) * (q_per[1] - q_per[0])/bins[1],
bins=bins,
pos0Range=np.array([np.min(radial_range), np.max(radial_range)]),
pos1Range=np.array([np.min(azimuth_range), np.max(azimuth_range)]),
dummy=None,
delta_dummy=None,
mask=img_mask.mask)
return cake, q, np.rad2deg(chi)[::-1]
|
[
"numpy.mean",
"numpy.shape",
"numpy.ones_like",
"numpy.sqrt",
"numpy.asarray",
"numpy.ma.masked_where",
"numpy.max",
"numpy.deg2rad",
"numpy.linspace",
"numpy.arctan2",
"numpy.min",
"numpy.meshgrid",
"pyFAI.multi_geometry.MultiGeometry",
"numpy.rad2deg",
"numpy.ma.masked_array"
] |
[((1972, 2108), 'pyFAI.multi_geometry.MultiGeometry', 'MultiGeometry', (['ais'], {'unit': '"""q_A^-1"""', 'radial_range': 'radial_range', 'azimuth_range': 'azimuth_range', 'wavelength': 'None', 'empty': '(0.0)', 'chi_disc': '(180)'}), "(ais, unit='q_A^-1', radial_range=radial_range, azimuth_range=\n azimuth_range, wavelength=None, empty=0.0, chi_disc=180)\n", (1985, 2108), False, 'from pyFAI.multi_geometry import MultiGeometry\n'), ((3455, 3590), 'pyFAI.multi_geometry.MultiGeometry', 'MultiGeometry', (['ais'], {'unit': '"""q_A^-1"""', 'radial_range': 'radial_range', 'azimuth_range': 'azimuth_range', 'wavelength': 'None', 'empty': '(-1)', 'chi_disc': '(180)'}), "(ais, unit='q_A^-1', radial_range=radial_range, azimuth_range=\n azimuth_range, wavelength=None, empty=-1, chi_disc=180)\n", (3468, 3590), False, 'from pyFAI.multi_geometry import MultiGeometry\n'), ((4812, 4843), 'numpy.meshgrid', 'np.meshgrid', (['q_array', 'chi_array'], {}), '(q_array, chi_array)\n', (4823, 4843), True, 'import numpy as np\n'), ((4860, 4884), 'numpy.ma.masked_array', 'np.ma.masked_array', (['cake'], {}), '(cake)\n', (4878, 4884), True, 'import numpy as np\n'), ((4902, 4957), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_mesh < radial_range[0])', 'cake_mask'], {}), '(q_mesh < radial_range[0], cake_mask)\n', (4920, 4957), True, 'import numpy as np\n'), ((4974, 5029), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_mesh > radial_range[1])', 'cake_mask'], {}), '(q_mesh > radial_range[1], cake_mask)\n', (4992, 5029), True, 'import numpy as np\n'), ((5047, 5105), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(azimuth_range[0] > chi_mesh)', 'cake_mask'], {}), '(azimuth_range[0] > chi_mesh, cake_mask)\n', (5065, 5105), True, 'import numpy as np\n'), ((5122, 5180), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(azimuth_range[1] < chi_mesh)', 'cake_mask'], {}), '(azimuth_range[1] < chi_mesh, cake_mask)\n', (5140, 5180), True, 'import numpy as np\n'), ((6382, 6403), 'numpy.meshgrid', 'np.meshgrid', (['q_h', 'q_v'], {}), '(q_h, q_v)\n', (6393, 6403), True, 'import numpy as np\n'), ((6420, 6454), 'numpy.sqrt', 'np.sqrt', (['(q_h_te ** 2 + q_v_te ** 2)'], {}), '(q_h_te ** 2 + q_v_te ** 2)\n', (6427, 6454), True, 'import numpy as np\n'), ((6556, 6594), 'numpy.ma.masked_array', 'np.ma.masked_array', (['img'], {'mask': '(img == 0)'}), '(img, mask=img == 0)\n', (6574, 6594), True, 'import numpy as np\n'), ((6611, 6652), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(img < 1e-05)', 'img_mask'], {}), '(img < 1e-05, img_mask)\n', (6629, 6652), True, 'import numpy as np\n'), ((6667, 6724), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(tth_array < radial_range[0])', 'img_mask'], {}), '(tth_array < radial_range[0], img_mask)\n', (6685, 6724), True, 'import numpy as np\n'), ((6740, 6797), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(tth_array > radial_range[1])', 'img_mask'], {}), '(tth_array > radial_range[1], img_mask)\n', (6758, 6797), True, 'import numpy as np\n'), ((9010, 9035), 'numpy.meshgrid', 'np.meshgrid', (['q_par', 'q_per'], {}), '(q_par, q_per)\n', (9021, 9035), True, 'import numpy as np\n'), ((9051, 9089), 'numpy.ma.masked_array', 'np.ma.masked_array', (['img'], {'mask': '(img == 0)'}), '(img, mask=img == 0)\n', (9069, 9089), True, 'import numpy as np\n'), ((9106, 9162), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(qper_mesh < q_per_range[0])', 'img_mask'], {}), '(qper_mesh < q_per_range[0], img_mask)\n', (9124, 9162), True, 'import numpy as np\n'), ((9178, 9234), 
'numpy.ma.masked_where', 'np.ma.masked_where', (['(qper_mesh > q_per_range[1])', 'img_mask'], {}), '(qper_mesh > q_per_range[1], img_mask)\n', (9196, 9234), True, 'import numpy as np\n'), ((9251, 9307), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_par_range[0] > qpar_mesh)', 'img_mask'], {}), '(q_par_range[0] > qpar_mesh, img_mask)\n', (9269, 9307), True, 'import numpy as np\n'), ((9323, 9379), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_par_range[1] < qpar_mesh)', 'img_mask'], {}), '(q_par_range[1] < qpar_mesh, img_mask)\n', (9341, 9379), True, 'import numpy as np\n'), ((9393, 9418), 'numpy.mean', 'np.mean', (['img_mask'], {'axis': '(0)'}), '(img_mask, axis=0)\n', (9400, 9418), True, 'import numpy as np\n'), ((10463, 10488), 'numpy.meshgrid', 'np.meshgrid', (['q_par', 'q_per'], {}), '(q_par, q_per)\n', (10474, 10488), True, 'import numpy as np\n'), ((10504, 10542), 'numpy.ma.masked_array', 'np.ma.masked_array', (['img'], {'mask': '(img == 0)'}), '(img, mask=img == 0)\n', (10522, 10542), True, 'import numpy as np\n'), ((10559, 10616), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_per_mesh < q_per_range[0])', 'img_mask'], {}), '(q_per_mesh < q_per_range[0], img_mask)\n', (10577, 10616), True, 'import numpy as np\n'), ((10632, 10689), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_per_mesh > q_per_range[1])', 'img_mask'], {}), '(q_per_mesh > q_per_range[1], img_mask)\n', (10650, 10689), True, 'import numpy as np\n'), ((10706, 10763), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_par_mesh < q_par_range[0])', 'img_mask'], {}), '(q_par_mesh < q_par_range[0], img_mask)\n', (10724, 10763), True, 'import numpy as np\n'), ((10779, 10836), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(q_par_mesh > q_par_range[1])', 'img_mask'], {}), '(q_par_mesh > q_par_range[1], img_mask)\n', (10797, 10836), True, 'import numpy as np\n'), ((10850, 10875), 'numpy.mean', 'np.mean', (['img_mask'], {'axis': '(1)'}), '(img_mask, axis=1)\n', (10857, 10875), True, 'import numpy as np\n'), ((11935, 11960), 'numpy.deg2rad', 'np.deg2rad', (['azimuth_range'], {}), '(azimuth_range)\n', (11945, 11960), True, 'import numpy as np\n'), ((12021, 12062), 'numpy.linspace', 'np.linspace', (['q_par[0]', 'q_par[-1]', 'bins[0]'], {}), '(q_par[0], q_par[-1], bins[0])\n', (12032, 12062), True, 'import numpy as np\n'), ((12143, 12164), 'numpy.meshgrid', 'np.meshgrid', (['q_h', 'q_v'], {}), '(q_h, q_v)\n', (12154, 12164), True, 'import numpy as np\n'), ((12181, 12215), 'numpy.sqrt', 'np.sqrt', (['(q_h_te ** 2 + q_v_te ** 2)'], {}), '(q_h_te ** 2 + q_v_te ** 2)\n', (12188, 12215), True, 'import numpy as np\n'), ((12302, 12340), 'numpy.ma.masked_array', 'np.ma.masked_array', (['img'], {'mask': '(img == 0)'}), '(img, mask=img == 0)\n', (12320, 12340), True, 'import numpy as np\n'), ((12357, 12414), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(tth_array < radial_range[0])', 'img_mask'], {}), '(tth_array < radial_range[0], img_mask)\n', (12375, 12414), True, 'import numpy as np\n'), ((12430, 12487), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(tth_array > radial_range[1])', 'img_mask'], {}), '(tth_array > radial_range[1], img_mask)\n', (12448, 12487), True, 'import numpy as np\n'), ((6482, 6508), 'numpy.arctan2', 'np.arctan2', (['q_h_te', 'q_v_te'], {}), '(q_h_te, q_v_te)\n', (6492, 6508), True, 'import numpy as np\n'), ((12073, 12114), 'numpy.linspace', 'np.linspace', (['q_per[0]', 'q_per[-1]', 'bins[1]'], {}), '(q_per[0], q_per[-1], bins[1])\n', (12084, 12114), True, 'import numpy as np\n'), 
((12229, 12255), 'numpy.arctan2', 'np.arctan2', (['q_h_te', 'q_v_te'], {}), '(q_h_te, q_v_te)\n', (12239, 12255), True, 'import numpy as np\n'), ((6138, 6151), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (6146, 6151), True, 'import numpy as np\n'), ((6844, 6865), 'numpy.min', 'np.min', (['azimuth_range'], {}), '(azimuth_range)\n', (6850, 6865), True, 'import numpy as np\n'), ((6923, 6944), 'numpy.max', 'np.max', (['azimuth_range'], {}), '(azimuth_range)\n', (6929, 6944), True, 'import numpy as np\n'), ((8896, 8909), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (8904, 8909), True, 'import numpy as np\n'), ((10348, 10361), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (10356, 10361), True, 'import numpy as np\n'), ((12535, 12556), 'numpy.min', 'np.min', (['azimuth_range'], {}), '(azimuth_range)\n', (12541, 12556), True, 'import numpy as np\n'), ((12614, 12635), 'numpy.max', 'np.max', (['azimuth_range'], {}), '(azimuth_range)\n', (12620, 12635), True, 'import numpy as np\n'), ((13565, 13580), 'numpy.rad2deg', 'np.rad2deg', (['chi'], {}), '(chi)\n', (13575, 13580), True, 'import numpy as np\n'), ((894, 912), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (906, 912), True, 'import numpy as np\n'), ((6199, 6212), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (6207, 6212), True, 'import numpy as np\n'), ((8958, 8971), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (8966, 8971), True, 'import numpy as np\n'), ((10410, 10423), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (10418, 10423), True, 'import numpy as np\n'), ((7135, 7157), 'numpy.ones_like', 'np.ones_like', (['img_mask'], {}), '(img_mask)\n', (7147, 7157), True, 'import numpy as np\n'), ((7182, 7200), 'numpy.shape', 'np.shape', (['img_mask'], {}), '(img_mask)\n', (7190, 7200), True, 'import numpy as np\n'), ((7375, 7397), 'numpy.ones_like', 'np.ones_like', (['img_mask'], {}), '(img_mask)\n', (7387, 7397), True, 'import numpy as np\n'), ((7422, 7440), 'numpy.shape', 'np.shape', (['img_mask'], {}), '(img_mask)\n', (7430, 7440), True, 'import numpy as np\n'), ((7622, 7639), 'numpy.min', 'np.min', (['tth_array'], {}), '(tth_array)\n', (7628, 7639), True, 'import numpy as np\n'), ((7641, 7658), 'numpy.max', 'np.max', (['tth_array'], {}), '(tth_array)\n', (7647, 7658), True, 'import numpy as np\n'), ((8700, 8717), 'numpy.asarray', 'np.asarray', (['q_par'], {}), '(q_par)\n', (8710, 8717), True, 'import numpy as np\n'), ((8725, 8742), 'numpy.asarray', 'np.asarray', (['q_par'], {}), '(q_par)\n', (8735, 8742), True, 'import numpy as np\n'), ((8801, 8818), 'numpy.asarray', 'np.asarray', (['q_per'], {}), '(q_per)\n', (8811, 8818), True, 'import numpy as np\n'), ((8826, 8843), 'numpy.asarray', 'np.asarray', (['q_per'], {}), '(q_per)\n', (8836, 8843), True, 'import numpy as np\n'), ((10152, 10169), 'numpy.asarray', 'np.asarray', (['q_par'], {}), '(q_par)\n', (10162, 10169), True, 'import numpy as np\n'), ((10177, 10194), 'numpy.asarray', 'np.asarray', (['q_par'], {}), '(q_par)\n', (10187, 10194), True, 'import numpy as np\n'), ((10253, 10270), 'numpy.asarray', 'np.asarray', (['q_per'], {}), '(q_per)\n', (10263, 10270), True, 'import numpy as np\n'), ((10278, 10295), 'numpy.asarray', 'np.asarray', (['q_per'], {}), '(q_per)\n', (10288, 10295), True, 'import numpy as np\n'), ((12834, 12856), 'numpy.ones_like', 'np.ones_like', (['img_mask'], {}), '(img_mask)\n', (12846, 12856), True, 'import numpy as np\n'), ((13011, 13033), 'numpy.ones_like', 'np.ones_like', (['img_mask'], {}), '(img_mask)\n', 
(13023, 13033), True, 'import numpy as np\n'), ((13192, 13212), 'numpy.min', 'np.min', (['radial_range'], {}), '(radial_range)\n', (13198, 13212), True, 'import numpy as np\n'), ((13214, 13234), 'numpy.max', 'np.max', (['radial_range'], {}), '(radial_range)\n', (13220, 13234), True, 'import numpy as np\n'), ((13305, 13326), 'numpy.min', 'np.min', (['azimuth_range'], {}), '(azimuth_range)\n', (13311, 13326), True, 'import numpy as np\n'), ((13328, 13349), 'numpy.max', 'np.max', (['azimuth_range'], {}), '(azimuth_range)\n', (13334, 13349), True, 'import numpy as np\n')]
|
import numpy as np
from typing import Tuple
import plotly.io
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from IMLearn.metrics import accuracy
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
plotly.io.renderers.default = 'browser'
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a dataset in R^2 of specified size
Parameters
----------
n: int
Number of samples to generate
noise_ratio: float
Ratio of labels to invert
Returns
-------
X: np.ndarray of shape (n_samples,2)
Design matrix of samples
y: np.ndarray of shape (n_samples,)
Labels of samples
"""
'''
generate samples X with shape: (num_samples, 2) and labels y with shape (num_samples).
num_samples: the number of samples to generate
noise_ratio: invert the label for this ratio of the samples
'''
X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
y[np.random.choice(n, int(noise_ratio * n))] *= -1
return X, y
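# --- Illustrative check (not part of the original exercise) ---
# X, y = generate_data(1000, noise_ratio=0.0)
# X has shape (1000, 2) with features drawn uniformly from [-1, 1); y is +1 outside
# the circle of radius 0.5 around the origin and -1 inside it, and a `noise_ratio`
# fraction of the labels is flipped when noise_ratio > 0.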
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
(train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), generate_data(test_size, noise)
# Question 1: Train- and test errors of AdaBoost in noiseless case
# print("Fitting.......")
adb = AdaBoost(DecisionStump, n_learners).fit(train_X, train_y)
# save it
# with open(f'adb_{train_size}_{test_size}_{noise}noise.pickle', 'wb') as file:
# pickle.dump(adb, file)
# print("saved")
# return
# print("Loading...")
# with open(f'adb_{train_size}_{test_size}_{noise}noise.pickle', 'rb') as file2:
# adb = pickle.load(file2)
# print("Plotting.......")
go.Figure(
data=[
go.Scatter(
x=list(range(1, n_learners + 1)),
y=list(map(lambda n: adb.partial_loss(train_X, train_y, n), list(range(1, n_learners + 1)))),
mode='markers+lines',
name="Training Loss"
),
go.Scatter(
x=list(range(1, n_learners + 1)),
y=list(map(lambda n: adb.partial_loss(test_X, test_y, n), list(range(1, n_learners + 1)))),
mode='markers+lines',
name="Test Loss"
)
],
layout=go.Layout(
title=f"Loss as Function of Num of Learners over Data with {noise} noise",
xaxis_title={'text': "$\\text{Num of Learners}$"},
yaxis_title={'text': "$\\text{Misclassification Loss}$"}
)
).show()
# Question 2: Plotting decision surfaces
T = [5, 50, 100, 250]
lims = np.array([np.r_[train_X, test_X].min(axis=0), np.r_[train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
# preds = [adb.partial_predict(train_X, t) for t in T]
symbols = np.array(["circle", "x", "diamond"])
fig = make_subplots(rows=2,
cols=2,
subplot_titles=[f"Decision Boundary for Ensemble of Size {m}"
for i, m in enumerate(T)],
horizontal_spacing=0.1,
vertical_spacing=.05,
)
# Add traces for data-points setting symbols and colors
for i, m in enumerate(T):
fig.add_traces([go.Scatter(
x=test_X[:, 0],
y=test_X[:, 1],
mode="markers",
showlegend=False,
marker=dict(
color=test_y,
symbol='diamond',
line=dict(color="black", width=1)),
),
decision_surface(lambda x: adb.partial_predict(x, m), lims[0], lims[1], showscale=False)
],
rows=(i // 2) + 1, cols=(i % 2) + 1
)
fig.update_layout(
title=f"Decision Boundaries for Different Ensemble Size <br>",
margin=dict(t=100),
width=1200,
height=1000
)
fig.show()
# Question 3: Decision surface of best performing ensemble
best_ensemble = np.argmin(np.array(
[adb.partial_loss(X=test_X, y=test_y, T=t)
for t in range(1, 251)])) + 1
go.Figure(
data=[
go.Scatter(
x=test_X[:, 0],
y=test_X[:, 1],
mode="markers",
showlegend=False,
marker=dict(
color=test_y,
symbol='diamond',
line=dict(color="black", width=1)),
),
decision_surface(
lambda x: adb.partial_predict(x, best_ensemble),
lims[0], lims[1], showscale=False
)
]
).update_layout(
title=f"Decision Boundaries for Ensemble of Size {best_ensemble}<br>"
f"<sup> With Accuracy of: "
f"{accuracy(test_y, adb.partial_predict(test_X, best_ensemble))}"
f"</sup>",
margin=dict(t=100),
width=1200,
height=1000
).show()
# Question 4: Decision surface with weighted samples
weights = adb.D_ * 10 / np.max(adb.D_)
go.Figure(
data=[
go.Scatter(
x=train_X[:, 0],
y=train_X[:, 1],
mode="markers",
showlegend=False,
marker=dict(
color=weights,
symbol=symbols[train_y.astype(int)],
line=dict(color="black", width=1),
size=weights
)
).update(),
decision_surface(
adb.predict,
lims[0], lims[1], showscale=False
)
]
).update_layout(
title=f"Decision Boundaries for Data with {noise} noise <br>"
f"With Training Set Point Size & Color Proportional To It’s Weight<br>"
f"<sup> x - True label is blue</sup><br>"
f"<sup>diamond - True label is red</sup>",
margin=dict(t=120),
width=1000,
height=1000,
).show()
if __name__ == '__main__':
np.random.seed(0)
fit_and_evaluate_adaboost(noise=0)
fit_and_evaluate_adaboost(noise=0.4)
|
[
"numpy.ones",
"numpy.random.rand",
"plotly.graph_objects.Layout",
"IMLearn.metalearners.adaboost.AdaBoost",
"numpy.max",
"numpy.array",
"numpy.sum",
"numpy.random.seed"
] |
[((3011, 3047), 'numpy.array', 'np.array', (["['circle', 'x', 'diamond']"], {}), "(['circle', 'x', 'diamond'])\n", (3019, 3047), True, 'import numpy as np\n'), ((6267, 6284), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6281, 6284), True, 'import numpy as np\n'), ((1056, 1066), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1063, 1066), True, 'import numpy as np\n'), ((2917, 2938), 'numpy.array', 'np.array', (['[-0.1, 0.1]'], {}), '([-0.1, 0.1])\n', (2925, 2938), True, 'import numpy as np\n'), ((5270, 5284), 'numpy.max', 'np.max', (['adb.D_'], {}), '(adb.D_)\n', (5276, 5284), True, 'import numpy as np\n'), ((1073, 1095), 'numpy.sum', 'np.sum', (['(X ** 2)'], {'axis': '(1)'}), '(X ** 2, axis=1)\n', (1079, 1095), True, 'import numpy as np\n'), ((1493, 1528), 'IMLearn.metalearners.adaboost.AdaBoost', 'AdaBoost', (['DecisionStump', 'n_learners'], {}), '(DecisionStump, n_learners)\n', (1501, 1528), False, 'from IMLearn.metalearners.adaboost import AdaBoost\n'), ((1026, 1046), 'numpy.random.rand', 'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (1040, 1046), True, 'import numpy as np\n'), ((2494, 2700), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': 'f"""Loss as Function of Num of Learners over Data with {noise} noise"""', 'xaxis_title': "{'text': '$\\\\text{Num of Learners}$'}", 'yaxis_title': "{'text': '$\\\\text{Misclassification Loss}$'}"}), "(title=\n f'Loss as Function of Num of Learners over Data with {noise} noise',\n xaxis_title={'text': '$\\\\text{Num of Learners}$'}, yaxis_title={'text':\n '$\\\\text{Misclassification Loss}$'})\n", (2503, 2700), True, 'import plotly.graph_objects as go\n')]
|
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
#
# Setup the SMRT module
from __future__ import print_function, absolute_import, division
from distutils.command.clean import clean
# from setuptools import setup # DO NOT use setuptools!!!!!!
import shutil
import os
import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# Hacky, adopted from sklearn. This sets a global variable
# so smrt __init__ can detect if it's being loaded in the setup
# routine, so it won't load submodules that haven't yet been built.
builtins.__SMRT_SETUP__ = True
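# For reference (assumed, following the sklearn pattern cited above), the package
# __init__.py would typically consume this flag roughly like:
#
#     try:
#         __SMRT_SETUP__
#     except NameError:
#         __SMRT_SETUP__ = False
#     if __SMRT_SETUP__:
#         sys.stderr.write('Partial import of smrt during the build process.\n')
#     else:
#         pass  # normal submodule imports go here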
# metadata
DISTNAME = 'smrt'
DESCRIPTION = 'Handle class imbalance intelligently by using Variational Autoencoders ' \
'to generate synthetic observations of your minority class.'
MAINTAINER = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
LICENSE = 'new BSD'
# import restricted version
import smrt
VERSION = smrt.__version__
# get the installation requirements:
with open('requirements.txt') as req:
REQUIREMENTS = req.read().split(os.linesep)
# Custom clean command to remove build artifacts -- adopted from sklearn
class CleanCommand(clean):
description = "Remove build artifacts from the source tree"
# this is mostly in case we ever add a Cython module to SMRT
def run(self):
clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c & .so files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk(DISTNAME):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
print('Removing file: %s' % filename)
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
# this is for FORTRAN modules, which some of my other packages have used in the past...
for dirname in dirnames:
if dirname == '__pycache__' or dirname.endswith('.so.dSYM'):
print('Removing directory: %s' % dirname)
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
def configuration(parent_package='', top_path=None):
# we know numpy is a valid import now
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage(DISTNAME)
return config
def do_setup():
# setup the config
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
version=VERSION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Scikit-learn users',
'Programming Language :: Python',
'Topic :: Machine Learning',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2.7'
],
keywords='sklearn scikit-learn tensorflow auto-encoders neural-networks class-imbalance',
# packages=[DISTNAME],
# install_requires=REQUIREMENTS,
cmdclass=cmdclass)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg-info',
'--version',
'clean'))):
# For these actions, NumPy is not required
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
else: # we DO need numpy
try:
from numpy.distutils.core import setup
except ImportError:
raise RuntimeError('Need numpy to build %s' % DISTNAME)
# add the config to the metadata
metadata['configuration'] = configuration
# call setup on the dict
setup(**metadata)
if __name__ == '__main__':
do_setup()
|
[
"os.path.exists",
"distutils.command.clean.clean.run",
"distutils.core.setup",
"os.path.join",
"numpy.distutils.misc_util.Configuration",
"os.path.splitext",
"os.path.dirname",
"os.unlink",
"shutil.rmtree",
"os.walk"
] |
[((3093, 3138), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (3106, 3138), False, 'from numpy.distutils.misc_util import Configuration\n'), ((5720, 5737), 'distutils.core.setup', 'setup', ([], {}), '(**metadata)\n', (5725, 5737), False, 'from distutils.core import setup\n'), ((1315, 1330), 'distutils.command.clean.clean.run', 'clean.run', (['self'], {}), '(self)\n', (1324, 1330), False, 'from distutils.command.clean import clean\n'), ((1780, 1803), 'os.path.exists', 'os.path.exists', (['"""build"""'], {}), "('build')\n", (1794, 1803), False, 'import os\n'), ((1884, 1901), 'os.walk', 'os.walk', (['DISTNAME'], {}), '(DISTNAME)\n', (1891, 1901), False, 'import os\n'), ((1423, 1448), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1438, 1448), False, 'import os\n'), ((1583, 1617), 'os.path.join', 'os.path.join', (['cwd', '"""cythonize.dat"""'], {}), "(cwd, 'cythonize.dat')\n", (1595, 1617), False, 'import os\n'), ((1633, 1665), 'os.path.exists', 'os.path.exists', (['cython_hash_file'], {}), '(cython_hash_file)\n', (1647, 1665), False, 'import os\n'), ((1817, 1839), 'shutil.rmtree', 'shutil.rmtree', (['"""build"""'], {}), "('build')\n", (1830, 1839), False, 'import shutil\n'), ((1494, 1523), 'os.path.join', 'os.path.join', (['cwd', '"""PKG-INFO"""'], {}), "(cwd, 'PKG-INFO')\n", (1506, 1523), False, 'import os\n'), ((1683, 1710), 'os.unlink', 'os.unlink', (['cython_hash_file'], {}), '(cython_hash_file)\n', (1692, 1710), False, 'import os\n'), ((2240, 2266), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2256, 2266), False, 'import os\n'), ((2150, 2181), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (2162, 2181), False, 'import os\n'), ((2447, 2478), 'os.path.join', 'os.path.join', (['dirpath', 'pyx_file'], {}), '(dirpath, pyx_file)\n', (2459, 2478), False, 'import os\n'), ((2858, 2888), 'os.path.join', 'os.path.join', (['dirpath', 'dirname'], {}), '(dirpath, dirname)\n', (2870, 2888), False, 'import os\n'), ((2515, 2546), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (2527, 2546), False, 'import os\n')]
|
import numpy as np
import tensorflow as tf
import unittest
hungarian_module = tf.load_op_library("hungarian.so")
class HungarianTests(unittest.TestCase):
def test_min_weighted_bp_cover_1(self):
W = np.array([[3, 2, 2], [1, 2, 0], [2, 2, 1]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([2, 1, 1])
c_1_t = np.array([1, 1, 0])
M_t = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
pass
def test_min_weighted_bp_cover_2(self):
W = np.array([[5, 0, 4, 0], [0, 4, 6, 8], [4, 0, 5, 7]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([5, 6, 5])
c_1_t = np.array([0, 0, 0, 2])
M_t = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_min_weighted_bp_cover_3(self):
W = np.array([[5, 0, 2], [3, 1, 0], [0, 5, 0]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([2, 0, 4])
c_1_t = np.array([3, 1, 0])
M_t = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_min_weighted_bp_cover_4(self):
W = np.array([[[5, 0, 2], [3, 1, 0], [0, 5, 0]], [[3, 2, 2], [1, 2, 0],
[2, 2, 1]]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
c_0 = c_0.eval()
c_1 = c_1.eval()
c_0_t = np.array([[2, 0, 4], [2, 1, 1]])
c_1_t = np.array([[3, 1, 0], [1, 1, 0]])
M_t = np.array([[[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [0, 1, 0],
[0, 0, 1]]])
self.assertTrue((c_0.flatten() == c_0_t.flatten()).all())
self.assertTrue((c_1.flatten() == c_1_t.flatten()).all())
self.assertTrue((M == M_t).all())
def test_real_values_1(self):
# Test the while loop terminates with real values.
W = np.array(
[[0.90, 0.70, 0.30, 0.20, 0.40, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.80, 0.75, 0.92, 0.10, 0.15, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.78, 0.85, 0.66, 0.29, 0.21, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.42, 0.55, 0.23, 0.43, 0.33, 0.002, 0.001, 0.001, 0.001, 0.001],
[0.64, 0.44, 0.33, 0.33, 0.34, 0.001, 0.002, 0.001, 0.001, 0.001],
[0.22, 0.55, 0.43, 0.43, 0.14, 0.001, 0.001, 0.002, 0.001, 0.001],
[0.43, 0.33, 0.34, 0.22, 0.14, 0.001, 0.001, 0.001, 0.002, 0.001],
[0.33, 0.42, 0.23, 0.13, 0.43, 0.001, 0.001, 0.001, 0.001, 0.002],
[0.39, 0.24, 0.53, 0.56, 0.89, 0.001, 0.001, 0.001, 0.001, 0.001],
[0.12, 0.34, 0.82, 0.82, 0.77, 0.001, 0.001, 0.001, 0.001, 0.001]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
M_t = np.array(
[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])
self.assertTrue((M == M_t).all())
def test_real_values_2(self):
W = np.array([[
0.00604139, 0.0126045, 0.0117373, 0.01245, 0.00808836, 0.0162662,
0.0137996, 0.00403898, 0.0123786, 1e-05
], [
0.00604229, 0.0126071, 0.0117400, 0.0124528, 0.00808971, 0.0162703,
0.0138028, 0.00403935, 0.0123812, 1e-05
], [
0.00604234, 0.0126073, 0.0117402, 0.012453, 0.00808980, 0.0162706,
0.0138030, 0.00403937, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
], [
0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706,
0.0138030, 0.00403938, 0.0123814, 1e-05
]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_3(self):
W = np.array([[
0.00302646, 0.00321431, 0.0217552, 0.00836773, 0.0256353, 0.0177026,
0.0289461, 0.0214768, 0.0101898, 1e-05
], [
0.00302875, 0.003217, 0.0217628, 0.00836405, 0.0256229, 0.0177137,
0.0289468, 0.0214719, 0.0101904, 1e-05
], [
0.00302897, 0.00321726, 0.0217636, 0.00836369, 0.0256217, 0.0177148,
0.0289468, 0.0214714, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
], [
0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715,
0.0289468, 0.0214713, 0.0101905, 1e-05
]])
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_4(self):
W = np.array([[
1e-05, 0.0634311, 1e-05, 4.76687e-05, 1.00079e-05, 1.00378e-05, 1e-05,
1e-05, 1e-05, 3.9034e-05
], [
1e-05, 3.42696e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1.0122e-05,
3.43236e-05, 1e-05
], [
1e-05, 0.0426792, 0.031155, 1.0008e-05, 0.00483961, 0.0228187, 1e-05,
1e-05, 1e-05, 0.102463
], [
1e-05, 1e-05, 1e-05, 1.07065e-05, 1e-05, 1.00185e-05, 1e-05, 1e-05,
1e-05, 1.00007e-05
], [
1e-05, 4.22947e-05, 0.00062168, 0.623917, 1.03468e-05, 0.00588984,
1.00004e-05, 1.44433e-05, 1.00014e-05, 0.000213425
], [
1e-05, 1.01764e-05, 1e-05, 0.000667249, 1e-05, 0.000485082, 1e-05,
1e-05, 1.00002e-05, 1e-05
], [
1e-05, 1e-05, 1.50331e-05, 1e-05, 0.11269, 1e-05, 1e-05, 1e-05, 1e-05,
1.13251e-05
], [
1.0001e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0246974, 1e-05, 1e-05,
1e-05
], [
1e-05, 2.89144e-05, 1e-05, 1.05147e-05, 1e-05, 0.000894762, 1.03587e-05,
0.150301, 1e-05, 1.00045e-05
], [
1e-05, 3.97901e-05, 1e-05, 1.11641e-05, 1e-05, 2.34249e-05, 1.0007e-05,
2.42828e-05, 1e-05, 1.10529e-05
]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_5(self):
W = np.array([[
1.4e-05, 1e-05, 1e-05, 0.053306, 0.044139, 1e-05, 1.2e-05, 1e-05, 1e-05,
1e-05
], [
0.001234, 1e-05, 1e-05, 2.1e-05, 1e-05, 0.001535, 0.019553, 1e-05,
1e-05, 1e-05
], [
0.002148, 1e-05, 1e-05, 1.6e-05, 0.651536, 2e-05, 7.4e-05, 0.002359,
1e-05, 1e-05
], [
3.8e-05, 1e-05, 0.000592, 4.7e-05, 0.09173, 1e-05, 1e-05, 1e-05, 1e-05,
1e-05
], [
1e-05, 1e-05, 1e-05, 0.213736, 1e-05, 4.5e-05, 0.000768, 1e-05, 1e-05,
1e-05
], [
1e-05, 1e-05, 1e-05, 0.317609, 1e-05, 1e-05, 0.002151, 1e-05, 1e-05,
1e-05
], [
0.002802, 1e-05, 1.2e-05, 1e-05, 1e-05, 0.002999, 4.8e-05, 1.1e-05,
0.000919, 1e-05
], [
1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.028816, 1e-05
], [
1e-05, 1e-05, 0.047335, 1e-05, 1.2e-05, 1e-05, 1e-05, 1e-05, 1e-05,
1e-05
], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
def test_real_values_6(self):
W = np.array([[
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
], [
0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743,
0.023617, 0.010436, 0.003116
]])
p = 1e6
W = np.round(W * p) / p
M, c_0, c_1 = hungarian_module.hungarian(W)
with tf.Session() as sess:
M = M.eval()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(HungarianTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"tensorflow.load_op_library",
"numpy.round",
"tensorflow.Session",
"numpy.array",
"unittest.TextTestRunner",
"unittest.TestLoader"
] |
[((78, 112), 'tensorflow.load_op_library', 'tf.load_op_library', (['"""hungarian.so"""'], {}), "('hungarian.so')\n", (96, 112), True, 'import tensorflow as tf\n'), ((207, 250), 'numpy.array', 'np.array', (['[[3, 2, 2], [1, 2, 0], [2, 2, 1]]'], {}), '([[3, 2, 2], [1, 2, 0], [2, 2, 1]])\n', (215, 250), True, 'import numpy as np\n'), ((407, 426), 'numpy.array', 'np.array', (['[2, 1, 1]'], {}), '([2, 1, 1])\n', (415, 426), True, 'import numpy as np\n'), ((439, 458), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (447, 458), True, 'import numpy as np\n'), ((469, 512), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (477, 512), True, 'import numpy as np\n'), ((736, 788), 'numpy.array', 'np.array', (['[[5, 0, 4, 0], [0, 4, 6, 8], [4, 0, 5, 7]]'], {}), '([[5, 0, 4, 0], [0, 4, 6, 8], [4, 0, 5, 7]])\n', (744, 788), True, 'import numpy as np\n'), ((945, 964), 'numpy.array', 'np.array', (['[5, 6, 5]'], {}), '([5, 6, 5])\n', (953, 964), True, 'import numpy as np\n'), ((977, 999), 'numpy.array', 'np.array', (['[0, 0, 0, 2]'], {}), '([0, 0, 0, 2])\n', (985, 999), True, 'import numpy as np\n'), ((1010, 1062), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (1018, 1062), True, 'import numpy as np\n'), ((1276, 1319), 'numpy.array', 'np.array', (['[[5, 0, 2], [3, 1, 0], [0, 5, 0]]'], {}), '([[5, 0, 2], [3, 1, 0], [0, 5, 0]])\n', (1284, 1319), True, 'import numpy as np\n'), ((1476, 1495), 'numpy.array', 'np.array', (['[2, 0, 4]'], {}), '([2, 0, 4])\n', (1484, 1495), True, 'import numpy as np\n'), ((1508, 1527), 'numpy.array', 'np.array', (['[3, 1, 0]'], {}), '([3, 1, 0])\n', (1516, 1527), True, 'import numpy as np\n'), ((1538, 1581), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 0], [0, 1, 0]]'], {}), '([[0, 0, 1], [1, 0, 0], [0, 1, 0]])\n', (1546, 1581), True, 'import numpy as np\n'), ((1795, 1880), 'numpy.array', 'np.array', (['[[[5, 0, 2], [3, 1, 0], [0, 5, 0]], [[3, 2, 2], [1, 2, 0], [2, 2, 1]]]'], {}), '([[[5, 0, 2], [3, 1, 0], [0, 5, 0]], [[3, 2, 2], [1, 2, 0], [2, 2, 1]]]\n )\n', (1803, 1880), True, 'import numpy as np\n'), ((2086, 2118), 'numpy.array', 'np.array', (['[[2, 0, 4], [2, 1, 1]]'], {}), '([[2, 0, 4], [2, 1, 1]])\n', (2094, 2118), True, 'import numpy as np\n'), ((2131, 2163), 'numpy.array', 'np.array', (['[[3, 1, 0], [1, 1, 0]]'], {}), '([[3, 1, 0], [1, 1, 0]])\n', (2139, 2163), True, 'import numpy as np\n'), ((2174, 2259), 'numpy.array', 'np.array', (['[[[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]]]'], {}), '([[[0, 0, 1], [1, 0, 0], [0, 1, 0]], [[1, 0, 0], [0, 1, 0], [0, 0, 1]]]\n )\n', (2182, 2259), True, 'import numpy as np\n'), ((2569, 3283), 'numpy.array', 'np.array', (['[[0.9, 0.7, 0.3, 0.2, 0.4, 0.001, 0.001, 0.001, 0.001, 0.001], [0.8, 0.75, \n 0.92, 0.1, 0.15, 0.001, 0.001, 0.001, 0.001, 0.001], [0.78, 0.85, 0.66,\n 0.29, 0.21, 0.001, 0.001, 0.001, 0.001, 0.001], [0.42, 0.55, 0.23, 0.43,\n 0.33, 0.002, 0.001, 0.001, 0.001, 0.001], [0.64, 0.44, 0.33, 0.33, 0.34,\n 0.001, 0.002, 0.001, 0.001, 0.001], [0.22, 0.55, 0.43, 0.43, 0.14, \n 0.001, 0.001, 0.002, 0.001, 0.001], [0.43, 0.33, 0.34, 0.22, 0.14, \n 0.001, 0.001, 0.001, 0.002, 0.001], [0.33, 0.42, 0.23, 0.13, 0.43, \n 0.001, 0.001, 0.001, 0.001, 0.002], [0.39, 0.24, 0.53, 0.56, 0.89, \n 0.001, 0.001, 0.001, 0.001, 0.001], [0.12, 0.34, 0.82, 0.82, 0.77, \n 0.001, 0.001, 0.001, 0.001, 0.001]]'], {}), '([[0.9, 0.7, 0.3, 0.2, 0.4, 0.001, 0.001, 
0.001, 0.001, 0.001], [\n 0.8, 0.75, 0.92, 0.1, 0.15, 0.001, 0.001, 0.001, 0.001, 0.001], [0.78, \n 0.85, 0.66, 0.29, 0.21, 0.001, 0.001, 0.001, 0.001, 0.001], [0.42, 0.55,\n 0.23, 0.43, 0.33, 0.002, 0.001, 0.001, 0.001, 0.001], [0.64, 0.44, 0.33,\n 0.33, 0.34, 0.001, 0.002, 0.001, 0.001, 0.001], [0.22, 0.55, 0.43, 0.43,\n 0.14, 0.001, 0.001, 0.002, 0.001, 0.001], [0.43, 0.33, 0.34, 0.22, 0.14,\n 0.001, 0.001, 0.001, 0.002, 0.001], [0.33, 0.42, 0.23, 0.13, 0.43, \n 0.001, 0.001, 0.001, 0.001, 0.002], [0.39, 0.24, 0.53, 0.56, 0.89, \n 0.001, 0.001, 0.001, 0.001, 0.001], [0.12, 0.34, 0.82, 0.82, 0.77, \n 0.001, 0.001, 0.001, 0.001, 0.001]])\n', (2577, 3283), True, 'import numpy as np\n'), ((3448, 3795), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0,\n 0, 0, 1, 0, 0, 0, 0, 0, 0]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [\n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0,\n 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0, 0, 0, 0,\n 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])\n', (3456, 3795), True, 'import numpy as np\n'), ((3903, 5081), 'numpy.array', 'np.array', (['[[0.00604139, 0.0126045, 0.0117373, 0.01245, 0.00808836, 0.0162662, \n 0.0137996, 0.00403898, 0.0123786, 1e-05], [0.00604229, 0.0126071, \n 0.01174, 0.0124528, 0.00808971, 0.0162703, 0.0138028, 0.00403935, \n 0.0123812, 1e-05], [0.00604234, 0.0126073, 0.0117402, 0.012453, \n 0.0080898, 0.0162706, 0.013803, 0.00403937, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05], [0.00604235, 0.0126073, \n 0.0117402, 0.012453, 0.00808981, 0.0162706, 0.013803, 0.00403938, \n 0.0123814, 1e-05], [0.00604235, 0.0126073, 0.0117402, 0.012453, \n 0.00808981, 0.0162706, 0.013803, 0.00403938, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05], [0.00604235, 0.0126073, \n 0.0117402, 0.012453, 0.00808981, 0.0162706, 0.013803, 0.00403938, \n 0.0123814, 1e-05], [0.00604235, 0.0126073, 0.0117402, 0.012453, \n 0.00808981, 0.0162706, 0.013803, 0.00403938, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05]]'], {}), '([[0.00604139, 0.0126045, 0.0117373, 0.01245, 0.00808836, 0.0162662,\n 0.0137996, 0.00403898, 0.0123786, 1e-05], [0.00604229, 0.0126071, \n 0.01174, 0.0124528, 0.00808971, 0.0162703, 0.0138028, 0.00403935, \n 0.0123812, 1e-05], [0.00604234, 0.0126073, 0.0117402, 0.012453, \n 0.0080898, 0.0162706, 0.013803, 0.00403937, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05], [0.00604235, 0.0126073, \n 0.0117402, 0.012453, 0.00808981, 0.0162706, 0.013803, 0.00403938, \n 0.0123814, 1e-05], [0.00604235, 0.0126073, 0.0117402, 0.012453, \n 0.00808981, 0.0162706, 0.013803, 0.00403938, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05], [0.00604235, 0.0126073, \n 0.0117402, 0.012453, 0.00808981, 0.0162706, 0.013803, 0.00403938, \n 
0.0123814, 1e-05], [0.00604235, 0.0126073, 0.0117402, 0.012453, \n 0.00808981, 0.0162706, 0.013803, 0.00403938, 0.0123814, 1e-05], [\n 0.00604235, 0.0126073, 0.0117402, 0.012453, 0.00808981, 0.0162706, \n 0.013803, 0.00403938, 0.0123814, 1e-05]])\n', (3911, 5081), True, 'import numpy as np\n'), ((5373, 6544), 'numpy.array', 'np.array', (['[[0.00302646, 0.00321431, 0.0217552, 0.00836773, 0.0256353, 0.0177026, \n 0.0289461, 0.0214768, 0.0101898, 1e-05], [0.00302875, 0.003217, \n 0.0217628, 0.00836405, 0.0256229, 0.0177137, 0.0289468, 0.0214719, \n 0.0101904, 1e-05], [0.00302897, 0.00321726, 0.0217636, 0.00836369, \n 0.0256217, 0.0177148, 0.0289468, 0.0214714, 0.0101905, 1e-05], [\n 0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149, \n 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, \n 0.0217637, 0.00836364, 0.0256216, 0.0177149, 0.0289468, 0.0214713, \n 0.0101905, 1e-05], [0.003029, 0.0032173, 0.0217637, 0.00836364, \n 0.0256216, 0.017715, 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029,\n 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715, 0.0289468, \n 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, 0.0217637, \n 0.00836364, 0.0256216, 0.017715, 0.0289468, 0.0214713, 0.0101905, 1e-05\n ], [0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715, \n 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, \n 0.0217637, 0.00836364, 0.0256216, 0.017715, 0.0289468, 0.0214713, \n 0.0101905, 1e-05]]'], {}), '([[0.00302646, 0.00321431, 0.0217552, 0.00836773, 0.0256353, \n 0.0177026, 0.0289461, 0.0214768, 0.0101898, 1e-05], [0.00302875, \n 0.003217, 0.0217628, 0.00836405, 0.0256229, 0.0177137, 0.0289468, \n 0.0214719, 0.0101904, 1e-05], [0.00302897, 0.00321726, 0.0217636, \n 0.00836369, 0.0256217, 0.0177148, 0.0289468, 0.0214714, 0.0101905, \n 1e-05], [0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, \n 0.0177149, 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029, \n 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.0177149, 0.0289468, \n 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, 0.0217637, \n 0.00836364, 0.0256216, 0.017715, 0.0289468, 0.0214713, 0.0101905, 1e-05\n ], [0.003029, 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715, \n 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029, 0.0032173, \n 0.0217637, 0.00836364, 0.0256216, 0.017715, 0.0289468, 0.0214713, \n 0.0101905, 1e-05], [0.003029, 0.0032173, 0.0217637, 0.00836364, \n 0.0256216, 0.017715, 0.0289468, 0.0214713, 0.0101905, 1e-05], [0.003029,\n 0.0032173, 0.0217637, 0.00836364, 0.0256216, 0.017715, 0.0289468, \n 0.0214713, 0.0101905, 1e-05]])\n', (5381, 6544), True, 'import numpy as np\n'), ((6825, 7870), 'numpy.array', 'np.array', (['[[1e-05, 0.0634311, 1e-05, 4.76687e-05, 1.00079e-05, 1.00378e-05, 1e-05, \n 1e-05, 1e-05, 3.9034e-05], [1e-05, 3.42696e-05, 1e-05, 1e-05, 1e-05, \n 1e-05, 1e-05, 1.0122e-05, 3.43236e-05, 1e-05], [1e-05, 0.0426792, \n 0.031155, 1.0008e-05, 0.00483961, 0.0228187, 1e-05, 1e-05, 1e-05, \n 0.102463], [1e-05, 1e-05, 1e-05, 1.07065e-05, 1e-05, 1.00185e-05, 1e-05,\n 1e-05, 1e-05, 1.00007e-05], [1e-05, 4.22947e-05, 0.00062168, 0.623917, \n 1.03468e-05, 0.00588984, 1.00004e-05, 1.44433e-05, 1.00014e-05, \n 0.000213425], [1e-05, 1.01764e-05, 1e-05, 0.000667249, 1e-05, \n 0.000485082, 1e-05, 1e-05, 1.00002e-05, 1e-05], [1e-05, 1e-05, \n 1.50331e-05, 1e-05, 0.11269, 1e-05, 1e-05, 1e-05, 1e-05, 1.13251e-05],\n [1.0001e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0246974, 1e-05, 1e-05,\n 1e-05], [1e-05, 2.89144e-05, 1e-05, 1.05147e-05, 1e-05, 
0.000894762, \n 1.03587e-05, 0.150301, 1e-05, 1.00045e-05], [1e-05, 3.97901e-05, 1e-05,\n 1.11641e-05, 1e-05, 2.34249e-05, 1.0007e-05, 2.42828e-05, 1e-05, \n 1.10529e-05]]'], {}), '([[1e-05, 0.0634311, 1e-05, 4.76687e-05, 1.00079e-05, 1.00378e-05, \n 1e-05, 1e-05, 1e-05, 3.9034e-05], [1e-05, 3.42696e-05, 1e-05, 1e-05, \n 1e-05, 1e-05, 1e-05, 1.0122e-05, 3.43236e-05, 1e-05], [1e-05, 0.0426792,\n 0.031155, 1.0008e-05, 0.00483961, 0.0228187, 1e-05, 1e-05, 1e-05, \n 0.102463], [1e-05, 1e-05, 1e-05, 1.07065e-05, 1e-05, 1.00185e-05, 1e-05,\n 1e-05, 1e-05, 1.00007e-05], [1e-05, 4.22947e-05, 0.00062168, 0.623917, \n 1.03468e-05, 0.00588984, 1.00004e-05, 1.44433e-05, 1.00014e-05, \n 0.000213425], [1e-05, 1.01764e-05, 1e-05, 0.000667249, 1e-05, \n 0.000485082, 1e-05, 1e-05, 1.00002e-05, 1e-05], [1e-05, 1e-05, \n 1.50331e-05, 1e-05, 0.11269, 1e-05, 1e-05, 1e-05, 1e-05, 1.13251e-05],\n [1.0001e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 0.0246974, 1e-05, 1e-05,\n 1e-05], [1e-05, 2.89144e-05, 1e-05, 1.05147e-05, 1e-05, 0.000894762, \n 1.03587e-05, 0.150301, 1e-05, 1.00045e-05], [1e-05, 3.97901e-05, 1e-05,\n 1.11641e-05, 1e-05, 2.34249e-05, 1.0007e-05, 2.42828e-05, 1e-05, \n 1.10529e-05]])\n', (6833, 7870), True, 'import numpy as np\n'), ((8206, 9066), 'numpy.array', 'np.array', (['[[1.4e-05, 1e-05, 1e-05, 0.053306, 0.044139, 1e-05, 1.2e-05, 1e-05, 1e-05, \n 1e-05], [0.001234, 1e-05, 1e-05, 2.1e-05, 1e-05, 0.001535, 0.019553, \n 1e-05, 1e-05, 1e-05], [0.002148, 1e-05, 1e-05, 1.6e-05, 0.651536, 2e-05,\n 7.4e-05, 0.002359, 1e-05, 1e-05], [3.8e-05, 1e-05, 0.000592, 4.7e-05, \n 0.09173, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05], [1e-05, 1e-05, 1e-05, \n 0.213736, 1e-05, 4.5e-05, 0.000768, 1e-05, 1e-05, 1e-05], [1e-05, 1e-05,\n 1e-05, 0.317609, 1e-05, 1e-05, 0.002151, 1e-05, 1e-05, 1e-05], [\n 0.002802, 1e-05, 1.2e-05, 1e-05, 1e-05, 0.002999, 4.8e-05, 1.1e-05, \n 0.000919, 1e-05], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, \n 1e-05, 0.028816, 1e-05], [1e-05, 1e-05, 0.047335, 1e-05, 1.2e-05, 1e-05,\n 1e-05, 1e-05, 1e-05, 1e-05], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05,\n 1e-05, 1e-05, 1e-05, 1e-05]]'], {}), '([[1.4e-05, 1e-05, 1e-05, 0.053306, 0.044139, 1e-05, 1.2e-05, 1e-05,\n 1e-05, 1e-05], [0.001234, 1e-05, 1e-05, 2.1e-05, 1e-05, 0.001535, \n 0.019553, 1e-05, 1e-05, 1e-05], [0.002148, 1e-05, 1e-05, 1.6e-05, \n 0.651536, 2e-05, 7.4e-05, 0.002359, 1e-05, 1e-05], [3.8e-05, 1e-05, \n 0.000592, 4.7e-05, 0.09173, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05], [1e-05,\n 1e-05, 1e-05, 0.213736, 1e-05, 4.5e-05, 0.000768, 1e-05, 1e-05, 1e-05],\n [1e-05, 1e-05, 1e-05, 0.317609, 1e-05, 1e-05, 0.002151, 1e-05, 1e-05, \n 1e-05], [0.002802, 1e-05, 1.2e-05, 1e-05, 1e-05, 0.002999, 4.8e-05, \n 1.1e-05, 0.000919, 1e-05], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05, 1e-05, \n 1e-05, 1e-05, 0.028816, 1e-05], [1e-05, 1e-05, 0.047335, 1e-05, 1.2e-05,\n 1e-05, 1e-05, 1e-05, 1e-05, 1e-05], [1e-05, 1e-05, 1e-05, 1e-05, 1e-05,\n 1e-05, 1e-05, 1e-05, 1e-05, 1e-05]])\n', (8214, 9066), True, 'import numpy as np\n'), ((9387, 10453), 'numpy.array', 'np.array', (['[[0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, \n 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, \n 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408,\n 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, \n 0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, 0.019786, \n 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531,\n 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, \n 
0.003116], [0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, \n 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795,\n 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [\n 0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, \n 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, \n 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408,\n 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, \n 0.010436, 0.003116]]'], {}), '([[0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, \n 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795,\n 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [\n 0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, \n 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, \n 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408,\n 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, \n 0.010436, 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, 0.019786, \n 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531,\n 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, \n 0.003116], [0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, \n 0.002743, 0.023617, 0.010436, 0.003116], [0.003408, 0.010531, 0.002795,\n 1e-05, 0.019786, 0.010435, 0.002743, 0.023617, 0.010436, 0.003116], [\n 0.003408, 0.010531, 0.002795, 1e-05, 0.019786, 0.010435, 0.002743, \n 0.023617, 0.010436, 0.003116]])\n', (9395, 10453), True, 'import numpy as np\n'), ((308, 320), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (318, 320), True, 'import tensorflow as tf\n'), ((846, 858), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (856, 858), True, 'import tensorflow as tf\n'), ((1377, 1389), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1387, 1389), True, 'import tensorflow as tf\n'), ((1987, 1999), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1997, 1999), True, 'import tensorflow as tf\n'), ((3397, 3409), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3407, 3409), True, 'import tensorflow as tf\n'), ((5291, 5303), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5301, 5303), True, 'import tensorflow as tf\n'), ((6743, 6755), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6753, 6755), True, 'import tensorflow as tf\n'), ((8047, 8062), 'numpy.round', 'np.round', (['(W * p)'], {}), '(W * p)\n', (8055, 8062), True, 'import numpy as np\n'), ((8124, 8136), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8134, 8136), True, 'import tensorflow as tf\n'), ((9228, 9243), 'numpy.round', 'np.round', (['(W * p)'], {}), '(W * p)\n', (9236, 9243), True, 'import numpy as np\n'), ((9305, 9317), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9315, 9317), True, 'import tensorflow as tf\n'), ((10629, 10644), 'numpy.round', 'np.round', (['(W * p)'], {}), '(W * p)\n', (10637, 10644), True, 'import numpy as np\n'), ((10706, 10718), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (10716, 10718), True, 'import tensorflow as tf\n'), ((10786, 10807), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (10805, 10807), False, 'import unittest\n'), ((10848, 10884), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (10871, 10884), False, 'import unittest\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xarray as xr
sns.set()
def plot_range(xlabel, ylabel, title, x, values):
"""x and values should have the same size"""
plt.plot(x, values, 'r-', linewidth=2)
plt.gcf().set_size_inches(8, 2)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_year_multi(*args):
"""*args should be iterateble with 2 elements (value, label);
value should be an array of 365 elements"""
fig = plt.figure(figsize=(8, 3))
ax = fig.add_subplot(1, 1, 1)
ox = np.arange(1, 366, 1)
for arg in args:
ax.plot(ox, arg[0], linewidth=2, label=arg[1])
ax.set_ylabel(r'$values$')
ax.set_xlabel(r'$days$')
ax.legend(loc='best')
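# Illustrative usage of plot_year_multi (assumed demo, not in the original module):
# two year-long series plotted against day of year.
def _demo_plot_year_multi():
    days = np.arange(1, 366, 1)
    plot_year_multi((np.sin(2 * np.pi * days / 365.0), 'sine'),
                    (np.zeros(365), 'baseline'))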
def extract_alk(data_train):
ds = xr.open_dataset(data_train[0])
alk_df = ds['B_C_Alk'].to_dataframe()
alk_surface = alk_df.groupby('z').get_group(data_train[1])
alk = alk_surface.loc['2011-01-01':'2011-12-31']
alk = alk.reset_index()
return alk
def show_alk(data_train):
fig = plt.figure(figsize=(10, 2))
ax = fig.add_subplot(1, 1, 1)
for item in data_train:
ax.plot(item[0]['time'],
item[0]['B_C_Alk'], linewidth=2, label=item[1])
ax.legend(loc='best')
ax.set_title('Alkalinity in the surface layer')
plt.show()
if __name__ == '__main__':
print('This is a plot functions module')
|
[
"seaborn.set",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"xarray.open_dataset",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((93, 102), 'seaborn.set', 'sns.set', ([], {}), '()\n', (100, 102), True, 'import seaborn as sns\n'), ((209, 247), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'values', '"""r-"""'], {'linewidth': '(2)'}), "(x, values, 'r-', linewidth=2)\n", (217, 247), True, 'import matplotlib.pyplot as plt\n'), ((288, 304), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (297, 304), True, 'import matplotlib.pyplot as plt\n'), ((309, 327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (319, 327), True, 'import matplotlib.pyplot as plt\n'), ((332, 350), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (342, 350), True, 'import matplotlib.pyplot as plt\n'), ((509, 535), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (519, 535), True, 'import matplotlib.pyplot as plt\n'), ((579, 599), 'numpy.arange', 'np.arange', (['(1)', '(366)', '(1)'], {}), '(1, 366, 1)\n', (588, 599), True, 'import numpy as np\n'), ((803, 833), 'xarray.open_dataset', 'xr.open_dataset', (['data_train[0]'], {}), '(data_train[0])\n', (818, 833), True, 'import xarray as xr\n'), ((1073, 1100), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 2)'}), '(figsize=(10, 2))\n', (1083, 1100), True, 'import matplotlib.pyplot as plt\n'), ((1342, 1352), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1350, 1352), True, 'import matplotlib.pyplot as plt\n'), ((252, 261), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (259, 261), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 10:47:38 2016
@author: ahefny
Policies are BLIND to the representation of states, which could be (1) observation,
(2) original latent state or (3) predictive state.
Policies take the "state" dimension x_dim and the number of actions (or action dimension) as input.
"""
import numpy as np
import scipy.stats
class BasePolicy(object):
def reset(self):
pass
def sample_action(self, state):
'''
Samples an action and returns a tuple consisting of:
chosen action, action probability,
dictionary of diagnostic information (values must be numbers or vectors)
'''
raise NotImplementedError
def _load(self, params):
raise NotImplementedError
def _save(self):
raise NotImplementedError
class RandomDiscretePolicy(BasePolicy):
def __init__(self, num_actions, rng=None):
self.num_actions = num_actions
self.rng = rng
def sample_action(self, state):
action = self.rng.randint(0, self.num_actions)
return action, 1. / self.num_actions, {}
class RandomGaussianPolicy(BasePolicy):
def __init__(self, num_actions, rng=None):
self.num_actions = num_actions
self.rng = rng
def sample_action(self, state):
action = self.rng.randn(self.num_actions)
return action, np.prod(scipy.stats.norm.pdf(action)), {}
class UniformContinuousPolicy(BasePolicy):
def __init__(self, low, high, rng=None):
self._low = low
self._high = high
self._prob = 1.0 / np.prod(self._high - self._low)
self.rng = rng
def sample_action(self, state):
dim = len(self._high)
action = self.rng.rand(dim)
action = action * (self._high - self._low) + self._low
return action, self._prob, {}
class LinearPolicy(BasePolicy):
def __init__(self, K, sigma, rng=None):
self._K = K
self._sigma = sigma
self.rng = rng
def reset(self):
pass
def sample_action(self, state):
mean = np.dot(self._K, state)
noise = self.rng.randn(len(mean))
sigma = self._sigma
action = mean + noise * sigma
return action, np.prod(scipy.stats.norm.pdf(noise)), {}
class SineWavePolicy(BasePolicy):
def __init__(self, amps, periods, phases):
self._amps = amps
self._scales = 2 * np.pi / periods
self._phases = phases * np.pi / 180.0
self._t = 0
def reset(self):
self._t = 0
def sample_action(self, state):
a = self._amps * np.sin(self._t * self._scales + self._phases)
self._t += 1
return a, 1.0, {}
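# Minimal usage sketch (assumed, not from the original code base): every policy is
# driven the same way regardless of what the "state" vector actually represents.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    policy = RandomGaussianPolicy(num_actions=2, rng=rng)
    action, prob, info = policy.sample_action(np.zeros(4))
    print(action, prob, info)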
|
[
"numpy.sin",
"numpy.prod",
"numpy.dot"
] |
[((2094, 2116), 'numpy.dot', 'np.dot', (['self._K', 'state'], {}), '(self._K, state)\n', (2100, 2116), True, 'import numpy as np\n'), ((1598, 1629), 'numpy.prod', 'np.prod', (['(self._high - self._low)'], {}), '(self._high - self._low)\n', (1605, 1629), True, 'import numpy as np\n'), ((2612, 2657), 'numpy.sin', 'np.sin', (['(self._t * self._scales + self._phases)'], {}), '(self._t * self._scales + self._phases)\n', (2618, 2657), True, 'import numpy as np\n')]
|
import torch
import numpy as np
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import skimage.io as io
from path import Path
import cv2
import torch.nn.functional as F
class ETH_LFB(Dataset):
def __init__(self, configs):
"""
dataset for eth local feature benchmark
"""
super(ETH_LFB, self).__init__()
self.configs = configs
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
# self.imfs = []
self.sift = cv2.SIFT_create()
imdir = Path(self.configs['data_path'])
folder_dir = imdir/self.configs['subfolder']
images_dir = folder_dir/'images'
imgs = images_dir.glob('*')
self.imfs = imgs
self.imfs.sort()
def __getitem__(self, item):
imf = self.imfs[item]
im = io.imread(imf)
name = imf.name
name = '{}/{}'.format(self.configs['subfolder'], name)
if len(im.shape) != 3: #gray images
im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
im = im.copy()
im_tensor = self.transform(im) #
c, h, w = im_tensor.shape
# pad_b = 16 - h%16
# pad_r = 16 - w%16
# pad = (0,pad_r,0,pad_b)
# im_tensor = F.pad(im_tensor.unsqueeze(0), pad, mode='replicate').squeeze(0)
pad=(0,0,0,0)
# now use crop to get suitable size
crop_r = w%16
crop_b = h%16
im_tensor = im_tensor[:,:h-crop_b,:w-crop_r]
im = im[:h-crop_b,:w-crop_r,:]
# using sift keypoints
gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
kpts = self.sift.detect(gray)
kpts = np.array([[kp.pt[0], kp.pt[1]] for kp in kpts])
coord = torch.from_numpy(kpts).float()
out = {'im1': im_tensor, 'im1_ori':im, 'coord1': coord, 'name1': name, 'pad1':pad}
return out
def __len__(self):
return len(self.imfs)
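# Illustrative usage (the config keys come from the code above; the path is a
# placeholder and must point at a layout like data_path/subfolder/images/*):
if __name__ == '__main__':
    example_configs = {'data_path': '/path/to/eth_benchmark', 'subfolder': 'example_scene'}
    dataset = ETH_LFB(example_configs)
    sample = dataset[0]
    print(sample['name1'], sample['im1'].shape, sample['coord1'].shape)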
|
[
"torch.from_numpy",
"numpy.array",
"path.Path",
"cv2.SIFT_create",
"skimage.io.imread",
"cv2.cvtColor",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] |
[((752, 769), 'cv2.SIFT_create', 'cv2.SIFT_create', ([], {}), '()\n', (767, 769), False, 'import cv2\n'), ((786, 817), 'path.Path', 'Path', (["self.configs['data_path']"], {}), "(self.configs['data_path'])\n", (790, 817), False, 'from path import Path\n'), ((1075, 1089), 'skimage.io.imread', 'io.imread', (['imf'], {}), '(imf)\n', (1084, 1089), True, 'import skimage.io as io\n'), ((1799, 1835), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_RGB2GRAY'], {}), '(im, cv2.COLOR_RGB2GRAY)\n', (1811, 1835), False, 'import cv2\n'), ((1889, 1936), 'numpy.array', 'np.array', (['[[kp.pt[0], kp.pt[1]] for kp in kpts]'], {}), '([[kp.pt[0], kp.pt[1]] for kp in kpts])\n', (1897, 1936), True, 'import numpy as np\n'), ((1239, 1275), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_GRAY2RGB'], {}), '(im, cv2.COLOR_GRAY2RGB)\n', (1251, 1275), False, 'import cv2\n'), ((448, 469), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (467, 469), True, 'import torchvision.transforms as transforms\n'), ((516, 591), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.485, 0.456, 0.406)', 'std': '(0.229, 0.224, 0.225)'}), '(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n', (536, 591), True, 'import torchvision.transforms as transforms\n'), ((1953, 1975), 'torch.from_numpy', 'torch.from_numpy', (['kpts'], {}), '(kpts)\n', (1969, 1975), False, 'import torch\n')]
|
"""This file contains functions for processing image"""
import cv2
import math
import copy
import numpy as np
import matplotlib.pyplot as plt
def binarize_image(image):
"""Binarize image pixel values to 0 and 255."""
unique_values = np.unique(image)
if len(unique_values) == 2:
if (unique_values == np.array([0., 255.])).all():
return image
mean = image.mean()
image[image > mean] = 255
image[image <= mean] = 0
return image
def read_gray_image(path):
"""Read in a gray scale image."""
image = cv2.imread(path, 0)
return image
def read_image(path):
"""Read in a RGB image."""
image = cv2.imread(path)
return image
def save_image(image, path):
"""Save image using cv2."""
cv2.imwrite(path, image)
def get_black_area(raw_mask):
"""Get the area of black values which needs to be filled with Tangram.
Input:
raw_mask: input image of the Tangram problem, np.array with black area == 0
Return:
black: area of black values
"""
h, w = raw_mask.shape
black = h * w - np.count_nonzero(raw_mask)
return black
def get_unit_length(raw_mask, standard_s=64.):
"""Get the unit length for a Tangram problem.
    For example, if an input mask has a black area of 64 and a typical 13-piece Tangram set
    also has an area of 64, then the unit length for this problem is 1.
Input:
raw_mask: input image of the Tangram problem, np.array with black area == 0
standard_s: standard square of a set of 13 Tangram, typically 64
Return:
unit_length: the length in the mask that equals to 1 in a typical Tangram
"""
black_area = get_black_area(raw_mask)
unit_length = math.sqrt(float(black_area) / float(standard_s))
return unit_length
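# Tiny worked example of the arithmetic above (illustrative helper, not in the
# original module): a 16x16 all-black mask has a black area of 256, so with the
# standard area of 64 the unit length is sqrt(256 / 64) = 2.
def _unit_length_example():
    demo_mask = np.zeros((16, 16), dtype=np.uint8)  # every pixel black (== 0)
    assert get_black_area(demo_mask) == 256
    assert get_unit_length(demo_mask, standard_s=64.) == 2.0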
def show_gray_image(image):
"""Show gray scale image."""
plt.imshow(image, cmap='gray')
plt.show()
def show_image(image):
"""Show RGB image."""
plt.imshow(image)
plt.show()
def get_final_result(grid, elements, colors):
"""Draw elements on grid and returns the final solution."""
img = copy.deepcopy(grid)
if len(img.shape) == 2:
# extend it to RGB form image
img = np.stack([img, img, img], axis=-1)
for i in range(len(elements)):
for j in range(elements[i].area):
img[elements[i].coordinates[j][0], elements[i].coordinates[j][1]] = colors[i]
return img
def segment_image(image, tangram_s):
"""Since we know all elements in a 13 Tangram can be decomposed into small 1x1 squares,
    I want to segment the original image into a grid in which
    each pixel corresponds to one 1x1 square.
"""
# get unit_length
unit_length = int(round(get_unit_length(image, tangram_s)))
# first reverse image to set black area == 1 and white area == 0
mask = np.zeros_like(image, dtype=np.uint8)
mask[image > 128] = 0
mask[image <= 128] = 1
w_sum = np.sum(mask, axis=0)
h_sum = np.sum(mask, axis=1)
loc1 = np.where(h_sum >= unit_length * 0.5)
start_x = loc1[0][0]
end_x = loc1[0][-1] + 1
loc2 = np.where(w_sum >= unit_length * 0.5)
start_y = loc2[0][0]
end_y = loc2[0][-1] + 1
h = end_x - start_x
w = end_y - start_y
assert (h % unit_length == 0 and w % unit_length == 0)
# pad image
ori_h, ori_w = mask.shape
new_h = (ori_h // unit_length + 2) * unit_length
new_w = (ori_w // unit_length + 2) * unit_length
new_image = np.ones((new_h, new_w), dtype=np.uint8) * 255
pad_x_start = unit_length - (start_x % unit_length)
pad_y_start = unit_length - (start_y % unit_length)
new_image[pad_x_start:pad_x_start + ori_h, pad_y_start:pad_y_start + ori_w] = image
# generate grid
h = new_h // unit_length
w = new_w // unit_length
grid = np.ones((h, w), dtype=np.uint8) * 255
# iterate over small squares and compare areas
mask = np.zeros_like(new_image, dtype=np.uint8)
mask[new_image > 128] = 0
mask[new_image <= 128] = 1
for i in range(h):
for j in range(w):
area = \
np.sum(mask[unit_length * i:unit_length * (i + 1), unit_length * j:unit_length * (j + 1)])
if area > (unit_length ** 2) * 0.5:
grid[i, j] = 0
return unit_length, new_image, grid
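# Illustrative check of the segmentation above (assumed helper): an 8x8 black square
# with tangram_s = 16 gives unit_length 2, so it should cover exactly 16 grid cells.
def _segment_image_example():
    demo = np.ones((20, 20), dtype=np.uint8) * 255   # white background
    demo[4:12, 6:14] = 0                            # 8x8 black square
    unit_length, _, grid = segment_image(demo, 16.)
    assert unit_length == 2
    assert int((grid == 0).sum()) == 16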
|
[
"matplotlib.pyplot.imshow",
"cv2.imwrite",
"numpy.unique",
"numpy.ones",
"numpy.where",
"numpy.zeros_like",
"numpy.count_nonzero",
"numpy.sum",
"numpy.stack",
"numpy.array",
"copy.deepcopy",
"cv2.imread",
"matplotlib.pyplot.show"
] |
[((255, 271), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (264, 271), True, 'import numpy as np\n'), ((580, 599), 'cv2.imread', 'cv2.imread', (['path', '(0)'], {}), '(path, 0)\n', (590, 599), False, 'import cv2\n'), ((692, 708), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (702, 708), False, 'import cv2\n'), ((801, 825), 'cv2.imwrite', 'cv2.imwrite', (['path', 'image'], {}), '(path, image)\n', (812, 825), False, 'import cv2\n'), ((1934, 1964), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (1944, 1964), True, 'import matplotlib.pyplot as plt\n'), ((1970, 1980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1978, 1980), True, 'import matplotlib.pyplot as plt\n'), ((2041, 2058), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (2051, 2058), True, 'import matplotlib.pyplot as plt\n'), ((2064, 2074), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2072, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2202, 2221), 'copy.deepcopy', 'copy.deepcopy', (['grid'], {}), '(grid)\n', (2215, 2221), False, 'import copy\n'), ((2954, 2990), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (2967, 2990), True, 'import numpy as np\n'), ((3059, 3079), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (3065, 3079), True, 'import numpy as np\n'), ((3093, 3113), 'numpy.sum', 'np.sum', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (3099, 3113), True, 'import numpy as np\n'), ((3126, 3162), 'numpy.where', 'np.where', (['(h_sum >= unit_length * 0.5)'], {}), '(h_sum >= unit_length * 0.5)\n', (3134, 3162), True, 'import numpy as np\n'), ((3230, 3266), 'numpy.where', 'np.where', (['(w_sum >= unit_length * 0.5)'], {}), '(w_sum >= unit_length * 0.5)\n', (3238, 3266), True, 'import numpy as np\n'), ((4057, 4097), 'numpy.zeros_like', 'np.zeros_like', (['new_image'], {'dtype': 'np.uint8'}), '(new_image, dtype=np.uint8)\n', (4070, 4097), True, 'import numpy as np\n'), ((1141, 1167), 'numpy.count_nonzero', 'np.count_nonzero', (['raw_mask'], {}), '(raw_mask)\n', (1157, 1167), True, 'import numpy as np\n'), ((2305, 2339), 'numpy.stack', 'np.stack', (['[img, img, img]'], {'axis': '(-1)'}), '([img, img, img], axis=-1)\n', (2313, 2339), True, 'import numpy as np\n'), ((3607, 3646), 'numpy.ones', 'np.ones', (['(new_h, new_w)'], {'dtype': 'np.uint8'}), '((new_h, new_w), dtype=np.uint8)\n', (3614, 3646), True, 'import numpy as np\n'), ((3953, 3984), 'numpy.ones', 'np.ones', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (3960, 3984), True, 'import numpy as np\n'), ((4252, 4347), 'numpy.sum', 'np.sum', (['mask[unit_length * i:unit_length * (i + 1), unit_length * j:unit_length * (\n j + 1)]'], {}), '(mask[unit_length * i:unit_length * (i + 1), unit_length * j:\n unit_length * (j + 1)])\n', (4258, 4347), True, 'import numpy as np\n'), ((335, 357), 'numpy.array', 'np.array', (['[0.0, 255.0]'], {}), '([0.0, 255.0])\n', (343, 357), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# import libraries
from keras.applications.resnet50 import ResNet50, decode_predictions,preprocess_input
from keras.preprocessing import image
import numpy as np
import requests
from io import BytesIO
from PIL import Image
# In[2]:
# download the ResNet50 model
model = ResNet50(weights = 'imagenet')
# In[3]:
# architecture of the ResNet50 model
model.summary()
# ## Import an image from the internet
# # displays the image in Jupyter
#
#
#
# ![] (link)
#
# 
# 
# In[4]:
# import an image from the internet
url_img = ('https://natgeo.imgix.net/syndication/d03e14b9-ccf2-40d2-9612-997a20d35b4a/magazine-rights-exempt-2016-08-departments-panda-mania-12.jpg?auto=compress,format&w=1024&h=560&fit=crop')
response = requests.get(url_img)
# change to Bytes
img = Image.open(BytesIO(response.content))
# resize the image to 224x224 because the model requires that size
img = img.resize((224,224))
img
# In[5]:
# convert the image to an array of values 0-255
X = image.img_to_array(img)
# add a new dimension because the model expects 4 dimensions
X = np.expand_dims(X, axis =0)
# (1, 224, 224, 3)
# 1 - image
# 224 - size
# 224 - size
# 3 - RGB
X.shape
# In[6]:
np.expand_dims(X, axis =0).shape
# In[7]:
# prediction
y_pred = model.predict(X)
# In[8]:
# probability of what is in the image
decode_predictions(y_pred, top = 5)
# In[9]:
# other cases
url_money =('http://3.bp.blogspot.com/-CU3Mg-LeVC4/VWSAi6Ff3dI/AAAAAAAAAkM/UnHJHUkba3c/s400/IMG_9240.JPG')
url_dolar =('https://s3.amazonaws.com/ngccoin-production/us-coin-explorer-category/2718362-020o.jpg')
url_kasa =('https://ocdn.eu/pulscms-transforms/1/MesktkpTURBXy82NDZmNjk1MTExMzVmN2Q5ZmMwMWE1YjUxODU5YzdkNC5qcGeSlQMAAM0QbM0JPZMFzQNSzQHe')
url_snow =('https://miastodzieci.pl/wp-content/uploads/2015/09/snowman-1073800_1920.jpg')
url_dolares = ('https://wf2.xcdn.pl/files/17/04/12/984916_hI4O_17123251389_bed3c3a1ba_b_83.jpg')
url_cash = ('http://m.wm.pl/2018/07/orig/pieniadze-22-482228.jpg')
response = requests.get(url_cash)
img = Image.open(BytesIO(response.content))
# resize the image to 224x224
img = img.resize((224,224))
img
# In[10]:
X = image.img_to_array(img)
X = np.expand_dims(X, axis =0)
X.shape
# In[11]:
# prediction
y_pred = model.predict(X)
# In[12]:
# probabilities of what is in the image
decode_predictions(y_pred, top = 5)
# In[ ]:
|
[
"keras.preprocessing.image.img_to_array",
"keras.applications.resnet50.decode_predictions",
"io.BytesIO",
"requests.get",
"numpy.expand_dims",
"keras.applications.resnet50.ResNet50"
] |
[((323, 351), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (331, 351), False, 'from keras.applications.resnet50 import ResNet50, decode_predictions, preprocess_input\n'), ((1204, 1225), 'requests.get', 'requests.get', (['url_img'], {}), '(url_img)\n', (1216, 1225), False, 'import requests\n'), ((1436, 1459), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1454, 1459), False, 'from keras.preprocessing import image\n'), ((1518, 1543), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1532, 1543), True, 'import numpy as np\n'), ((1778, 1811), 'keras.applications.resnet50.decode_predictions', 'decode_predictions', (['y_pred'], {'top': '(5)'}), '(y_pred, top=5)\n', (1796, 1811), False, 'from keras.applications.resnet50 import ResNet50, decode_predictions, preprocess_input\n'), ((2458, 2480), 'requests.get', 'requests.get', (['url_cash'], {}), '(url_cash)\n', (2470, 2480), False, 'import requests\n'), ((2602, 2625), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2620, 2625), False, 'from keras.preprocessing import image\n'), ((2630, 2655), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2644, 2655), True, 'import numpy as np\n'), ((2769, 2802), 'keras.applications.resnet50.decode_predictions', 'decode_predictions', (['y_pred'], {'top': '(5)'}), '(y_pred, top=5)\n', (2787, 2802), False, 'from keras.applications.resnet50 import ResNet50, decode_predictions, preprocess_input\n'), ((1261, 1286), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (1268, 1286), False, 'from io import BytesIO\n'), ((1643, 1668), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1657, 1668), True, 'import numpy as np\n'), ((2498, 2523), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (2505, 2523), False, 'from io import BytesIO\n')]
|
# -*- coding: utf-8 -*-
"""
Plot comparisons of IHME projections against actual data for US states.
IHME projection data (per IHME):
https://covid19.healthdata.org/united-states-of-america
IHME data is stored here in the "..\data\ihme" directory, with one
subdirectory for each release that was obtained.
State-level data (per the COVID Tracking Project):
https://covidtracking.com/
COVID Tracking Project data is stored here in the
"..\data\covid19_tracker" directory for each day that the state
historical values were obtained.
"""
import os
import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import fsolve
import matplotlib.pyplot as plt
from datetime import date
from scipy.signal import medfilt
from read_data import get_data_ctrack, get_data_ihme, format_date_ihme
def intfun(s):
try:
return int(s)
except ValueError:
return 0
# Select states and set data dates for display
#state = 'NY'
#state_long = 'New York'
#state = 'GA'
#state_long = 'Georgia'
#state = 'KY'
#state_long = 'Kentucky'
#state = 'CA'
#state_long = 'California'
#state = 'WI'
#state_long = 'Wisconsin'
#ylpct = [0., 15.]
#state = 'IA'
#state_long = 'Iowa'
state = 'AL'
state_long = 'Alabama'
#state = 'OR'
#state_long = 'Oregon'
#state = 'FL'
#state_long = 'Florida'
#ylpct = [0.,25.]
#state = 'MI'
#state_long = 'Michigan'
#state = 'WA'
#state_long = 'Washington'
#state = 'DC'
#state_long = 'District of Columbia'
#state = 'NJ'
#state_long = 'New Jersey'
#state = 'OK'
#state_long = 'Oklahoma'
#state = 'SD'
#state_long = 'South Dakota'
# TODO: Have to add all state data together for the covid tracker data
#state = 'US'
#state_long = 'US'
#state = 'TX'
#state_long = 'Texas'
#state = 'GA'
#state_long = 'Georgia'
#state = 'MN'
#state_long = 'Minnesota'
#state = 'CO'
#state_long = 'Colorado'
ylpct = [0., 30.]
# Set files which we're loading from and set data dates for display
data_filename = r'..\data\covid19_tracker\states-daily_20200504.csv'
data_date = '04 May'
#model_fname = r'..\data\ihme\2020_03_31.1\Hospitalization_all_locs.csv'
#project_date = '31 March'
#model_fname = r'..\data\ihme\2020_04_12.02\Hospitalization_all_locs.csv'
#project_date = '13 April'
model_fname = r'..\data\ihme\2020_04_16.05\Hospitalization_all_locs.csv'
project_date = '17 April'
# When to stop the plotting
start_date = '20200401'
stop_date = '20200510'
# Which plots to make
plot_testing = True
plot_hosp_death = True
today = date.today()
# Load data and format
data = get_data_ctrack(state, data_filename)
dates = data['date']
start_date_ind = list(dates).index(start_date)
dates = dates[start_date_ind:]
pos = data['positive']
neg = data['negative']
hosp = data['hospitalizedCurrently']
icu = data['inIcuCurrently']
vent = data['onVentilatorCurrently']
death = data['death']
date_inds = range(len(dates))
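# Convert the cumulative series to daily increments; prepend=0 keeps each diff the
# same length as the original series (the first increment is the first reported total).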
dpos = np.diff(pos, prepend = 0)
dneg = np.diff(neg, prepend = 0)
dhosp = np.diff(hosp, prepend = 0.)
ddhosp = np.diff(dhosp, prepend = 0)
ddeath = np.diff(death, prepend = 0)
pos = pos[start_date_ind:]
neg = neg[start_date_ind:]
hosp = hosp[start_date_ind:]
death = death[start_date_ind:]
dpos = dpos[start_date_ind:]
dneg = dneg[start_date_ind:]
dhosp = dhosp[start_date_ind:]
ddeath = ddeath[start_date_ind:]
xticks = date_inds[::4]
xticklabels = ['%s/%s' % (s[-3], s[-2:]) for s in dates[::4]]
# Load ihme data
data_ihme = get_data_ihme(model_fname)[state_long]
dates_ihme = [format_date_ihme(s) for s in data_ihme['date']]
# Trim to desired range
start_ihme = dates_ihme.index(start_date)
stop_ihme = dates_ihme.index(stop_date)
dates_ihme = dates_ihme[start_ihme:stop_ihme]
date_inds_ihme = range(len(dates_ihme))
dhosp_ihme_m, dhosp_ihme_l, dhosp_ihme_u = (data_ihme['admis_mean'][start_ihme:stop_ihme],
data_ihme['admis_lower'][start_ihme:stop_ihme],
data_ihme['admis_upper'][start_ihme:stop_ihme])
hosp_ihme_m, hosp_ihme_l, hosp_ihme_u = (data_ihme['allbed_mean'][start_ihme:stop_ihme],
data_ihme['allbed_lower'][start_ihme:stop_ihme],
data_ihme['allbed_upper'][start_ihme:stop_ihme])
death_ihme_m, death_ihme_l, death_ihme_u = (data_ihme['totdea_mean'][start_ihme:stop_ihme],
data_ihme['totdea_lower'][start_ihme:stop_ihme],
data_ihme['totdea_upper'][start_ihme:stop_ihme])
ddeath_ihme_m, ddeath_ihme_l, ddeath_ihme_u = (data_ihme['deaths_mean'][start_ihme:stop_ihme],
data_ihme['deaths_lower'][start_ihme:stop_ihme],
data_ihme['deaths_upper'][start_ihme:stop_ihme])
xticks = date_inds_ihme[::4]
xticklabels = ['%s/%s' % (s[-3], s[-2:]) for s in dates_ihme[::4]]
#%% Data on tests
if plot_testing:
fig, ax = plt.subplots(1, 3, figsize = (17, 5))
gray = 0.3*np.array([1, 1, 1])
lightblue = [0.3, 0.3, 0.8]
darkblue = [0.2, 0.2, 0.6]
red = [0.6, 0.2, 0.2]
lightred = [0.8, 0.4, 0.4]
dtotal = dpos + dneg
avg_7 = medfilt(dtotal, 7)
ax[0].plot(dates, dtotal, 'o', label = 'Total Tests',
color = darkblue, markerfacecolor = lightblue)
ax[0].plot(dates, avg_7, 'k--', label = '7 Day Moving Average')
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticklabels)
ax[0].set_ylabel('Number of Tests', fontsize = 12, fontweight = 'bold')
ax[0].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
ax[1].plot(dates, dpos, 'o', label = 'Positive Tests',
color = red, markerfacecolor = lightred)
avg_3 = medfilt(dpos, 3)
avg_7 = medfilt(dpos, 7)
# ax[1].plot(dates, avg_3, 'b--', label = '3 Day Moving Average')
ax[1].plot(dates, avg_7, 'k--', label = '7 Day Moving Average')
ax[1].set_xticks(xticks)
ax[1].set_xticklabels(xticklabels)
ax[1].set_ylabel('Number of Positives', fontsize = 12, fontweight = 'bold')
ax[1].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
avg_7 = medfilt(100*dpos/dtotal, 7)
ax[2].plot(dates, avg_7, 'k--', label = '7 Day Moving Average')
ax[2].plot(dates, 100*dpos/dtotal, 'o', color = 'k',
markerfacecolor = gray)
ax[2].set_xticks(xticks)
ax[2].set_xticklabels(xticklabels)
ax[2].set_xlabel('Date', fontweight = 'bold', fontsize = 12)
ax[2].set_ylabel('Percentage of Positive Tests',
fontweight = 'bold', fontsize = 12)
ax[0].set_title('All Tests', fontsize = 12, fontweight = 'bold')
ax[1].set_title('Positive Tests', fontsize = 12, fontweight = 'bold')
ax[2].set_title('Percentage of Tests Positive', fontsize = 12, fontweight = 'bold')
yl0 = ax[0].get_ylim()
yl1 = ax[1].get_ylim()
yl2 = ax[2].get_ylim()
ax[0].set_ylim([-5, yl0[1]])
ax[0].set_xlim([0, len(dates)])
ax[1].set_ylim([-5, yl1[1]])
ax[1].set_xlim([0, len(dates)])
ax[1].legend()
if ylpct is None:
ax[2].set_ylim([-5, yl2[1]])
else:
ax[2].set_ylim(ylpct)
ax[2].set_xlim([0, len(dates)])
fig.suptitle('%s: All Tests, Positive Tests, and Positive Test Percentages' %
state_long, fontsize = 14, fontweight = 'bold')
impath = '../images/test_data'
imname = '%s_data%s_%s.png' % (state_long, data_date, str(today))
plt.savefig(os.path.join(impath, imname), bbox_inches = 'tight')
#%% Show info on hospitalizations and deaths
if plot_hosp_death:
impath = '../images/ihme_compare'
imname = '%s_data%s_project%s_%s.png' % (state_long, data_date, project_date, str(today))
lightblue = [0.3, 0.3, 0.8]
darkblue = [0.2, 0.2, 0.6]
fig, ax = plt.subplots(2, 2, figsize = (12, 6))
ax = ax.flatten()
ax[0].plot(dates, hosp, 'o', label = 'Reported',
color = darkblue, markerfacecolor = lightblue)
ax[0].plot(dates_ihme, hosp_ihme_m, 'k-', label = 'IHME Projected [Mean]')
ax[0].plot(dates_ihme, hosp_ihme_l, 'r--', label = 'IHME Projected [Lower CI]')
ax[0].plot(dates_ihme, hosp_ihme_u, 'r--', label = 'IHME Projected [Upper CI]')
ax[0].set_xlim(0, date_inds_ihme[-1])
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticklabels)
ax[0].legend()
ax[0].set_ylabel('Total Hospitalized', fontsize = 12, fontweight = 'bold')
ax[0].set_title('Hospitalizations', fontsize = 12, fontweight = 'bold')
ax[2].plot(dates, dhosp, 'o',
color = darkblue, markerfacecolor = lightblue)
ax[2].plot(dates_ihme, dhosp_ihme_m, 'k-')
ax[2].plot(dates_ihme, dhosp_ihme_l, 'r--')
ax[2].plot(dates_ihme, dhosp_ihme_u, 'r--')
ax[2].set_xlim(0, date_inds_ihme[-1])
ax[2].set_xticks(xticks)
ax[2].set_xticklabels(xticklabels)
ax[2].set_ylabel('New Hospitalized', fontsize = 12, fontweight = 'bold')
ax[2].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
ax[1].plot(dates, death, 'o', label = 'Reported',
color = darkblue, markerfacecolor = lightblue)
ax[1].plot(dates_ihme, death_ihme_m, 'k-', label = 'IHME Projected [Mean]')
ax[1].plot(dates_ihme, death_ihme_l, 'r--', label = 'IHME Projected [Lower CI]')
ax[1].plot(dates_ihme, death_ihme_u, 'r--', label = 'IHME Projected [Upper CI]')
ax[1].set_xlim(0, date_inds_ihme[-1])
ax[1].set_xticks(xticks)
ax[1].set_xticklabels(xticklabels)
ax[1].legend()
ax[1].set_ylabel('Total Deaths', fontsize = 12, fontweight = 'bold')
ax[1].set_title('Deaths', fontsize = 12, fontweight = 'bold')
ax[3].plot(dates, ddeath, 'o',
color = darkblue, markerfacecolor = lightblue)
ax[3].plot(dates_ihme, ddeath_ihme_m, 'k-')
ax[3].plot(dates_ihme, ddeath_ihme_l, 'r--')
ax[3].plot(dates_ihme, ddeath_ihme_u, 'r--')
ax[3].set_xlim(0, date_inds_ihme[-1])
ax[3].set_xticks(xticks)
ax[3].set_xticklabels(xticklabels)
ax[3].set_ylabel('New Deaths', fontsize = 12, fontweight = 'bold')
ax[3].set_xlabel('Date', fontsize = 12, fontweight = 'bold')
# plt.tight_layout()
fig.suptitle('%s: Reported Data [%s] vs IHME Projections [%s]' %
(state_long, data_date, project_date), fontsize = 14, fontweight = 'bold')
plt.savefig(os.path.join(impath, imname), bbox_inches = 'tight')
|
[
"read_data.get_data_ctrack",
"os.path.join",
"read_data.format_date_ihme",
"numpy.diff",
"read_data.get_data_ihme",
"numpy.array",
"scipy.signal.medfilt",
"datetime.date.today",
"matplotlib.pyplot.subplots"
] |
[((2462, 2474), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2472, 2474), False, 'from datetime import date\n'), ((2506, 2543), 'read_data.get_data_ctrack', 'get_data_ctrack', (['state', 'data_filename'], {}), '(state, data_filename)\n', (2521, 2543), False, 'from read_data import get_data_ctrack, get_data_ihme, format_date_ihme\n'), ((2863, 2886), 'numpy.diff', 'np.diff', (['pos'], {'prepend': '(0)'}), '(pos, prepend=0)\n', (2870, 2886), True, 'import numpy as np\n'), ((2896, 2919), 'numpy.diff', 'np.diff', (['neg'], {'prepend': '(0)'}), '(neg, prepend=0)\n', (2903, 2919), True, 'import numpy as np\n'), ((2930, 2956), 'numpy.diff', 'np.diff', (['hosp'], {'prepend': '(0.0)'}), '(hosp, prepend=0.0)\n', (2937, 2956), True, 'import numpy as np\n'), ((2967, 2992), 'numpy.diff', 'np.diff', (['dhosp'], {'prepend': '(0)'}), '(dhosp, prepend=0)\n', (2974, 2992), True, 'import numpy as np\n'), ((3004, 3029), 'numpy.diff', 'np.diff', (['death'], {'prepend': '(0)'}), '(death, prepend=0)\n', (3011, 3029), True, 'import numpy as np\n'), ((3388, 3414), 'read_data.get_data_ihme', 'get_data_ihme', (['model_fname'], {}), '(model_fname)\n', (3401, 3414), False, 'from read_data import get_data_ctrack, get_data_ihme, format_date_ihme\n'), ((3441, 3460), 'read_data.format_date_ihme', 'format_date_ihme', (['s'], {}), '(s)\n', (3457, 3460), False, 'from read_data import get_data_ctrack, get_data_ihme, format_date_ihme\n'), ((5016, 5051), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(17, 5)'}), '(1, 3, figsize=(17, 5))\n', (5028, 5051), True, 'import matplotlib.pyplot as plt\n'), ((5256, 5274), 'scipy.signal.medfilt', 'medfilt', (['dtotal', '(7)'], {}), '(dtotal, 7)\n', (5263, 5274), False, 'from scipy.signal import medfilt\n'), ((5811, 5827), 'scipy.signal.medfilt', 'medfilt', (['dpos', '(3)'], {}), '(dpos, 3)\n', (5818, 5827), False, 'from scipy.signal import medfilt\n'), ((5840, 5856), 'scipy.signal.medfilt', 'medfilt', (['dpos', '(7)'], {}), '(dpos, 7)\n', (5847, 5856), False, 'from scipy.signal import medfilt\n'), ((6225, 6256), 'scipy.signal.medfilt', 'medfilt', (['(100 * dpos / dtotal)', '(7)'], {}), '(100 * dpos / dtotal, 7)\n', (6232, 6256), False, 'from scipy.signal import medfilt\n'), ((7919, 7954), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(12, 6)'}), '(2, 2, figsize=(12, 6))\n', (7931, 7954), True, 'import matplotlib.pyplot as plt\n'), ((5069, 5088), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (5077, 5088), True, 'import numpy as np\n'), ((7579, 7607), 'os.path.join', 'os.path.join', (['impath', 'imname'], {}), '(impath, imname)\n', (7591, 7607), False, 'import os\n'), ((10494, 10522), 'os.path.join', 'os.path.join', (['impath', 'imname'], {}), '(impath, imname)\n', (10506, 10522), False, 'import os\n')]
|
from feature import Feature
from itertools import product
import numpy as np
import random
class Node:
def __init__(self, K, Cweights, Dweights, seed):
self.K = K
self.seed = seed
self.Kd = int(K*2/3)
self.Kc = int(K*1/3)
self.Cfeatures = [Feature(False, seed) for k in range(self.Kc)]
self.Dfeatures = [Feature(True, seed) for k in range(self.Kd)]
self.CfeatureWeights = Cweights
self.DfeatureWeights = Dweights
self.group = np.arange(self.Kd).reshape((4,-1))
for i in range(self.Kc):
self.Cfeatures[i].weight = self.CfeatureWeights[i]
for i in range(self.Kd):
self.Dfeatures[i].weight = self.DfeatureWeights[i]
for i in range(self.group.shape[0]):
one = np.random.randint(self.group[i,0], self.group[i,0]+self.group.shape[1])
for k in self.group[i,:]:
self.Dfeatures[k].xhat = 0
self.Dfeatures[one].xhat = 1
        self.u = np.random.rand()
        # Combined feature list (discrete features first, then continuous); this ordering
        # matches the [:self.Kd] / [self.Kd:] slicing used in the methods below.
        self.features = self.Dfeatures + self.Cfeatures
def getAlpha(self):
alpha = 0
for f in self.features[self.Kd:]:
alpha += min(f.weight * max(f.xhat - f.range, 0), f.weight * min(f.xhat + f.range, 1))
alpha = np.exp(alpha)
return alpha
def getBeta(self):
beta = 0
for f in self.features[self.Kd:]:
beta += max(f.weight * max(f.xhat - f.range, 0), f.weight * min(f.xhat + f.range, 1))
beta = np.exp(beta)
return beta
def getMaxCost(self):
maxCost = 0
for f in self.Cfeatures:
maxCost += f.cost * min(f.xhat, f.range, 1-f.xhat)
for f in self.Dfeatures:
maxCost += f.cost
return maxCost
def printFeatures(self):
for f in self.features:
feat = {"xhat": f.xhat, "cost": f.cost, "range": f.range, "weight": f.weight}
print(feat)
def generateCorrelation(self):
K = 10
corr = [3, 2, 2]
l = sum(corr)
m1 = [(1,0,0), (0,1,0), (0,0,1)]
m2 = [(1,0), (0,1)]
m3 = m2
m4 = list(product(range(2), repeat=K-l))
m = list(product(m1, m2, m3, m4))
self.discreteSpace = [i[0] + i[1] + i[2] + i[3] for i in m]
self.discreteCost = [random.random()*10 for i in self.discreteSpace]
ws = np.array([f.weight for f in self.features[:self.Kd]])
self.discreteScore = [np.exp(np.dot(ws, np.array(i))) for i in self.discreteSpace]
|
[
"numpy.random.rand",
"itertools.product",
"feature.Feature",
"numpy.exp",
"numpy.array",
"numpy.random.randint",
"random.random",
"numpy.arange"
] |
[((1003, 1019), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1017, 1019), True, 'import numpy as np\n'), ((1220, 1233), 'numpy.exp', 'np.exp', (['alpha'], {}), '(alpha)\n', (1226, 1233), True, 'import numpy as np\n'), ((1451, 1463), 'numpy.exp', 'np.exp', (['beta'], {}), '(beta)\n', (1457, 1463), True, 'import numpy as np\n'), ((2322, 2375), 'numpy.array', 'np.array', (['[f.weight for f in self.features[:self.Kd]]'], {}), '([f.weight for f in self.features[:self.Kd]])\n', (2330, 2375), True, 'import numpy as np\n'), ((284, 304), 'feature.Feature', 'Feature', (['(False)', 'seed'], {}), '(False, seed)\n', (291, 304), False, 'from feature import Feature\n'), ((356, 375), 'feature.Feature', 'Feature', (['(True)', 'seed'], {}), '(True, seed)\n', (363, 375), False, 'from feature import Feature\n'), ((792, 867), 'numpy.random.randint', 'np.random.randint', (['self.group[i, 0]', '(self.group[i, 0] + self.group.shape[1])'], {}), '(self.group[i, 0], self.group[i, 0] + self.group.shape[1])\n', (809, 867), True, 'import numpy as np\n'), ((2139, 2162), 'itertools.product', 'product', (['m1', 'm2', 'm3', 'm4'], {}), '(m1, m2, m3, m4)\n', (2146, 2162), False, 'from itertools import product\n'), ((502, 520), 'numpy.arange', 'np.arange', (['self.Kd'], {}), '(self.Kd)\n', (511, 520), True, 'import numpy as np\n'), ((2261, 2276), 'random.random', 'random.random', ([], {}), '()\n', (2274, 2276), False, 'import random\n'), ((2424, 2435), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (2432, 2435), True, 'import numpy as np\n')]
|
#https://github.com/Newmu/Theano-Tutorials/blob/master/1_linear_regression.py
import theano
from theano import tensor as T
import numpy as np
trX = np.linspace(-1, 1, 101)
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33
X = T.scalar()
Y = T.scalar()
def model(X, w):
return X * w
w = theano.shared(np.asarray(0., dtype=theano.config.floatX))
y = model(X, w)
cost = T.mean(T.sqr(y - Y))
gradient = T.grad(cost=cost, wrt=w)
updates = [[w, w - gradient * 0.01]]
train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)
for i in range(100):
for x, y in zip(trX, trY):
train(x, y)
print(w.get_value()) # something around 2, since trY was generated as 2 * trX + noise
#https://raw.githubusercontent.com/Newmu/Theano-Tutorials/master/2_logistic_regression.py
import theano
from theano import tensor as T
import numpy as np
from fuel.datasets import MNIST
from matplotlib import pyplot, cm
dataset = MNIST(('train',), sources=('features',))
state = dataset.open()
image, = dataset.get_data(state=state, request=[1234])
pyplot.imshow(image.reshape((28, 28)), cmap=cm.Greys_r, interpolation='nearest')
pyplot.show()
dataset.close(state)
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def model(X, w):
return T.nnet.softmax(T.dot(X, w))
# mnist() is the data-loading helper from the Theano-Tutorials repo (load.py)
from load import mnist
trX, teX, trY, teY = mnist(onehot=True)
X = T.fmatrix()
Y = T.fmatrix()
w = init_weights((784, 10))
py_x = model(X, w)
y_pred = T.argmax(py_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(py_x, Y))
gradient = T.grad(cost=cost, wrt=w)
update = [[w, w - gradient * 0.05]]
train = theano.function(inputs=[X, Y], outputs=cost, updates=update, allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=y_pred, allow_input_downcast=True)
for i in range(100):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
cost = train(trX[start:end], trY[start:end])
print(i, np.mean(np.argmax(teY, axis=1) == predict(teX)))
|
[
"theano.tensor.nnet.categorical_crossentropy",
"theano.function",
"matplotlib.pyplot.show",
"theano.tensor.dot",
"numpy.asarray",
"numpy.argmax",
"theano.tensor.sqr",
"numpy.linspace",
"theano.tensor.fmatrix",
"theano.tensor.argmax",
"theano.tensor.scalar",
"numpy.random.randn",
"fuel.datasets.MNIST",
"theano.tensor.grad"
] |
[((150, 173), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(101)'], {}), '(-1, 1, 101)\n', (161, 173), True, 'import numpy as np\n'), ((230, 240), 'theano.tensor.scalar', 'T.scalar', ([], {}), '()\n', (238, 240), True, 'from theano import tensor as T\n'), ((245, 255), 'theano.tensor.scalar', 'T.scalar', ([], {}), '()\n', (253, 255), True, 'from theano import tensor as T\n'), ((410, 434), 'theano.tensor.grad', 'T.grad', ([], {'cost': 'cost', 'wrt': 'w'}), '(cost=cost, wrt=w)\n', (416, 434), True, 'from theano import tensor as T\n'), ((481, 573), 'theano.function', 'theano.function', ([], {'inputs': '[X, Y]', 'outputs': 'cost', 'updates': 'updates', 'allow_input_downcast': '(True)'}), '(inputs=[X, Y], outputs=cost, updates=updates,\n allow_input_downcast=True)\n', (496, 573), False, 'import theano\n'), ((927, 967), 'fuel.datasets.MNIST', 'MNIST', (["('train',)"], {'sources': "('features',)"}), "(('train',), sources=('features',))\n", (932, 967), False, 'from fuel.datasets import MNIST\n'), ((1127, 1140), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (1138, 1140), False, 'from matplotlib import pyplot, cm\n'), ((1425, 1436), 'theano.tensor.fmatrix', 'T.fmatrix', ([], {}), '()\n', (1434, 1436), True, 'from theano import tensor as T\n'), ((1441, 1452), 'theano.tensor.fmatrix', 'T.fmatrix', ([], {}), '()\n', (1450, 1452), True, 'from theano import tensor as T\n'), ((1511, 1533), 'theano.tensor.argmax', 'T.argmax', (['py_x'], {'axis': '(1)'}), '(py_x, axis=1)\n', (1519, 1533), True, 'from theano import tensor as T\n'), ((1602, 1626), 'theano.tensor.grad', 'T.grad', ([], {'cost': 'cost', 'wrt': 'w'}), '(cost=cost, wrt=w)\n', (1608, 1626), True, 'from theano import tensor as T\n'), ((1672, 1763), 'theano.function', 'theano.function', ([], {'inputs': '[X, Y]', 'outputs': 'cost', 'updates': 'update', 'allow_input_downcast': '(True)'}), '(inputs=[X, Y], outputs=cost, updates=update,\n allow_input_downcast=True)\n', (1687, 1763), False, 'import theano\n'), ((1770, 1840), 'theano.function', 'theano.function', ([], {'inputs': '[X]', 'outputs': 'y_pred', 'allow_input_downcast': '(True)'}), '(inputs=[X], outputs=y_pred, allow_input_downcast=True)\n', (1785, 1840), False, 'import theano\n'), ((310, 353), 'numpy.asarray', 'np.asarray', (['(0.0)'], {'dtype': 'theano.config.floatX'}), '(0.0, dtype=theano.config.floatX)\n', (320, 353), True, 'import numpy as np\n'), ((385, 397), 'theano.tensor.sqr', 'T.sqr', (['(y - Y)'], {}), '(y - Y)\n', (390, 397), True, 'from theano import tensor as T\n'), ((1189, 1230), 'numpy.asarray', 'np.asarray', (['X'], {'dtype': 'theano.config.floatX'}), '(X, dtype=theano.config.floatX)\n', (1199, 1230), True, 'import numpy as np\n'), ((1549, 1589), 'theano.tensor.nnet.categorical_crossentropy', 'T.nnet.categorical_crossentropy', (['py_x', 'Y'], {}), '(py_x, Y)\n', (1580, 1589), True, 'from theano import tensor as T\n'), ((190, 217), 'numpy.random.randn', 'np.random.randn', (['*trX.shape'], {}), '(*trX.shape)\n', (205, 217), True, 'import numpy as np\n'), ((1366, 1377), 'theano.tensor.dot', 'T.dot', (['X', 'w'], {}), '(X, w)\n', (1371, 1377), True, 'from theano import tensor as T\n'), ((1289, 1312), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1304, 1312), True, 'import numpy as np\n'), ((2016, 2038), 'numpy.argmax', 'np.argmax', (['teY'], {'axis': '(1)'}), '(teY, axis=1)\n', (2025, 2038), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Make the double periodic shear test grid"""
import matplotlib.pyplot as plt
from configparser import ConfigParser
import numpy as np
import sys
import os
sys.path.append(os.path.abspath("../../.."))
from pycato import *
# Make the empty grid
domain = make_uniform_grid(
n_cells=(256, 256),
xrange=(0, 2 * np.pi),
yrange=(0, 2 * np.pi),
input_file="input.ini",
)
# Set the initial conditions
rho_0 = np.pi / 15
delta = 1
domain["rho"] = domain["rho"] * rho_0
domain["p"] = domain["p"] * 4.0
x = domain["xc"].m
y = domain["yc"].m
u = domain["u"].m
# The u and v arrays depend on the location w/in the grid.
# Since they're cell-centered quantities, they need the location
# of the cell center (xc, yc)
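# Velocity field for the double periodic shear layer test: u follows tanh shear
# profiles centered at y = pi/2 and y = 3*pi/2 (set in the loop below), and
# v = delta*sin(x) is the small perturbation that seeds the shear-layer roll-up.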
v = delta * np.sin(x)
for i in range(y.shape[0]):
for j in range(y.shape[1]):
if y[i, j] <= np.pi:
u[i, j] = np.tanh((y[i, j] - np.pi / 2) / rho_0)
else:
u[i, j] = np.tanh((1.5 * np.pi - y[i, j]) / rho_0)
domain["u"] = u * ureg("cm/s")
domain["v"] = v * ureg("cm/s")
write_initial_hdf5(filename="double_shear", initial_condition_dict=domain)
# Plot the results
fig, (ax1, ax2) = plt.subplots(figsize=(18, 8), nrows=1, ncols=2)
vc = ax1.pcolormesh(
domain["x"].m,
domain["y"].m,
domain["v"].m,
edgecolor="k",
lw=0.001,
cmap="RdBu",
antialiased=True,
)
fig.colorbar(vc, ax=ax1, label="Y Velocity")
ax1.set_xlabel("X")
ax1.set_ylabel("Y")
uc = ax2.pcolormesh(
domain["x"].m,
domain["y"].m,
domain["u"].m,
edgecolor="k",
lw=0.001,
cmap="RdBu",
antialiased=True,
)
ax2.set_xlabel("X")
ax2.set_ylabel("Y")
fig.colorbar(uc, ax=ax2, label="X Velocity")
ax1.axis("equal")
ax2.axis("equal")
plt.show()
|
[
"numpy.sin",
"numpy.tanh",
"os.path.abspath",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((1174, 1221), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(18, 8)', 'nrows': '(1)', 'ncols': '(2)'}), '(figsize=(18, 8), nrows=1, ncols=2)\n', (1186, 1221), True, 'import matplotlib.pyplot as plt\n'), ((1735, 1745), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1743, 1745), True, 'import matplotlib.pyplot as plt\n'), ((198, 225), 'os.path.abspath', 'os.path.abspath', (['"""../../.."""'], {}), "('../../..')\n", (213, 225), False, 'import os\n'), ((760, 769), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (766, 769), True, 'import numpy as np\n'), ((881, 919), 'numpy.tanh', 'np.tanh', (['((y[i, j] - np.pi / 2) / rho_0)'], {}), '((y[i, j] - np.pi / 2) / rho_0)\n', (888, 919), True, 'import numpy as np\n'), ((956, 996), 'numpy.tanh', 'np.tanh', (['((1.5 * np.pi - y[i, j]) / rho_0)'], {}), '((1.5 * np.pi - y[i, j]) / rho_0)\n', (963, 996), True, 'import numpy as np\n')]
|
import numpy as np
from keras.layers import *
from keras.models import *
from keras.activations import *
from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau
def keras_model():
model=Sequential()
model.add(Conv2D(32,(3,3),padding="same"))
model.add(Conv2D(32,(3,3),padding="same"))
model.add(MaxPool2D())
model.add(Conv2D(64,(3,3),padding="same"))
model.add(Conv2D(64,(3,3),padding="same"))
model.add(MaxPool2D())
model.add(Flatten())
model.add(Dense(128,activation='relu'))
# model.add(relu())
model.add(Dense(256,activation='relu'))
# model.add(relu())
model.add(Dense(128,activation='relu'))
# model.add(relu())
model.add(Dense(1))
model.compile(optimizer="adam",loss="mse")
filepath="selfdrivingv1.h5"
    checkpoint = ModelCheckpoint(filepath,verbose=1,save_best_only=True)
lr=ReduceLROnPlateau(factor=0.1,patience=3,min_lr=1e-8)
callbacks=[checkpoint,lr]
return model,callbacks
features=np.load("features_40x40.npy")
labels=np.load("labels_40x40.npy")
# augment data: append left-right mirrored images with negated labels
features=np.append(features,features[:,:,::-1],axis=0)
labels=np.append(labels,-labels,axis=0)
features=features.reshape(features.shape[0],40,40,1)
print(features.shape)
model,callbacks=keras_model()
from sklearn.model_selection import train_test_split as split
train_x,test_x,train_y,test_y=split(features,labels,test_size=0.1,random_state=1)
print(train_x[0])
model.fit(x=train_x,y=train_y,epochs=10,batch_size=64,callbacks=callbacks,validation_data=(test_x,test_y))
print(model.summary())
model.save("selfdriving1v1.h5")
|
[
"keras.callbacks.ModelCheckpoint",
"sklearn.model_selection.train_test_split",
"keras.callbacks.ReduceLROnPlateau",
"numpy.append",
"numpy.load"
] |
[((1030, 1059), 'numpy.load', 'np.load', (['"""features_40x40.npy"""'], {}), "('features_40x40.npy')\n", (1037, 1059), True, 'import numpy as np\n'), ((1068, 1095), 'numpy.load', 'np.load', (['"""labels_40x40.npy"""'], {}), "('labels_40x40.npy')\n", (1075, 1095), True, 'import numpy as np\n'), ((1125, 1174), 'numpy.append', 'np.append', (['features', 'features[:, :, ::-1]'], {'axis': '(0)'}), '(features, features[:, :, ::-1], axis=0)\n', (1134, 1174), True, 'import numpy as np\n'), ((1179, 1213), 'numpy.append', 'np.append', (['labels', '(-labels)'], {'axis': '(0)'}), '(labels, -labels, axis=0)\n', (1188, 1213), True, 'import numpy as np\n'), ((1418, 1472), 'sklearn.model_selection.train_test_split', 'split', (['features', 'labels'], {'test_size': '(0.1)', 'random_state': '(1)'}), '(features, labels, test_size=0.1, random_state=1)\n', (1423, 1472), True, 'from sklearn.model_selection import train_test_split as split\n'), ((839, 896), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'verbose': '(1)', 'save_best_only': '(True)'}), '(filepath, verbose=1, save_best_only=True)\n', (854, 896), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n'), ((904, 959), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'factor': '(0.1)', 'patience': '(3)', 'min_lr': '(1e-08)'}), '(factor=0.1, patience=3, min_lr=1e-08)\n', (921, 959), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n')]
|
#encoding=utf8
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import crf
import cws.BiLSTM as modelDef
from cws.data import Data
tf.app.flags.DEFINE_string('dict_path', 'data/your_dict.pkl', 'dict path')
tf.app.flags.DEFINE_string('train_data', 'data/your_train_data.pkl', 'train data path')
tf.app.flags.DEFINE_string('ckpt_path', 'checkpoint/cws.finetune.ckpt/', 'checkpoint path')
tf.app.flags.DEFINE_integer('embed_size', 256, 'embedding size')
tf.app.flags.DEFINE_integer('hidden_size', 512, 'hidden layer node number')
tf.app.flags.DEFINE_integer('batch_size', 128, 'batch size')
tf.app.flags.DEFINE_integer('epoch', 20, 'training epoch')
tf.app.flags.DEFINE_float('lr', 0.001, 'learning rate')
tf.app.flags.DEFINE_string('save_path','checkpoint/cws.ckpt/','new model save path')
FLAGS = tf.app.flags.FLAGS
class BiLSTMTrain(object):
def __init__(self, data_train=None, data_valid=None, data_test=None, model=None):
self.data_train = data_train
self.data_valid = data_valid
self.data_test = data_test
self.model = model
def train(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
## finetune ##
# ckpt = tf.train.latest_checkpoint(FLAGS.ckpt_path)
# saver = tf.train.Saver()
# saver.restore(sess, ckpt)
# print('-->finetune the ckeckpoint:'+ckpt+'...')
##############
max_epoch = 10
tr_batch_size = FLAGS.batch_size
max_max_epoch = FLAGS.epoch # Max epoch
        display_num = 10 # Display progress 10 times per epoch
tr_batch_num = int(self.data_train.y.shape[0] / tr_batch_size)
display_batch = int(tr_batch_num / display_num)
saver = tf.train.Saver(max_to_keep=10)
for epoch in range(max_max_epoch):
_lr = FLAGS.lr
if epoch > max_epoch:
_lr = 0.0002
print('EPOCH %d, lr=%g' % (epoch + 1, _lr))
start_time = time.time()
_losstotal = 0.0
show_loss = 0.0
for batch in range(tr_batch_num):
fetches = [self.model.loss, self.model.train_op]
X_batch, y_batch = self.data_train.next_batch(tr_batch_size)
feed_dict = {self.model.X_inputs: X_batch, self.model.y_inputs: y_batch, self.model.lr: _lr,
self.model.batch_size: tr_batch_size,
self.model.keep_prob: 0.5}
_loss, _ = sess.run(fetches, feed_dict)
_losstotal += _loss
show_loss += _loss
if (batch + 1) % display_batch == 0:
valid_acc = self.test_epoch(self.data_valid, sess) # valid
print('\ttraining loss=%g ; valid acc= %g ' % (show_loss / display_batch,
valid_acc))
show_loss = 0.0
mean_loss = _losstotal / (tr_batch_num + 0.000001)
if (epoch + 1) % 1 == 0: # Save once per epoch
save_path = saver.save(sess, self.model.model_save_path+'_plus', global_step=(epoch + 1))
print('the save path is ', save_path)
print('\ttraining %d, loss=%g ' % (self.data_train.y.shape[0], mean_loss))
print('Epoch training %d, loss=%g, speed=%g s/epoch' % (
self.data_train.y.shape[0], mean_loss, time.time() - start_time))
# testing
print('**TEST RESULT:')
test_acc = self.test_epoch(self.data_test, sess)
print('**Test %d, acc=%g' % (self.data_test.y.shape[0], test_acc))
sess.close()
def test_epoch(self, dataset=None, sess=None):
_batch_size = FLAGS.batch_size
_y = dataset.y
data_size = _y.shape[0]
batch_num = int(data_size / _batch_size)
correct_labels = 0
total_labels = 0
fetches = [self.model.scores, self.model.length, self.model.transition_params]
for i in range(batch_num):
X_batch, y_batch = dataset.next_batch(_batch_size)
feed_dict = {self.model.X_inputs: X_batch, self.model.y_inputs: y_batch, self.model.lr: 1e-5,
self.model.batch_size: _batch_size,
self.model.keep_prob: 1.0}
test_score, test_length, transition_params = sess.run(fetches=fetches,
feed_dict=feed_dict)
#print(test_score)
#print(test_length)
for tf_unary_scores_, y_, sequence_length_ in zip(
test_score, y_batch, test_length):
tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
y_ = y_[:sequence_length_]
viterbi_sequence, _ = crf.viterbi_decode(
tf_unary_scores_, transition_params)
correct_labels += np.sum(np.equal(viterbi_sequence, y_))
total_labels += sequence_length_
accuracy = correct_labels / float(total_labels)
return accuracy
def main(_):
Data_ = Data(dict_path=FLAGS.dict_path, train_data=FLAGS.train_data)
print('Corpus loading completed:', FLAGS.train_data)
data_train, data_valid, data_test = Data_.builderTrainData()
    print('The training, validation, and test set split is complete!')
model = modelDef.BiLSTMModel(max_len=Data_.max_len,
vocab_size=Data_.word2id.__len__()+1,
class_num= Data_.tag2id.__len__(),
model_save_path=FLAGS.save_path,
embed_size=FLAGS.embed_size,
hs=FLAGS.hidden_size)
print('Model definition completed!')
train = BiLSTMTrain(data_train, data_valid, data_test, model)
train.train()
print('Model training completed!')
if __name__ == '__main__':
tf.app.run()
|
[
"tensorflow.app.flags.DEFINE_float",
"cws.data.Data",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.app.flags.DEFINE_string",
"numpy.equal",
"tensorflow.global_variables_initializer",
"tensorflow.ConfigProto",
"tensorflow.contrib.crf.viterbi_decode",
"time.time",
"tensorflow.app.run"
] |
[((172, 246), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dict_path"""', '"""data/your_dict.pkl"""', '"""dict path"""'], {}), "('dict_path', 'data/your_dict.pkl', 'dict path')\n", (198, 246), True, 'import tensorflow as tf\n'), ((248, 339), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_data"""', '"""data/your_train_data.pkl"""', '"""train data path"""'], {}), "('train_data', 'data/your_train_data.pkl',\n 'train data path')\n", (274, 339), True, 'import tensorflow as tf\n'), ((337, 432), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""ckpt_path"""', '"""checkpoint/cws.finetune.ckpt/"""', '"""checkpoint path"""'], {}), "('ckpt_path', 'checkpoint/cws.finetune.ckpt/',\n 'checkpoint path')\n", (363, 432), True, 'import tensorflow as tf\n'), ((430, 494), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""embed_size"""', '(256)', '"""embedding size"""'], {}), "('embed_size', 256, 'embedding size')\n", (457, 494), True, 'import tensorflow as tf\n'), ((496, 571), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""hidden_size"""', '(512)', '"""hidden layer node number"""'], {}), "('hidden_size', 512, 'hidden layer node number')\n", (523, 571), True, 'import tensorflow as tf\n'), ((573, 633), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(128)', '"""batch size"""'], {}), "('batch_size', 128, 'batch size')\n", (600, 633), True, 'import tensorflow as tf\n'), ((635, 693), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""epoch"""', '(20)', '"""training epoch"""'], {}), "('epoch', 20, 'training epoch')\n", (662, 693), True, 'import tensorflow as tf\n'), ((695, 750), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""lr"""', '(0.001)', '"""learning rate"""'], {}), "('lr', 0.001, 'learning rate')\n", (720, 750), True, 'import tensorflow as tf\n'), ((752, 842), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""save_path"""', '"""checkpoint/cws.ckpt/"""', '"""new model save path"""'], {}), "('save_path', 'checkpoint/cws.ckpt/',\n 'new model save path')\n", (778, 842), True, 'import tensorflow as tf\n'), ((5400, 5460), 'cws.data.Data', 'Data', ([], {'dict_path': 'FLAGS.dict_path', 'train_data': 'FLAGS.train_data'}), '(dict_path=FLAGS.dict_path, train_data=FLAGS.train_data)\n', (5404, 5460), False, 'from cws.data import Data\n'), ((6262, 6274), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (6272, 6274), True, 'import tensorflow as tf\n'), ((1175, 1191), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1189, 1191), True, 'import tensorflow as tf\n'), ((1256, 1281), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1266, 1281), True, 'import tensorflow as tf\n'), ((1887, 1917), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(10)'}), '(max_to_keep=10)\n', (1901, 1917), True, 'import tensorflow as tf\n'), ((1300, 1333), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1331, 1333), True, 'import tensorflow as tf\n'), ((2141, 2152), 'time.time', 'time.time', ([], {}), '()\n', (2150, 2152), False, 'import time\n'), ((5065, 5120), 'tensorflow.contrib.crf.viterbi_decode', 'crf.viterbi_decode', (['tf_unary_scores_', 'transition_params'], {}), '(tf_unary_scores_, transition_params)\n', (5083, 5120), False, 'from tensorflow.contrib import 
crf\n'), ((5203, 5233), 'numpy.equal', 'np.equal', (['viterbi_sequence', 'y_'], {}), '(viterbi_sequence, y_)\n', (5211, 5233), True, 'import numpy as np\n'), ((3633, 3644), 'time.time', 'time.time', ([], {}), '()\n', (3642, 3644), False, 'import time\n')]
|
"""
Implements MissSVM
"""
from __future__ import print_function, division
import numpy as np
import scipy.sparse as sp
from random import uniform
import inspect
from misvm.quadprog import IterativeQP, Objective
from misvm.util import BagSplitter, spdiag, slices
from misvm.kernel import by_name as kernel_by_name
from misvm.mica import MICA
from misvm.cccp import CCCP
class MissSVM(MICA):
"""
Semi-supervised learning applied to MI data (Zhou & Xu 2007)
"""
def __init__(self, alpha=1e4, **kwargs):
"""
@param kernel : the desired kernel function; can be linear, quadratic,
polynomial, or rbf [default: linear]
@param C : the loss/regularization tradeoff constant [default: 1.0]
@param scale_C : if True [default], scale C by the number of examples
@param p : polynomial degree when a 'polynomial' kernel is used
[default: 3]
@param gamma : RBF scale parameter when an 'rbf' kernel is used
[default: 1.0]
@param verbose : print optimization status messages [default: True]
@param sv_cutoff : the numerical cutoff for an example to be considered
a support vector [default: 1e-7]
@param restarts : the number of random restarts [default: 0]
@param max_iters : the maximum number of iterations in the outer loop of
the optimization procedure [default: 50]
@param alpha : the softmax parameter [default: 1e4]
"""
self.alpha = alpha
super(MissSVM, self).__init__(**kwargs)
self._bags = None
self._sv_bags = None
self._bag_predictions = None
def fit(self, bags, y):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@param y : an array-like object of length n containing -1/+1 labels
"""
self._bags = map(np.asmatrix, bags)
bs = BagSplitter(self._bags,
np.asmatrix(y).reshape((-1, 1)))
self._X = np.vstack([bs.pos_instances,
bs.pos_instances,
bs.pos_instances,
bs.neg_instances])
self._y = np.vstack([np.matrix(np.ones((bs.X_p + bs.L_p, 1))),
-np.matrix(np.ones((bs.L_p + bs.L_n, 1)))])
if self.scale_C:
C = self.C / float(len(self._bags))
else:
C = self.C
# Setup SVM and adjust constraints
_, _, f, A, b, lb, ub = self._setup_svm(self._y, self._y, C)
ub[:bs.X_p] *= (float(bs.L_n) / float(bs.X_p))
ub[bs.X_p: bs.X_p + 2 * bs.L_p] *= (float(bs.L_n) / float(bs.L_p))
K = kernel_by_name(self.kernel, gamma=self.gamma, p=self.p)(self._X, self._X)
D = spdiag(self._y)
ub0 = np.matrix(ub)
ub0[bs.X_p: bs.X_p + 2 * bs.L_p] *= 0.5
def get_V(pos_classifications):
eye_n = bs.L_n + 2 * bs.L_p
top = np.zeros((bs.X_p, bs.L_p))
for row, (i, j) in enumerate(slices(bs.pos_groups)):
top[row, i:j] = _grad_softmin(-pos_classifications[i:j], self.alpha).flat
return sp.bmat([[sp.coo_matrix(top), None],
[None, sp.eye(eye_n, eye_n)]])
V0 = get_V(np.matrix(np.zeros((bs.L_p, 1))))
qp = IterativeQP(D * V0 * K * V0.T * D, f, A, b, lb, ub0)
best_obj = float('inf')
best_svm = None
for rr in range(self.restarts + 1):
if rr == 0:
if self.verbose:
print('Non-random start...')
# Train on instances
alphas, obj = qp.solve(self.verbose)
else:
if self.verbose:
print('Random restart %d of %d...' % (rr, self.restarts))
alphas = np.matrix([uniform(0.0, 1.0) for i in range(len(lb))]).T
obj = Objective(0.0, 0.0)
svm = MICA(kernel=self.kernel, gamma=self.gamma, p=self.p,
verbose=self.verbose, sv_cutoff=self.sv_cutoff)
svm._X = self._X
svm._y = self._y
svm._V = V0
svm._alphas = alphas
svm._objective = obj
svm._compute_separator(K)
svm._K = K
class missCCCP(CCCP):
def bailout(cself, svm, obj_val):
return svm
def iterate(cself, svm, obj_val):
cself.mention('Linearizing constraints...')
classifications = svm._predictions[bs.X_p: bs.X_p + bs.L_p]
V = get_V(classifications)
cself.mention('Computing slacks...')
# Difference is [1 - y_i*(w*phi(x_i) + b)]
pos_differences = 1.0 - classifications
neg_differences = 1.0 + classifications
# Slacks are positive differences only
pos_slacks = np.multiply(pos_differences > 0, pos_differences)
neg_slacks = np.multiply(neg_differences > 0, neg_differences)
all_slacks = np.hstack([pos_slacks, neg_slacks])
cself.mention('Linearizing...')
# Compute gradient across pairs
slack_grads = np.vstack([_grad_softmin(pair, self.alpha)
for pair in all_slacks])
# Stack results into one column
slack_grads = np.vstack([np.ones((bs.X_p, 1)),
slack_grads[:, 0],
slack_grads[:, 1],
np.ones((bs.L_n, 1))])
# Update QP
qp.update_H(D * V * K * V.T * D)
qp.update_ub(np.multiply(ub, slack_grads))
# Re-solve
cself.mention('Solving QP...')
alphas, obj = qp.solve(self.verbose)
new_svm = MICA(kernel=self.kernel, gamma=self.gamma, p=self.p,
verbose=self.verbose, sv_cutoff=self.sv_cutoff)
new_svm._X = self._X
new_svm._y = self._y
new_svm._V = V
new_svm._alphas = alphas
new_svm._objective = obj
new_svm._compute_separator(K)
new_svm._K = K
if cself.check_tolerance(obj_val, obj):
return None, new_svm
return {'svm': new_svm, 'obj_val': obj}, None
cccp = missCCCP(verbose=self.verbose, svm=svm, obj_val=None,
max_iters=self.max_iters)
svm = cccp.solve()
if svm is not None:
obj = float(svm._objective)
if obj < best_obj:
best_svm = svm
best_obj = obj
if best_svm is not None:
self._V = best_svm._V
self._alphas = best_svm._alphas
self._objective = best_svm._objective
self._compute_separator(best_svm._K)
self._bag_predictions = self.predict(self._bags)
def get_params(self, deep=True):
super_args = super(MissSVM, self).get_params()
args, _, _, _ = inspect.getargspec(MissSVM.__init__)
args.pop(0)
super_args.update({key: getattr(self, key, None) for key in args})
return super_args
def _grad_softmin(x, alpha=1e4):
"""
Computes the gradient of min function,
taken from gradient of softmin as
alpha goes to infinity. It is:
0 if x_i != min(x), or
1/n if x_i is one of the n
elements equal to min(x)
"""
grad = np.matrix(np.zeros(x.shape))
minimizers = (x == min(x.flat))
n = float(np.sum(minimizers))
grad[np.nonzero(minimizers)] = 1.0 / n
return grad
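# Worked example (illustrative): for x = np.matrix([[3.], [1.], [1.]]), the minimum 1.0
# is attained by two entries, so _grad_softmin(x) returns matrix([[0.], [0.5], [0.5]]).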
|
[
"misvm.quadprog.IterativeQP",
"numpy.hstack",
"numpy.asmatrix",
"numpy.multiply",
"misvm.util.spdiag",
"misvm.quadprog.Objective",
"scipy.sparse.eye",
"numpy.vstack",
"misvm.mica.MICA",
"scipy.sparse.coo_matrix",
"misvm.util.slices",
"misvm.kernel.by_name",
"random.uniform",
"numpy.ones",
"numpy.nonzero",
"inspect.getargspec",
"numpy.sum",
"numpy.zeros",
"numpy.matrix"
] |
[((2143, 2231), 'numpy.vstack', 'np.vstack', (['[bs.pos_instances, bs.pos_instances, bs.pos_instances, bs.neg_instances]'], {}), '([bs.pos_instances, bs.pos_instances, bs.pos_instances, bs.\n neg_instances])\n', (2152, 2231), True, 'import numpy as np\n'), ((2909, 2924), 'misvm.util.spdiag', 'spdiag', (['self._y'], {}), '(self._y)\n', (2915, 2924), False, 'from misvm.util import BagSplitter, spdiag, slices\n'), ((2939, 2952), 'numpy.matrix', 'np.matrix', (['ub'], {}), '(ub)\n', (2948, 2952), True, 'import numpy as np\n'), ((3465, 3517), 'misvm.quadprog.IterativeQP', 'IterativeQP', (['(D * V0 * K * V0.T * D)', 'f', 'A', 'b', 'lb', 'ub0'], {}), '(D * V0 * K * V0.T * D, f, A, b, lb, ub0)\n', (3476, 3517), False, 'from misvm.quadprog import IterativeQP, Objective\n'), ((7528, 7564), 'inspect.getargspec', 'inspect.getargspec', (['MissSVM.__init__'], {}), '(MissSVM.__init__)\n', (7546, 7564), False, 'import inspect\n'), ((7967, 7984), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (7975, 7984), True, 'import numpy as np\n'), ((8036, 8054), 'numpy.sum', 'np.sum', (['minimizers'], {}), '(minimizers)\n', (8042, 8054), True, 'import numpy as np\n'), ((8065, 8087), 'numpy.nonzero', 'np.nonzero', (['minimizers'], {}), '(minimizers)\n', (8075, 8087), True, 'import numpy as np\n'), ((2823, 2878), 'misvm.kernel.by_name', 'kernel_by_name', (['self.kernel'], {'gamma': 'self.gamma', 'p': 'self.p'}), '(self.kernel, gamma=self.gamma, p=self.p)\n', (2837, 2878), True, 'from misvm.kernel import by_name as kernel_by_name\n'), ((3100, 3126), 'numpy.zeros', 'np.zeros', (['(bs.X_p, bs.L_p)'], {}), '((bs.X_p, bs.L_p))\n', (3108, 3126), True, 'import numpy as np\n'), ((4086, 4190), 'misvm.mica.MICA', 'MICA', ([], {'kernel': 'self.kernel', 'gamma': 'self.gamma', 'p': 'self.p', 'verbose': 'self.verbose', 'sv_cutoff': 'self.sv_cutoff'}), '(kernel=self.kernel, gamma=self.gamma, p=self.p, verbose=self.verbose,\n sv_cutoff=self.sv_cutoff)\n', (4090, 4190), False, 'from misvm.mica import MICA\n'), ((3168, 3189), 'misvm.util.slices', 'slices', (['bs.pos_groups'], {}), '(bs.pos_groups)\n', (3174, 3189), False, 'from misvm.util import BagSplitter, spdiag, slices\n'), ((3427, 3448), 'numpy.zeros', 'np.zeros', (['(bs.L_p, 1)'], {}), '((bs.L_p, 1))\n', (3435, 3448), True, 'import numpy as np\n'), ((4048, 4067), 'misvm.quadprog.Objective', 'Objective', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (4057, 4067), False, 'from misvm.quadprog import IterativeQP, Objective\n'), ((2092, 2106), 'numpy.asmatrix', 'np.asmatrix', (['y'], {}), '(y)\n', (2103, 2106), True, 'import numpy as np\n'), ((2353, 2382), 'numpy.ones', 'np.ones', (['(bs.X_p + bs.L_p, 1)'], {}), '((bs.X_p + bs.L_p, 1))\n', (2360, 2382), True, 'import numpy as np\n'), ((5111, 5160), 'numpy.multiply', 'np.multiply', (['(pos_differences > 0)', 'pos_differences'], {}), '(pos_differences > 0, pos_differences)\n', (5122, 5160), True, 'import numpy as np\n'), ((5194, 5243), 'numpy.multiply', 'np.multiply', (['(neg_differences > 0)', 'neg_differences'], {}), '(neg_differences > 0, neg_differences)\n', (5205, 5243), True, 'import numpy as np\n'), ((5277, 5312), 'numpy.hstack', 'np.hstack', (['[pos_slacks, neg_slacks]'], {}), '([pos_slacks, neg_slacks])\n', (5286, 5312), True, 'import numpy as np\n'), ((6198, 6302), 'misvm.mica.MICA', 'MICA', ([], {'kernel': 'self.kernel', 'gamma': 'self.gamma', 'p': 'self.p', 'verbose': 'self.verbose', 'sv_cutoff': 'self.sv_cutoff'}), '(kernel=self.kernel, gamma=self.gamma, p=self.p, verbose=self.verbose,\n 
sv_cutoff=self.sv_cutoff)\n', (6202, 6302), False, 'from misvm.mica import MICA\n'), ((2425, 2454), 'numpy.ones', 'np.ones', (['(bs.L_p + bs.L_n, 1)'], {}), '((bs.L_p + bs.L_n, 1))\n', (2432, 2454), True, 'import numpy as np\n'), ((3311, 3329), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['top'], {}), '(top)\n', (3324, 3329), True, 'import scipy.sparse as sp\n'), ((3373, 3393), 'scipy.sparse.eye', 'sp.eye', (['eye_n', 'eye_n'], {}), '(eye_n, eye_n)\n', (3379, 3393), True, 'import scipy.sparse as sp\n'), ((5998, 6026), 'numpy.multiply', 'np.multiply', (['ub', 'slack_grads'], {}), '(ub, slack_grads)\n', (6009, 6026), True, 'import numpy as np\n'), ((3980, 3997), 'random.uniform', 'uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3987, 3997), False, 'from random import uniform\n'), ((5662, 5682), 'numpy.ones', 'np.ones', (['(bs.X_p, 1)'], {}), '((bs.X_p, 1))\n', (5669, 5682), True, 'import numpy as np\n'), ((5857, 5877), 'numpy.ones', 'np.ones', (['(bs.L_n, 1)'], {}), '((bs.L_n, 1))\n', (5864, 5877), True, 'import numpy as np\n')]
|
import numpy as np
def trapezoidal_rule(f, a, b, tol=1e-8):
"""
    The trapezoidal rule is known to be very accurate (spectrally accurate) for
    smooth periodic integrands integrated over a full period.
    See papers on spectral integration (it's just the composite trapezoidal rule....)
TODO (aaron): f is memoized to get the already computed points quickly.
Ideally, we should put this into a C++ function and call it with Cython. (Maybe someday)
"""
# endpoints first:
num = 2
dx = b - a
res0 = 1e30
res1 = 0.5 * dx * (f(b) + f(a))
delta_res = res0 - res1
re_err = np.abs(np.real(delta_res))
im_err = np.abs(np.imag(delta_res))
while re_err > tol or im_err > tol:
res0 = res1
num = 2 * num - 1
# print(num)
x = np.linspace(a, b, num=num)
res = 0
dx = (x[1] - x[0])
res += f(x[0])
for i in range(1, len(x) - 1):
res += 2 * f(x[i])
res += f(x[-1])
res1 = 0.5 * dx * res
delta_res = res1 - res0
re_err = np.abs(np.real(delta_res))
im_err = np.abs(np.imag(delta_res))
if num > 100000:
print('Integral failed to converge with', num, 'points.')
return np.nan, np.nan, np.nan
return res1, re_err, im_err
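if __name__ == "__main__":
    # Hypothetical usage sketch: a smooth periodic integrand over one full period.
    # Analytically, the integral of 1/(2 + cos(t)) over [0, 2*pi] equals 2*pi/sqrt(3).
    value, re_err, im_err = trapezoidal_rule(lambda t: 1.0 / (2.0 + np.cos(t)), 0.0, 2.0 * np.pi)
    print(value, 2.0 * np.pi / np.sqrt(3.0), re_err, im_err)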
|
[
"numpy.real",
"numpy.linspace",
"numpy.imag"
] |
[((599, 617), 'numpy.real', 'np.real', (['delta_res'], {}), '(delta_res)\n', (606, 617), True, 'import numpy as np\n'), ((639, 657), 'numpy.imag', 'np.imag', (['delta_res'], {}), '(delta_res)\n', (646, 657), True, 'import numpy as np\n'), ((778, 804), 'numpy.linspace', 'np.linspace', (['a', 'b'], {'num': 'num'}), '(a, b, num=num)\n', (789, 804), True, 'import numpy as np\n'), ((1052, 1070), 'numpy.real', 'np.real', (['delta_res'], {}), '(delta_res)\n', (1059, 1070), True, 'import numpy as np\n'), ((1096, 1114), 'numpy.imag', 'np.imag', (['delta_res'], {}), '(delta_res)\n', (1103, 1114), True, 'import numpy as np\n')]
|
"""
January 13th 2020
Author T.Mizumoto
"""
#! python 3
# ver.x1.00
# Integral-Scale_function.py - this program calculates the integral scale and correlation.
import numpy as np
from scipy.integrate import simps
from scipy.stats import pearsonr
import pandas as pd
# index_basepoint = 0 (default)
def fun_CrossCorr(data, index_basepoint):
alpha = data[:, index_basepoint]
cross_corretion = []
p_value = []
matrix_num = data.shape
point_num = int(matrix_num[1])
for i in range(point_num):
line = data[:, i]
cc, p = pearsonr(alpha, line)
cross_corretion.append(cc)
p_value.append(p)
df_CC = pd.DataFrame(columns = ["CrossCorrelation", "Pvalue"])
df_CC["CrossCorrelation"] = cross_corretion
df_CC["Pvalue"] = p_value
return df_CC
def fun_IntegralScale(correlation, distance):
# find the first negative point
minus = np.where(correlation < 0)
first_minus = minus[0][0]
    # extract positive points
corr_plus = list(correlation[:first_minus])
dis_plus = distance[:first_minus]
complement = (distance[first_minus + 1] - distance[first_minus]) / 2 + distance[first_minus]
corr_plus.append(0.0)
dis_plus.append(complement)
# integrate
integral = simps(corr_plus, dis_plus)
return integral
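# Note: the integral (length) scale is the integral of the correlation coefficient over
# separation distance; truncating the integration at the first zero crossing of the
# correlation, as done above, is a common convention.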
if __name__ == "__main__":
from graph import Graph
import matplotlib.pyplot as plt
# read data
data_path = "HISTORY/z-traverse_2-1-0_MeasureData.txt"
data = np.loadtxt(data_path)
coord_path = "HISTORY/z-traverse_2-1-0_Coordinate.txt"
coord = np.loadtxt(coord_path)
point = [0, 161, 322, 483, 644, 805, 966, 1127, 1288, 1449]
name = ["X2-MVD", "X2-RMS1", "X2-RMS2", "X1-MVD", "X1-RMS1", "X1-RMS2", "X0-MVD", "X0-RMS1", "X0-RMS2"]
IS_list = []
for i in range(9):
pstart = point[i]
pend = point[i + 1]
# calculate CrossCorrelation
df_CC = fun_CrossCorr(data[:, pstart:pend], 0)
# only z-traverse
z_axis = coord[pstart:pend, 2]
distance = []
for j in z_axis:
diff = j - z_axis[0]
distance.append(diff)
IS = fun_IntegralScale(df_CC["CrossCorrelation"], distance)
IS_list.append(IS)
g = Graph()
g.label = ["CrossCorrelation", "Pvalue"]
g.line(distance, df_CC["CrossCorrelation"], 0)
g.line(distance, df_CC["Pvalue"], 1)
plt.legend(title = "IS = " + str(IS))
g.save_graph("graph/Z-traverse/" + name[i])
print(IS_list)
|
[
"numpy.where",
"scipy.integrate.simps",
"graph.Graph",
"scipy.stats.pearsonr",
"pandas.DataFrame",
"numpy.loadtxt"
] |
[((660, 712), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['CrossCorrelation', 'Pvalue']"}), "(columns=['CrossCorrelation', 'Pvalue'])\n", (672, 712), True, 'import pandas as pd\n'), ((906, 931), 'numpy.where', 'np.where', (['(correlation < 0)'], {}), '(correlation < 0)\n', (914, 931), True, 'import numpy as np\n'), ((1270, 1296), 'scipy.integrate.simps', 'simps', (['corr_plus', 'dis_plus'], {}), '(corr_plus, dis_plus)\n', (1275, 1296), False, 'from scipy.integrate import simps\n'), ((1497, 1518), 'numpy.loadtxt', 'np.loadtxt', (['data_path'], {}), '(data_path)\n', (1507, 1518), True, 'import numpy as np\n'), ((1590, 1612), 'numpy.loadtxt', 'np.loadtxt', (['coord_path'], {}), '(coord_path)\n', (1600, 1612), True, 'import numpy as np\n'), ((565, 586), 'scipy.stats.pearsonr', 'pearsonr', (['alpha', 'line'], {}), '(alpha, line)\n', (573, 586), False, 'from scipy.stats import pearsonr\n'), ((2260, 2267), 'graph.Graph', 'Graph', ([], {}), '()\n', (2265, 2267), False, 'from graph import Graph\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from utils import get_state_vowel
class HopfieldNetwork:
"""
Creates a Hopfield Network.
"""
def __init__(self, patterns):
"""
Initializes the network.
Args:
patterns (np.array): Group of states to be memorized by the network.
"""
self.num_units = patterns.shape[1]
self.passes = 0
self.state_units = np.array([1 if 2 * np.random.random() - 1 >= 0 else 0 for _ in range(self.num_units)])
self.W = np.zeros((self.num_units, self.num_units))
        # Hebbian learning rule: sum of outer products of the bipolar (+/-1) patterns
        self.W += np.dot(np.transpose(2 * patterns - 1), 2 * patterns - 1)
np.fill_diagonal(self.W, 0)
self.energy = [-0.5 * np.dot(np.dot(self.state_units.T, self.W), self.state_units)]
def _generate_sequence_units(self):
""" Selects randomly the order to update states in the next iteration."""
return np.random.choice(self.num_units, self.num_units)
def run(self):
""" Runs the network until no updates occur. """
no_update = True
while True:
for unit in self._generate_sequence_units():
unit_activation = np.dot(self.W[unit, :], self.state_units)
if unit_activation >= 0 and self.state_units[unit] == 0:
self.state_units[unit] = 1
no_update = False
elif unit_activation < 0 and self.state_units[unit] == 1:
self.state_units[unit] = 0
no_update = False
self.energy.append(-0.5 * np.dot(np.dot(self.state_units.T, self.W), self.state_units))
self.passes += 1
if no_update:
break
else:
no_update = True
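# Note on HopfieldNetwork.run(): with symmetric weights and a zero diagonal, each
# asynchronous update never increases the network energy, so the dynamics settle
# into a (local) energy minimum and the loop above terminates.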
def main():
np.random.seed(1234)
patterns = np.array([get_state_vowel('A'),
get_state_vowel('E'),
get_state_vowel('I'),
get_state_vowel('O'),
get_state_vowel('U')])
net = HopfieldNetwork(patterns)
net.run()
# Plot patterns and output
plt.figure(figsize=(6, 3), tight_layout=True)
plt.subplot(2, 3, 1)
plt.imshow(np.reshape(patterns[0, :], (5, 5)), cmap="Greys_r")
plt.title("A")
plt.subplot(2, 3, 2)
plt.imshow(np.reshape(patterns[1, :], (5, 5)), cmap="Greys_r")
plt.title("E")
plt.subplot(2, 3, 3)
plt.imshow(np.reshape(patterns[2, :], (5, 5)), cmap="Greys_r")
plt.title("I")
plt.subplot(2, 3, 4)
plt.imshow(np.reshape(patterns[3, :], (5, 5)), cmap="Greys_r")
plt.title("O")
plt.subplot(2, 3, 5)
plt.imshow(np.reshape(patterns[4, :], (5, 5)), cmap="Greys_r")
plt.title("U")
plt.subplot(2, 3, 6)
plt.imshow(np.reshape(net.state_units, (5, 5)), cmap="Greys_r")
plt.title("Output")
# Plot energy over time
plt.figure(figsize=(4, 2))
plt.plot(net.energy)
plt.title("Energy")
plt.show()
if __name__ == "__main__":
main()
|
[
"numpy.reshape",
"numpy.random.choice",
"numpy.random.random",
"matplotlib.pyplot.plot",
"utils.get_state_vowel",
"numpy.fill_diagonal",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.dot",
"numpy.random.seed",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((1840, 1860), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (1854, 1860), True, 'import numpy as np\n'), ((2183, 2228), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)', 'tight_layout': '(True)'}), '(figsize=(6, 3), tight_layout=True)\n', (2193, 2228), True, 'import matplotlib.pyplot as plt\n'), ((2233, 2253), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (2244, 2253), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2339), 'matplotlib.pyplot.title', 'plt.title', (['"""A"""'], {}), "('A')\n", (2334, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2364), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (2355, 2364), True, 'import matplotlib.pyplot as plt\n'), ((2436, 2450), 'matplotlib.pyplot.title', 'plt.title', (['"""E"""'], {}), "('E')\n", (2445, 2450), True, 'import matplotlib.pyplot as plt\n'), ((2455, 2475), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (2466, 2475), True, 'import matplotlib.pyplot as plt\n'), ((2547, 2561), 'matplotlib.pyplot.title', 'plt.title', (['"""I"""'], {}), "('I')\n", (2556, 2561), True, 'import matplotlib.pyplot as plt\n'), ((2566, 2586), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (2577, 2586), True, 'import matplotlib.pyplot as plt\n'), ((2658, 2672), 'matplotlib.pyplot.title', 'plt.title', (['"""O"""'], {}), "('O')\n", (2667, 2672), True, 'import matplotlib.pyplot as plt\n'), ((2677, 2697), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (2688, 2697), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2783), 'matplotlib.pyplot.title', 'plt.title', (['"""U"""'], {}), "('U')\n", (2778, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2788, 2808), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (2799, 2808), True, 'import matplotlib.pyplot as plt\n'), ((2881, 2900), 'matplotlib.pyplot.title', 'plt.title', (['"""Output"""'], {}), "('Output')\n", (2890, 2900), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2960), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 2)'}), '(figsize=(4, 2))\n', (2944, 2960), True, 'import matplotlib.pyplot as plt\n'), ((2965, 2985), 'matplotlib.pyplot.plot', 'plt.plot', (['net.energy'], {}), '(net.energy)\n', (2973, 2985), True, 'import matplotlib.pyplot as plt\n'), ((2990, 3009), 'matplotlib.pyplot.title', 'plt.title', (['"""Energy"""'], {}), "('Energy')\n", (2999, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3014, 3024), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3022, 3024), True, 'import matplotlib.pyplot as plt\n'), ((544, 586), 'numpy.zeros', 'np.zeros', (['(self.num_units, self.num_units)'], {}), '((self.num_units, self.num_units))\n', (552, 586), True, 'import numpy as np\n'), ((711, 738), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.W', '(0)'], {}), '(self.W, 0)\n', (727, 738), True, 'import numpy as np\n'), ((969, 1017), 'numpy.random.choice', 'np.random.choice', (['self.num_units', 'self.num_units'], {}), '(self.num_units, self.num_units)\n', (985, 1017), True, 'import numpy as np\n'), ((2269, 2303), 'numpy.reshape', 'np.reshape', (['patterns[0, :]', '(5, 5)'], {}), '(patterns[0, :], (5, 5))\n', (2279, 2303), True, 'import numpy as np\n'), ((2380, 2414), 'numpy.reshape', 'np.reshape', (['patterns[1, :]', '(5, 5)'], {}), '(patterns[1, :], (5, 5))\n', (2390, 2414), True, 
'import numpy as np\n'), ((2491, 2525), 'numpy.reshape', 'np.reshape', (['patterns[2, :]', '(5, 5)'], {}), '(patterns[2, :], (5, 5))\n', (2501, 2525), True, 'import numpy as np\n'), ((2602, 2636), 'numpy.reshape', 'np.reshape', (['patterns[3, :]', '(5, 5)'], {}), '(patterns[3, :], (5, 5))\n', (2612, 2636), True, 'import numpy as np\n'), ((2713, 2747), 'numpy.reshape', 'np.reshape', (['patterns[4, :]', '(5, 5)'], {}), '(patterns[4, :], (5, 5))\n', (2723, 2747), True, 'import numpy as np\n'), ((2824, 2859), 'numpy.reshape', 'np.reshape', (['net.state_units', '(5, 5)'], {}), '(net.state_units, (5, 5))\n', (2834, 2859), True, 'import numpy as np\n'), ((1886, 1906), 'utils.get_state_vowel', 'get_state_vowel', (['"""A"""'], {}), "('A')\n", (1901, 1906), False, 'from utils import get_state_vowel\n'), ((1933, 1953), 'utils.get_state_vowel', 'get_state_vowel', (['"""E"""'], {}), "('E')\n", (1948, 1953), False, 'from utils import get_state_vowel\n'), ((1980, 2000), 'utils.get_state_vowel', 'get_state_vowel', (['"""I"""'], {}), "('I')\n", (1995, 2000), False, 'from utils import get_state_vowel\n'), ((2027, 2047), 'utils.get_state_vowel', 'get_state_vowel', (['"""O"""'], {}), "('O')\n", (2042, 2047), False, 'from utils import get_state_vowel\n'), ((2074, 2094), 'utils.get_state_vowel', 'get_state_vowel', (['"""U"""'], {}), "('U')\n", (2089, 2094), False, 'from utils import get_state_vowel\n'), ((649, 679), 'numpy.transpose', 'np.transpose', (['(2 * patterns - 1)'], {}), '(2 * patterns - 1)\n', (661, 679), True, 'import numpy as np\n'), ((1231, 1272), 'numpy.dot', 'np.dot', (['self.W[unit, :]', 'self.state_units'], {}), '(self.W[unit, :], self.state_units)\n', (1237, 1272), True, 'import numpy as np\n'), ((776, 810), 'numpy.dot', 'np.dot', (['self.state_units.T', 'self.W'], {}), '(self.state_units.T, self.W)\n', (782, 810), True, 'import numpy as np\n'), ((1639, 1673), 'numpy.dot', 'np.dot', (['self.state_units.T', 'self.W'], {}), '(self.state_units.T, self.W)\n', (1645, 1673), True, 'import numpy as np\n'), ((459, 477), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (475, 477), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import numpy as np
import onnx
from onnx import TensorProto, helper
from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type
from onnxruntime.quantization import QuantFormat, QuantType, quantize_static
class TestOpRelu(unittest.TestCase):
def input_feeds(self, n, name2shape):
input_data_list = []
for i in range(n):
inputs = {}
for name, shape in name2shape.items():
inputs.update({name: np.random.randint(-1, 2, shape).astype(np.float32)})
input_data_list.extend([inputs])
dr = TestDataFeeds(input_data_list)
return dr
def construct_model_gemm(self, output_model_path):
# (input)
# |
# Gemm
# |
# Relu
# |
# Gemm
# |
# (output)
input_name = "input"
output_name = "output"
initializers = []
def make_gemm(input_name, weight_shape, weight_name, bias_shape, bias_name, output_name):
weight_data = np.random.normal(0, 0.1, weight_shape).astype(np.float32)
initializers.append(onnx.numpy_helper.from_array(weight_data, name=weight_name))
bias_data = np.random.normal(0, 0.1, bias_shape).astype(np.float32)
initializers.append(onnx.numpy_helper.from_array(bias_data, name=bias_name))
return onnx.helper.make_node(
"Gemm",
[input_name, weight_name, bias_name],
[output_name],
alpha=1.0,
beta=1.0,
transB=1,
)
# make gemm1 node
gemm1_output_name = "gemm1_output"
gemm1_node = make_gemm(
input_name,
[100, 10],
"linear1.weight",
[100],
"linear1.bias",
gemm1_output_name,
)
# make Relu
relu_output = "relu_output"
relu_node = onnx.helper.make_node("Relu", [gemm1_output_name], [relu_output])
# make gemm2 node
gemm2_node = make_gemm(
relu_output,
[10, 100],
"linear2.weight",
[10],
"linear2.bias",
output_name,
)
# make graph
input_tensor = helper.make_tensor_value_info(input_name, TensorProto.FLOAT, [-1, 10])
output_tensor = helper.make_tensor_value_info(output_name, TensorProto.FLOAT, [-1, 10])
graph_name = "relu_test"
graph = helper.make_graph(
[gemm1_node, relu_node, gemm2_node],
graph_name,
[input_tensor],
[output_tensor],
initializer=initializers,
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
model.ir_version = onnx.IR_VERSION
onnx.save(model, output_model_path)
def static_quant_test(
self,
model_fp32_path,
data_reader,
activation_type,
weight_type,
extra_options={},
):
activation_proto_qtype = TensorProto.UINT8 if activation_type == QuantType.QUInt8 else TensorProto.INT8
activation_type_str = "u8" if (activation_type == QuantType.QUInt8) else "s8"
weight_type_str = "u8" if (weight_type == QuantType.QUInt8) else "s8"
model_int8_path = "relu_fp32.quant_{}{}.onnx".format(activation_type_str, weight_type_str)
data_reader.rewind()
quantize_static(
model_fp32_path,
model_int8_path,
data_reader,
quant_format=QuantFormat.QOperator,
activation_type=activation_type,
weight_type=weight_type,
extra_options=extra_options,
)
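        # Expected operator counts: with uint8 activations the Relu is expected to be
        # folded into the quantization parameters (no Relu node, a single Q/DQ pair),
        # while int8 activations keep the Relu and require an extra Q/DQ pair.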
qdq_count = 1 if activation_type == QuantType.QUInt8 else 2
relu_count = 0 if activation_type == QuantType.QUInt8 else 1
quant_nodes = {"QGemm": 2, "QuantizeLinear": qdq_count, "DequantizeLinear": qdq_count, "Relu": relu_count}
check_op_type_count(self, model_int8_path, **quant_nodes)
qnode_io_qtypes = {
"QuantizeLinear": [
["i", 2, activation_proto_qtype],
["o", 0, activation_proto_qtype],
]
}
qnode_io_qtypes.update({"DequantizeLinear": [["i", 2, activation_proto_qtype]]})
check_qtype_by_node_type(self, model_int8_path, qnode_io_qtypes)
data_reader.rewind()
check_model_correctness(self, model_fp32_path, model_int8_path, data_reader.get_next())
def static_quant_test_qdq(
self,
model_fp32_path,
data_reader,
activation_type,
weight_type,
extra_options={},
):
activation_proto_qtype = TensorProto.UINT8 if activation_type == QuantType.QUInt8 else TensorProto.INT8
activation_type_str = "u8" if (activation_type == QuantType.QUInt8) else "s8"
weight_type_str = "u8" if (weight_type == QuantType.QUInt8) else "s8"
model_int8_path = "relu_fp32.quant_dqd_{}{}.onnx".format(activation_type_str, weight_type_str)
data_reader.rewind()
quantize_static(
model_fp32_path,
model_int8_path,
data_reader,
quant_format=QuantFormat.QDQ,
activation_type=activation_type,
weight_type=weight_type,
extra_options=extra_options,
)
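        # In QDQ format explicit QuantizeLinear/DequantizeLinear pairs surround each
        # quantized operator, hence the larger Q/DQ counts checked below.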
relu_count = 0 if activation_type == QuantType.QUInt8 else 1
q_count = 3 if activation_type == QuantType.QUInt8 else 4
dq_count = 7 if activation_type == QuantType.QUInt8 else 8
quant_nodes = {"Gemm": 2, "QuantizeLinear": q_count, "DequantizeLinear": dq_count, "Relu": relu_count}
check_op_type_count(self, model_int8_path, **quant_nodes)
qnode_io_qtypes = {
"QuantizeLinear": [
["i", 2, activation_proto_qtype],
["o", 0, activation_proto_qtype],
]
}
check_qtype_by_node_type(self, model_int8_path, qnode_io_qtypes)
data_reader.rewind()
check_model_correctness(self, model_fp32_path, model_int8_path, data_reader.get_next())
def test_quantize_gemm(self):
np.random.seed(1)
model_fp32_path = "relu_fp32.onnx"
self.construct_model_gemm(model_fp32_path)
data_reader = self.input_feeds(1, {"input": [5, 10]})
self.static_quant_test(
model_fp32_path,
data_reader,
activation_type=QuantType.QUInt8,
weight_type=QuantType.QUInt8,
)
self.static_quant_test_qdq(
model_fp32_path,
data_reader,
activation_type=QuantType.QUInt8,
weight_type=QuantType.QUInt8,
)
def test_quantize_relu_s8s8(self):
np.random.seed(1)
model_fp32_path = "relu_fp32.onnx"
self.construct_model_gemm(model_fp32_path)
data_reader = self.input_feeds(1, {"input": [5, 10]})
self.static_quant_test(
model_fp32_path,
data_reader,
activation_type=QuantType.QInt8,
weight_type=QuantType.QInt8,
extra_options={"ActivationSymmetric": True},
)
self.static_quant_test_qdq(
model_fp32_path,
data_reader,
activation_type=QuantType.QInt8,
weight_type=QuantType.QInt8,
extra_options={"ActivationSymmetric": True},
)
if __name__ == "__main__":
unittest.main()
|
[
"onnx.helper.make_graph",
"onnxruntime.quantization.quantize_static",
"numpy.random.normal",
"onnx.save",
"onnx.helper.make_node",
"onnx.numpy_helper.from_array",
"onnx.helper.make_tensor_value_info",
"numpy.random.randint",
"numpy.random.seed",
"op_test_utils.TestDataFeeds",
"unittest.main",
"op_test_utils.check_qtype_by_node_type",
"op_test_utils.check_op_type_count",
"onnx.helper.make_opsetid"
] |
[((7923, 7938), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7936, 7938), False, 'import unittest\n'), ((984, 1014), 'op_test_utils.TestDataFeeds', 'TestDataFeeds', (['input_data_list'], {}), '(input_data_list)\n', (997, 1014), False, 'from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((2403, 2468), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Relu"""', '[gemm1_output_name]', '[relu_output]'], {}), "('Relu', [gemm1_output_name], [relu_output])\n", (2424, 2468), False, 'import onnx\n'), ((2732, 2802), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['input_name', 'TensorProto.FLOAT', '[-1, 10]'], {}), '(input_name, TensorProto.FLOAT, [-1, 10])\n', (2761, 2802), False, 'from onnx import TensorProto, helper\n'), ((2827, 2898), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['output_name', 'TensorProto.FLOAT', '[-1, 10]'], {}), '(output_name, TensorProto.FLOAT, [-1, 10])\n', (2856, 2898), False, 'from onnx import TensorProto, helper\n'), ((2948, 3078), 'onnx.helper.make_graph', 'helper.make_graph', (['[gemm1_node, relu_node, gemm2_node]', 'graph_name', '[input_tensor]', '[output_tensor]'], {'initializer': 'initializers'}), '([gemm1_node, relu_node, gemm2_node], graph_name, [\n input_tensor], [output_tensor], initializer=initializers)\n', (2965, 3078), False, 'from onnx import TensorProto, helper\n'), ((3283, 3318), 'onnx.save', 'onnx.save', (['model', 'output_model_path'], {}), '(model, output_model_path)\n', (3292, 3318), False, 'import onnx\n'), ((3899, 4094), 'onnxruntime.quantization.quantize_static', 'quantize_static', (['model_fp32_path', 'model_int8_path', 'data_reader'], {'quant_format': 'QuantFormat.QOperator', 'activation_type': 'activation_type', 'weight_type': 'weight_type', 'extra_options': 'extra_options'}), '(model_fp32_path, model_int8_path, data_reader, quant_format\n =QuantFormat.QOperator, activation_type=activation_type, weight_type=\n weight_type, extra_options=extra_options)\n', (3914, 4094), False, 'from onnxruntime.quantization import QuantFormat, QuantType, quantize_static\n'), ((4441, 4498), 'op_test_utils.check_op_type_count', 'check_op_type_count', (['self', 'model_int8_path'], {}), '(self, model_int8_path, **quant_nodes)\n', (4460, 4498), False, 'from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((4780, 4844), 'op_test_utils.check_qtype_by_node_type', 'check_qtype_by_node_type', (['self', 'model_int8_path', 'qnode_io_qtypes'], {}), '(self, model_int8_path, qnode_io_qtypes)\n', (4804, 4844), False, 'from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((5558, 5747), 'onnxruntime.quantization.quantize_static', 'quantize_static', (['model_fp32_path', 'model_int8_path', 'data_reader'], {'quant_format': 'QuantFormat.QDQ', 'activation_type': 'activation_type', 'weight_type': 'weight_type', 'extra_options': 'extra_options'}), '(model_fp32_path, model_int8_path, data_reader, quant_format\n =QuantFormat.QDQ, activation_type=activation_type, weight_type=\n weight_type, extra_options=extra_options)\n', (5573, 5747), False, 'from onnxruntime.quantization import QuantFormat, QuantType, quantize_static\n'), ((6155, 6212), 'op_test_utils.check_op_type_count', 'check_op_type_count', (['self', 'model_int8_path'], {}), '(self, model_int8_path, **quant_nodes)\n', (6174, 6212), False, 'from op_test_utils import 
TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((6405, 6469), 'op_test_utils.check_qtype_by_node_type', 'check_qtype_by_node_type', (['self', 'model_int8_path', 'qnode_io_qtypes'], {}), '(self, model_int8_path, qnode_io_qtypes)\n', (6429, 6469), False, 'from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((6638, 6655), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (6652, 6655), True, 'import numpy as np\n'), ((7233, 7250), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (7247, 7250), True, 'import numpy as np\n'), ((1834, 1952), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Gemm"""', '[input_name, weight_name, bias_name]', '[output_name]'], {'alpha': '(1.0)', 'beta': '(1.0)', 'transB': '(1)'}), "('Gemm', [input_name, weight_name, bias_name], [\n output_name], alpha=1.0, beta=1.0, transB=1)\n", (1855, 1952), False, 'import onnx\n'), ((1583, 1642), 'onnx.numpy_helper.from_array', 'onnx.numpy_helper.from_array', (['weight_data'], {'name': 'weight_name'}), '(weight_data, name=weight_name)\n', (1611, 1642), False, 'import onnx\n'), ((1757, 1812), 'onnx.numpy_helper.from_array', 'onnx.numpy_helper.from_array', (['bias_data'], {'name': 'bias_name'}), '(bias_data, name=bias_name)\n', (1785, 1812), False, 'import onnx\n'), ((1493, 1531), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', 'weight_shape'], {}), '(0, 0.1, weight_shape)\n', (1509, 1531), True, 'import numpy as np\n'), ((1669, 1705), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', 'bias_shape'], {}), '(0, 0.1, bias_shape)\n', (1685, 1705), True, 'import numpy as np\n'), ((3201, 3228), 'onnx.helper.make_opsetid', 'helper.make_opsetid', (['""""""', '(13)'], {}), "('', 13)\n", (3220, 3228), False, 'from onnx import TensorProto, helper\n'), ((873, 904), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(2)', 'shape'], {}), '(-1, 2, shape)\n', (890, 904), True, 'import numpy as np\n')]
|
from model import *
from dataloader import *
from utils import *
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
import time
import gc
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch.nn as nn
import numpy as np
import warnings as wn
wn.filterwarnings('ignore')
#load either PAMAP2 or Opportunity Datasets
batch_size_train = 500 # PAM
batch_size_val = 300 # PAM
#batch_size_train = 10000 # OPP
#batch_size_val = 1 # OPP
# 1 = PAM, 0 = OPP
PAM_dataset = 1
if (PAM_dataset):
# PAM Dataset
train_dataset = Wearables_Dataset(0,dataset_name='PAM2',dataset_path='data/PAM2',train_dataset=True)
val_dataset = Wearables_Dataset(0,dataset_name='PAM2',dataset_path='data/PAM2',train_dataset=False)
else:
# Opportunity Dataset
train_dataset = Wearables_Dataset(dataset_name='OPP',dataset_path='data/OPP',train_dataset=True)
val_dataset = Wearables_Dataset(dataset_name='OPP',dataset_path='data/OPP',train_dataset=False)
# Get dataloaders
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size_train,
num_workers=4,
shuffle=True)
val_loader = DataLoader(dataset=val_dataset,
batch_size=batch_size_val,
num_workers=4,
shuffle=False)
writer = SummaryWriter()
def init_weights(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.xavier_uniform_(m.weight.data)
# torch.nn.init.xavier_uniform_(m.bias.data)
def plot(train_loss, val_loss, train_acc, val_acc, train_f1, val_f1, dataset):
# train/val acc plots
x = np.arange(len(train_loss))
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x,train_acc)
ax1.plot(x,val_acc)
ax1.set_xlabel('Number of Epochs')
ax1.set_ylabel('Accuracy')
ax1.set_title('Training vs. Validation Accuracy')
ax1.legend(['Training Acc','Val Acc'])
fig1.savefig('train_val_accuracy_' + dataset + '.png')
# train/val loss plots
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.plot(x,train_loss)
ax2.plot(x,val_loss)
ax2.set_xlabel('Number of Epochs')
ax2.set_ylabel('Cross Entropy Loss')
ax2.set_title('Training vs. Validation Loss')
ax2.legend(['Training Loss','Val Loss'])
fig2.savefig('train_val_loss_' + dataset + '.png')
# train/val f1 plots
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3.plot(x,train_f1)
ax3.plot(x,val_f1)
ax3.set_xlabel('Number of Epochs')
ax3.set_ylabel('F1 Score')
ax3.set_title('Training vs. Validation F1 Score')
ax3.legend(['Train F1 Score','Val F1 Score'])
fig3.savefig('train_val_f1_' + dataset + '.png')
def train():
train_epoch_loss = []
train_epoch_acc = []
train_epoch_f1 = []
val_epoch_loss = []
val_epoch_acc = []
val_epoch_f1 = []
best_model = 'best_model_train'
best_loss = float('inf')
for epoch in tqdm(range(epochs)):
train_loss_per_iter = []
train_acc_per_iter = []
train_f1_per_iter = []
ts = time.time()
for iter, (X, Y) in tqdm(enumerate(train_loader), total=len(train_loader)):
optimizer.zero_grad()
if use_gpu:
inputs = X.cuda()
labels = Y.long().cuda()
else:
inputs, labels = X, Y.long()
clear(X, Y)
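            # split the feature dimension (dim 1) into chunks of 9 channels; PSM appears
            # to consume one chunk per device encoder (assumption based on the model setup below)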
inputs = torch.split(inputs, 9, 1)
outputs = psm(inputs)
loss = criterion(outputs, torch.max(labels, 1)[1])
clear(outputs)
loss.backward()
optimizer.step()
clear(loss)
# save loss per iteration
train_loss_per_iter.append(loss.item())
t_acc = compute_acc(outputs,labels)
train_acc_per_iter.append(t_acc)
micro_f1, macro_f1, weighted = calculate_f1(outputs, labels)
train_f1_per_iter.append(weighted)
writer.add_scalar('Loss/train', loss.item(), epoch)
writer.add_scalar('Accuracy/train', t_acc, epoch)
(print("Finish epoch {}, time elapsed {}, train acc {}, train weighted f1 {}".format(epoch,
time.time() - ts, np.mean(train_acc_per_iter), np.mean(train_f1_per_iter))))
# calculate validation loss and accuracy
val_loss, val_acc, val_f1 = val(epoch)
print("Val loss {}, Val Acc {}, Val F1 {}".format(val_loss, val_acc, val_f1))
        # Checkpoint the best model so far (based on the last training-batch loss); no true early stopping is performed
if loss < best_loss:
best_loss = loss
# TODO: Consider switching to state dict instead
torch.save(psm, best_model)
train_epoch_loss.append(np.mean(train_loss_per_iter))
train_epoch_acc.append(np.mean(train_acc_per_iter))
train_epoch_f1.append(np.mean(train_f1_per_iter))
val_epoch_loss.append(val_loss)
val_epoch_acc.append(val_acc)
val_epoch_f1.append(val_f1)
writer.add_scalar('Loss/val', val_loss, epoch)
writer.add_scalar('Accuracy/val', val_acc, epoch)
# plot val/training plot curves
plot(train_epoch_loss, val_epoch_loss, train_epoch_acc, val_epoch_acc, train_epoch_f1, val_epoch_f1, 'shared')
def val(epoch):
batch_loss = []
batch_acc = []
batch_f1 = []
for iter, (X, Y) in tqdm(enumerate(val_loader), total=len(val_loader)):
        '''
        X -> Input sensor data fed to the model
        Y -> One-hot encoded activity labels (argmax is taken for the CrossEntropy targets)
        '''
if use_gpu:
inputs = X.cuda()
labels = Y.long().cuda()
else:
inputs, labels = X, Y.long()
clear(X, Y)
inputs = torch.split(inputs, 9, 1)
outputs = psm(inputs)
# save val loss/accuracy
loss = criterion(outputs, torch.max(labels, 1)[1])
batch_loss.append(loss.item())
batch_acc.append(compute_acc(outputs,labels))
micro_f1, macro_f1, weighted = calculate_f1(outputs, labels)
batch_f1.append(weighted)
clear(outputs, loss)
# if iter % 20 == 0:
# print("iter: {}".format(iter))
return np.mean(batch_loss), np.mean(batch_acc), np.mean(batch_f1)
if __name__ == "__main__":
# Define model parameters
epochs = 3
criterion = nn.CrossEntropyLoss()
sensors_per_device = 3
fr = 100
# Initialize model sensor model (senseHAR paper Figure 3/4)
# Initialize encoder model A,B,C
psm = PSM(12, sensors_per_device, fr, p=0.15)
psm.apply(init_weights)
params = psm.parameters()
optimizer = optim.Adam(params, lr=1e-2)
use_gpu = torch.cuda.is_available()
if use_gpu:
psm = psm.cuda()
#print("Init val loss: {}, Init val acc: {}, Init val iou: {}".format(val_loss, val_acc, val_iou))
train()
|
[
"torch.optim.Adam",
"torch.utils.tensorboard.SummaryWriter",
"numpy.mean",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.figure",
"time.time",
"warnings.filterwarnings"
] |
[((282, 309), 'warnings.filterwarnings', 'wn.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (299, 309), True, 'import warnings as wn\n'), ((1375, 1390), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (1388, 1390), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1741, 1753), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1751, 1753), True, 'import matplotlib.pyplot as plt\n'), ((2100, 2112), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2110, 2112), True, 'import matplotlib.pyplot as plt\n'), ((2463, 2475), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2473, 2475), True, 'import matplotlib.pyplot as plt\n'), ((6338, 6359), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6357, 6359), True, 'import torch.nn as nn\n'), ((6625, 6652), 'torch.optim.Adam', 'optim.Adam', (['params'], {'lr': '(0.01)'}), '(params, lr=0.01)\n', (6635, 6652), True, 'import torch.optim as optim\n'), ((3154, 3165), 'time.time', 'time.time', ([], {}), '()\n', (3163, 3165), False, 'import time\n'), ((6186, 6205), 'numpy.mean', 'np.mean', (['batch_loss'], {}), '(batch_loss)\n', (6193, 6205), True, 'import numpy as np\n'), ((6207, 6225), 'numpy.mean', 'np.mean', (['batch_acc'], {}), '(batch_acc)\n', (6214, 6225), True, 'import numpy as np\n'), ((6227, 6244), 'numpy.mean', 'np.mean', (['batch_f1'], {}), '(batch_f1)\n', (6234, 6244), True, 'import numpy as np\n'), ((4745, 4773), 'numpy.mean', 'np.mean', (['train_loss_per_iter'], {}), '(train_loss_per_iter)\n', (4752, 4773), True, 'import numpy as np\n'), ((4806, 4833), 'numpy.mean', 'np.mean', (['train_acc_per_iter'], {}), '(train_acc_per_iter)\n', (4813, 4833), True, 'import numpy as np\n'), ((4865, 4891), 'numpy.mean', 'np.mean', (['train_f1_per_iter'], {}), '(train_f1_per_iter)\n', (4872, 4891), True, 'import numpy as np\n'), ((4287, 4314), 'numpy.mean', 'np.mean', (['train_acc_per_iter'], {}), '(train_acc_per_iter)\n', (4294, 4314), True, 'import numpy as np\n'), ((4316, 4342), 'numpy.mean', 'np.mean', (['train_f1_per_iter'], {}), '(train_f1_per_iter)\n', (4323, 4342), True, 'import numpy as np\n'), ((4269, 4280), 'time.time', 'time.time', ([], {}), '()\n', (4278, 4280), False, 'import time\n')]
|
import os
import csv
import numpy as np
from pathlib import Path
from tqdm import tqdm
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
seed = 3535999445
def imdb(path=Path("data/aclImdb/")):
import pickle
try:
return pickle.load((path / "train-test.p").open("rb"))
except FileNotFoundError:
pass
CLASSES = ["neg", "pos", "unsup"]
def get_texts(path):
texts,labels = [],[]
for idx,label in tqdm(enumerate(CLASSES)):
for fname in tqdm((path/label).glob('*.txt'), leave=False):
texts.append(fname.read_text())
labels.append(idx)
return texts, np.asarray(labels)
trXY = get_texts(path / "train")
teXY = get_texts(path / "test")
data = (trXY, teXY)
pickle.dump(data, (path / "train-test.p").open("wb"))
return data
def _rocstories(path):
"""Returns 4 lists :
st: input sentences
ct1: first answer
ct2: second answer
y: index of the good answer
"""
with open(path, encoding='utf_8') as f:
f = csv.reader(f)
st = []
ct1 = []
ct2 = []
y = []
for i, line in enumerate(tqdm(list(f), ncols=80, leave=False)):
if i > 0:
s = ' '.join(line[1:5])
c1 = line[5]
c2 = line[6]
st.append(s)
ct1.append(c1)
ct2.append(c2)
y.append(int(line[-1])-1)
return st, ct1, ct2, y
def rocstories(data_dir, n_train=1497, n_valid=374):
storys, comps1, comps2, ys = _rocstories(os.path.join(data_dir, 'cloze_test_val__spring2016 - cloze_test_ALL_val.csv'))
teX1, teX2, teX3, _ = _rocstories(os.path.join(data_dir, 'cloze_test_test__spring2016 - cloze_test_ALL_test.csv'))
tr_storys, va_storys, tr_comps1, va_comps1, tr_comps2, va_comps2, tr_ys, va_ys = train_test_split(storys, comps1, comps2, ys, test_size=n_valid, random_state=seed)
trX1, trX2, trX3 = [], [], []
trY = []
for s, c1, c2, y in zip(tr_storys, tr_comps1, tr_comps2, tr_ys):
trX1.append(s)
trX2.append(c1)
trX3.append(c2)
trY.append(y)
vaX1, vaX2, vaX3 = [], [], []
vaY = []
for s, c1, c2, y in zip(va_storys, va_comps1, va_comps2, va_ys):
vaX1.append(s)
vaX2.append(c1)
vaX3.append(c2)
vaY.append(y)
trY = np.asarray(trY, dtype=np.int32)
vaY = np.asarray(vaY, dtype=np.int32)
return (trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3)
|
[
"pathlib.Path",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.asarray",
"csv.reader"
] |
[((210, 231), 'pathlib.Path', 'Path', (['"""data/aclImdb/"""'], {}), "('data/aclImdb/')\n", (214, 231), False, 'from pathlib import Path\n'), ((1932, 2018), 'sklearn.model_selection.train_test_split', 'train_test_split', (['storys', 'comps1', 'comps2', 'ys'], {'test_size': 'n_valid', 'random_state': 'seed'}), '(storys, comps1, comps2, ys, test_size=n_valid,\n random_state=seed)\n', (1948, 2018), False, 'from sklearn.model_selection import train_test_split\n'), ((2444, 2475), 'numpy.asarray', 'np.asarray', (['trY'], {'dtype': 'np.int32'}), '(trY, dtype=np.int32)\n', (2454, 2475), True, 'import numpy as np\n'), ((2486, 2517), 'numpy.asarray', 'np.asarray', (['vaY'], {'dtype': 'np.int32'}), '(vaY, dtype=np.int32)\n', (2496, 2517), True, 'import numpy as np\n'), ((1115, 1128), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1125, 1128), False, 'import csv\n'), ((1649, 1726), 'os.path.join', 'os.path.join', (['data_dir', '"""cloze_test_val__spring2016 - cloze_test_ALL_val.csv"""'], {}), "(data_dir, 'cloze_test_val__spring2016 - cloze_test_ALL_val.csv')\n", (1661, 1726), False, 'import os\n'), ((1766, 1845), 'os.path.join', 'os.path.join', (['data_dir', '"""cloze_test_test__spring2016 - cloze_test_ALL_test.csv"""'], {}), "(data_dir, 'cloze_test_test__spring2016 - cloze_test_ALL_test.csv')\n", (1778, 1845), False, 'import os\n'), ((689, 707), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (699, 707), True, 'import numpy as np\n')]
|
'''
@date: 31/03/2015
@author: <NAME>
Tests for generator
'''
import unittest
import numpy as np
import scipy.constants as constants
from PyHEADTAIL.trackers.longitudinal_tracking import RFSystems
import PyHEADTAIL.particles.generators as gf
from PyHEADTAIL.general.printers import SilentPrinter
class TestParticleGenerators(unittest.TestCase):
'''Test class for the new ParticleGenerator (generator_functional.py)'''
def setUp(self):
np.random.seed(0)
self.nparticles = 1000
self.epsx = 0.5
self.intensity = 1e11
self.charge = constants.e
self.mass = constants.m_p
self.circumference = 99
self.gamma = 27.1
self.generator = gf.ParticleGenerator(
self.nparticles, self.intensity,
self.charge, self.mass, self.circumference, self.gamma,
distribution_x=gf.gaussian2D(0.5),alpha_x=-0.7, beta_x=4, D_x=0,
distribution_z=gf.gaussian2D(3.0),
printer=SilentPrinter())
self.beam = self.generator.generate()
def tearDown(self):
pass
def test_particles_length(self):
'''Tests whether the coordinate arrays of the resulting beam
have the correct length'''
self.assertEqual(self.beam.x.size, self.nparticles,
'Length of x-beam coordinate array not correct')
def test_particles_coordinates(self):
'''Tests whether only the coordinates specified in the initializer
are initialized in the beam (e.g. yp is not)
'''
with self.assertRaises(AttributeError):
self.beam.yp
def test_update_beam_with_existing_coords(self):
'''Tests whether updating already existing coords produces
beam coordinates of the correct size
'''
self.generator.update(self.beam)
self.assertEqual(self.beam.x.size, self.nparticles,
'Updating existing coordinates leads to wrong' +
'coordinate lengths')
def test_update_beam_with_new_coords(self):
'''Tests whether adding new coordinates to the beam
works as expected
'''
x_copy = self.beam.x.copy()
longitudinal_generator = gf.ParticleGenerator(
self.nparticles, self.intensity, self.charge,
self.mass, self.circumference, self.gamma,
distribution_z=gf.gaussian2D(3.0))
longitudinal_generator.update(self.beam)
self.assertEqual(self.beam.dp.size, self.nparticles,
'Updating the beam with new coordinates leads to' +
'faulty coordinates')
for n in range(self.nparticles):
self.assertAlmostEqual(x_copy[n], self.beam.x[n],
msg='Updating the beam with new coordinates invalidates' +
'existing coordinates')
def test_distributions(self):
'''Tests whether the specified distributions return the coords
in the correct format (dimensions). If new distributions are added,
add them to the test here!
'''
# Gaussian
dist = gf.gaussian2D(0.1)
self.distribution_testing_implementation(dist)
# Uniform
dist = gf.uniform2D(-2., 3.)
self.distribution_testing_implementation(dist)
def test_import_distribution(self):
'''Tests whether import_distribution produces coordinate arrays of the
correct size'''
nparticles = 5
coords = [np.linspace(-2, 2, nparticles),
np.linspace(-3, 3, nparticles)]
import_generator = gf.ParticleGenerator(
nparticles, 1e11, constants.e, constants.m_p, 100, 10,
distribution_y=gf.import_distribution2D(coords))
beam = import_generator.generate()
self.assertEqual(len(beam.y), nparticles,
'import_generator produces coords with the wrong length')
self.assertEqual(len(beam.yp), nparticles,
'import_generator produces coords with the wrong length')
def test_rf_bucket_distribution(self):
'''Tests the functionality of the rf-bucket matchor'''
#SPS Q20 flattop
nparticles = 100
h1 = 4620
h2 = 4*4620
V1 = 10e6
V2 = 1e6
dphi1 = 0
dphi2 = 0
alpha = 0.00308
p_increment = 0
long_map = RFSystems(self.circumference, [h1, h2], [V1, V2],
[dphi1, dphi2], [alpha], self.gamma, p_increment, charge=self.charge, mass=self.mass)
bucket = long_map.get_bucket(gamma=self.gamma)
bunch = gf.ParticleGenerator(
nparticles, 1e11, constants.e, constants.m_p,
self.circumference, self.gamma,
distribution_z=gf.RF_bucket_distribution(
bucket, epsn_z=0.002, printer=SilentPrinter())).generate()
def test_cut_bucket_distribution(self):
'''Tests functionality of the cut-bucket matchor '''
nparticles = 100
h1 = 4620
h2 = 4*4620
V1 = 10e6
V2 = 1e6
dphi1 = 0
dphi2 = 0
alpha = 0.00308
p_increment = 0
long_map = RFSystems(self.circumference, [h1, h2], [V1, V2],
[dphi1, dphi2], [alpha], self.gamma, p_increment, charge=self.charge, mass=self.mass)
bucket = long_map.get_bucket(gamma=self.gamma)
is_accepted_fn = bucket.make_is_accepted(margin=0.)
bunch = gf.ParticleGenerator(
nparticles, 11, constants.e, constants.m_p,
self.circumference, self.gamma,
distribution_z=gf.cut_distribution(
is_accepted=is_accepted_fn,
distribution=gf.gaussian2D(0.01))).generate()
self.assertEqual(nparticles, len(bunch.z),
'bucket_cut_distribution loses particles')
self.assertTrue(np.sum(is_accepted_fn(bunch.z, bunch.dp)) == nparticles,
'not all particles generated with the cut RF matcher' +
' lie inside the specified separatrix')
def test_import_distribution_raises_error(self):
'''Tests whether the generation fails when the number of particles
and the size of the specified distribution list do not match
'''
nparticles = 10
coords = [np.linspace(-2, 2, nparticles+1),
np.linspace(-3, 3, nparticles+1)]
import_generator = gf.ParticleGenerator(
nparticles, 1e11, constants.e, constants.m_p, 100, 10,
distribution_y=gf.import_distribution2D(coords))
with self.assertRaises(AssertionError):
beam = import_generator.generate()
def distribution_testing_implementation(self, distribution):
'''Call this method with the distribution as a parameter.
distribution(n_particles) should be a valid command
'''
distribution_size = 100
X = distribution(distribution_size)
x = X[0]
p = X[1]
self.assertEqual(x.size, distribution_size,
'space-direction ([0]) of ' + str(distribution) +
'has wrong dimension')
self.assertEqual(p.size, distribution_size,
'momentum-direction ([1]) of ' + str(distribution) +
'has wrong dimension')
if __name__ == '__main__':
unittest.main()
|
[
"PyHEADTAIL.trackers.longitudinal_tracking.RFSystems",
"PyHEADTAIL.general.printers.SilentPrinter",
"PyHEADTAIL.particles.generators.gaussian2D",
"PyHEADTAIL.particles.generators.import_distribution2D",
"PyHEADTAIL.particles.generators.uniform2D",
"numpy.linspace",
"numpy.random.seed",
"unittest.main"
] |
[((7430, 7445), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7443, 7445), False, 'import unittest\n'), ((457, 474), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (471, 474), True, 'import numpy as np\n'), ((3136, 3154), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(0.1)'], {}), '(0.1)\n', (3149, 3154), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((3244, 3267), 'PyHEADTAIL.particles.generators.uniform2D', 'gf.uniform2D', (['(-2.0)', '(3.0)'], {}), '(-2.0, 3.0)\n', (3256, 3267), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((4398, 4537), 'PyHEADTAIL.trackers.longitudinal_tracking.RFSystems', 'RFSystems', (['self.circumference', '[h1, h2]', '[V1, V2]', '[dphi1, dphi2]', '[alpha]', 'self.gamma', 'p_increment'], {'charge': 'self.charge', 'mass': 'self.mass'}), '(self.circumference, [h1, h2], [V1, V2], [dphi1, dphi2], [alpha],\n self.gamma, p_increment, charge=self.charge, mass=self.mass)\n', (4407, 4537), False, 'from PyHEADTAIL.trackers.longitudinal_tracking import RFSystems\n'), ((5197, 5336), 'PyHEADTAIL.trackers.longitudinal_tracking.RFSystems', 'RFSystems', (['self.circumference', '[h1, h2]', '[V1, V2]', '[dphi1, dphi2]', '[alpha]', 'self.gamma', 'p_increment'], {'charge': 'self.charge', 'mass': 'self.mass'}), '(self.circumference, [h1, h2], [V1, V2], [dphi1, dphi2], [alpha],\n self.gamma, p_increment, charge=self.charge, mass=self.mass)\n', (5206, 5336), False, 'from PyHEADTAIL.trackers.longitudinal_tracking import RFSystems\n'), ((3506, 3536), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'nparticles'], {}), '(-2, 2, nparticles)\n', (3517, 3536), True, 'import numpy as np\n'), ((3556, 3586), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'nparticles'], {}), '(-3, 3, nparticles)\n', (3567, 3586), True, 'import numpy as np\n'), ((6364, 6398), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(nparticles + 1)'], {}), '(-2, 2, nparticles + 1)\n', (6375, 6398), True, 'import numpy as np\n'), ((6416, 6450), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(nparticles + 1)'], {}), '(-3, 3, nparticles + 1)\n', (6427, 6450), True, 'import numpy as np\n'), ((873, 891), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(0.5)'], {}), '(0.5)\n', (886, 891), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((950, 968), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(3.0)'], {}), '(3.0)\n', (963, 968), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((990, 1005), 'PyHEADTAIL.general.printers.SilentPrinter', 'SilentPrinter', ([], {}), '()\n', (1003, 1005), False, 'from PyHEADTAIL.general.printers import SilentPrinter\n'), ((2401, 2419), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(3.0)'], {}), '(3.0)\n', (2414, 2419), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((3739, 3771), 'PyHEADTAIL.particles.generators.import_distribution2D', 'gf.import_distribution2D', (['coords'], {}), '(coords)\n', (3763, 3771), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((6601, 6633), 'PyHEADTAIL.particles.generators.import_distribution2D', 'gf.import_distribution2D', (['coords'], {}), '(coords)\n', (6625, 6633), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((4861, 4876), 'PyHEADTAIL.general.printers.SilentPrinter', 'SilentPrinter', ([], {}), '()\n', (4874, 4876), False, 'from PyHEADTAIL.general.printers import SilentPrinter\n'), ((5735, 5754), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(0.01)'], {}), '(0.01)\n', 
(5748, 5754), True, 'import PyHEADTAIL.particles.generators as gf\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def filter_rms_error(filter_object,
to_filter_data_lambda,
desired_filter_data_lambda,
dt=0.01,
start_time=0.0,
end_time=10.0,
skip_initial=0,
use_pressure_error=False,
abs_tol=2.0,
rel_tol=0.02,
generate_plot=False):
"""Calculates root-mean-square (RMS) error between data calculated
by a filter and a reference function that nominally should yield
equal data.
Parameters
----------
filter_object : object
An object representing the filter being tested. It must have
the following functions defined.
filter_object(dt: float)
filter_object.append(datum: float)
filter_object.get_datum() -> float
to_filter_data_lambda : lambda
A function representing the data being fed to the filter. It
should be of the form
to_filter_lambda(time: np.array) -> np.array
desired_filter_data_lambda : lambda
A function representing output that the filter_object output
should be nominally equal to. It should be of the form
desired_filter_data_lambda(time: np.array) -> np.array
start_time=0.0 : float
end_time=10.0 : float
dt=0.01 : float
Represents a time interval in seconds of [start_time, end_time)
with steps of dt between. Calculated as
np.arange(start_time, end_time, dt).
skip_initial=0 : int
Ignores the first skip_inital data points when calculating
error. This is useful when a filter has an initial transient
before it starts returning useful data.
use_pressure_error=False : bool
Instead of calculating direct RMS error, this function will
calculate a normalized error based on given tolerances. This is
useful for ventilators trying to calculate pressure meeting
ISO 80601-2-80:2018 192.168.127.12.1. Default values for the
tolerances are based on this standard.
abs_tol=2.0 : float
The design absolute tolerance when calculating pressure error,
i.e. +/- abs_tol. Only used if use_pressure_error == True.
rel_tol=0.02 : float
The design relative tolerance when calculating pressure error,
i.e. +/- rel_tol * desired_filter_data(t).
generate_plot=False : bool
If True, then a plot of the filter data and
desired_filter_data_lambda with respect to time will be
generated. Note that this should be false in non-interactive
contexts.
Returns
-------
error : float
If use_pressure_error is False,
This returns the RMS error between the filter output and
the output of desired_filter_data_lambda.
If use_pressure_error is True,
This returns a normalized error between the filter output
and the output of desired_filter_data_lambda. If error < 1,
then the typical error is within the design tolerance. When
testing, you can add a safety factor to the error by
asserting that the error must be less than 1/safety_factor.
"""
t = np.arange(start_time, end_time, dt)
test_filter = filter_object(dt)
to_filter_data = to_filter_data_lambda(t)
filtered_data = np.array([])
desired_filtered_data = desired_filter_data_lambda(t)
for i in range(len(to_filter_data)):
test_filter.append(to_filter_data[i])
filtered_data = np.append(filtered_data,
test_filter.get_datum())
if generate_plot:
figure, axis = plt.subplots()
axis.plot(t, to_filter_data, label="To Filter Data")
axis.plot(t, filtered_data, label="Filtered Data")
axis.plot(t, desired_filtered_data, label="Desired Filtered Data")
axis.legend()
plt.show()
if not use_pressure_error:
return _root_mean_square(
(filtered_data - desired_filtered_data)[skip_initial:])
else:
return _pressure_error(filtered_data[skip_initial:],
desired_filtered_data[skip_initial:])
def _root_mean_square(np_array):
return np.sqrt(np.mean(np.square(np_array)))
def _pressure_error(calculated_pressure,
actual_pressure,
abs_tol=2.0,
rel_tol=0.02):
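    # normalize each error sample by its design tolerance band, abs_tol + rel_tol * actual;
    # an RMS value below 1 means the typical error stays inside the tolerance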
return _root_mean_square(
(calculated_pressure - actual_pressure)
/ (np.full_like(actual_pressure, abs_tol) + rel_tol * actual_pressure)
)
|
[
"numpy.full_like",
"numpy.square",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((3339, 3374), 'numpy.arange', 'np.arange', (['start_time', 'end_time', 'dt'], {}), '(start_time, end_time, dt)\n', (3348, 3374), True, 'import numpy as np\n'), ((3477, 3489), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3485, 3489), True, 'import numpy as np\n'), ((3789, 3803), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3801, 3803), True, 'import matplotlib.pyplot as plt\n'), ((4029, 4039), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4037, 4039), True, 'import matplotlib.pyplot as plt\n'), ((4376, 4395), 'numpy.square', 'np.square', (['np_array'], {}), '(np_array)\n', (4385, 4395), True, 'import numpy as np\n'), ((4635, 4673), 'numpy.full_like', 'np.full_like', (['actual_pressure', 'abs_tol'], {}), '(actual_pressure, abs_tol)\n', (4647, 4673), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.legend_handler import HandlerTuple
from scipy.integrate import quad, simps
from math import * #lets the user enter complicated functions easily, eg: exp(3*sin(x**2))
import pyinputplus as pyip # makes taking inputs more convenient for the user
from warnings import filterwarnings
# prevents a deprecation warning from the ax.set_xscale/ax.set_yscale("symlog", linthresh...) calls used further down.
filterwarnings("ignore", category=__import__('matplotlib').cbook.mplDeprecation)
# avoids a flood of RankWarning messages if the polynomial fit is ill-conditioned while displaying Simpson's rule's parabolas.
filterwarnings("ignore", category=np.RankWarning)
# This function is kept separate because it is the only one that draws several sums on each axis; handling it apart avoids special cases in the rest of the code.
def Riemann(fig, formula, f, xmin, xmax, boolLogX, boolLogY, listGraph_n, listComp_n, lenContinuous, exact, errorBound):
continuous_x = np.linspace(xmin, xmax, lenContinuous)
continuous_y = f(continuous_x)
interval = xmax - xmin
listListGraph_xLeft = [np.linspace(xmin, xmax - (interval) / n, n) for n in listGraph_n]
listListGraph_yLeft = [f(list_x) for list_x in listListGraph_xLeft]
listListComp_xLeft = [np.linspace(xmin, xmax - (interval) / n, n) for n in listComp_n]
listListComp_yLeft = [f(list_x) for list_x in listListComp_xLeft]
listListGraph_xRight = [np.linspace(xmin + (interval) / n, xmax, n) for n in listGraph_n]
listListGraph_yRight = [f(list_x) for list_x in listListGraph_xRight]
listListComp_xRight = [np.linspace(xmin + (interval) / n, xmax, n) for n in listComp_n]
listListComp_yRight = [f(list_x) for list_x in listListComp_xRight]
listListGraph_x = [np.linspace(xmin, xmax - interval / n, n) + interval / (2 * n) for n in listGraph_n]
listListGraph_y = [(list_yLeft + list_yRight) / 2 for list_yLeft, list_yRight in zip(listListGraph_yLeft, listListGraph_yRight)]
listListComp_y = [(list_yLeft + list_yRight) / 2 for list_yLeft, list_yRight in zip(listListComp_yLeft, listListComp_yRight)]
listWidth = [interval / n for n in listGraph_n]
fig.suptitle(f"Study of the approximation of the integral of the function y = {formula} on the interval ({xmin}, {xmax}) "
f"by left and right Riemann sums and their average and of the quality of the approximations compared to the exact value")
#This fills the left side of the figure, filling it row by row.
nbCol = ceil(len(listGraph_n) / 5)
areaAxes = [fig.add_subplot(ceil(len(listGraph_n) / nbCol), 2 * nbCol, i) for i in range(1, 2 * len(listGraph_n)) if 0 <= i % (2 * nbCol) - 1 < nbCol]
for i, ax in enumerate(areaAxes):
ax.bar(listListGraph_xLeft[i], listListGraph_yLeft[i], align='edge', alpha=0.5, width=listWidth[i], label="Left sum",
color=list("#70db70" if y >= 0 else "#ff6666" for y in listListGraph_yLeft[i])) #green and red
ax.bar(listListGraph_x[i], listListGraph_y[i], align='center', alpha=0.5, width=listWidth[i], label="Average of left and right sums",
color=list("#071ba3" if y >= 0 else "#d8e30e" for y in listListGraph_y[i])) # blue and orange
ax.bar(listListGraph_xRight[i], listListGraph_yRight[i], align='edge', alpha=0.5, width=-listWidth[i], label="Right sum",
color=list("#6815a3" if y >= 0 else "#e08f0b" for y in listListGraph_yRight[i])) # purple and yellow
ax.plot(continuous_x, continuous_y)
if boolLogX:
            ax.set_xscale('symlog', linthreshx=interval / (10 * max(len(list_) for list_ in listListGraph_x + listListComp_xRight)))
if boolLogY:
ax.set_yscale('symlog', linthreshy=absGetNonZeroMin(listListGraph_yLeft + listListComp_yLeft + listListGraph_yRight + listListComp_yRight))
ax.set_title(f"{listGraph_n[i]} rectangles")
ax.grid(True)
#stuff for the legend to display both colors for each barplot. See last answer at
#https://stackoverflow.com/questions/31478077/how-to-make-two-markers-share-the-same-label-in-the-legend-using-matplotlib
#for explanation, by user rouckas
legendpatches = ((patches.Patch(color=color1, alpha=0.5), patches.Patch(color=color2, alpha=0.5))
for color1, color2 in zip(("#70db70", "#071ba3", "#6815a3"), ("#ff6666", "#d8e30e", "#e08f0b")))
areaAxes[0].legend((legendpatches), ("Left sum", "Average of left and right sums", "Right sum"), handler_map={tuple: HandlerTuple(ndivide=None)}, fontsize=10)
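    # differences smaller than the error bound on the exact value are treated as exactly zero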
errorBounder = np.vectorize(lambda x: x if abs(x) > errorBound else 0)
#the sorting here is to keep the implicit mapping between the lists of y values and the n values, which gets sorted later.
listDistLeft = errorBounder(np.fromiter(((list_y.mean() * (interval) - exact) for list_y in sorted(listListGraph_yLeft + listListComp_yLeft, key=len)), dtype=float))
listDistMid = errorBounder(np.fromiter(((list_y.mean() * (interval) - exact) for list_y in sorted(listListGraph_y + listListComp_y, key=len)), dtype=float))
listDistRight = errorBounder(np.fromiter(((list_y.mean() * (interval) - exact) for list_y in sorted(listListGraph_yRight + listListComp_yRight, key=len)), dtype=float))
accuracyAxes = [fig.add_subplot(2, 2, i) for i in (2, 4)]
    if 0 in np.concatenate((listDistLeft, listDistMid, listDistRight)): # some approximation matched the exact value
global exactMessage
exactMessage = True
if exact == 0:
titles = ("difference for each approximation compared to the exact value of the integral, 0",
"difference for each approximation compared to the exact value of the integral, 0, on a logarithmic scale")
else:
listDistLeft, listDistMid, listDistRight = map(lambda x: 100 * x / exact, (listDistLeft, listDistMid, listDistRight))
titles = (f"relative percentage error for each approximation compared to the exact integral: {niceStr(exact)}",
f"relative percentage error for each approximation compared to the exact integral: {niceStr(exact)}, on a logarithmic scale")
#sorted to avoid lines going back and forth because it wouldn't be monotonically increasing.
listTot_n = list(sorted(listGraph_n + listComp_n))
ax = accuracyAxes[0]
for listDist, color, label in zip((listDistLeft, listDistMid, listDistRight), ("#70db70", "#071ba3", "#6815a3"),
("Left sums", "Average of left and right sums", "Right sums")):
ax.plot(listTot_n, listDist, color=color, label=label)
for x, y in zip(listTot_n * 3, np.concatenate((listDistLeft, listDistMid, listDistRight))):
ax.text(x, y, niceStr(y))
ax.grid(True)
ax.set_title(titles[0])
ax.legend()
ax = accuracyAxes[1]
for listDist, color, label in zip((listDistLeft, listDistMid, listDistRight), ("#70db70", "#071ba3", "#6815a3"),
("Left sums", "Average of left and right sums", "Right sums")):
ax.plot(listTot_n, listDist, color=color, label=label)
ax.set_xscale("log")
ax.get_xaxis().set_tick_params(which='minor', size=0)
ax.get_xaxis().set_tick_params(which='minor', width=0)
ax.set_yscale("symlog", linthreshy=absGetNonZeroMin(np.concatenate((listDistLeft, listDistMid, listDistRight))) * 0.9)
good_ylim(ax, np.concatenate((listDistLeft, listDistMid, listDistRight))) # sets the y limits to something a bit cleaner
for x, y in zip(listTot_n * 3, np.concatenate((listDistLeft, listDistMid, listDistRight))):
ax.text(x, y, niceStr(y))
ax.set_title(titles[1])
ax.grid(True, which='major')
ax.legend()
class Midpoint:
def __init__(self, f, xmin, xmax, listGraph_n, listComp_n):
self.interval = xmax - xmin
self.listListGraph_x = [np.linspace(xmin, xmax - self.interval / n, n) + self.interval / (2*n) for n in listGraph_n]
self.listListGraph_y = [f(list_x) for list_x in self.listListGraph_x]
self.listListComp_x = [np.linspace(xmin, xmax - self.interval / n, n) + self.interval / (2*n) for n in listComp_n]
self.listListComp_y = [f(list_x) for list_x in self.listListComp_x]
self.listWidth = [self.interval / n for n in listGraph_n]
def titleSpecs(self): return "some midpoint sums"
def shapeName(self): return "rectangles"
def listDist(self, exact):
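        # midpoint rule: mean of the midpoint samples times the interval length, minus the exact value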
return np.fromiter(((list_y.mean() * (self.interval) - exact) for list_y in sorted(self.listListGraph_y + self.listListComp_y, key=len)), dtype=float)
def graph(self, ax, i):
ax.bar(self.listListGraph_x[i], self.listListGraph_y[i], alpha=0.5, width=self.listWidth[i],
color=["#70db70" if y >= 0 else "#ff6666" for y in self.listListGraph_y[i]])
class Trapezoidal:
def __init__(self, f, xmin, xmax, listGraph_n, listComp_n):
self.interval = xmax - xmin
self.listListGraph_x = [np.linspace(xmin, xmax, n+1) for n in listGraph_n]
self.listListGraph_y = [f(list_x) for list_x in self.listListGraph_x]
self.listListComp_x = [np.linspace(xmin, xmax, n+1) for n in listComp_n]
self.listListComp_y = [f(list_x) for list_x in self.listListComp_x]
def titleSpecs(self): return "some trapezoidal sums"
def shapeName(self): return "trapezia"
def listDist(self, exact):
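        # composite trapezoidal rule: (sum of samples minus half the endpoints) * h,
        # with h = interval / (number of samples - 1), minus the exact value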
return np.fromiter((((list_y.sum() - (list_y[0] + list_y[-1]) / 2) / (len(list_y) - 1) * (self.interval) - exact)
for list_y in sorted(self.listListGraph_y + self.listListComp_y, key=len)), dtype=float)
def graph(self, ax, i):
ax.plot(self.listListGraph_x[i], self.listListGraph_y[i], color='#8b008b', linestyle='--')
ax.fill_between(self.listListGraph_x[i], self.listListGraph_y[i], alpha=0.5, interpolate=True,
color=["#70db70" if y >= 0 else "#ff6666" for y in self.listListGraph_y[i]])
class Simpson:
def __init__(self, f, xmin, xmax, listGraph_n, listComp_n): #the list_ns contain a number of parabolas.
self.interval = xmax - xmin
        self.listListGraph_x = [np.linspace(xmin, xmax, 2*n + 1) for n in listGraph_n] # 2n + 1 sample points, i.e. 2n sub-intervals (n parabolas)
self.listListGraph_y = [f(list_x) for list_x in self.listListGraph_x]
self.listListComp_x = [np.linspace(xmin, xmax, 2*n + 1) for n in listComp_n]
self.listListComp_y = [f(list_x) for list_x in self.listListComp_x]
def titleSpecs(self): return "Simpson's rule"
def shapeName(self): return "parabolas"
def listDist(self, exact):
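        # composite Simpson's rule evaluated with scipy's simps on the sampled points, minus the exact value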
return np.fromiter(((simps(list_y, list_x) - exact) for list_x, list_y in
zip(list(sorted(self.listListGraph_x + self.listListComp_x, key=len)),
list(sorted(self.listListGraph_y + self.listListComp_y, key=len)))),
dtype = float)
def graph(self, ax, i):
"""
separate it into n intervals, find the fitting parabola on each of them, grab the corresponding x values from continuous_x,
use them to get y values with the polynomial, plot them.
"""
global continuous_x
listData_x = self.listListGraph_x[i]
listData_y = self.listListGraph_y[i]
n = (len(listData_x) - 1) // 2 # number of parabolas
toPlot_y = []
for i_inter in range(n):
x_data = listData_x[2*i_inter:2*i_inter+3]
y_data = listData_y[2*i_inter:2*i_inter+3]
poly = np.polyfit(x_data, y_data, 2)
list_x = continuous_x[len(continuous_x) * i_inter // n: len(continuous_x) * (i_inter+1) // n]
list_y = np.polyval(poly, list_x)
toPlot_y.extend(list_y)
ax.plot(continuous_x, toPlot_y, color='#8b008b', linestyle='--')
def firstDigit(num):
digits = '123456789'
for char in str(num):
if char in digits:
return int(char)
def good_ylim(ax, values): # symlog scales can give ugly y limits; this replaces them with 0 and/or a single digit times a power of 10, like 9e-2.
mini, maxi = min(values), max(values)
newBottom, newTop = ax.get_ylim()
if mini < 0 < maxi:
newBottom = -(firstDigit(mini) + 1) * 10 ** floor(log10(-mini))
newTop = (firstDigit(maxi) + 1) * 10 ** floor(log10(maxi))
elif mini < maxi <= 0 :
newBottom = -(firstDigit(mini) + 1) * 10 ** floor(log10(-mini))
newTop = 0
elif 0 <= mini < maxi:
newBottom = 0
newTop = (firstDigit(maxi) + 1) * 10 ** floor(log10(maxi))
ax.set_ylim(newBottom, newTop)
def niceStr(val): # formats a value compactly, avoiding the display of far too many digits.
if 100 < abs(val) < 1000000: #just take away a few decimal digits
return str(round(val, max(0, 6 - floor(log10(abs(val))))))
#if it is in scientific notation, keep the scientific notation, just reduce the number of digits
string = str(val)
end = string.find('e')
if end != -1:
return string[:min(7, end)] + string[end:]
else:
return string[:min(7, len(string))]
def looper(func, check): #for taking inputs: if there is an error, then ask for input again. Used on inputs that are quite error prone: listGraph_n and listComp_n.
while True:
try:
list_ = func()
if check(list_): #raises Exception if wrong
return list_
except Exception as e:
            print("An error occurred, so you will be asked for that input again. It is probably just a typo, but in case it isn't, here is the error message:", e, sep='\n')
def getvalue(variable): #input taker
global tier
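    # tiers[variable] is (minimum tier required, default-value expression, input expression); evaluate whichever applies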
tuple = tiers[variable]
if tier < tuple[0]: return eval(tuple[1])
else: return eval(tuple[2])
def raiseEx(text): #to raise exceptions in lambda functions
raise Exception(text)
def absGetNonZeroMin(values): #returns the smallest non-zero absolute value in the list, used to set linthreshy on symlog scales, as it can't be 0
return min(abs(val) for val in values if val) if any(values) else 1
def main():
print("If you want to stop the program (which is an infinite loop), enter 0 as a level of customization and the program will terminate")
    while True: #just a loop that allows testing many functions without quitting/restarting the program.
global tier, tiers
tier = pyip.inputInt("How much customization do you want ? 0: stop, 1: minimum, 2: average, 3: advanced : ", min=0, max=3)
if tier == 0:
break
        #concept: each entry is (minimum customization tier, default-value expression, input expression); getvalue() evaluates the appropriate one.
tiers = {"boologX": (2, 'False', """pyip.inputYesNo("Logarithmic x scale for graphing f(x) ? [y/n]", yesVal='y', noVal='n') == 'y'"""),
"boologY": (2, 'False', """pyip.inputYesNo("Logarithmic y scale for graphing f(x) ? [y/n]", yesVal='y', noVal='n') == 'y'"""),
"listGraph_n": (2, '[10, 100, 1000]', """list(map(int, input("what number of intervals/shapes would you like to study ? use comma separated values, "
"eg: 10, 100, 1000, 10000, spaces don't matter: ").split(',')))"""),
"listComp_n": (3, '[]', #the + sign on the next line is for proper string formatting: indented code without indented string.
"""input('''Enter anything that evaluates to a regular python list of integers, such as [10, 100, 1000] or [3**i for i in range(2, 10)],\n''' +
'''these will be added to the computations to display more points in the accuracy graphs:\n''')"""),
"lenContinuous": (3, '10000', """pyip.inputInt("How many values should be used to plot f(x) ? For graphing purposes only: ")""")}
formula = input("f(x) = ")
f = np.vectorize(lambda x: eval(formula))
xmin, xmax = eval(input("Interval of integration: xmin, xmax = "))
boolLogX = getvalue("boologX")
boolLogY = getvalue("boologY")
listGraph_n = looper(lambda: getvalue('listGraph_n'), lambda list_: True if isinstance(list_, list) and all(isinstance(x, int) for x in list_) else \
raiseEx("It should evaluate to a list of integers"))
listComp_n = [] if tier < 3 else looper(lambda : (eval(eval(tiers['listComp_n'][2]))),
lambda list_: True if isinstance(list_, list) and all(isinstance(x, int) and x >= 1 for x in list_) else \
raiseEx("It should evaluate to a list of integers all >= 1")) #the first eval gets the comprehension, the second eval computes it.
#these 3 are used to graph the function.
global continuous_x #can be accessed by methods that need it, like simpson's rule
lenContinuous = getvalue("lenContinuous")
continuous_x = np.linspace(xmin, xmax, lenContinuous)
continuous_y = f(continuous_x)
dictMethod = {
1: Riemann,
2: Midpoint,
3: Trapezoidal,
4: Simpson,
}
exact, errorBound = quad(f, xmin, xmax)
errorBounder = np.vectorize(lambda x: x if abs(x) > errorBound else 0)
global exactMessage
exactMessage = False
numbers = looper(lambda: list(map(int, input("What methods would you like to use ? all methods called will be executed one after the other, the results will be displayed "
"at the end." + '\n' +
"1 for Riemann sums, 2 for midpoint rule, 3 for trapezoidal rule, 4 for Simpson's rule: ")
.split(','))),
lambda values: True if all(isinstance(val, int) and 1 <= val <= 4 for val in values) else raiseEx("These should all be integers between 1 and 4"))
for number in numbers:
fig = plt.figure()
if number == 1: # this function is a unique case.
Riemann(fig, formula, f, xmin, xmax, boolLogX, boolLogY, listGraph_n, listComp_n, lenContinuous, exact, errorBound)
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.9, wspace=0.1, hspace=0.6)
plt.draw()
continue
method = dictMethod[number](f, xmin, xmax, listGraph_n, listComp_n)
fig.suptitle(f"Study of the approximation of the function y = {formula} on the interval ({xmin}, {xmax}) "
f"with {method.titleSpecs()} and of the quality of the approximations compared to the exact value")
nbCol = ceil(len(listGraph_n) / 5)
areaAxes = (fig.add_subplot(ceil(len(listGraph_n) / nbCol), 2 * nbCol, i) for i in
range(1, 2 * len(listGraph_n)) if 0 <= i % (2 * nbCol) - 1 < nbCol) # the left side of the figure, filled row by row.
for i, ax in enumerate(areaAxes):
method.graph(ax, i)
ax.plot(continuous_x, continuous_y)
if boolLogX:
                ax.set_xscale('symlog', linthreshx=(xmax - xmin) / (10 * max(listGraph_n + listComp_n))) #shouldn't be visible, unless you're really
                # picky about your graphs.
if boolLogY:
ax.set_yscale('symlog', linthreshy=absGetNonZeroMin(method.listListGraph_y + method.listListComp_y))
ax.set_title(f"{listGraph_n[i]} {method.shapeName()}")
ax.grid(True)
listDist = method.listDist(exact)
accuracyAxes = [fig.add_subplot(2, 2, i) for i in (2, 4)]
listDist = errorBounder(listDist)
if 0 in listDist:
exactMessage = True
if exact == 0:
titles = ("difference for each approximation compared to the exact value of the integral, 0",
"difference for each approximation compared to the exact value of the integral, 0, on a logarithmic scale")
else:
listDist = listDist * 100 / exact
titles = (f"relative percentage error for each approximation compared to the exact integral: {niceStr(exact)}",
f"relative percentage error for each approximation compared to the exact integral: {niceStr(exact)}, on a logarithmic scale")
        #sorted for nicer graph: prevents line going back and forth by making it monotonically increasing. The same sorting order is applied in each method
listTot_n = list(sorted(listGraph_n + listComp_n))
ax = accuracyAxes[0]
ax.plot(listTot_n, listDist)
for x, y in zip(listTot_n, listDist):
ax.text(x, y, niceStr(y))
ax.grid(True)
ax.set_title(titles[0])
ax = accuracyAxes[1]
ax.plot(listTot_n, listDist)
ax.set_xscale("log")
ax.get_xaxis().set_tick_params(which='minor', size=0)
ax.get_xaxis().set_tick_params(which='minor', width=0)
ax.set_yscale("symlog", linthreshy=absGetNonZeroMin(listDist) * 0.9)
good_ylim(ax, listDist) # sets the y limits to something a bit cleaner
for x, y in zip(listTot_n, listDist):
ax.text(x, y, niceStr(y))
ax.set_title(titles[1])
ax.grid(True, which='major')
fig.subplots_adjust(left=0.05, bottom=0.05,right=0.95, top=0.9, wspace=0.1, hspace=0.5)
plt.draw()
if exactMessage:
            print(f"Some 0s are displayed in the accuracy check; however, this does not necessarily mean the accuracy is perfect:\n"
                  f"the exact value is computed with a certain margin of error, here it is {niceStr(errorBound)},\n"
                  f"and any 0 displayed here means the inaccuracy is less than this, and thus too small to be evaluated properly")
plt.show()
if __name__ == '__main__':
main()
|
[
"matplotlib.legend_handler.HandlerTuple",
"numpy.polyfit",
"scipy.integrate.quad",
"scipy.integrate.simps",
"numpy.linspace",
"pyinputplus.inputInt",
"numpy.polyval",
"matplotlib.patches.Patch",
"numpy.concatenate",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.draw",
"warnings.filterwarnings",
"matplotlib.pyplot.show"
] |
[((710, 759), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'category': 'np.RankWarning'}), "('ignore', category=np.RankWarning)\n", (724, 759), False, 'from warnings import filterwarnings\n'), ((1070, 1108), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'lenContinuous'], {}), '(xmin, xmax, lenContinuous)\n', (1081, 1108), True, 'import numpy as np\n'), ((1201, 1242), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - interval / n)', 'n'], {}), '(xmin, xmax - interval / n, n)\n', (1212, 1242), True, 'import numpy as np\n'), ((1367, 1408), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - interval / n)', 'n'], {}), '(xmin, xmax - interval / n, n)\n', (1378, 1408), True, 'import numpy as np\n'), ((1532, 1573), 'numpy.linspace', 'np.linspace', (['(xmin + interval / n)', 'xmax', 'n'], {}), '(xmin + interval / n, xmax, n)\n', (1543, 1573), True, 'import numpy as np\n'), ((1701, 1742), 'numpy.linspace', 'np.linspace', (['(xmin + interval / n)', 'xmax', 'n'], {}), '(xmin + interval / n, xmax, n)\n', (1712, 1742), True, 'import numpy as np\n'), ((6729, 6787), 'numpy.concatenate', 'np.concatenate', (['(listDistLeft, listDistMid, listDistRight)'], {}), '((listDistLeft, listDistMid, listDistRight))\n', (6743, 6787), True, 'import numpy as np\n'), ((7491, 7549), 'numpy.concatenate', 'np.concatenate', (['(listDistLeft, listDistMid, listDistRight)'], {}), '((listDistLeft, listDistMid, listDistRight))\n', (7505, 7549), True, 'import numpy as np\n'), ((7635, 7693), 'numpy.concatenate', 'np.concatenate', (['(listDistLeft, listDistMid, listDistRight)'], {}), '((listDistLeft, listDistMid, listDistRight))\n', (7649, 7693), True, 'import numpy as np\n'), ((14635, 14760), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""How much customization do you want ? 0: stop, 1: minimum, 2: average, 3: advanced : """'], {'min': '(0)', 'max': '(3)'}), "(\n 'How much customization do you want ? 
0: stop, 1: minimum, 2: average, 3: advanced : '\n , min=0, max=3)\n", (14648, 14760), True, 'import pyinputplus as pyip\n'), ((17294, 17332), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'lenContinuous'], {}), '(xmin, xmax, lenContinuous)\n', (17305, 17332), True, 'import numpy as np\n'), ((17544, 17563), 'scipy.integrate.quad', 'quad', (['f', 'xmin', 'xmax'], {}), '(f, xmin, xmax)\n', (17548, 17563), False, 'from scipy.integrate import quad, simps\n'), ((22491, 22501), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22499, 22501), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1904), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - interval / n)', 'n'], {}), '(xmin, xmax - interval / n, n)\n', (1874, 1904), True, 'import numpy as np\n'), ((4314, 4352), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': 'color1', 'alpha': '(0.5)'}), '(color=color1, alpha=0.5)\n', (4327, 4352), False, 'from matplotlib import patches\n'), ((4354, 4392), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': 'color2', 'alpha': '(0.5)'}), '(color=color2, alpha=0.5)\n', (4367, 4392), False, 'from matplotlib import patches\n'), ((9093, 9123), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(n + 1)'], {}), '(xmin, xmax, n + 1)\n', (9104, 9123), True, 'import numpy as np\n'), ((9255, 9285), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(n + 1)'], {}), '(xmin, xmax, n + 1)\n', (9266, 9285), True, 'import numpy as np\n'), ((10299, 10333), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(2 * n + 1)'], {}), '(xmin, xmax, 2 * n + 1)\n', (10310, 10333), True, 'import numpy as np\n'), ((10488, 10522), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(2 * n + 1)'], {}), '(xmin, xmax, 2 * n + 1)\n', (10499, 10522), True, 'import numpy as np\n'), ((11708, 11737), 'numpy.polyfit', 'np.polyfit', (['x_data', 'y_data', '(2)'], {}), '(x_data, y_data, 2)\n', (11718, 11737), True, 'import numpy as np\n'), ((11867, 11891), 'numpy.polyval', 'np.polyval', (['poly', 'list_x'], {}), '(poly, list_x)\n', (11877, 11891), True, 'import numpy as np\n'), ((18399, 18411), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18409, 18411), True, 'import matplotlib.pyplot as plt\n'), ((22058, 22068), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (22066, 22068), True, 'import matplotlib.pyplot as plt\n'), ((4635, 4661), 'matplotlib.legend_handler.HandlerTuple', 'HandlerTuple', ([], {'ndivide': 'None'}), '(ndivide=None)\n', (4647, 4661), False, 'from matplotlib.legend_handler import HandlerTuple\n'), ((7969, 8015), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - self.interval / n)', 'n'], {}), '(xmin, xmax - self.interval / n, n)\n', (7980, 8015), True, 'import numpy as np\n'), ((8173, 8219), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - self.interval / n)', 'n'], {}), '(xmin, xmax - self.interval / n, n)\n', (8184, 8219), True, 'import numpy as np\n'), ((18731, 18741), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (18739, 18741), True, 'import matplotlib.pyplot as plt\n'), ((7405, 7463), 'numpy.concatenate', 'np.concatenate', (['(listDistLeft, listDistMid, listDistRight)'], {}), '((listDistLeft, listDistMid, listDistRight))\n', (7419, 7463), True, 'import numpy as np\n'), ((10781, 10802), 'scipy.integrate.simps', 'simps', (['list_y', 'list_x'], {}), '(list_y, list_x)\n', (10786, 10802), False, 'from scipy.integrate import quad, simps\n')]
|
import os
import trimesh
import unittest
import pocketing
import numpy as np
def get_model(file_name):
"""
Load a model from the models directory by expanding paths out.
Parameters
------------
file_name : str
Name of file in `models`
Returns
------------
mesh : trimesh.Geometry
Trimesh object or similar
"""
pwd = os.path.dirname(os.path.abspath(
os.path.expanduser(__file__)))
return trimesh.load(os.path.abspath(
os.path.join(pwd, '../models', file_name)))
class PocketTest(unittest.TestCase):
def test_contour(self):
path = get_model('wrench.dxf')
poly = path.polygons_full[0]
# generate tool paths
toolpaths = pocketing.contour.contour_parallel(poly, .05)
assert all(trimesh.util.is_shape(i, (-1, 2))
for i in toolpaths)
def test_troch(self):
path = get_model('wrench.dxf')
polygon = path.polygons_full[0]
# set radius arbitrarily
radius = .125
# set step to 10% of tool radius
step = radius * 0.10
# generate our trochoids
toolpath = pocketing.trochoidal.toolpath(
polygon, step=step)
assert trimesh.util.is_shape(toolpath, (-1, 2))
def test_archimedian(self):
# test generating a simple archimedean spiral
spiral = pocketing.spiral.archimedean(0.5, 2.0, 0.125)
assert trimesh.util.is_shape(spiral, (-1, 3, 2))
def test_helix(self):
# check a 3D helix
# set values off a tool radius
tool_radius = 0.25
radius = tool_radius * 1.2
pitch = tool_radius * 0.3
height = 2.0
# create the helix
h = pocketing.spiral.helix(
radius=radius,
height=height,
pitch=pitch,)
# should be 3-point arcs
check_arcs(h)
# heights should start and end correctly
assert np.isclose(h[0][0][2], 0.0)
assert np.isclose(h[-1][-1][2], height)
# check the flattened 2D radius
radii = np.linalg.norm(h.reshape((-1, 3))[:, :2], axis=1)
assert np.allclose(radii, radius)
def check_arcs(arcs):
# arcs should be 2D or 2D 3-point arcs
assert trimesh.util.is_shape(arcs, (-1, 3, (3, 2)))
# make sure arcs start where previous arc begins
for a, b in zip(arcs[:-1], arcs[1:]):
assert np.allclose(a[2], b[0])
if __name__ == '__main__':
unittest.main()
|
[
"numpy.allclose",
"pocketing.contour.contour_parallel",
"numpy.isclose",
"os.path.join",
"pocketing.spiral.archimedean",
"pocketing.trochoidal.toolpath",
"unittest.main",
"pocketing.spiral.helix",
"trimesh.util.is_shape",
"os.path.expanduser"
] |
[((2261, 2305), 'trimesh.util.is_shape', 'trimesh.util.is_shape', (['arcs', '(-1, 3, (3, 2))'], {}), '(arcs, (-1, 3, (3, 2)))\n', (2282, 2305), False, 'import trimesh\n'), ((2473, 2488), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2486, 2488), False, 'import unittest\n'), ((732, 778), 'pocketing.contour.contour_parallel', 'pocketing.contour.contour_parallel', (['poly', '(0.05)'], {}), '(poly, 0.05)\n', (766, 778), False, 'import pocketing\n'), ((1155, 1204), 'pocketing.trochoidal.toolpath', 'pocketing.trochoidal.toolpath', (['polygon'], {'step': 'step'}), '(polygon, step=step)\n', (1184, 1204), False, 'import pocketing\n'), ((1234, 1274), 'trimesh.util.is_shape', 'trimesh.util.is_shape', (['toolpath', '(-1, 2)'], {}), '(toolpath, (-1, 2))\n', (1255, 1274), False, 'import trimesh\n'), ((1379, 1424), 'pocketing.spiral.archimedean', 'pocketing.spiral.archimedean', (['(0.5)', '(2.0)', '(0.125)'], {}), '(0.5, 2.0, 0.125)\n', (1407, 1424), False, 'import pocketing\n'), ((1440, 1481), 'trimesh.util.is_shape', 'trimesh.util.is_shape', (['spiral', '(-1, 3, 2)'], {}), '(spiral, (-1, 3, 2))\n', (1461, 1481), False, 'import trimesh\n'), ((1733, 1798), 'pocketing.spiral.helix', 'pocketing.spiral.helix', ([], {'radius': 'radius', 'height': 'height', 'pitch': 'pitch'}), '(radius=radius, height=height, pitch=pitch)\n', (1755, 1798), False, 'import pocketing\n'), ((1958, 1985), 'numpy.isclose', 'np.isclose', (['h[0][0][2]', '(0.0)'], {}), '(h[0][0][2], 0.0)\n', (1968, 1985), True, 'import numpy as np\n'), ((2001, 2033), 'numpy.isclose', 'np.isclose', (['h[-1][-1][2]', 'height'], {}), '(h[-1][-1][2], height)\n', (2011, 2033), True, 'import numpy as np\n'), ((2156, 2182), 'numpy.allclose', 'np.allclose', (['radii', 'radius'], {}), '(radii, radius)\n', (2167, 2182), True, 'import numpy as np\n'), ((2416, 2439), 'numpy.allclose', 'np.allclose', (['a[2]', 'b[0]'], {}), '(a[2], b[0])\n', (2427, 2439), True, 'import numpy as np\n'), ((414, 442), 'os.path.expanduser', 'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (432, 442), False, 'import os\n'), ((494, 535), 'os.path.join', 'os.path.join', (['pwd', '"""../models"""', 'file_name'], {}), "(pwd, '../models', file_name)\n", (506, 535), False, 'import os\n'), ((798, 831), 'trimesh.util.is_shape', 'trimesh.util.is_shape', (['i', '(-1, 2)'], {}), '(i, (-1, 2))\n', (819, 831), False, 'import trimesh\n')]
|
import gym
import random
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from statistics import median, mean
from collections import Counter
LR = 1e-3
env = gym.make('CartPole-v0')
env.reset()
goal_steps = 500
score_requirement = 50
initial_games = 10000
def some_random_games_first():
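    # play a few games with random actions, just to render the environment and confirm it works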
    for _ in range(5):
        env.reset()
        for t in range(200):
            env.render()
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
break
some_random_games_first()
def generate_traning_data():
training_data = []
scores = []
accepted_scores = []
for _ in range(initial_games):
score = 0
game_memory = []
prev_observation = []
for _ in range(goal_steps):
action = random.randrange(0, 2)
observation, reward, done, info = env.step(action)
if len(prev_observation) > 0:
game_memory.append([prev_observation, action])
prev_observation = observation
score += reward
if done:
break
if score > score_requirement:
accepted_scores.append(score)
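            # one-hot encode the action taken at each saved observation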
for data in game_memory:
if data[1] == 1:
output = [0, 1]
elif data[1] == 0:
output = [1, 0]
training_data.append([data[0], output])
env.reset()
scores.append(score)
training_data_save = np.array(training_data)
np.save('saved.npy', training_data_save)
print('Avg score: ', mean(accepted_scores))
print('Median score: ', median(accepted_scores))
print(Counter(accepted_scores))
return training_data
def neural_network_model(input_size):
network = input_data(shape=[None, input_size, 1], name='input')
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
network = regression(
network,
optimizer='adam',
learning_rate=LR,
loss='categorical_crossentropy',
name='targets')
model = tflearn.DNN(network, tensorboard_dir='log')
return model
def train_model(training_data, model=False):
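    # reshape observations to (n_samples, n_features, 1) to match the tflearn input layer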
x = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]), 1)
y = [i[1] for i in training_data]
if not model:
model = neural_network_model(input_size=len(x[0]))
model.fit(
{'input': x},
{'targets': y},
n_epoch=5,
snapshot_step=500,
show_metric=True,
run_id='openai_learning'
)
return model
training_data = generate_traning_data()
model = train_model(training_data)
scores = []
choices = []
for each_game in range(10):
score = 0
game_memory = []
prev_obs = []
env.reset()
for _ in range(goal_steps):
env.render()
if len(prev_obs) == 0:
action = random.randrange(0, 2)
else:
action = np.argmax(model.predict(prev_obs.reshape(-1, len(prev_obs), 1))[0])
choices.append(action)
new_observation, reward, done, info = env.step(action)
prev_obs = new_observation
game_memory.append([new_observation, action])
score += reward
if done:
break
scores.append(score)
print('Avg score:', sum(scores) / len(scores))
print('choice 1:{} choice 0:{}'.format(choices.count(1) / len(choices), choices.count(0) / len(choices)))
print(score_requirement)
env.close()
|
[
"statistics.mean",
"tflearn.layers.core.dropout",
"tflearn.layers.core.fully_connected",
"random.randrange",
"tflearn.DNN",
"statistics.median",
"numpy.array",
"collections.Counter",
"tflearn.layers.core.input_data",
"tflearn.layers.estimator.regression",
"gym.make",
"numpy.save"
] |
[((261, 284), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (269, 284), False, 'import gym\n'), ((1632, 1655), 'numpy.array', 'np.array', (['training_data'], {}), '(training_data)\n', (1640, 1655), True, 'import numpy as np\n'), ((1660, 1700), 'numpy.save', 'np.save', (['"""saved.npy"""', 'training_data_save'], {}), "('saved.npy', training_data_save)\n", (1667, 1700), True, 'import numpy as np\n'), ((1919, 1972), 'tflearn.layers.core.input_data', 'input_data', ([], {'shape': '[None, input_size, 1]', 'name': '"""input"""'}), "(shape=[None, input_size, 1], name='input')\n", (1929, 1972), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((1987, 2035), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(128)'], {'activation': '"""relu"""'}), "(network, 128, activation='relu')\n", (2002, 2035), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2050, 2071), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', (2057, 2071), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2086, 2134), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(256)'], {'activation': '"""relu"""'}), "(network, 256, activation='relu')\n", (2101, 2134), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2149, 2170), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', (2156, 2170), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2185, 2233), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(512)'], {'activation': '"""relu"""'}), "(network, 512, activation='relu')\n", (2200, 2233), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2248, 2269), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', (2255, 2269), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2284, 2332), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(256)'], {'activation': '"""relu"""'}), "(network, 256, activation='relu')\n", (2299, 2332), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2347, 2368), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', (2354, 2368), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2383, 2431), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(128)'], {'activation': '"""relu"""'}), "(network, 128, activation='relu')\n", (2398, 2431), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2446, 2467), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', (2453, 2467), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2482, 2531), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(2)'], {'activation': '"""softmax"""'}), "(network, 2, activation='softmax')\n", (2497, 2531), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2546, 2655), 'tflearn.layers.estimator.regression', 'regression', (['network'], {'optimizer': '"""adam"""', 'learning_rate': 'LR', 'loss': '"""categorical_crossentropy"""', 'name': '"""targets"""'}), "(network, optimizer='adam', learning_rate=LR, loss=\n 
'categorical_crossentropy', name='targets')\n", (2556, 2655), False, 'from tflearn.layers.estimator import regression\n'), ((2704, 2747), 'tflearn.DNN', 'tflearn.DNN', (['network'], {'tensorboard_dir': '"""log"""'}), "(network, tensorboard_dir='log')\n", (2715, 2747), False, 'import tflearn\n'), ((1727, 1748), 'statistics.mean', 'mean', (['accepted_scores'], {}), '(accepted_scores)\n', (1731, 1748), False, 'from statistics import median, mean\n'), ((1778, 1801), 'statistics.median', 'median', (['accepted_scores'], {}), '(accepted_scores)\n', (1784, 1801), False, 'from statistics import median, mean\n'), ((1813, 1837), 'collections.Counter', 'Counter', (['accepted_scores'], {}), '(accepted_scores)\n', (1820, 1837), False, 'from collections import Counter\n'), ((937, 959), 'random.randrange', 'random.randrange', (['(0)', '(2)'], {}), '(0, 2)\n', (953, 959), False, 'import random\n'), ((2821, 2860), 'numpy.array', 'np.array', (['[i[0] for i in training_data]'], {}), '([i[0] for i in training_data])\n', (2829, 2860), True, 'import numpy as np\n'), ((3512, 3534), 'random.randrange', 'random.randrange', (['(0)', '(2)'], {}), '(0, 2)\n', (3528, 3534), False, 'import random\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
sns.set(style='ticks', context='paper', palette='colorblind')
try:
import cmocean.cm as cmo
cmocean_flag = True
except:
cmocean_flag = False
class pltClass:
def __init__(self):
self.__info__ = 'Python qc package plt class'
def float_ncep_inair(sdn, flt, ncep, ax=None, legend=True):
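    # time series of float in-air pO2 against NCEP-derived pO2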
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
ax.plot(sdn, flt, linewidth=2, label='Float')
ax.plot(sdn, ncep, linewidth=2, label='NCEP')
if legend:
ax.legend(loc=3)
mhr = mdates.MonthLocator(interval=4)
mihr = mdates.MonthLocator()
fmt = mdates.DateFormatter('%b %Y')
ax.xaxis.set_major_locator(mhr)
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_minor_locator(mihr)
ax.set_ylabel('pO$_2$ (mbar)')
for tick in ax.get_xticklabels():
tick.set_rotation(45)
g = pltClass()
g.fig = fig
g.axes = [ax]
return g
def float_woa_surface(sdn, flt, woa, ax=None, legend=True):
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
ax.plot(sdn, flt, linewidth=2, label='Float')
ax.plot(sdn, woa, linewidth=2, label='WOA18')
if legend:
ax.legend(loc=3)
mhr = mdates.MonthLocator(interval=4)
mihr = mdates.MonthLocator()
fmt = mdates.DateFormatter('%b %Y')
ax.xaxis.set_major_locator(mhr)
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_minor_locator(mihr)
ax.set_ylabel('O$_2$ Saturation %')
for tick in ax.get_xticklabels():
tick.set_rotation(45)
g = pltClass()
g.fig = fig
g.axes = [ax]
return g
def gains(sdn, gains, inair=True, ax=None, legend=True):
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
ax.plot(sdn, gains, 'o', markeredgewidth=0.5, markersize=5, markeredgecolor='grey', zorder=3, label='Gains')
ax.axhline(np.nanmean(gains), color='k', linestyle='--', label='Mean = {:.2f}'.format(np.nanmean(gains)), zorder=2)
ax.axhline(1.0, color='k', linestyle='-', linewidth=0.5, label=None,zorder=1)
if legend:
ax.legend(loc=3)
mhr = mdates.MonthLocator(interval=4)
mihr = mdates.MonthLocator()
fmt = mdates.DateFormatter('%b %Y')
ax.xaxis.set_major_locator(mhr)
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_minor_locator(mihr)
ax.set_ylabel('O$_2$ Gain (unitless)')
for tick in ax.get_xticklabels():
tick.set_rotation(45)
g = pltClass()
g.fig = fig
g.axes = [ax]
return g
def gainplot(sdn, float_data, ref_data, gainvals, ref):
fig, axes = plt.subplots(2,1,sharex=True)
if ref == 'NCEP':
g1 = float_ncep_inair(sdn, float_data, ref_data, ax=axes[0])
g2 = gains(sdn, gainvals, inair=False, ax=axes[1])
elif ref == 'WOA':
g1 = float_woa_surface(sdn, float_data, ref_data, ax=axes[0])
g2 = gains(sdn, gainvals, inair=False, ax=axes[1])
g = pltClass()
g.fig = fig
g.axes = axes
return g
def var_cscatter(df, varname='DOXY', cmap=None, ax=None, ylim=(0,2000), clabel=None, vmin=None, vmax=None, **kwargs):
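    # time-depth scatter of a single variable, colored by its value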
# define colormaps
if cmocean_flag:
color_maps = dict(
TEMP=cmo.thermal,
TEMP_ADJUSTED=cmo.thermal,
PSAL=cmo.haline,
PSAL_ADJUSTED=cmo.haline,
PDEN=cmo.dense,
CHLA=cmo.algae,
CHLA_ADJUSTED=cmo.algae,
BBP700=cmo.matter,
BBP700_ADJUSTED=cmo.matter,
DOXY=cmo.ice,
DOXY_ADJUSTED=cmo.ice,
DOWNWELLING_IRRADIANCE=cmo.solar,
)
else:
color_maps = dict(
TEMP=plt.cm.inferno,
TEMP_ADJUSTED=plt.cm.inferno,
PSAL=plt.cm.viridis,
PSAL_ADJUSTED=plt.cm.viridis,
PDEN=plt.cm.cividis,
CHLA=plt.cm.YlGn,
CHLA_ADJUSTED=plt.cm.YlGn,
BBP700=plt.cm.pink_r,
BBP700_ADJUSTED=plt.cm.pink_r,
DOXY=plt.cm.YlGnBu_r,
DOXY_ADJUSTED=plt.cm.YlGnBu_r,
DOWNWELLING_IRRADIANCE=plt.cm.magma,
)
if clabel is None:
var_units = dict(
TEMP='Temperature ({}C)'.format(chr(176)),
TEMP_ADJUSTED='Temperature ({}C)'.format(chr(176)),
PSAL='Practical Salinity',
PSAL_ADJUSTED='Practical Salinity',
PDEN='Potential Density (kg m${-3}$)',
CHLA='Chlorophyll (mg m$^{-3}$',
CHLA_ADJUSTED='Chlorophyll (mg m$^{-3}$',
BBP700='$\mathsf{b_{bp}}$ (m$^{-1}$)',
BBP700_ADJUSTED='$\mathsf{b_{bp}}$ (m$^{-1}$)',
DOXY='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
DOXY_ADJUSTED='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
DOWNWELLING_IRRADIANCE='Downwelling Irradiance (W m$^{-2}$)',
)
clabel = var_units[varname]
if cmap is None:
cmap = color_maps[varname]
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
df = df.loc[df.PRES < ylim[1]+50]
if vmin is None:
vmin = 1.05*df[varname].min()
if vmax is None:
vmax = 0.95*df[varname].max()
im = ax.scatter(df.SDN, df.PRES, c=df[varname], s=50, cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)
cb = plt.colorbar(im, ax=ax)
cb.set_label(clabel)
ax.set_ylim(ylim)
ax.invert_yaxis()
ax.set_ylabel('Depth (dbar)')
w, h = fig.get_figwidth(), fig.get_figheight()
fig.set_size_inches(w*2, h)
mhr = mdates.MonthLocator(interval=4)
mihr = mdates.MonthLocator()
fmt = mdates.DateFormatter('%b %Y')
ax.xaxis.set_major_locator(mhr)
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_minor_locator(mihr)
g = pltClass()
g.fig = fig
g.axes = [ax]
g.cb = cb
return g
def profiles(df, varlist=['DOXY'], Ncycle=1, Nprof=np.inf, zvar='PRES', xlabels=None, ylabel=None, axes=None, ylim=None, **kwargs):
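    # vertical profiles of each variable in varlist for Nprof consecutive cycles starting at cycle Ncycle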
if xlabels is None:
var_units = dict(
TEMP='Temperature ({}C)'.format(chr(176)),
TEMP_ADJUSTED='Temperature ({}C)'.format(chr(176)),
PSAL='Practical Salinity',
PSAL_ADJUSTED='Practical Salinity',
PDEN='Potential Density (kg m$^{-3}$)',
CHLA='Chlorophyll (mg m$^{-3}$',
CHLA_ADJUSTED='Chlorophyll (mg m$^{-3}$',
BBP700='$\mathsf{b_{bp}}$ (m$^{-1}$)',
BBP700_ADJUSTED='$\mathsf{b_{bp}}$ (m$^{-1}$)',
CDOM='CDOM (mg m$^{-3}$)',
CDOM_ADJUSTED='CDOM (mg m$^{-3}$)',
DOXY='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
DOXY_ADJUSTED='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
DOWNWELLING_IRRADIANCE='Downwelling Irradiance (W m$^{-2}$)',
)
xlabels = [var_units[v] if v in var_units.keys() else '' for v in varlist]
cm = plt.cm.gray_r
if axes is None:
fig, axes = plt.subplots(1, len(varlist), sharey=True)
if len(varlist) == 1:
axes = [axes]
elif len(varlist) > 1:
fig = axes[0].get_figure()
else:
fig = axes.get_figure()
axes = [axes]
if ylim is None:
if zvar == 'PRES':
ylim=(0,2000)
if ylabel is None:
ylabel = 'Pressure (dbar)'
elif zvar == 'PDEN':
ylim = (df.PDEN.min(), df.PDEN.max())
if ylabel is None:
ylabel = 'Density (kg m$^{-3}$)'
df.loc[df[zvar] > ylim[1]*1.1] = np.nan
CYCNUM = df.CYCLE.unique()
greyflag = False
if not 'color' in kwargs.keys():
greyflag = True
else:
c = kwargs.pop('color')
if Nprof > CYCNUM.shape[0]:
Nprof = CYCNUM.shape[0]
for i,v in enumerate(varlist):
for n in range(Nprof):
subset_df = df.loc[df.CYCLE == CYCNUM[Ncycle-1 + n-1]]
if greyflag:
c = cm(0.75*(CYCNUM[Ncycle-1 + n-1]/CYCNUM[-1])+0.25)
axes[i].plot(subset_df[v], subset_df[zvar], color=c, **kwargs)
axes[i].set_ylim(ylim[::-1])
axes[i].set_xlabel(xlabels[i])
subset_df = df.loc[df.CYCLE == CYCNUM[Ncycle-1]]
date = mdates.num2date(subset_df.SDN.iloc[0]).strftime('%d %b, %Y')
axes[0].set_ylabel(ylabel)
if Nprof != 1:
axes[0].set_title('Cyc. {:d}-{:d}, {}'.format(int(CYCNUM[Ncycle-1]), int(CYCNUM[Ncycle-1+Nprof-1]), date))
else:
axes[0].set_title('Cyc. {:d}, {}'.format(int(CYCNUM[Ncycle-1]), date))
w, h = fig.get_figwidth(), fig.get_figheight()
fig.set_size_inches(w*len(varlist)/3, h)
g = pltClass()
g.fig = fig
g.axes = axes
return g
def qc_profiles(df, varlist=['DOXY'], Ncycle=1, Nprof=np.inf, zvar='PRES', xlabels=None, ylabel=None, axes=None, ylim=None, **kwargs):
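    # same as profiles(), but each point is colored by its QC flag group (good / probably bad / bad / interpolated)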
if xlabels is None:
var_units = dict(
TEMP='Temperature ({}C)'.format(chr(176)),
TEMP_ADJUSTED='Temperature ({}C)'.format(chr(176)),
PSAL='Practical Salinity',
PSAL_ADJUSTED='Practical Salinity',
PDEN='Potential Density (kg m$^{-3}$)',
CHLA='Chlorophyll (mg m$^{-3}$',
CHLA_ADJUSTED='Chlorophyll (mg m$^{-3}$',
BBP700='$\mathsf{b_{bp}}$ (m$^{-1}$)',
BBP700_ADJUSTED='$\mathsf{b_{bp}}$ (m$^{-1}$)',
CDOM='CDOM (mg m$^{-3}$)',
CDOM_ADJUSTED='CDOM (mg m$^{-3}$)',
DOXY='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
DOXY_ADJUSTED='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
DOWNWELLING_IRRADIANCE='Downwelling Irradiance (W m$^{-2}$)',
)
xlabels = [var_units[v] for v in varlist]
if axes is None:
fig, axes = plt.subplots(1, len(varlist), sharey=True)
if len(varlist) == 1:
axes = [axes]
elif len(varlist) > 1:
fig = axes[0].get_figure()
else:
fig = axes.get_figure()
axes = [axes]
if ylim is None:
if zvar == 'PRES':
ylim=(0,2000)
if ylabel is None:
ylabel = 'Pressure (dbar)'
elif zvar == 'PDEN':
ylim = (df.PDEN.min(), df.PDEN.max())
if ylabel is None:
ylabel = 'Density (kg m$^{-3}$)'
df.loc[df[zvar] > ylim[1]*1.1] = np.nan
CYCNUM = df.CYCLE.unique()
if Nprof > CYCNUM.shape[0]:
Nprof = CYCNUM.shape[0]
groups = {'Good':[1,2,5], 'Probably Bad':[3], 'Bad':[4], 'Interpolated':[8]}
colors = {'Good':'green', 'Probably Bad':'yellow', 'Bad':'red', 'Interpolated':'blue'}
for i,v in enumerate(varlist):
vqc = v + '_QC'
for n in range(Nprof):
subset_df = df.loc[df.CYCLE == CYCNUM[Ncycle-1 + n-1]]
for k,f in groups.items():
flag_subset_df = subset_df[subset_df[vqc].isin(f)]
axes[i].plot(flag_subset_df[v], flag_subset_df[zvar], 'o', markeredgewidth=0.1, markeredgecolor='k', markerfacecolor=colors[k], **kwargs)
axes[i].set_ylim(ylim[::-1])
axes[i].set_xlabel(xlabels[i])
subset_df = df.loc[df.CYCLE == CYCNUM[Ncycle-1]]
date = mdates.num2date(subset_df.SDN.iloc[0]).strftime('%d %b, %Y')
axes[0].set_ylabel(ylabel)
if Nprof != 1:
axes[0].set_title('Cyc. {:d}-{:d}, {}'.format(int(CYCNUM[Ncycle-1]), int(CYCNUM[Ncycle-1+Nprof-1]), date))
else:
axes[0].set_title('Cyc. {:d}, {}'.format(int(CYCNUM[Ncycle-1]), date))
w, h = fig.get_figwidth(), fig.get_figheight()
fig.set_size_inches(w*len(varlist)/3, h)
g = pltClass()
g.fig = fig
g.axes = axes
return g
|
[
"matplotlib.dates.num2date",
"seaborn.set",
"matplotlib.dates.MonthLocator",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.colorbar",
"numpy.nanmean",
"matplotlib.pyplot.subplots"
] |
[((109, 170), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'context': '"""paper"""', 'palette': '"""colorblind"""'}), "(style='ticks', context='paper', palette='colorblind')\n", (116, 170), True, 'import seaborn as sns\n'), ((666, 697), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(4)'}), '(interval=4)\n', (685, 697), True, 'import matplotlib.dates as mdates\n'), ((709, 730), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (728, 730), True, 'import matplotlib.dates as mdates\n'), ((742, 771), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b %Y"""'], {}), "('%b %Y')\n", (762, 771), True, 'import matplotlib.dates as mdates\n'), ((1366, 1397), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(4)'}), '(interval=4)\n', (1385, 1397), True, 'import matplotlib.dates as mdates\n'), ((1409, 1430), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (1428, 1430), True, 'import matplotlib.dates as mdates\n'), ((1442, 1471), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b %Y"""'], {}), "('%b %Y')\n", (1462, 1471), True, 'import matplotlib.dates as mdates\n'), ((2283, 2314), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(4)'}), '(interval=4)\n', (2302, 2314), True, 'import matplotlib.dates as mdates\n'), ((2326, 2347), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (2345, 2347), True, 'import matplotlib.dates as mdates\n'), ((2359, 2388), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b %Y"""'], {}), "('%b %Y')\n", (2379, 2388), True, 'import matplotlib.dates as mdates\n'), ((2762, 2793), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (2774, 2793), True, 'import matplotlib.pyplot as plt\n'), ((5488, 5511), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax'}), '(im, ax=ax)\n', (5500, 5511), True, 'import matplotlib.pyplot as plt\n'), ((5712, 5743), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(4)'}), '(interval=4)\n', (5731, 5743), True, 'import matplotlib.dates as mdates\n'), ((5755, 5776), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (5774, 5776), True, 'import matplotlib.dates as mdates\n'), ((5788, 5817), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b %Y"""'], {}), "('%b %Y')\n", (5808, 5817), True, 'import matplotlib.dates as mdates\n'), ((457, 471), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (469, 471), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1171), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1169, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1859, 1873), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1871, 1873), True, 'import matplotlib.pyplot as plt\n'), ((2043, 2060), 'numpy.nanmean', 'np.nanmean', (['gains'], {}), '(gains)\n', (2053, 2060), True, 'import numpy as np\n'), ((5163, 5177), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5175, 5177), True, 'import matplotlib.pyplot as plt\n'), ((8390, 8428), 'matplotlib.dates.num2date', 'mdates.num2date', (['subset_df.SDN.iloc[0]'], {}), '(subset_df.SDN.iloc[0])\n', (8405, 8428), True, 'import matplotlib.dates as mdates\n'), ((11361, 11399), 'matplotlib.dates.num2date', 'mdates.num2date', (['subset_df.SDN.iloc[0]'], {}), '(subset_df.SDN.iloc[0])\n', (11376, 11399), True, 
'import matplotlib.dates as mdates\n'), ((2118, 2135), 'numpy.nanmean', 'np.nanmean', (['gains'], {}), '(gains)\n', (2128, 2135), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
P-spline versions of Whittaker functions
----------------------------------------
pybaselines contains penalized spline (P-spline) versions of all of the
Whittaker-smoothing-based algorithms implemented in pybaselines. The reason
for doing so was that P-splines offer additional user flexibility when choosing
parameters for fitting and more easily work for unequally spaced data. This example
will examine the relationship of `lam` versus the number of data points when fitting
a baseline with the :func:`.arpls` function and its P-spline version,
:func:`.pspline_arpls`.
Note that the exact optimal `lam` values reported in this example are not of significant
use since they depend on many other factors such as the baseline curvature, noise, peaks,
etc.; however, the examined trends can be used to simplify the process of selecting `lam`
values for fitting new datasets.
"""
# sphinx_gallery_thumbnail_number = 2
from itertools import cycle
import matplotlib.pyplot as plt
import numpy as np
from pybaselines import spline, whittaker
# local import with setup code
from example_helpers import make_data, optimize_lam
# %%
# The baseline for this example is an exponentially decaying baseline, shown below.
# Other baseline types could be examined, similar to the
# :ref:`Whittaker lam vs data size example <sphx_glr_examples_whittaker_plot_lam_vs_data_size.py>`,
# which should give similar results.
plt.plot(make_data(1000, bkg_type='exponential')[0])
# %%
# For each function, the optimal `lam` value will be calculated for data sizes
# ranging from 500 to 20000 points. Further, the intercept and slope of the linear fit
# of the log of the data size, N, and the log of the `lam` value will be reported.
# The number of knots for the P-spline version is fixed at the default, 100 (the effect
# of the number of knots versus optimal `lam` is shown in another
# :ref:`example <sphx_glr_examples_spline_plot_lam_vs_num_knots.py>`).
print('Function, intercept & slope of log(N) vs log(lam) fit')
print('-' * 60)
show_plots = False # for debugging
num_points = np.logspace(np.log10(500), np.log10(20000), 6, dtype=int)
symbols = cycle(['o', 's'])
_, ax = plt.subplots()
legend = [[], []]
for i, func in enumerate((whittaker.arpls, spline.pspline_arpls)):
func_name = func.__name__
best_lams = np.empty_like(num_points, float)
min_lam = None
for j, num_x in enumerate(num_points):
y, baseline = make_data(num_x, bkg_type='exponential')
# use a slightly lower tolerance to speed up the calculation
min_lam = optimize_lam(y, baseline, func, min_lam, tol=1e-2, max_iter=50)
best_lams[j] = min_lam
if show_plots:
plt.figure(num=num_x)
if i == 0:
plt.plot(y)
plt.plot(baseline)
plt.plot(func(y, lam=10**min_lam)[0], '--')
fit = np.polynomial.polynomial.Polynomial.fit(np.log10(num_points), best_lams, 1)
coeffs = fit.convert().coef
print(f'{func_name:<16} {coeffs}')
line = 10**fit(np.log10(num_points))
handle_1 = ax.plot(num_points, line, label=func_name)[0]
handle_2 = ax.plot(num_points, 10**best_lams, next(symbols))[0]
legend[0].append((handle_1, handle_2))
legend[1].append(func_name)
ax.loglog()
ax.legend(*legend)
ax.set_xlabel('Input Array Size, N')
ax.set_ylabel('Optimal lam Value')
plt.show()
# %%
# The results shown above demonstrate that the slope of the `lam` vs data
# size best fit line is much smaller for the P-spline based version of arpls.
# This means that once the number of knots is fixed for a particular baseline,
# the required `lam` value should be much less affected by a change in the
# number of data points (assuming the curvature of the data does not change).
#
# The above results are particularly useful when processing very large datasets.
# A `lam` value greater than ~1e14 typically causes numerical issues that can cause
# the solver to fail. Most Whittaker-smoothing-based algorithms reach that `lam`
# cutoff when the number of points is around ~20,000-500,000 (depends on the exact
# algorithm). Since the P-spline versions do not experience such a large increase in
# the required `lam`, they are more suited to fit those larger datasets. Additionally,
# the required `lam` value for the P-spline versions can be lowered simply by reducing
# the number of knots.
#
# It should be noted that a similar result could be obtained using the regular
# Whittaker-smoothing-based version by truncating the number of points to a fixed
# value. That, however, would require additional processing steps to smooth out the
# resulting baseline after interpolating back to the original data size. Thus, the
# P-spline versions require less user-intervention to achieve the same result.
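# %%
# As a rough, minimal sketch of that alternative (the data size, decimation factor,
# and lam value below are illustrative assumptions, not results from this study):
# fit the regular Whittaker version on a decimated copy of the data, then
# interpolate the baseline back onto the full grid.
y_full, _ = make_data(20000, bkg_type='exponential')
step = len(y_full) // 5000  # decimate to roughly 5000 points
y_small = y_full[::step]
baseline_small = whittaker.arpls(y_small, lam=1e7)[0]  # illustrative lam
x_full = np.arange(len(y_full))
baseline_full = np.interp(x_full, x_full[::step], baseline_small)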
|
[
"itertools.cycle",
"numpy.log10",
"example_helpers.make_data",
"matplotlib.pyplot.plot",
"example_helpers.optimize_lam",
"matplotlib.pyplot.figure",
"numpy.empty_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((2171, 2188), 'itertools.cycle', 'cycle', (["['o', 's']"], {}), "(['o', 's'])\n", (2176, 2188), False, 'from itertools import cycle\n'), ((2197, 2211), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2209, 2211), True, 'import matplotlib.pyplot as plt\n'), ((3388, 3398), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3396, 3398), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2128), 'numpy.log10', 'np.log10', (['(500)'], {}), '(500)\n', (2123, 2128), True, 'import numpy as np\n'), ((2130, 2145), 'numpy.log10', 'np.log10', (['(20000)'], {}), '(20000)\n', (2138, 2145), True, 'import numpy as np\n'), ((2343, 2375), 'numpy.empty_like', 'np.empty_like', (['num_points', 'float'], {}), '(num_points, float)\n', (2356, 2375), True, 'import numpy as np\n'), ((1450, 1489), 'example_helpers.make_data', 'make_data', (['(1000)'], {'bkg_type': '"""exponential"""'}), "(1000, bkg_type='exponential')\n", (1459, 1489), False, 'from example_helpers import make_data, optimize_lam\n'), ((2460, 2500), 'example_helpers.make_data', 'make_data', (['num_x'], {'bkg_type': '"""exponential"""'}), "(num_x, bkg_type='exponential')\n", (2469, 2500), False, 'from example_helpers import make_data, optimize_lam\n'), ((2588, 2651), 'example_helpers.optimize_lam', 'optimize_lam', (['y', 'baseline', 'func', 'min_lam'], {'tol': '(0.01)', 'max_iter': '(50)'}), '(y, baseline, func, min_lam, tol=0.01, max_iter=50)\n', (2600, 2651), False, 'from example_helpers import make_data, optimize_lam\n'), ((2930, 2950), 'numpy.log10', 'np.log10', (['num_points'], {}), '(num_points)\n', (2938, 2950), True, 'import numpy as np\n'), ((2719, 2740), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'num_x'}), '(num=num_x)\n', (2729, 2740), True, 'import matplotlib.pyplot as plt\n'), ((2804, 2822), 'matplotlib.pyplot.plot', 'plt.plot', (['baseline'], {}), '(baseline)\n', (2812, 2822), True, 'import matplotlib.pyplot as plt\n'), ((3056, 3076), 'numpy.log10', 'np.log10', (['num_points'], {}), '(num_points)\n', (3064, 3076), True, 'import numpy as np\n'), ((2780, 2791), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {}), '(y)\n', (2788, 2791), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2021-07-08
@author: cook
"""
from astropy.io import fits
from astropy.table import Table
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# =============================================================================
# Define variables
# =============================================================================
path = '/data/spirou/data/minidata2/reduced/2020-08-31/'
# =============================================================================
# Define functions
# =============================================================================
def diff_image(imagepath, imagename):
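    # compare a local copy of `imagename` against the file of the same name under `imagepath`, extension by extension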
try:
hdu1 = fits.open(os.path.join(imagepath, imagename))
hdu2 = fits.open(imagename)
except:
print('Skipping {0} [non-fits]'.format(imagename))
return
for extnum in range(len(hdu1)):
# get name
name = '{0}[{1}]'.format(imagename, extnum)
print('=' * 50)
print(name)
print('=' * 50)
if extnum >= len(hdu2):
print('\tEXTENSION {0} MISSING HDU2'.format(extnum))
continue
# deal with image hdu
if isinstance(hdu1[extnum], fits.ImageHDU):
imdiff = fits.diff.ImageDataDiff(hdu1[extnum].data, hdu2[extnum].data)
print(imdiff.report())
diff = hdu1[extnum].data - hdu2[extnum].data
if np.nansum(diff) != 0:
fig, frame = plt.subplots(ncols=1, nrows=1)
pos = frame.imshow(diff, aspect='auto', origin='lower')
frame.set(title=name)
fig.colorbar(pos, ax=frame)
plt.show()
plt.close()
elif isinstance(hdu1[extnum], fits.BinTableHDU):
imdiff = fits.diff.TableDataDiff(hdu1[extnum].data, hdu2[extnum].data)
print(imdiff.report())
else:
print('\tSkipping (not ImageHDU or BinHDU)')
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
files = np.array(os.listdir('.'))
last_modified = []
# get last modified for all files
for filename in files:
last_modified.append(os.path.getmtime(filename))
# sort by last modified
sortmask = np.argsort(last_modified)
files = files[sortmask]
# diff images in order
for filename in files:
diff_image(path, filename)
# =============================================================================
# End of code
# =============================================================================
|
[
"astropy.io.fits.diff.ImageDataDiff",
"os.listdir",
"os.path.join",
"astropy.io.fits.diff.TableDataDiff",
"numpy.argsort",
"matplotlib.pyplot.close",
"astropy.io.fits.open",
"os.path.getmtime",
"numpy.nansum",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((2454, 2479), 'numpy.argsort', 'np.argsort', (['last_modified'], {}), '(last_modified)\n', (2464, 2479), True, 'import numpy as np\n'), ((809, 829), 'astropy.io.fits.open', 'fits.open', (['imagename'], {}), '(imagename)\n', (818, 829), False, 'from astropy.io import fits\n'), ((2249, 2264), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (2259, 2264), False, 'import os\n'), ((758, 792), 'os.path.join', 'os.path.join', (['imagepath', 'imagename'], {}), '(imagepath, imagename)\n', (770, 792), False, 'import os\n'), ((1316, 1377), 'astropy.io.fits.diff.ImageDataDiff', 'fits.diff.ImageDataDiff', (['hdu1[extnum].data', 'hdu2[extnum].data'], {}), '(hdu1[extnum].data, hdu2[extnum].data)\n', (1339, 1377), False, 'from astropy.io import fits\n'), ((2383, 2409), 'os.path.getmtime', 'os.path.getmtime', (['filename'], {}), '(filename)\n', (2399, 2409), False, 'import os\n'), ((1486, 1501), 'numpy.nansum', 'np.nansum', (['diff'], {}), '(diff)\n', (1495, 1501), True, 'import numpy as np\n'), ((1537, 1567), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(1)'}), '(ncols=1, nrows=1)\n', (1549, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1738, 1748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1746, 1748), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1776), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1774, 1776), True, 'import matplotlib.pyplot as plt\n'), ((1855, 1916), 'astropy.io.fits.diff.TableDataDiff', 'fits.diff.TableDataDiff', (['hdu1[extnum].data', 'hdu2[extnum].data'], {}), '(hdu1[extnum].data, hdu2[extnum].data)\n', (1878, 1916), False, 'from astropy.io import fits\n')]
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Rectangle,Circle
import mpl_toolkits.mplot3d.art3d as art3d
fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
ax = plt.axes(projection='3d')
x,y,z = 10,0,0
dx,dy,dz = 12,12,10
p = Circle((x,y),radius=dx/2,color='b')
p2 = Circle((x,y),radius=dx/2,color='b')
ax.add_patch(p)
ax.add_patch(p2)
art3d.pathpatch_2d_to_3d(p, z=z, zdir="z")
art3d.pathpatch_2d_to_3d(p2, z=z+dz, zdir="z")
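# open cylindrical side wall centered at the origin: radius 6, height 10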
us = np.linspace(0, 2 * np.pi, 32)
zs = np.linspace(0, 10, 2)
us, zs = np.meshgrid(us, zs)
xs = 6 * np.cos(us)
ys = 6 * np.sin(us)
ax.plot_surface(xs, ys, zs, color='g')
plt.show()
|
[
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.axes",
"mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"matplotlib.patches.Circle",
"matplotlib.pyplot.show"
] |
[((192, 204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (202, 204), True, 'import matplotlib.pyplot as plt\n'), ((257, 282), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (265, 282), True, 'import matplotlib.pyplot as plt\n'), ((325, 365), 'matplotlib.patches.Circle', 'Circle', (['(x, y)'], {'radius': '(dx / 2)', 'color': '"""b"""'}), "((x, y), radius=dx / 2, color='b')\n", (331, 365), False, 'from matplotlib.patches import Rectangle, Circle\n'), ((366, 406), 'matplotlib.patches.Circle', 'Circle', (['(x, y)'], {'radius': '(dx / 2)', 'color': '"""b"""'}), "((x, y), radius=dx / 2, color='b')\n", (372, 406), False, 'from matplotlib.patches import Rectangle, Circle\n'), ((435, 477), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['p'], {'z': 'z', 'zdir': '"""z"""'}), "(p, z=z, zdir='z')\n", (459, 477), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((478, 526), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['p2'], {'z': '(z + dz)', 'zdir': '"""z"""'}), "(p2, z=z + dz, zdir='z')\n", (502, 526), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((531, 560), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(32)'], {}), '(0, 2 * np.pi, 32)\n', (542, 560), True, 'import numpy as np\n'), ((567, 588), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(2)'], {}), '(0, 10, 2)\n', (578, 588), True, 'import numpy as np\n'), ((600, 619), 'numpy.meshgrid', 'np.meshgrid', (['us', 'zs'], {}), '(us, zs)\n', (611, 619), True, 'import numpy as np\n'), ((705, 715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (713, 715), True, 'import matplotlib.pyplot as plt\n'), ((631, 641), 'numpy.cos', 'np.cos', (['us'], {}), '(us)\n', (637, 641), True, 'import numpy as np\n'), ((652, 662), 'numpy.sin', 'np.sin', (['us'], {}), '(us)\n', (658, 662), True, 'import numpy as np\n')]
|
import argparse
import datetime
import sys
import threading
import time
import matplotlib.pyplot as plt
import numpy
import yaml
from .__about__ import __copyright__, __version__
from .main import (
cooldown,
measure_temp,
measure_core_frequency,
measure_ambient_temperature,
test,
)
def _get_version_text():
return "\n".join(
[
"stressberry {} [Python {}.{}.{}]".format(
__version__,
sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro,
),
__copyright__,
]
)
def _get_parser_run():
parser = argparse.ArgumentParser(
description="Run stress test for the Raspberry Pi."
)
parser.add_argument(
"--version", "-v", action="version", version=_get_version_text()
)
parser.add_argument(
"-n",
"--name",
type=str,
default="stressberry data",
help="name the data set (default: 'stressberry data')",
)
parser.add_argument(
"-t",
"--temperature-file",
type=str,
default=None,
help="temperature file e.g /sys/class/thermal/thermal_zone0/temp (default: vcgencmd)",
)
parser.add_argument(
"-d",
"--duration",
type=int,
default=300,
help="stress test duration in seconds (default: 300)",
)
parser.add_argument(
"-i",
"--idle",
type=int,
default=150,
help="idle time in seconds at start and end of stress test (default: 150)",
)
parser.add_argument(
"--cooldown",
type=int,
default=60,
help="poll interval seconds to check for stable temperature (default: 60)",
)
parser.add_argument(
"-c",
"--cores",
type=int,
default=None,
help="number of CPU cores to stress (default: all)",
)
parser.add_argument(
"-f",
"--frequency-file",
type=str,
default=None,
help="CPU core frequency file e.g. /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq (default: vcgencmd)",
)
parser.add_argument(
"-a",
"--ambient",
type=str,
nargs=2,
default=None,
help="measure ambient temperature. Sensor Type [11|22|2302] <GPIO Number> e.g. 2302 26",
)
parser.add_argument("outfile", type=argparse.FileType("w"), help="output data file")
return parser
def run(argv=None):
parser = _get_parser_run()
args = parser.parse_args(argv)
# Cool down first
print("Awaiting stable baseline temperature...")
cooldown(interval=args.cooldown, filename=args.temperature_file)
# Start the stress test in another thread
t = threading.Thread(
target=lambda: test(args.duration, args.idle, args.cores), args=()
)
t.start()
times = []
temps = []
freqs = []
ambient = []
while t.is_alive():
times.append(time.time())
temps.append(measure_temp(args.temperature_file))
freqs.append(measure_core_frequency(args.frequency_file))
if args.ambient:
ambient_temperature = measure_ambient_temperature(
sensor_type=args.ambient[0], pin=args.ambient[1]
)
if ambient_temperature is None:
# Reading the sensor can return None if it times out.
# If never had a good result, probably configuration error
# Else use last known value if available or worst case set to zero
if not ambient:
message = "Could not read ambient temperature sensor {} on pin {}".format(
args.ambient[0], args.ambient[1]
)
else:
message = "WARN - Could not read ambient temperature, using last good value"
print(message)
ambient_temperature = next(
(temp for temp in reversed(ambient) if temp is not None), 0
)
ambient.append(ambient_temperature)
delta_t = temps[-1] - ambient[-1]
print(
"Temperature (current | ambient | ΔT): {:4.1f}°C | {:4.1f}°C | {:4.1f}°C - Frequency: {:4.0f}MHz".format(
temps[-1], ambient[-1], delta_t, freqs[-1]
)
)
else:
print(
"Current temperature: {:4.1f}°C - Frequency: {:4.0f}MHz".format(
temps[-1], freqs[-1]
)
)
# Choose the sample interval such that we have a respectable number of
# data points
t.join(2.0)
# normalize times
time0 = times[0]
times = [tm - time0 for tm in times]
args.outfile.write(
"# This file was created by stressberry v{} on {}\n".format(
__version__, datetime.datetime.now()
)
)
yaml.dump(
{
"name": args.name,
"time": times,
"temperature": temps,
"cpu frequency": freqs,
"ambient": ambient,
},
args.outfile,
)
return
def plot(argv=None):
parser = _get_parser_plot()
args = parser.parse_args(argv)
data = [yaml.load(f, Loader=yaml.SafeLoader) for f in args.infiles]
# sort the data such that the data series with the lowest terminal
# temperature is plotted last (and appears in the legend last)
terminal_temps = [d["temperature"][-1] for d in data]
order = [i[0] for i in sorted(enumerate(terminal_temps), key=lambda x: x[1])]
# actually plot it
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
for k in order[::-1]:
if args.delta_t:
temperature_data = numpy.subtract(
data[k]["temperature"], data[k]["ambient"]
)
else:
temperature_data = data[k]["temperature"]
ax1.plot(
data[k]["time"], temperature_data, label=data[k]["name"], lw=args.line_width
)
ax1.grid()
if not args.hide_legend:
ax1.legend(loc="upper left", bbox_to_anchor=(1.03, 1.0), borderaxespad=0)
if args.delta_t:
plot_yaxis_label = "Δ temperature °C (over ambient)"
else:
plot_yaxis_label = "temperature °C"
ax1.set_xlabel("time (s)")
ax1.set_ylabel(plot_yaxis_label)
ax1.set_xlim([data[-1]["time"][0], data[-1]["time"][-1]])
if args.temp_lims:
ax1.set_ylim(*args.temp_lims)
# Only plot frequencies when using a single input file
if len(data) == 1 and args.frequency:
ax2 = plt.twinx()
ax2.set_ylabel("core frequency (MHz)")
if args.freq_lims:
ax2.set_ylim(*args.freq_lims)
try:
for k in order[::-1]:
ax2.plot(
data[k]["time"],
data[k]["cpu frequency"],
label=data[k]["name"],
color="C1",
alpha=0.9,
lw=args.line_width,
)
ax1.set_zorder(ax2.get_zorder() + 1) # put ax1 plot in front of ax2
ax1.patch.set_visible(False) # hide the 'canvas'
        except KeyError:
print("Source data does not contain CPU frequency data.")
if args.outfile is not None:
plt.savefig(
args.outfile,
transparent=args.transparent,
bbox_inches="tight",
dpi=args.dpi,
)
else:
plt.show()
return
def _get_parser_plot():
parser = argparse.ArgumentParser(description="Plot stress test data.")
parser.add_argument(
"--version", "-v", action="version", version=_get_version_text()
)
parser.add_argument(
"infiles",
nargs="+",
type=argparse.FileType("r"),
help="input YAML file(s) (default: stdin)",
)
parser.add_argument(
"-o",
"--outfile",
help=(
"if specified, the plot is written to this file "
"(default: show on screen)"
),
)
parser.add_argument(
"-t",
"--temp-lims",
type=float,
nargs=2,
default=None,
help="limits for the temperature (default: data limits)",
)
parser.add_argument(
"-d",
"--dpi",
type=int,
default=None,
help="image resolution in dots per inch when written to file",
)
parser.add_argument(
"-f",
"--frequency",
help="plot CPU core frequency (single input files only)",
action="store_true",
)
parser.add_argument(
"-l",
"--freq-lims",
type=float,
nargs=2,
default=None,
help="limits for the frequency scale (default: data limits)",
)
parser.add_argument("--hide-legend", help="do not draw legend", action="store_true")
parser.add_argument(
"--not-transparent",
dest="transparent",
help="do not make images transparent",
action="store_false",
default=True,
)
parser.add_argument(
"-lw", "--line-width", type=float, default=None, help="line width"
)
parser.add_argument(
"--delta-t",
action="store_true",
default=False,
help="Use Delta-T (core - ambient) temperature instead of CPU core temperature",
)
return parser
|
[
"argparse.FileType",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"yaml.dump",
"matplotlib.pyplot.twinx",
"yaml.load",
"numpy.subtract",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"time.time",
"matplotlib.pyplot.show"
] |
[((665, 741), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run stress test for the Raspberry Pi."""'}), "(description='Run stress test for the Raspberry Pi.')\n", (688, 741), False, 'import argparse\n'), ((4974, 5103), 'yaml.dump', 'yaml.dump', (["{'name': args.name, 'time': times, 'temperature': temps, 'cpu frequency':\n freqs, 'ambient': ambient}", 'args.outfile'], {}), "({'name': args.name, 'time': times, 'temperature': temps,\n 'cpu frequency': freqs, 'ambient': ambient}, args.outfile)\n", (4983, 5103), False, 'import yaml\n'), ((5680, 5692), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5690, 5692), True, 'import matplotlib.pyplot as plt\n'), ((7613, 7674), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot stress test data."""'}), "(description='Plot stress test data.')\n", (7636, 7674), False, 'import argparse\n'), ((5308, 5344), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.SafeLoader'}), '(f, Loader=yaml.SafeLoader)\n', (5317, 5344), False, 'import yaml\n'), ((6654, 6665), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (6663, 6665), True, 'import matplotlib.pyplot as plt\n'), ((7384, 7478), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.outfile'], {'transparent': 'args.transparent', 'bbox_inches': '"""tight"""', 'dpi': 'args.dpi'}), "(args.outfile, transparent=args.transparent, bbox_inches='tight',\n dpi=args.dpi)\n", (7395, 7478), True, 'import matplotlib.pyplot as plt\n'), ((7552, 7562), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7560, 7562), True, 'import matplotlib.pyplot as plt\n'), ((2438, 2460), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (2455, 2460), False, 'import argparse\n'), ((3014, 3025), 'time.time', 'time.time', ([], {}), '()\n', (3023, 3025), False, 'import time\n'), ((4930, 4953), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4951, 4953), False, 'import datetime\n'), ((5810, 5868), 'numpy.subtract', 'numpy.subtract', (["data[k]['temperature']", "data[k]['ambient']"], {}), "(data[k]['temperature'], data[k]['ambient'])\n", (5824, 5868), False, 'import numpy\n'), ((7855, 7877), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (7872, 7877), False, 'import argparse\n')]
|
#!/usr/bin/env python3
import json
import argparse
import os
from collections import OrderedDict
from utils import normalize
from utils import exact_match_score, regex_match_score, get_rank
from utils import slugify, aggregate, aggregate_ans
from utils import Tokenizer
from multiprocessing import Pool as ProcessPool
# import numpy as np
import pickle as pk
import sys
import time
import numpy as np
ENCODING = "utf-8"
DOC_MEAN = 8.5142
DOC_STD = 2.8324
# ANS_MEAN=86486
# ANS_STD=256258
ANS_MEAN = 11588614
ANS_STD = 98865053
all_corr_rank = []
# def process_record(data_line_, prediction_line_, neg_gap_, feature_dir_, record_dir_, match_fn):
def process_record(data_line_, prediction_line_, neg_gap_, feature_dir_, record_dir_, match_fn, all_doc_scores,
all_ans_scores, z_scores):
missing_count_ = 0
total_count_ = 0
stop_count_ = 0
data = json.loads(data_line_)
question = data['question']
q_id = slugify(question)
q_path = os.path.join(feature_dir_, '%s.json' % q_id)
n_q = [0 for _ in Tokenizer.FEAT]
if os.path.exists(q_path):
q_data = open(q_path, encoding=ENCODING).read()
record = json.loads(q_data)
q_ner = record['ner']
q_pos = record['pos']
for feat in q_ner + q_pos:
n_q[Tokenizer.FEAT_DICT[feat]] += 1
else:
print('question feature file %s not exist!' % q_path)
sys.stdout.flush()
missing_count_ += 1
return missing_count_, total_count_, stop_count_
answer = [normalize(a) for a in data['answer']]
prediction = json.loads(prediction_line_)
# MAKE SURE REVERSE IS TRUE
ranked_prediction = sorted(prediction, key=lambda k: k['doc_score'], reverse=True)
correct_rank = get_rank(prediction, answer, match_fn)
if correct_rank > 150:
# if correct_rank < 50 or correct_rank > 150:
return missing_count_, total_count_, stop_count_
all_corr_rank.append(correct_rank - 1)
all_n_p = []
all_n_a = []
all_p_scores = []
all_a_scores = []
all_probs = []
all_spans = []
repeats = 0
for i, entry in enumerate(ranked_prediction):
doc_id = entry['doc_id']
start = int(entry['start'])
end = int(entry['end'])
doc_score = entry['doc_score']
ans_score = entry['span_score']
prob = entry['prob']
span = entry['span']
# RESTRICT TO MAX 1000000000
# print("Threshold 1000000")
# ans_score=min(ans_score, 1000000) #restrict to max of million
if span in all_spans:
repeats += 1
all_spans.append(span)
################Calculate sample z score (t statistic) for answer score
if all_a_scores == [] or len(
all_a_scores) == 1: # dont use a_zscore feature at the beginning or if we only have 1
a_zscore = 0
else: # Take the sample mean of the previous ones, take zscore of the current with respect to that
# sample_mean = np.mean(all_a_scores + [ans_score])
sample_mean = np.mean(all_a_scores)
# sample_std = np.std(all_a_scores + [ans_score])
sample_std = np.std(all_a_scores)
# if sample_std != 0:
a_zscore = (ans_score - sample_mean) / sample_std
# else:
# a_zscore = 0
z_scores.append(a_zscore)
# THESE ARE FOR STATISTISTICS OVER ENTIRE DATA SET, IGNORE
all_doc_scores.append(doc_score)
all_ans_scores.append(ans_score)
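        # standardise the document score and the running mean answer score with the corpus-level constants defined above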
corr_doc_score = (doc_score - DOC_MEAN) / DOC_STD
corr_ans_mean_score = (np.mean(all_a_scores + [ans_score]) - ANS_MEAN) / ANS_STD
all_probs.append(prob)
###############
p_pos = dict()
p_ner = dict()
feat_file = os.path.join(feature_dir_, '%s.json' % doc_id)
if os.path.exists(feat_file):
record = json.load(open(feat_file))
p_ner[doc_id] = record['ner']
p_pos[doc_id] = record['pos']
n_p = [0 for _ in Tokenizer.FEAT]
n_a = [0 for _ in Tokenizer.FEAT]
for feat in p_ner[doc_id] + p_pos[doc_id]:
n_p[Tokenizer.FEAT_DICT[feat]] += 1
for feat in p_ner[doc_id][start:end + 1] + p_pos[doc_id][start:end + 1]:
n_a[Tokenizer.FEAT_DICT[feat]] += 1
all_n_p.append(n_p)
all_n_a.append(n_a)
all_p_scores.append(doc_score)
all_a_scores.append(ans_score)
f_np = aggregate(all_n_p)
f_na = aggregate(all_n_a)
f_sp = aggregate(all_p_scores)
f_sa = aggregate_ans(all_a_scores)
record = OrderedDict()
# sp, nq, np, na, ha
record['sp'] = f_sp
record['nq'] = list(map(float, n_q))
record['np'] = f_np
record['na'] = f_na
record['sa'] = f_sa
record['a_zscore'] = a_zscore
record['corr_doc_score'] = corr_doc_score
record['i'] = i
record['prob_avg'] = sum(all_probs) / len(all_probs)
record['prob'] = prob
record['repeats'] = repeats
record['ans_avg'] = corr_ans_mean_score
if i + 1 == correct_rank:
# if i + 1 >= correct_rank:
record['stop'] = 1
stop_count_ += 1
write_record = True
# if i % neg_gap_ ==0:
# write_record = True
# else:
# write_record = False
should_return = True
# if i + 1 - correct_rank > 30:
# should_return = True
# else:
# should_return = False
else:
should_return = False
if i % neg_gap_ == 0:
record['stop'] = 0
write_record = True
else:
write_record = False
if write_record:
record_path = os.path.join(record_dir_, '%s_%s.pkl' % (q_id, doc_id))
with open(record_path, 'wb') as f:
pk.dump(record, f)
total_count_ += 1
if should_return:
return missing_count_, total_count_, stop_count_
return missing_count_, total_count_, stop_count_
if __name__ == '__main__':
# unzip trec.tgz to trec
# below is an example run, take 114.5s(on mac mini 2012), generated 15571 records, 7291 of them are stop labels
# python prepare_data.py -p CuratedTrec-test-lstm.preds.txt -a CuratedTrec-test.txt -f trec -r records
#
all_doc_scores = []
all_ans_scores = []
z_scores = []
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--prediction_file',
help='prediction file, e.g. CuratedTrec-test-lstm.preds_train.txt')
parser.add_argument('-a', '--answer_file', help='data set with labels, e.g. CuratedTrec-test_train.txt')
parser.add_argument('-nm', '--no_multiprocess', action='store_true', help='default to use multiprocessing')
parser.add_argument('-ns', '--negative_scale', type=int, default=10, help='scale factor for negative samples')
parser.add_argument('-r', '--record_dir', default=None, help='dir to save generated records data set')
parser.add_argument('-f', '--feature_dir', default=None,
help='dir that contains json features files, unzip squad.tgz or trec.tgz to get that dir')
parser.add_argument('-rg', '--regex', action='store_true', help='default to use exact match')
args = parser.parse_args()
match_func = regex_match_score if args.regex else exact_match_score
missing_count = 0
total_count = 0
stop_count = 0
answer_file = args.answer_file
prediction_file = args.prediction_file
record_dir = args.record_dir
if not os.path.exists(record_dir):
os.makedirs(record_dir)
feature_dir = args.feature_dir
if not os.path.exists(feature_dir):
print('feature_dir does not exist!')
exit(-1)
s = time.time()
if args.no_multiprocess:
for data_line, prediction_line in zip(open(answer_file, encoding=ENCODING),
open(prediction_file, encoding=ENCODING)):
# missing, total, stop = process_record(data_line, prediction_line, args.negative_scale,
# feature_dir, record_dir, match_func)
missing, total, stop = process_record(data_line, prediction_line, args.negative_scale,
feature_dir, record_dir, match_func, all_doc_scores, all_ans_scores,
z_scores)
missing_count += missing
stop_count += stop
total_count += total
print('processed %d records...' % total_count)
sys.stdout.flush()
else:
print('using multiprocessing...')
result_handles = []
async_pool = ProcessPool()
for data_line, prediction_line in zip(open(answer_file, encoding=ENCODING),
open(prediction_file, encoding=ENCODING)):
            # pass the shared statistics lists as well; note they are pickled per worker, so they only accumulate within each process
            param = (data_line, prediction_line, args.negative_scale,
                     feature_dir, record_dir, match_func, all_doc_scores, all_ans_scores, z_scores)
handle = async_pool.apply_async(process_record, param)
result_handles.append(handle)
for result in result_handles:
missing, total, stop = result.get()
missing_count += missing
stop_count += stop
total_count += total
print('processed %d records, stop: %d' % (total_count, stop_count))
sys.stdout.flush()
e = time.time()
print('%d records' % total_count)
print('%d stop labels' % stop_count)
print('%d docs not found' % missing_count)
print('took %.4f s' % (e - s))
# all_ans_scores = list(map(lambda x: min([x, 1000000]), all_ans_scores))
doc_mean = np.mean(all_doc_scores)
ans_mean = np.mean(all_ans_scores)
doc_std = np.std(all_doc_scores)
ans_std = np.std(all_ans_scores)
z_std = np.std(z_scores)
z_mean = np.mean(z_scores)
print("Doc Mean {}".format(doc_mean))
print("Doc Std {}".format(doc_std))
print("Ans Mean {}".format(ans_mean))
print("Ans Std {}".format(ans_std))
print("Doc Max {}".format(max(all_doc_scores)))
print("Ans Max {}".format(max(all_ans_scores)))
print("Z Std {}".format(z_std))
print("Z Max {}".format(max(z_scores)))
print("Z Mean {}".format(z_mean))
print(len(all_corr_rank))
print("i Std {}".format(np.std(all_corr_rank)))
print("i Mean {}".format(np.mean(all_corr_rank)))
|
[
"os.path.exists",
"json.loads",
"numpy.mean",
"collections.OrderedDict",
"pickle.dump",
"utils.slugify",
"argparse.ArgumentParser",
"utils.normalize",
"utils.aggregate",
"utils.aggregate_ans",
"os.makedirs",
"os.path.join",
"utils.get_rank",
"multiprocessing.Pool",
"numpy.std",
"sys.stdout.flush",
"time.time"
] |
[((885, 907), 'json.loads', 'json.loads', (['data_line_'], {}), '(data_line_)\n', (895, 907), False, 'import json\n'), ((951, 968), 'utils.slugify', 'slugify', (['question'], {}), '(question)\n', (958, 968), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((982, 1026), 'os.path.join', 'os.path.join', (['feature_dir_', "('%s.json' % q_id)"], {}), "(feature_dir_, '%s.json' % q_id)\n", (994, 1026), False, 'import os\n'), ((1072, 1094), 'os.path.exists', 'os.path.exists', (['q_path'], {}), '(q_path)\n', (1086, 1094), False, 'import os\n'), ((1585, 1613), 'json.loads', 'json.loads', (['prediction_line_'], {}), '(prediction_line_)\n', (1595, 1613), False, 'import json\n'), ((1752, 1790), 'utils.get_rank', 'get_rank', (['prediction', 'answer', 'match_fn'], {}), '(prediction, answer, match_fn)\n', (1760, 1790), False, 'from utils import exact_match_score, regex_match_score, get_rank\n'), ((6700, 6725), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6723, 6725), False, 'import argparse\n'), ((8080, 8091), 'time.time', 'time.time', ([], {}), '()\n', (8089, 8091), False, 'import time\n'), ((9798, 9809), 'time.time', 'time.time', ([], {}), '()\n', (9807, 9809), False, 'import time\n'), ((10064, 10087), 'numpy.mean', 'np.mean', (['all_doc_scores'], {}), '(all_doc_scores)\n', (10071, 10087), True, 'import numpy as np\n'), ((10103, 10126), 'numpy.mean', 'np.mean', (['all_ans_scores'], {}), '(all_ans_scores)\n', (10110, 10126), True, 'import numpy as np\n'), ((10141, 10163), 'numpy.std', 'np.std', (['all_doc_scores'], {}), '(all_doc_scores)\n', (10147, 10163), True, 'import numpy as np\n'), ((10178, 10200), 'numpy.std', 'np.std', (['all_ans_scores'], {}), '(all_ans_scores)\n', (10184, 10200), True, 'import numpy as np\n'), ((10213, 10229), 'numpy.std', 'np.std', (['z_scores'], {}), '(z_scores)\n', (10219, 10229), True, 'import numpy as np\n'), ((10243, 10260), 'numpy.mean', 'np.mean', (['z_scores'], {}), '(z_scores)\n', (10250, 10260), True, 'import numpy as np\n'), ((1169, 1187), 'json.loads', 'json.loads', (['q_data'], {}), '(q_data)\n', (1179, 1187), False, 'import json\n'), ((1411, 1429), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1427, 1429), False, 'import sys\n'), ((1530, 1542), 'utils.normalize', 'normalize', (['a'], {}), '(a)\n', (1539, 1542), False, 'from utils import normalize\n'), ((3887, 3933), 'os.path.join', 'os.path.join', (['feature_dir_', "('%s.json' % doc_id)"], {}), "(feature_dir_, '%s.json' % doc_id)\n", (3899, 3933), False, 'import os\n'), ((3945, 3970), 'os.path.exists', 'os.path.exists', (['feat_file'], {}), '(feat_file)\n', (3959, 3970), False, 'import os\n'), ((4569, 4587), 'utils.aggregate', 'aggregate', (['all_n_p'], {}), '(all_n_p)\n', (4578, 4587), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((4603, 4621), 'utils.aggregate', 'aggregate', (['all_n_a'], {}), '(all_n_a)\n', (4612, 4621), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((4637, 4660), 'utils.aggregate', 'aggregate', (['all_p_scores'], {}), '(all_p_scores)\n', (4646, 4660), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((4676, 4703), 'utils.aggregate_ans', 'aggregate_ans', (['all_a_scores'], {}), '(all_a_scores)\n', (4689, 4703), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((4722, 4735), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4733, 4735), False, 'from collections import OrderedDict\n'), ((7875, 7901), 'os.path.exists', 'os.path.exists', (['record_dir'], {}), 
'(record_dir)\n', (7889, 7901), False, 'import os\n'), ((7911, 7934), 'os.makedirs', 'os.makedirs', (['record_dir'], {}), '(record_dir)\n', (7922, 7934), False, 'import os\n'), ((7981, 8008), 'os.path.exists', 'os.path.exists', (['feature_dir'], {}), '(feature_dir)\n', (7995, 8008), False, 'import os\n'), ((9067, 9080), 'multiprocessing.Pool', 'ProcessPool', ([], {}), '()\n', (9078, 9080), True, 'from multiprocessing import Pool as ProcessPool\n'), ((3107, 3128), 'numpy.mean', 'np.mean', (['all_a_scores'], {}), '(all_a_scores)\n', (3114, 3128), True, 'import numpy as np\n'), ((3227, 3247), 'numpy.std', 'np.std', (['all_a_scores'], {}), '(all_a_scores)\n', (3233, 3247), True, 'import numpy as np\n'), ((6025, 6080), 'os.path.join', 'os.path.join', (['record_dir_', "('%s_%s.pkl' % (q_id, doc_id))"], {}), "(record_dir_, '%s_%s.pkl' % (q_id, doc_id))\n", (6037, 6080), False, 'import os\n'), ((8947, 8965), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8963, 8965), False, 'import sys\n'), ((9770, 9788), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9786, 9788), False, 'import sys\n'), ((10707, 10728), 'numpy.std', 'np.std', (['all_corr_rank'], {}), '(all_corr_rank)\n', (10713, 10728), True, 'import numpy as np\n'), ((10760, 10782), 'numpy.mean', 'np.mean', (['all_corr_rank'], {}), '(all_corr_rank)\n', (10767, 10782), True, 'import numpy as np\n'), ((3706, 3741), 'numpy.mean', 'np.mean', (['(all_a_scores + [ans_score])'], {}), '(all_a_scores + [ans_score])\n', (3713, 3741), True, 'import numpy as np\n'), ((6144, 6162), 'pickle.dump', 'pk.dump', (['record', 'f'], {}), '(record, f)\n', (6151, 6162), True, 'import pickle as pk\n')]
|
#!/usr/bin/env python3
import layers
import cv2
from os.path import join
import numpy as np
# import tensorflow as tf
import Augmentor
vw = 320
vh = 320
class Augment:
def __init__(self):
self.w = 2 * 640
self.h = 2 * 480
self.canvas = np.zeros((self.h, self.w, 3), dtype=np.uint8)
def update(self, _im):
# sc = .4
# h, w = (int(sc * _im.shape[0]), int(sc * _im.shape[1]))
vh = _im.shape[0]
vw = _im.shape[1]
# im = cv2.resize(_im, (w, h))
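        # paint the frame twice on the canvas: once under the "Original" caption and once under the "Distorted" caption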
self.canvas[100:(100 + vh), :vw, :] = _im
cv2.putText(self.canvas, "Original", (0, 50), cv2.FONT_HERSHEY_COMPLEX, 1.0, (255, 255, 255))
cv2.putText(self.canvas, "Distorted", (0, 150 + vh), cv2.FONT_HERSHEY_COMPLEX, 1.0, (255, 255, 255))
self.canvas[(200 + vh):(200 + 2 * vh), :vw, :] = _im
cv2.imshow("Image Augment", self.canvas)
cv2.waitKey(0)
if __name__ == "__main__":
data_root = "/mnt/4102422c-af52-4b55-988f-df7544b35598/dataset/KITTI/KITTI_Odometry/"
seq = "14"
vo_fn = data_root + "dataset/poses/" + seq.zfill(2) + ".txt"
im_dir = data_root + "dataset/sequences/" + seq.zfill(2)
aux_dir = "/home/handuo/projects/paper/image_base/downloads"
i = 0
gui = Augment()
# with tf.Session() as sess:
ims = []
p = Augmentor.Pipeline(join(im_dir, "image_0/"), output_directory="../../../output", save_format="JPEG")
# print("Has %s samples." % (len(p.augmentor_images)))
p.zoom(probability=0.3, min_factor=0.9, max_factor=1.2)
p.skew(probability=0.75, magnitude=0.3)
# p.random_erasing(probability=0.5, rectangle_area=0.3)
p.multi_erasing(probability=0.5, max_x_axis=0.3, max_y_axis=0.15, max_num=4)
# p.rotate(probability=0.5, max_left_rotation=6, max_right_rotation=6)
p.sample(10)
|
[
"os.path.join",
"cv2.imshow",
"cv2.putText",
"numpy.zeros",
"cv2.waitKey"
] |
[((268, 313), 'numpy.zeros', 'np.zeros', (['(self.h, self.w, 3)'], {'dtype': 'np.uint8'}), '((self.h, self.w, 3), dtype=np.uint8)\n', (276, 313), True, 'import numpy as np\n'), ((576, 673), 'cv2.putText', 'cv2.putText', (['self.canvas', '"""Original"""', '(0, 50)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)', '(255, 255, 255)'], {}), "(self.canvas, 'Original', (0, 50), cv2.FONT_HERSHEY_COMPLEX, 1.0,\n (255, 255, 255))\n", (587, 673), False, 'import cv2\n'), ((678, 783), 'cv2.putText', 'cv2.putText', (['self.canvas', '"""Distorted"""', '(0, 150 + vh)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)', '(255, 255, 255)'], {}), "(self.canvas, 'Distorted', (0, 150 + vh), cv2.\n FONT_HERSHEY_COMPLEX, 1.0, (255, 255, 255))\n", (689, 783), False, 'import cv2\n'), ((850, 890), 'cv2.imshow', 'cv2.imshow', (['"""Image Augment"""', 'self.canvas'], {}), "('Image Augment', self.canvas)\n", (860, 890), False, 'import cv2\n'), ((899, 913), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (910, 913), False, 'import cv2\n'), ((1345, 1369), 'os.path.join', 'join', (['im_dir', '"""image_0/"""'], {}), "(im_dir, 'image_0/')\n", (1349, 1369), False, 'from os.path import join\n')]
|
from qtpy import QtWidgets, QtCore
from pyqtgraph.widgets.SpinBox import SpinBox
from pyqtgraph.parametertree.parameterTypes.basetypes import WidgetParameterItem
from pymodaq.daq_utils.daq_utils import scroll_log, scroll_linear
import numpy as np
class SliderSpinBox(QtWidgets.QWidget):
def __init__(self, *args, subtype='lin', **kwargs):
super().__init__()
self.subtype = subtype
self.initUI(*args, **kwargs)
self.valueChanged = self.spinbox.valueChanged # (value) for compatibility with QSpinBox
self.sigValueChanged = self.spinbox.sigValueChanged # (self)
self.sigValueChanging = self.spinbox.sigValueChanging # (self, value) sent immediately; no delay.
self.sigChanged = self.spinbox.sigValueChanged
@property
def opts(self):
return self.spinbox.opts
@opts.setter
    def opts(self, opts):
self.setOpts(**opts)
def setOpts(self, **opts):
self.spinbox.setOpts(**opts)
if 'visible' in opts:
self.slider.setVisible(opts['visible'])
    def insert_widget(self, widget, row=0):
self.vlayout.insertWidget(row, widget)
def initUI(self, *args, **kwargs):
"""
Init the User Interface.
"""
self.vlayout = QtWidgets.QVBoxLayout()
self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.slider.setMinimumWidth(50)
self.slider.setMinimum(0)
self.slider.setMaximum(100)
if 'value' in kwargs:
value = kwargs.pop('value')
else:
if 'bounds' in kwargs:
value = kwargs['bounds'][0]
else:
value = 1
self.spinbox = SpinBox(parent=None, value=value, **kwargs)
self.vlayout.addWidget(self.slider)
self.vlayout.addWidget(self.spinbox)
self.vlayout.setSpacing(0)
self.vlayout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.vlayout)
self.slider.valueChanged.connect(self.update_spinbox)
self.spinbox.valueChanged.connect(self.update_slide)
def update_spinbox(self, val):
"""
val is a percentage [0-100] used in order to set the spinbox value between its min and max
"""
min_val = float(self.opts['bounds'][0])
max_val = float(self.opts['bounds'][1])
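        # map the 0-100 slider position onto [min_val, max_val], on a log or linear scale depending on the subtype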
if self.subtype == 'log':
val_out = scroll_log(val, min_val, max_val)
else:
val_out = scroll_linear(val, min_val, max_val)
try:
self.slider.valueChanged.disconnect(self.update_spinbox)
self.spinbox.valueChanged.disconnect(self.update_slide)
except Exception:
pass
self.spinbox.setValue(val_out)
self.slider.valueChanged.connect(self.update_spinbox)
self.spinbox.valueChanged.connect(self.update_slide)
def update_slide(self, val):
"""
val is the spinbox value between its min and max
"""
min_val = float(self.opts['bounds'][0])
max_val = float(self.opts['bounds'][1])
try:
self.slider.valueChanged.disconnect(self.update_spinbox)
self.spinbox.valueChanged.disconnect(self.update_slide)
except Exception:
pass
        # map the spinbox value back onto the 0-100 slider range, using the same log/linear test as update_spinbox
        if self.subtype == 'log':
            value = int((np.log10(val) - np.log10(min_val)) / (np.log10(max_val) - np.log10(min_val)) * 100)
        else:
            value = int((val - min_val) / (max_val - min_val) * 100)
self.slider.setValue(value)
self.slider.valueChanged.connect(self.update_spinbox)
self.spinbox.valueChanged.connect(self.update_slide)
def setValue(self, val):
self.spinbox.setValue(val)
def value(self):
return self.spinbox.value()
class SliderParameterItem(WidgetParameterItem):
"""Registered parameter type which displays a QLineEdit"""
def makeWidget(self):
opts = self.param.opts
defs = {
'value': 0, 'min': None, 'max': None,
'step': 1.0, 'dec': False,
'siPrefix': False, 'suffix': '', 'decimals': 12,
}
if 'subtype' not in opts:
opts['subtype'] = 'linear'
defs['bounds'] = [0., self.param.value()] # max value set to default value when no max given
if 'limits' not in opts:
if 'min' in opts:
defs['bounds'][0] = opts['min']
if 'max' in opts:
defs['bounds'][1] = opts['max']
else:
defs['bounds'] = opts['limits']
w = SliderSpinBox(subtype=opts['subtype'], bounds=defs['bounds'], value=defs['value'])
self.setSizeHint(1, QtCore.QSize(50, 50))
return w
|
[
"numpy.log10",
"qtpy.QtWidgets.QVBoxLayout",
"pymodaq.daq_utils.daq_utils.scroll_log",
"qtpy.QtCore.QSize",
"pymodaq.daq_utils.daq_utils.scroll_linear",
"pyqtgraph.widgets.SpinBox.SpinBox",
"qtpy.QtWidgets.QSlider"
] |
[((1283, 1306), 'qtpy.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1304, 1306), False, 'from qtpy import QtWidgets, QtCore\n'), ((1329, 1368), 'qtpy.QtWidgets.QSlider', 'QtWidgets.QSlider', (['QtCore.Qt.Horizontal'], {}), '(QtCore.Qt.Horizontal)\n', (1346, 1368), False, 'from qtpy import QtWidgets, QtCore\n'), ((1709, 1752), 'pyqtgraph.widgets.SpinBox.SpinBox', 'SpinBox', ([], {'parent': 'None', 'value': 'value'}), '(parent=None, value=value, **kwargs)\n', (1716, 1752), False, 'from pyqtgraph.widgets.SpinBox import SpinBox\n'), ((2402, 2435), 'pymodaq.daq_utils.daq_utils.scroll_log', 'scroll_log', (['val', 'min_val', 'max_val'], {}), '(val, min_val, max_val)\n', (2412, 2435), False, 'from pymodaq.daq_utils.daq_utils import scroll_log, scroll_linear\n'), ((2472, 2508), 'pymodaq.daq_utils.daq_utils.scroll_linear', 'scroll_linear', (['val', 'min_val', 'max_val'], {}), '(val, min_val, max_val)\n', (2485, 2508), False, 'from pymodaq.daq_utils.daq_utils import scroll_log, scroll_linear\n'), ((4675, 4695), 'qtpy.QtCore.QSize', 'QtCore.QSize', (['(50)', '(50)'], {}), '(50, 50)\n', (4687, 4695), False, 'from qtpy import QtWidgets, QtCore\n'), ((3415, 3428), 'numpy.log10', 'np.log10', (['val'], {}), '(val)\n', (3423, 3428), True, 'import numpy as np\n'), ((3431, 3448), 'numpy.log10', 'np.log10', (['min_val'], {}), '(min_val)\n', (3439, 3448), True, 'import numpy as np\n'), ((3453, 3470), 'numpy.log10', 'np.log10', (['max_val'], {}), '(max_val)\n', (3461, 3470), True, 'import numpy as np\n'), ((3473, 3490), 'numpy.log10', 'np.log10', (['min_val'], {}), '(min_val)\n', (3481, 3490), True, 'import numpy as np\n')]
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import logging as log
from mo.utils.error import Error
from mo.utils.utils import refer_to_faq_msg
def tf_slice_infer(node):
input = node.in_node(0)
begin = node.in_node(1)
size = node.in_node(2)
output = node.out_node()
if input.value is None or begin.value is None or size.value is None:
return
if begin.value.size > 1 or size.value.size > 1:
log.error("Slice operation doesn't support parameters (begin, size) with size more then 1")
log.error(" Begin : {}".format(begin.value))
log.error(" Size : {}".format(size.value))
return
# if the 'size' value is equal to -1 then all remaining elements in dimension are included in the slice.
# refer to TensorFlow documentation for more details
if size.value.item() == -1:
size.value = np.array(input.shape[0] - begin.value.item())
output.value = input.value[begin.value.item():(begin.value.item() + size.value.item())]
output.shape = np.array(output.value.shape, dtype=np.int64)
def tf_strided_slice_infer(node):
begin_id = node.in_node(1).value
end_id = node.in_node(2).value
stride = node.in_node(3).value
shape = node.in_node(0).shape
if shape is None or any([x < 0 for x in shape]):
return
convert_negative_indices(begin_id, shape)
convert_negative_indices(end_id, shape)
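    # test_bit returns True when bit `offset` of the integer mask `val` is set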
test_bit = lambda val, offset: ((1 << offset) & val != 0)
slice_idx = []
shrink_axis_mask = []
ellipsis_mask = []
new_axis_mask = []
dims = len(begin_id)
for idx in range(dims):
l = begin_id[idx] if not test_bit(node.begin_mask, idx) else 0
r = end_id[idx] if not test_bit(node.end_mask, idx) else shape[idx]
# Check shrink_axis_mask
shrink_axis_mask.append(test_bit(node.shrink_axis_mask, idx))
if shrink_axis_mask[idx]:
l, r = l, l + 1
# Check new_axis_mask
new_axis_mask.append(test_bit(node.new_axis_mask, idx))
if new_axis_mask[idx]:
slice_idx.append(np.newaxis)
# Check ellipsis_mask
ellipsis_mask.append(test_bit(node.ellipsis_mask, idx))
if ellipsis_mask[idx]:
shrink_axis_mask[idx] = False
l, r = 0, shape[idx]
slice_idx.append(slice(l, r, stride[idx]))
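    # apply the assembled slices to the real input value, or to a zero placeholder of the same shape, to infer the output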
value = node.in_node(0).value if node.in_node(0).value is not None else np.zeros(shape)
value = value[slice_idx]
for idx, flag in reversed(list(enumerate(shrink_axis_mask))):
if flag:
value = np.squeeze(value, idx)
node['slices'] = np.array(slice_idx)
node['shrink_axis_mask'] = np.array(shrink_axis_mask)
node.out_node().value = np.array(value) if node.in_node(0).value is not None else None
node.out_node().shape = np.array(value.shape)
def convert_negative_indices(indices: np.array, shape: np.array):
for ind, value in enumerate(indices):
if value < 0:
indices[ind] += shape[ind]
def caffe_slice_infer(node):
"""
Slices an input layer to multiple output layers along a given dimension
with given slice indices
Parameters
----------
node
"""
top_shape = node.in_node(0).shape
slice_axis = node.axis
bottom_slice_axis = node.in_node(0).shape[node.axis]
if len(node.slice_point) == 0:
new_shape = np.array(top_shape, dtype=np.int64)
new_shape[slice_axis] = bottom_slice_axis / len(node.out_nodes())
for i in range(0, len(node.out_nodes())):
node.out_node(i).shape = new_shape
return
assert (len(node.slice_point) == len(node.out_nodes()) - 1)
prev = 0
slices = []
for slice_point in node.slice_point:
if slice_point <= prev:
raise Error('Check failed for the layer {}. Slice points should be ordered in increasing manner. '.format(node.id) +
'Current slice point {} is not greater than the previous slice point {}. '.format(slice_point, prev) +
'Please verify your model correctness')
slices.append(slice_point - prev)
prev = slice_point
slices.append(bottom_slice_axis - prev)
if sum(slices) != bottom_slice_axis:
raise Error('Check failed for the layer {}. Sum of slices points {} does not equal '.format(node.id, sum(slices)) +
'to the value of input blob shape by the given slice axis {}'.format(bottom_slice_axis))
for i in range(len(node.out_nodes())):
new_shape = np.array(top_shape, dtype=np.int64)
new_shape[slice_axis] = slices[i]
node.out_node(i).shape = new_shape
def mxnet_slice_axis_infer(node):
in_shape = node.in_node(0).shape
slice_axis = node.axis
new_shape = np.array(in_shape, dtype=np.int64)
new_shape[slice_axis] = new_shape[slice_axis] / len(node.out_nodes())
axis_size = in_shape[slice_axis]
if node.offset < 0:
node.offset += axis_size
if not node.dim:
node.dim = axis_size
elif node.dim < 0:
node.dim += axis_size
input_dim = in_shape.size
node.dim = (node.dim - node.offset)
if node.dim > in_shape[slice_axis]:
raise Error(
'{0} node dimension value is bigger than the corresponding value in the input shape {1}. ' +
'\nIn particular {2} is bigger than {3}. The Model Optimizer does not support this case. ' +
'\nTo overcome, try to edit the original model "end" property of the {0} layer.',
node.name, ','.join(str(i) for i in in_shape), str(node.dim), str(in_shape[slice_axis])
)
for i in range(0, input_dim):
if i == slice_axis:
new_shape[i] = node.dim
else:
new_shape[i] = in_shape[i]
for i in range(0, len(node.out_nodes())):
node.out_node(i)['shape'] = new_shape
|
[
"numpy.array",
"numpy.zeros",
"logging.error",
"numpy.squeeze"
] |
[((1582, 1626), 'numpy.array', 'np.array', (['output.value.shape'], {'dtype': 'np.int64'}), '(output.value.shape, dtype=np.int64)\n', (1590, 1626), True, 'import numpy as np\n'), ((3177, 3196), 'numpy.array', 'np.array', (['slice_idx'], {}), '(slice_idx)\n', (3185, 3196), True, 'import numpy as np\n'), ((3228, 3254), 'numpy.array', 'np.array', (['shrink_axis_mask'], {}), '(shrink_axis_mask)\n', (3236, 3254), True, 'import numpy as np\n'), ((3375, 3396), 'numpy.array', 'np.array', (['value.shape'], {}), '(value.shape)\n', (3383, 3396), True, 'import numpy as np\n'), ((5335, 5369), 'numpy.array', 'np.array', (['in_shape'], {'dtype': 'np.int64'}), '(in_shape, dtype=np.int64)\n', (5343, 5369), True, 'import numpy as np\n'), ((991, 1092), 'logging.error', 'log.error', (['"""Slice operation doesn\'t support parameters (begin, size) with size more then 1"""'], {}), '(\n "Slice operation doesn\'t support parameters (begin, size) with size more then 1"\n )\n', (1000, 1092), True, 'import logging as log\n'), ((2983, 2998), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2991, 2998), True, 'import numpy as np\n'), ((3284, 3299), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (3292, 3299), True, 'import numpy as np\n'), ((3937, 3972), 'numpy.array', 'np.array', (['top_shape'], {'dtype': 'np.int64'}), '(top_shape, dtype=np.int64)\n', (3945, 3972), True, 'import numpy as np\n'), ((5097, 5132), 'numpy.array', 'np.array', (['top_shape'], {'dtype': 'np.int64'}), '(top_shape, dtype=np.int64)\n', (5105, 5132), True, 'import numpy as np\n'), ((3132, 3154), 'numpy.squeeze', 'np.squeeze', (['value', 'idx'], {}), '(value, idx)\n', (3142, 3154), True, 'import numpy as np\n')]
|
#!/usr/bin/python
import sys
import ast
import json
import math as m
import numpy as np
# from scipy.interpolate import interp1d
# from scipy.optimize import fsolve
# Version Controller
sTitle = 'DNVGL RP F103 Cathodic protection of submarine pipelines'
sVersion = 'Version 1.0.0'
# Define constants
pi = m.pi
e = m.e
precision = 2
fFormat = "{:.{}f}"
separate = "--------------------"
Table62 = np.array([
[25, 0.050, 0.020],
[50, 0.060, 0.030],
[80, 0.075, 0.040],
[120, 0.100, 0.060],
[200, 0.130, 0.080],
])
Table63 = np.array([
['Al-Zn-In', 30, -1.050, 2000, -1.000, 1500],
['Al-Zn-In', 60, -1.050, 1500, -1.000, 680],
['Al-Zn-In', 80, -1.000, 720, -1.000, 320],
['Zn', 30, -1.030, 780, -0.980, 750],
['Zn', 50, -1.050, 2000, -0.750, 580],
])
TableA1 = np.array([
['Glass fibre reincored alphat enamel', True, 70, 0.01, 0.0003],
['FBE', True, 90, 0.030, 0.0003],
['FBE', False, 90, 0.030, 0.0010],
['3-layer FBE/PE', True, 80, 0.001, 0.00003],
['3-layer FBE/PE', False, 80, 0.001, 0.00003],
['3-layer FBE/PP', True, 110, 0.001, 0.00003],
['3-layer FBE/PP', False, 80, 0.001, 0.00003],
['FBE/PP Thermally insulating coating', False, 140, 0.0003, 0.00001],
['FBE/PU Thermally insulating coating', False, 70, 0.01, 0.003],
['Polychloroprene', False, 90, 0.01, 0.001],
])
TableA2 = np.array([
['none', '4E(1) moulded PU on top bare steel (with primer)', 70, 0.30, 0.030],
['1D Adhesive Tape or 2A(1)/2A-(2) HSS (PE/PP backing) with mastic adhesive', '4E(2) moulded PU on top 1D or 2A(1)/2A(2)', 70, 0.10, 0.010],
['2B(1) HSS (backing + adhesive in PE with LE primer)', 'none', 70, 0.03, 0.003],
['2B(1) HSS (backing + adhesive in PE with LE primer)', '4E(2) moulded PU on 0.03 0.003 top 2B(1)', 70, 0.03, 0.003],
['2C (1) HSS (backing + adhesive in PP, LE primer)', 'none', 110, 0.03, 0.003],
['2C (1) HSS (backing + adhesive in PP, LE primer)', '4E(2) moulded PU on top 2B(1)', 110, 0.03, 0.003],
['3A FBE', 'none', 90, 0.10, 0.010],
['3A FBE', '4E(2) moulded PU on top', 90, 0.03, 0.003],
['2B(2) FBE with PE HSS', 'none', 70, 0.01, 0.0003],
['2B(2) FBE with PE HSS', '4E(2) moulded PU on top FBE + PE HSS', 70, 0.01, 0.0003],
['5D(1) and 5E FBE with PE applied as flame spraying or tape, respectively', 'none', 70, 0.01, 0.0003],
['2C(2) FBE with PP HSS', 'none', 140, 0.01, 0.0003],
['5A/B/C(1) FBE, PP adhesive and PP (wrapped, flame sprayed or moulded)', 'none', 140, 0.01, 0.0003],
['NA', '5C(1) Moulded PE on top FBE with PE adhesive', 70, 0.01, 0.0003],
['NA', '5C(2) Moulded PP on top FBE with PP adhesive', 140, 0.01, 0.0003],
['8A polychloroprene', 'none', 90, 0.03, 0.001],
])
# This function returns the final current demand, Icf, in accordance with Eq. 3 of [1]
def Icf(Ac, fcf, icm, k):
    Icf = Ac*fcf*icm*k
    return Icf
def fcf(a, b, t):
    fcf = a + b*t
    return fcf
# This function returns the required anode mass, M, in accordance with Eq. 5 of [1]
def M(Icm, tf, u, e):
    M = (Icm*tf*8760)/(u*e)
    return M
def DNVGLRPF113(D, lPL, lFJ, tAmb, tAno, tf, rhoSea, aGap, aThk, aDen, aMat, coatLP, coatFJ, nJoints, burial, u=0.8):
k = 1.0
EA0 = -0.85
# determine mean current demand from Table 6-2 of [1]
icm = [x for x in Table62 if x[0] > tAmb][0][burial]
# print(icm)
if aMat == False:
aMaterial = 'Al-Zn-In'
else:
aMaterial = 'Zn'
# determine anode properties from Table 6-3 of [1]
anode = [x for x in Table63 if (x[0] == aMaterial and float(x[1]) >= float(tAno)) ][0]
if burial == 1:
EC0 = float(anode[2])
e = float(anode[3])
else:
EC0 = float(anode[4])
e = float(anode[5])
# print(anode)
# print(EC0)
# print(e)
# determine coating breakdown factor from Table A-1 of [1]
coatingPL = TableA1[coatLP]
aPL = float(coatingPL[3])
bPL = float(coatingPL[4])
# print(coatingPL)
# print(aPL)
# print(bPL)
# determine field joint coating breakdown factor from Table A-2 of [1]
coatingFJ = TableA2[coatFJ]
aFJ = float(coatingFJ[3])
bFJ = float(coatingFJ[4])
# print(coatingFJ)
# print(aFJ)
# print(bFJ)
# determine coating area
Acl = pi*D*(lPL)*nJoints
AclPL = pi*D*(lPL-2*lFJ)*nJoints
AclFJ = pi*D*(2*lFJ)*nJoints
# print(AclPL)
# print(AclFJ)
# print(Acl)
#determine mean coating breakdown factor, Eq 2 of [1]
fcmPL = aPL + 0.5*bPL*tf
fcmFJ = aFJ + 0.5*bFJ*tf
# print(fcmPL)
# print(fcmFJ)
#determine mean current demand, Eq 1 of [1]
IcmPL = AclPL*fcmPL*icm*k
IcmFJ = AclFJ*fcmFJ*icm*k
Icm = IcmPL + IcmFJ
# print(IcmPL)
# print(IcmFJ)
# print(Icm)
#determine final coating breakdown factor, Eq 4 of [1]
fcfPL = aPL + bPL*tf
fcfFJ = aFJ + bFJ*tf
# print(fcfPL)
# print(fcfFJ)
    #determine final current demand, Eq 3 of [1]
IcfPL = AclPL*fcfPL*icm*k
IcfFJ = AclFJ*fcfFJ*icm*k
Icf = IcfPL + IcfFJ
# print(IcfPL)
# print(IcfFJ)
# print(Icf)
    #determine minimum required anode mass, Eq. 5 of [1]
reqM = (Icm*tf*8760)/(0.80*e)
reqV = reqM/aDen
# print('required anode mass',reqM)
# print('required anode volume', reqV)
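    # net cross-sectional area of one bracelet anode ring: annulus between the pipe OD and OD + 2*aThk, minus the two gaps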
unitV = (0.25*pi*((D + 2*aThk)**2) - 0.25*pi*(D**2) - 2*aGap*aThk)
massLength = reqV/unitV
# print('required anode length by mass', massLength)
deltaE = EC0 - EA0
reqA = (0.315*rhoSea*Icf/deltaE)**2
unitA = pi*(D+2*(1-u)*aThk) - 2*aGap
areaLength = reqA/unitA
# print('required anode length by area', areaLength)
input = [D, lPL, lFJ, tAmb, tAno, tf, rhoSea, aGap, aThk, aDen, aMat, coatLP, coatFJ, nJoints, burial]
# output = [icm, anode, coatingPL, coatingFJ, reqM, reqV, massLength, areaLength]
output = [icm, reqM, reqA, massLength, areaLength]
report = []
resultRaw = [input, output, report]
inputJson = {
'Outer diameter, m':D,
'Length of pipeline, m':lPL,
'Length of field joint':lFJ,
'Ambient temperature, degC':tAmb,
'Design life, year':tf,
        'Seawater resistivity, ohm.m':rhoSea,
}
outPutJson = {
'No of joints, N:':nJoints,
'Mean current demand, A/Sq.m.': fFormat.format(icm, precision ),
'Min. required anode mass, kg':fFormat.format(reqM, precision ),
'Min. required surface area, Sq.m':fFormat.format(reqA, precision ),
'Min. required length by anode mass, m':fFormat.format(massLength, precision ),
'Min. required length by anode area, m':fFormat.format(areaLength, precision ),
}
resultJson = {'input':inputJson, 'output':outPutJson, 'report':report}
result = [resultRaw, resultJson]
    return result
D = 273.05E-03
lPL = 12
lFJ = 0.30
tAmb = 30
tAno = 30
tf = 30
rhoSea = 1
aGap = 25E-03
aThk = 50E-03
aDen = 2700
aMat = 0
coatLP = 0
coatFJ = 0
spaceMin = 10
spaceMax = 10
burial = 1 # 1 for non burial and 2 for burial
if __name__ == "__main__":
D = float(sys.argv[1])
lPL = float(sys.argv[2])
lFJ = float(sys.argv[3])
tAmb = float(sys.argv[4])
tAno = float(sys.argv[5])
tf = float(sys.argv[6])
rhoSea = float(sys.argv[7])
aGap = float(sys.argv[8])
aThk = float(sys.argv[9])
aDen = float(sys.argv[10])
aMat = int(sys.argv[11])
coatLP = int(sys.argv[12])
coatFJ = int(sys.argv[13])
spaceMin = int(sys.argv[14])
spaceMax = int(sys.argv[15])
burial = int(sys.argv[16])
resultJson = []
for nJoints in range(spaceMin, spaceMax + 1):
result = DNVGLRPF113(D, lPL, lFJ, tAmb, tAno, tf, rhoSea, aGap, aThk, aDen, aMat, coatLP, coatFJ, nJoints, burial)
resultJson.append(result[1])
print (json.dumps(resultJson))
|
[
"numpy.array",
"json.dumps"
] |
[((402, 508), 'numpy.array', 'np.array', (['[[25, 0.05, 0.02], [50, 0.06, 0.03], [80, 0.075, 0.04], [120, 0.1, 0.06], [\n 200, 0.13, 0.08]]'], {}), '([[25, 0.05, 0.02], [50, 0.06, 0.03], [80, 0.075, 0.04], [120, 0.1,\n 0.06], [200, 0.13, 0.08]])\n', (410, 508), True, 'import numpy as np\n'), ((550, 767), 'numpy.array', 'np.array', (["[['Al-Zn-In', 30, -1.05, 2000, -1.0, 1500], ['Al-Zn-In', 60, -1.05, 1500, -\n 1.0, 680], ['Al-Zn-In', 80, -1.0, 720, -1.0, 320], ['Zn', 30, -1.03, \n 780, -0.98, 750], ['Zn', 50, -1.05, 2000, -0.75, 580]]"], {}), "([['Al-Zn-In', 30, -1.05, 2000, -1.0, 1500], ['Al-Zn-In', 60, -1.05,\n 1500, -1.0, 680], ['Al-Zn-In', 80, -1.0, 720, -1.0, 320], ['Zn', 30, -\n 1.03, 780, -0.98, 750], ['Zn', 50, -1.05, 2000, -0.75, 580]])\n", (558, 767), True, 'import numpy as np\n'), ((814, 1345), 'numpy.array', 'np.array', (["[['Glass fibre reincored alphat enamel', True, 70, 0.01, 0.0003], ['FBE', \n True, 90, 0.03, 0.0003], ['FBE', False, 90, 0.03, 0.001], [\n '3-layer FBE/PE', True, 80, 0.001, 3e-05], ['3-layer FBE/PE', False, 80,\n 0.001, 3e-05], ['3-layer FBE/PP', True, 110, 0.001, 3e-05], [\n '3-layer FBE/PP', False, 80, 0.001, 3e-05], [\n 'FBE/PP Thermally insulating coating', False, 140, 0.0003, 1e-05], [\n 'FBE/PU Thermally insulating coating', False, 70, 0.01, 0.003], [\n 'Polychloroprene', False, 90, 0.01, 0.001]]"], {}), "([['Glass fibre reincored alphat enamel', True, 70, 0.01, 0.0003],\n ['FBE', True, 90, 0.03, 0.0003], ['FBE', False, 90, 0.03, 0.001], [\n '3-layer FBE/PE', True, 80, 0.001, 3e-05], ['3-layer FBE/PE', False, 80,\n 0.001, 3e-05], ['3-layer FBE/PP', True, 110, 0.001, 3e-05], [\n '3-layer FBE/PP', False, 80, 0.001, 3e-05], [\n 'FBE/PP Thermally insulating coating', False, 140, 0.0003, 1e-05], [\n 'FBE/PU Thermally insulating coating', False, 70, 0.01, 0.003], [\n 'Polychloroprene', False, 90, 0.01, 0.001]])\n", (822, 1345), True, 'import numpy as np\n'), ((1380, 2775), 'numpy.array', 'np.array', (["[['none', '4E(1) moulded PU on top bare steel (with primer)', 70, 0.3, 0.03\n ], [\n '1D Adhesive Tape or 2A(1)/2A-(2) HSS (PE/PP backing) with mastic adhesive'\n , '4E(2) moulded PU on top 1D or 2A(1)/2A(2)', 70, 0.1, 0.01], [\n '2B(1) HSS (backing + adhesive in PE with LE primer)', 'none', 70, 0.03,\n 0.003], ['2B(1) HSS (backing + adhesive in PE with LE primer)',\n '4E(2) moulded PU on 0.03 0.003 top 2B(1)', 70, 0.03, 0.003], [\n '2C (1) HSS (backing + adhesive in PP, LE primer)', 'none', 110, 0.03, \n 0.003], ['2C (1) HSS (backing + adhesive in PP, LE primer)',\n '4E(2) moulded PU on top 2B(1)', 110, 0.03, 0.003], ['3A FBE', 'none', \n 90, 0.1, 0.01], ['3A FBE', '4E(2) moulded PU on top', 90, 0.03, 0.003],\n ['2B(2) FBE with PE HSS', 'none', 70, 0.01, 0.0003], [\n '2B(2) FBE with PE HSS', '4E(2) moulded PU on top FBE + PE HSS', 70, \n 0.01, 0.0003], [\n '5D(1) and 5E FBE with PE applied as flame spraying or tape, respectively',\n 'none', 70, 0.01, 0.0003], ['2C(2) FBE with PP HSS', 'none', 140, 0.01,\n 0.0003], [\n '5A/B/C(1) FBE, PP adhesive and PP (wrapped, flame sprayed or moulded)',\n 'none', 140, 0.01, 0.0003], ['NA',\n '5C(1) Moulded PE on top FBE with PE adhesive', 70, 0.01, 0.0003], [\n 'NA', '5C(2) Moulded PP on top FBE with PP adhesive', 140, 0.01, 0.0003\n ], ['8A polychloroprene', 'none', 90, 0.03, 0.001]]"], {}), "([['none', '4E(1) moulded PU on top bare steel (with primer)', 70, \n 0.3, 0.03], [\n '1D Adhesive Tape or 2A(1)/2A-(2) HSS (PE/PP backing) with mastic adhesive'\n , '4E(2) moulded PU on top 1D or 2A(1)/2A(2)', 70, 0.1, 0.01], [\n 
'2B(1) HSS (backing + adhesive in PE with LE primer)', 'none', 70, 0.03,\n 0.003], ['2B(1) HSS (backing + adhesive in PE with LE primer)',\n '4E(2) moulded PU on 0.03 0.003 top 2B(1)', 70, 0.03, 0.003], [\n '2C (1) HSS (backing + adhesive in PP, LE primer)', 'none', 110, 0.03, \n 0.003], ['2C (1) HSS (backing + adhesive in PP, LE primer)',\n '4E(2) moulded PU on top 2B(1)', 110, 0.03, 0.003], ['3A FBE', 'none', \n 90, 0.1, 0.01], ['3A FBE', '4E(2) moulded PU on top', 90, 0.03, 0.003],\n ['2B(2) FBE with PE HSS', 'none', 70, 0.01, 0.0003], [\n '2B(2) FBE with PE HSS', '4E(2) moulded PU on top FBE + PE HSS', 70, \n 0.01, 0.0003], [\n '5D(1) and 5E FBE with PE applied as flame spraying or tape, respectively',\n 'none', 70, 0.01, 0.0003], ['2C(2) FBE with PP HSS', 'none', 140, 0.01,\n 0.0003], [\n '5A/B/C(1) FBE, PP adhesive and PP (wrapped, flame sprayed or moulded)',\n 'none', 140, 0.01, 0.0003], ['NA',\n '5C(1) Moulded PE on top FBE with PE adhesive', 70, 0.01, 0.0003], [\n 'NA', '5C(2) Moulded PP on top FBE with PP adhesive', 140, 0.01, 0.0003\n ], ['8A polychloroprene', 'none', 90, 0.03, 0.001]])\n", (1388, 2775), True, 'import numpy as np\n'), ((7782, 7804), 'json.dumps', 'json.dumps', (['resultJson'], {}), '(resultJson)\n', (7792, 7804), False, 'import json\n')]
|
"""
Getting started with The Cannon and APOGEE
"""
import os
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
import AnniesLasso as tc
# Load in the data.
PATH, CATALOG, FILE_FORMAT = ("/Users/arc/research/apogee/", "apogee-rg.fits",
"apogee-rg-custom-normalization-{}.memmap")
labelled_set = Table.read(os.path.join(PATH, CATALOG))
dispersion = np.memmap(os.path.join(PATH, FILE_FORMAT).format("dispersion"),
mode="r", dtype=float)
normalized_flux = np.memmap(
os.path.join(PATH, FILE_FORMAT).format("flux"),
mode="c", dtype=float).reshape((len(labelled_set), -1))
normalized_ivar = np.memmap(
os.path.join(PATH, FILE_FORMAT).format("ivar"),
mode="c", dtype=float).reshape(normalized_flux.shape)
# The labelled set includes ~14000 stars. Let's chose a random ~1,400 for the
# training and validation sets.
np.random.seed(888) # For reproducibility.
q = np.random.randint(0, 10, len(labelled_set)) % 10
validate_set = (q == 0)
train_set = (q == 1)
# Create a Cannon model in parallel using all available threads
model = tc.L1RegularizedCannonModel(labelled_set[train_set],
normalized_flux[train_set], normalized_ivar[train_set],
dispersion=dispersion, threads=-1)
# No regularization.
model.regularization = 0
# Specify the vectorizer.
model.vectorizer = tc.vectorizer.NormalizedPolynomialVectorizer(
labelled_set[train_set],
tc.vectorizer.polynomial.terminator(["TEFF", "LOGG", "FE_H"], 2))
print("Vectorizer terms: {0}".format(
" + ".join(model.vectorizer.get_human_readable_label_vector())))
# Train the model.
model.train()
# Let's set the scatter for each pixel to ensure the mean chi-squared value is
# 1 for the training set, then re-train.
model._set_s2_by_hogg_heuristic()
model.train()
# Use the model to fit the stars in the validation set.
validation_set_labels = model.fit(
normalized_flux[validate_set], normalized_ivar[validate_set])
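# Compare the inferred labels with the catalogue values: one scatter plot per label.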
for i, label_name in enumerate(model.vectorizer.label_names):
fig, ax = plt.subplots()
x = labelled_set[label_name][validate_set]
y = validation_set_labels[:, i]
abs_diff = np.abs(y - x)
ax.scatter(x, y, facecolor="k")
limits = np.array([ax.get_xlim(), ax.get_ylim()])
ax.set_xlim(limits.min(), limits.max())
ax.set_ylim(limits.min(), limits.max())
ax.set_title("{0}: {1:.2f}".format(label_name, np.mean(abs_diff)))
print("{0}: {1:.2f}".format(label_name, np.mean(abs_diff)))
|
[
"numpy.abs",
"numpy.mean",
"AnniesLasso.vectorizer.polynomial.terminator",
"os.path.join",
"AnniesLasso.L1RegularizedCannonModel",
"numpy.random.seed"
] |
[((840, 859), 'numpy.random.seed', 'np.random.seed', (['(888)'], {}), '(888)\n', (854, 859), True, 'import numpy as np\n'), ((1055, 1203), 'AnniesLasso.L1RegularizedCannonModel', 'tc.L1RegularizedCannonModel', (['labelled_set[train_set]', 'normalized_flux[train_set]', 'normalized_ivar[train_set]'], {'dispersion': 'dispersion', 'threads': '(-1)'}), '(labelled_set[train_set], normalized_flux[\n train_set], normalized_ivar[train_set], dispersion=dispersion, threads=-1)\n', (1082, 1203), True, 'import AnniesLasso as tc\n'), ((315, 342), 'os.path.join', 'os.path.join', (['PATH', 'CATALOG'], {}), '(PATH, CATALOG)\n', (327, 342), False, 'import os\n'), ((1379, 1443), 'AnniesLasso.vectorizer.polynomial.terminator', 'tc.vectorizer.polynomial.terminator', (["['TEFF', 'LOGG', 'FE_H']", '(2)'], {}), "(['TEFF', 'LOGG', 'FE_H'], 2)\n", (1414, 1443), True, 'import AnniesLasso as tc\n'), ((2105, 2118), 'numpy.abs', 'np.abs', (['(y - x)'], {}), '(y - x)\n', (2111, 2118), True, 'import numpy as np\n'), ((367, 398), 'os.path.join', 'os.path.join', (['PATH', 'FILE_FORMAT'], {}), '(PATH, FILE_FORMAT)\n', (379, 398), False, 'import os\n'), ((2350, 2367), 'numpy.mean', 'np.mean', (['abs_diff'], {}), '(abs_diff)\n', (2357, 2367), True, 'import numpy as np\n'), ((2415, 2432), 'numpy.mean', 'np.mean', (['abs_diff'], {}), '(abs_diff)\n', (2422, 2432), True, 'import numpy as np\n'), ((481, 512), 'os.path.join', 'os.path.join', (['PATH', 'FILE_FORMAT'], {}), '(PATH, FILE_FORMAT)\n', (493, 512), False, 'import os\n'), ((622, 653), 'os.path.join', 'os.path.join', (['PATH', 'FILE_FORMAT'], {}), '(PATH, FILE_FORMAT)\n', (634, 653), False, 'import os\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`fit`
==================
.. module:: fit
:synopsis:
.. moduleauthor:: hbldh <<EMAIL>>
Created on 2015-09-24, 07:18:22
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from b2ac.compat import *
import b2ac.matrix.matrix_operations as mo
import b2ac.eigenmethods.qr_algorithm as qr
import b2ac.eigenmethods.inverse_iteration as inv_iter
def fit_improved_B2AC_double(points):
"""Ellipse fitting in Python with improved B2AC algorithm as described in
this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.
This version of the fitting uses float storage during calculations and performs the
eigensolver on a float array. It only uses `b2ac` package methods for fitting, to
be as similar to the integer implementation as possible.
:param points: The [Nx2] array of points to fit ellipse to.
:type points: :py:class:`numpy.ndarray`
:return: The conic section array defining the fitted ellipse.
:rtype: :py:class:`numpy.ndarray`
"""
e_conds = []
points = np.array(points, 'float')
M, T = _calculate_M_and_T_double(points)
e_vals = sorted(qr.QR_algorithm_shift_Givens_double(M)[0])
a = None
for ev_ind in [1, 2, 0]:
# Find the eigenvector that matches this eigenvector.
eigenvector = inv_iter.inverse_iteration_for_eigenvector_double(M, e_vals[ev_ind], 5)
# See if that eigenvector yields an elliptical solution.
elliptical_condition = (4 * eigenvector[0] * eigenvector[2]) - (eigenvector[1] ** 2)
e_conds.append(elliptical_condition)
if elliptical_condition > 0:
a = eigenvector
break
if a is None:
print("Eigenvalues = {0}".format(e_vals))
print("Elliptical conditions = {0}".format(e_conds))
raise ArithmeticError("No elliptical solution found.")
conic_coefficients = np.concatenate((a, np.dot(T, a)))
return conic_coefficients
def _calculate_M_and_T_double(points):
"""Part of the B2AC ellipse fitting algorithm, calculating the M and T
matrices needed.
:param points: The [Nx2] array of points to fit ellipse to.
:type points: :py:class:`numpy.ndarray`
:return: Matrices M and T.
:rtype: tuple
"""
S = _calculate_scatter_matrix_double(points[:, 0], points[:, 1])
S1 = S[:3, :3]
S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
S3_inv = mo.inverse_symmetric_3by3_double(S3).reshape((3, 3))
S2 = S[:3, 3:]
T = -np.dot(S3_inv, S2.T)
M_term_2 = np.dot(S2, T)
M = S1 + M_term_2
M[[0, 2], :] = M[[2, 0], :] / 2
M[1, :] = -M[1, :]
return M, T
def _calculate_scatter_matrix_double(x, y):
"""Calculates the complete scatter matrix for the input coordinates.
:param x: The x coordinates.
:type x: :py:class:`numpy.ndarray`
:param y: The y coordinates.
:type y: :py:class:`numpy.ndarray`
:return: The complete scatter matrix.
:rtype: :py:class:`numpy.ndarray`
"""
D = np.ones((len(x), 6), 'int64')
D[:, 0] = x * x
D[:, 1] = x * y
D[:, 2] = y * y
D[:, 3] = x
D[:, 4] = y
return D.T.dot(D)
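# Minimal usage sketch (not part of the original module): fit an ellipse centred at
# (10, 5) with semi-axes 4 and 2 from slightly noisy sampled points. Assumes the
# b2ac package imported above is installed; prints the six conic coefficients.
if __name__ == "__main__":
    np.random.seed(0)
    t = np.linspace(0, 2 * np.pi, 50)
    example_points = np.column_stack((10 + 4 * np.cos(t), 5 + 2 * np.sin(t)))
    example_points += np.random.normal(scale=0.01, size=example_points.shape)
    print(fit_improved_B2AC_double(example_points))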
|
[
"b2ac.eigenmethods.inverse_iteration.inverse_iteration_for_eigenvector_double",
"b2ac.matrix.matrix_operations.inverse_symmetric_3by3_double",
"numpy.array",
"numpy.dot",
"b2ac.eigenmethods.qr_algorithm.QR_algorithm_shift_Givens_double"
] |
[((1195, 1220), 'numpy.array', 'np.array', (['points', '"""float"""'], {}), "(points, 'float')\n", (1203, 1220), True, 'import numpy as np\n'), ((2502, 2566), 'numpy.array', 'np.array', (['[S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]]'], {}), '([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])\n', (2510, 2566), True, 'import numpy as np\n'), ((2697, 2710), 'numpy.dot', 'np.dot', (['S2', 'T'], {}), '(S2, T)\n', (2703, 2710), True, 'import numpy as np\n'), ((1458, 1529), 'b2ac.eigenmethods.inverse_iteration.inverse_iteration_for_eigenvector_double', 'inv_iter.inverse_iteration_for_eigenvector_double', (['M', 'e_vals[ev_ind]', '(5)'], {}), '(M, e_vals[ev_ind], 5)\n', (1507, 1529), True, 'import b2ac.eigenmethods.inverse_iteration as inv_iter\n'), ((2661, 2681), 'numpy.dot', 'np.dot', (['S3_inv', 'S2.T'], {}), '(S3_inv, S2.T)\n', (2667, 2681), True, 'import numpy as np\n'), ((1288, 1326), 'b2ac.eigenmethods.qr_algorithm.QR_algorithm_shift_Givens_double', 'qr.QR_algorithm_shift_Givens_double', (['M'], {}), '(M)\n', (1323, 1326), True, 'import b2ac.eigenmethods.qr_algorithm as qr\n'), ((2055, 2067), 'numpy.dot', 'np.dot', (['T', 'a'], {}), '(T, a)\n', (2061, 2067), True, 'import numpy as np\n'), ((2580, 2616), 'b2ac.matrix.matrix_operations.inverse_symmetric_3by3_double', 'mo.inverse_symmetric_3by3_double', (['S3'], {}), '(S3)\n', (2612, 2616), True, 'import b2ac.matrix.matrix_operations as mo\n')]
|
import logging
import multiprocessing
import os
import pickle as pkl
import numpy as np
import tensorflow as tf
from gensim.models import word2vec
from gensim.models.word2vec import PathLineSentences
logger = logging.getLogger('Word2Vec')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
seg_file = 'data/processed/seg_text.txt'
word_vec_file = 'data/processed/word2vec.txt'
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('processed_data_path', 'data/processed', 'processed data dir to load')
tf.app.flags.DEFINE_integer('word_dim', 300, 'dimension of word embedding')
def word_vec():
logger.info('Word to vec')
model = word2vec.Word2Vec(PathLineSentences(seg_file), sg=1, size=300, window=5, min_count=10, sample=1e-4,
workers=multiprocessing.cpu_count())
model.wv.save_word2vec_format(word_vec_file, binary=False)
def dump_pkl(file_path, obj):
with open(file_path, 'wb') as f:
pkl.dump(obj, f)
f.close()
def create_word_vec(flags):
logger.info('Word map and embedding')
word_map = {}
word_map['PAD'] = len(word_map)
word_map['UNK'] = len(word_map)
word_embed = []
for line in open(word_vec_file, 'r'):
content = line.strip().split()
if len(content) != flags.word_dim + 1:
continue
word_map[content[0]] = len(word_map)
word_embed.append(np.asarray(content[1:], dtype=np.float32))
word_embed = np.stack(word_embed)
embed_mean, embed_std = word_embed.mean(), word_embed.std()
pad_embed = np.random.normal(embed_mean, embed_std, (2, flags.word_dim))
word_embed = np.concatenate((pad_embed, word_embed), axis=0)
word_embed = word_embed.astype(np.float32)
print('Word in dict - {}'.format(len(word_map)))
dump_pkl(os.path.join(flags.processed_data_path, 'word_map.pkl'), word_map)
dump_pkl(os.path.join(flags.processed_data_path, 'word_embed.pkl'), word_embed)
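# Illustrative helper (assumed usage, not part of the original pipeline): once
# word_map.pkl and word_embed.pkl are dumped, a token sequence can be embedded
# by plain index lookup, with unseen tokens falling back to the 'UNK' row.
def embed_tokens(tokens, word_map, word_embed):
    ids = [word_map.get(t, word_map['UNK']) for t in tokens]
    return word_embed[np.asarray(ids)]  # shape: (len(tokens), word_dim)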
def main(_):
word_vec()
create_word_vec(FLAGS)
if __name__ == "__main__":
tf.app.run()
|
[
"logging.getLogger",
"numpy.random.normal",
"logging.StreamHandler",
"pickle.dump",
"tensorflow.app.flags.DEFINE_integer",
"logging.Formatter",
"os.path.join",
"numpy.asarray",
"tensorflow.app.flags.DEFINE_string",
"multiprocessing.cpu_count",
"numpy.stack",
"numpy.concatenate",
"gensim.models.word2vec.PathLineSentences",
"tensorflow.app.run"
] |
[((210, 239), 'logging.getLogger', 'logging.getLogger', (['"""Word2Vec"""'], {}), "('Word2Vec')\n", (227, 239), False, 'import logging\n'), ((282, 355), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (299, 355), False, 'import logging\n'), ((374, 397), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (395, 397), False, 'import logging\n'), ((628, 729), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""processed_data_path"""', '"""data/processed"""', '"""processed data dir to load"""'], {}), "('processed_data_path', 'data/processed',\n 'processed data dir to load')\n", (654, 729), True, 'import tensorflow as tf\n'), ((726, 801), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""word_dim"""', '(300)', '"""dimension of word embedding"""'], {}), "('word_dim', 300, 'dimension of word embedding')\n", (753, 801), True, 'import tensorflow as tf\n'), ((1664, 1684), 'numpy.stack', 'np.stack', (['word_embed'], {}), '(word_embed)\n', (1672, 1684), True, 'import numpy as np\n'), ((1766, 1826), 'numpy.random.normal', 'np.random.normal', (['embed_mean', 'embed_std', '(2, flags.word_dim)'], {}), '(embed_mean, embed_std, (2, flags.word_dim))\n', (1782, 1826), True, 'import numpy as np\n'), ((1844, 1891), 'numpy.concatenate', 'np.concatenate', (['(pad_embed, word_embed)'], {'axis': '(0)'}), '((pad_embed, word_embed), axis=0)\n', (1858, 1891), True, 'import numpy as np\n'), ((2247, 2259), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2257, 2259), True, 'import tensorflow as tf\n'), ((881, 908), 'gensim.models.word2vec.PathLineSentences', 'PathLineSentences', (['seg_file'], {}), '(seg_file)\n', (898, 908), False, 'from gensim.models.word2vec import PathLineSentences\n'), ((1170, 1186), 'pickle.dump', 'pkl.dump', (['obj', 'f'], {}), '(obj, f)\n', (1178, 1186), True, 'import pickle as pkl\n'), ((2006, 2061), 'os.path.join', 'os.path.join', (['flags.processed_data_path', '"""word_map.pkl"""'], {}), "(flags.processed_data_path, 'word_map.pkl')\n", (2018, 2061), False, 'import os\n'), ((2086, 2143), 'os.path.join', 'os.path.join', (['flags.processed_data_path', '"""word_embed.pkl"""'], {}), "(flags.processed_data_path, 'word_embed.pkl')\n", (2098, 2143), False, 'import os\n'), ((1001, 1028), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1026, 1028), False, 'import multiprocessing\n'), ((1603, 1644), 'numpy.asarray', 'np.asarray', (['content[1:]'], {'dtype': 'np.float32'}), '(content[1:], dtype=np.float32)\n', (1613, 1644), True, 'import numpy as np\n')]
|
import struct
import numpy
from math import floor
class HeightMap:
def __init__(self, width, length, heightData=None, max_val=0):
self.heightData = (
            heightData if heightData is not None else [0 for n in range(width * length)]
)
self.width = width
self.length = length
self.highest = max_val
def copy(self):
        return HeightMap(self.width, self.length, list(self.heightData), self.highest)
def getMax(self):
return self.highest
@classmethod
def from_data(cls, data, res, width):
heightData, length, m = cls._parse_data(data, res, width)
return cls(width, length, heightData, m)
def serialize(self, res):
return HeightMap._serialize_data(self.heightData, res, self.getWidth())
# Takes a bz height map byte buffer and converts it to an array of height points
@classmethod
def _parse_data(cls, data, res, width):
size = int(len(data) / 2)
zone_size = 2 ** res
length = int(size / width)
m = 0
obuffer = [0 for n in range(size)]
for n in range(size):
try:
d_idx = n * 2
zone = int(n / (zone_size ** 2))
x = (n % zone_size) + zone * zone_size % width
z = (int(n / zone_size) % zone_size) + int(
zone * zone_size / width
) * zone_size
height = struct.unpack("<H", data[d_idx : d_idx + 2])[0]
m = max(m, height)
b_idx = int(x + ((length - 1) - z) * width)
obuffer[b_idx] = height
except Exception as e:
break
return obuffer, length, m
# Takes an array of height points and converts it to a bz height map
@classmethod
def _serialize_data(cls, data, res, width):
size = len(data)
zone_size = 2 ** res
length = int(size / width)
obuffer = [b"" for n in range(size)]
for n in range(size):
try:
zone = int(n / (zone_size ** 2))
x = (n % zone_size) + zone * zone_size % width
z = (int(n / zone_size) % zone_size) + int(
zone * zone_size / width
) * zone_size
b_idx = int(x + ((length - 1) - z) * width)
obuffer[n] = struct.pack("<H", data[b_idx])
except Exception as e:
print(e)
break
return b"".join(obuffer)
def getWidth(self):
return self.width
def getLength(self):
return self.length
def getHeight(self, x, z):
xx = int(min(max(x, 0), self.getWidth() - 1))
zz = int(min(max(z, 0), self.getLength() - 1))
return self.heightData[xx + zz * self.getWidth()]
def getCroped(self, x, z, w, h):
return HeightMap(
w, h, [self.getHeight(x + n % w, z + int(n / w)) for n in range(w * h)]
)
def getResized(self, newW, newL, method=lambda x, z, map: map.getHeight(x, z)):
newMap = [0 for n in range(int(newW * newL))]
wf, lf = self.getWidth() / newW, self.getLength() / newL
m = 0
print("Resizing:")
lp = 0
for i in range(len(newMap)):
x = i % newW
z = int(i / newW)
newMap[i] = int(method(int(x * wf), int(z * lf), self))
m = max(m, newMap[i])
p = int((i + 1) / len(newMap) * 25)
if p != lp:
print(
"[{}{}] - {:>8}/{:<8}".format(
"=" * p, " " * (25 - p), i + 1, len(newMap)
),
end="\r",
)
lp = p
print("\nDone")
return HeightMap(int(newW), int(newL), newMap, m)
w_cache = {}
def createWeightGrid(size):
if not int(size) in w_cache:
c = size / 2
weights = [
size - ((c - n % size) ** 2 + (c - int(n / size)) ** 2) ** 0.5
for n in range(0, size * size)
]
w_cache[int(size)] = weights
return w_cache[int(size)]
def AvgEdge(x, z, map, grid=5):
cropped = map.getCroped(int(x - grid / 2), int(z - grid / 2), grid, grid)
hdata = cropped.heightData
weights = createWeightGrid(grid)
mean, median = numpy.average(hdata, weights=weights), numpy.median(hdata)
d = [n for n in hdata if (abs(n - mean) >= abs(n - median))]
return numpy.mean([numpy.mean(d)])
def Avg(x, z, map, grid=5):
cropped = map.getCroped(int(x - grid / 2), int(z - grid / 2), grid, grid)
weights = createWeightGrid(grid)
hdata = cropped.heightData
return numpy.average(hdata, weights=weights)
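# Illustrative usage sketch (assumed sizes and values, not part of the original
# module): build a small synthetic map and downsample it with the weighted
# average sampler defined above.
if __name__ == "__main__":
    demo_map = HeightMap(64, 64, [n % 7 for n in range(64 * 64)])
    resized = demo_map.getResized(32, 32, method=lambda x, z, m: Avg(x, z, m, grid=5))
    print(resized.getWidth(), resized.getLength(), resized.getMax())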
|
[
"numpy.mean",
"numpy.median",
"numpy.average",
"struct.pack",
"struct.unpack"
] |
[((4669, 4706), 'numpy.average', 'numpy.average', (['hdata'], {'weights': 'weights'}), '(hdata, weights=weights)\n', (4682, 4706), False, 'import numpy\n'), ((4318, 4355), 'numpy.average', 'numpy.average', (['hdata'], {'weights': 'weights'}), '(hdata, weights=weights)\n', (4331, 4355), False, 'import numpy\n'), ((4357, 4376), 'numpy.median', 'numpy.median', (['hdata'], {}), '(hdata)\n', (4369, 4376), False, 'import numpy\n'), ((4465, 4478), 'numpy.mean', 'numpy.mean', (['d'], {}), '(d)\n', (4475, 4478), False, 'import numpy\n'), ((2358, 2388), 'struct.pack', 'struct.pack', (['"""<H"""', 'data[b_idx]'], {}), "('<H', data[b_idx])\n", (2369, 2388), False, 'import struct\n'), ((1428, 1470), 'struct.unpack', 'struct.unpack', (['"""<H"""', 'data[d_idx:d_idx + 2]'], {}), "('<H', data[d_idx:d_idx + 2])\n", (1441, 1470), False, 'import struct\n')]
|
from ctypes import *
from typing import *
from pathlib import Path
from numpy import array, cos, ndarray, pi, random, sin, zeros, tan
try:
lib = cdll.LoadLibrary(str(Path(__file__).with_name("libkmeans.so")))
except Exception as E:
print(f"Cannot load DLL")
print(E)
class observation_2d(Structure):
_fields_ = [("x", c_double), ("y", c_double), ("group", c_size_t)]
class observation_3d(Structure):
_fields_ = [("x", c_double), ("y", c_double), ("z", c_double), ("group", c_size_t)]
class cluster_2d(Structure):
_fields_ = [("x", c_double), ("y", c_double), ("count", c_size_t)]
class cluster_3d(Structure):
_fields_ = [("x", c_double), ("y", c_double), ("z", c_double), ("count", c_size_t)]
lib.kmeans_2d.restype = POINTER(cluster_2d)
def kmeans_2d(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
"""Partition observations into k clusters.
Parameters
----------
observations : ndarray, `shape (N, 2)`
An array of observations (x, y) to be clustered.
Data should be provided as: `[(x, y), (x, y), (x, y), ...]`
k : int, optional
Amount of clusters to partition observations into, by default 5
Returns
-------
center : ndarray, `shape (k, 2)`
An array of positions to center of each cluster.
count : ndarray, `shape (k, )`
Array of counts of datapoints closest to the center of its cluster.
Examples
-------
>>> observations = [[6, 1], [-4, -4], [1, -7], [9, -2], [6, -6]]
    >>> center, count = kmeans_2d(array(observations), k=2)
>>> center
    [[-4 -4]
     [ 5 -3]]
>>> count
[1, 4]
"""
if not isinstance(observations, ndarray):
raise TypeError("Observations must be a ndarray.")
# Fix orientation on data
if observations.shape[-1] == 2:
observations = observations.T
else:
raise ValueError("Provided array should contain ((x, y), ) observations.")
# Find observation_2d length
n = observations.shape[-1]
# Create a Python list of observations
py_observations_list = map(observation_2d, *observations)
# Convert the Python list into a c-array
c_observations_array = (observation_2d * n)(*py_observations_list)
# Get c-array of cluster_2d
c_clusters_array = lib.kmeans_2d(
c_observations_array, c_size_t(n), c_size_t(k))
# Convert c-array of clusters into a python list
py_clusters_list = [c_clusters_array[index] for index in range(k)]
# Split clusters
center = zeros([k, 2], dtype=observations.dtype)
count = zeros(k, dtype=int)
for index, cluster_object in enumerate(py_clusters_list):
center[index][0] = cluster_object.x
center[index][1] = cluster_object.y
count[index] = cluster_object.count
# Pack into DataFrame and return
return (center, count)
lib.kmeans_3d.restype = POINTER(cluster_3d)
def kmeans_3d(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
"""Partition observations into k clusters.
Parameters
----------
observations : ndarray, `shape (N, 3)`
An array of observations (x, y) to be clustered.
Data should be provided as: `[(x, y, z), (x, y, z), (x, y, z), ...]`
k : int, optional
Amount of clusters to partition observations into, by default 5
Returns
-------
center : ndarray, `shape (k, 3)`
An array of positions to center of each cluster.
count : ndarray, `shape (k, )`
Array of counts of datapoints closest to the center of its cluster.
Examples
-------
>>> observations = [[6, 1, 3], [-4, -4, -4], [1, -7, 7], [9, -2, 1], [6, -6, 6]]
    >>> center, count = kmeans_3d(array(observations), k=2)
>>> center
[[ -0.35830777 -7.41219447 201.90265473]
[ 1.83808572 -5.86460671 -28.00696338]
[ -0.81562641 -1.20418037 1.60364838]]
>>> count
[2, 3]
"""
if not isinstance(observations, ndarray):
raise TypeError("Observations must be a ndarray.")
# Fix orientation on data
if observations.shape[-1] == 3:
observations = observations.T
else:
raise ValueError("Provided array should contain ((x, y, z), ) observations.")
# Find observation_3d length
n = observations.shape[-1]
# Create a Python list of observations
py_observations_list = map(observation_3d, *observations)
# Convert the Python list into a c-array
c_observations_array = (observation_3d * n)(*py_observations_list)
# Get c-array of cluster_2d
c_clusters_array = lib.kmeans_3d(c_observations_array, c_size_t(n), c_size_t(k))
# Convert c-array of clusters into a python list
py_clusters_list = [c_clusters_array[index] for index in range(k)]
# Split clusters
center = zeros([k, 3], dtype=observations.dtype)
count = zeros(k, dtype=int)
for index, cluster_object in enumerate(py_clusters_list):
center[index][0] = cluster_object.x
center[index][1] = cluster_object.y
center[index][2] = cluster_object.z
count[index] = cluster_object.count
# Pack into DataFrame and return
return (center, count)
def kmeans(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
"""Partition observations into k clusters.
Parameters
----------
observations : ndarray, `shape (N, 2)` or `shape (N, 3)`
An array of observations (x, y) to be clustered.
Data should be provided as:
`[(x, y), (x, y), (x, y), ...]`
or
`[(x, y, z), (x, y, z), (x, y, z), ...]`
k : int, optional
Amount of clusters to partition observations into, by default 5
Returns
-------
center : ndarray, `shape (k, 2)` or `shape (k, 3)`
An array of positions to center of each cluster.
count : ndarray, `shape (k, )`
Array of counts of datapoints closest to the center of its cluster.
Examples
-------
>>> observations = [[6, 1], [-4, -4], [1, -7], [9, -2], [6, -6]]
    >>> center, count = kmeans(array(observations), k=2)
>>> center
    [[-4 -4]
     [ 5 -3]]
>>> count
[1, 4]
"""
if not isinstance(observations, ndarray):
raise TypeError("Observations must be a ndarray.")
if observations.shape[-1] == 3:
return kmeans_3d(observations, k)
elif observations.shape[-1] == 2:
return kmeans_2d(observations, k)
    else:
        raise ValueError("Provided array should contain 2D or 3D observations.")
if __name__ == "__main__":
random.seed(1234)
rand_list = random.random(100)
x = 10 * rand_list * cos(2 * pi * rand_list)
y = 10 * rand_list * sin(2 * pi * rand_list)
z = 10 * rand_list * tan(2 * pi * rand_list)
df = array([x, y, z]).T
print(f"Observations:\n{df[0:5]}\n...\n\nshape {len(df), len(df[0])}\n")
centers, count = kmeans(df, 3)
print(f"Centers:\n{centers}\n")
print(f"Count:\n{count}")
observations = [[6, 1], [-4, -4], [1, -7], [9, -2], [6, -6]]
center, count = kmeans_2d(array(observations), k=2)
print(f"Centers:\n{centers}\n")
print(f"Count:\n{count}")
|
[
"numpy.tan",
"pathlib.Path",
"numpy.random.random",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.random.seed",
"numpy.sin"
] |
[((2541, 2580), 'numpy.zeros', 'zeros', (['[k, 2]'], {'dtype': 'observations.dtype'}), '([k, 2], dtype=observations.dtype)\n', (2546, 2580), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((2593, 2612), 'numpy.zeros', 'zeros', (['k'], {'dtype': 'int'}), '(k, dtype=int)\n', (2598, 2612), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((4813, 4852), 'numpy.zeros', 'zeros', (['[k, 3]'], {'dtype': 'observations.dtype'}), '([k, 3], dtype=observations.dtype)\n', (4818, 4852), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((4865, 4884), 'numpy.zeros', 'zeros', (['k'], {'dtype': 'int'}), '(k, dtype=int)\n', (4870, 4884), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6497, 6514), 'numpy.random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (6508, 6514), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6532, 6550), 'numpy.random.random', 'random.random', (['(100)'], {}), '(100)\n', (6545, 6550), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6577, 6600), 'numpy.cos', 'cos', (['(2 * pi * rand_list)'], {}), '(2 * pi * rand_list)\n', (6580, 6600), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6626, 6649), 'numpy.sin', 'sin', (['(2 * pi * rand_list)'], {}), '(2 * pi * rand_list)\n', (6629, 6649), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6675, 6698), 'numpy.tan', 'tan', (['(2 * pi * rand_list)'], {}), '(2 * pi * rand_list)\n', (6678, 6698), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((6709, 6725), 'numpy.array', 'array', (['[x, y, z]'], {}), '([x, y, z])\n', (6714, 6725), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((7005, 7024), 'numpy.array', 'array', (['observations'], {}), '(observations)\n', (7010, 7024), False, 'from numpy import array, cos, ndarray, pi, random, sin, zeros, tan\n'), ((172, 186), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (176, 186), False, 'from pathlib import Path\n')]
|
from collections import Counter, defaultdict
import matplotlib as mpl
import networkx as nx
import numba
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import seaborn as sns
from fa2 import ForceAtlas2
from scipy import sparse
def to_adjacency_matrix(net):
if sparse.issparse(net):
        if isinstance(net, sparse.csr_matrix):
            return net, np.arange(net.shape[0])
        return sparse.csr_matrix(net, dtype=np.float64), np.arange(net.shape[0])
elif "networkx" in "%s" % type(net):
return (
sparse.csr_matrix(nx.adjacency_matrix(net), dtype=np.float64),
net.nodes(),
)
    elif isinstance(net, np.ndarray):
return sparse.csr_matrix(net, dtype=np.float64), np.arange(net.shape[0])
def to_nxgraph(net):
if sparse.issparse(net):
return nx.from_scipy_sparse_matrix(net)
elif "networkx" in "%s" % type(net):
return net
    elif isinstance(net, np.ndarray):
return nx.from_numpy_array(net)
def set_node_colors(c, x, cmap, colored_nodes):
    node_colors = defaultdict(lambda: "#8d8d8d")
    node_edge_colors = defaultdict(lambda: "#4d4d4d")
cnt = Counter([c[d] for d in colored_nodes])
num_groups = len(cnt)
# Set up the palette
if cmap is None:
if num_groups <= 10:
cmap = sns.color_palette().as_hex()
elif num_groups <= 20:
cmap = sns.color_palette("tab20").as_hex()
else:
cmap = sns.color_palette("hls", num_groups).as_hex()
# Calc size of groups
cmap = dict(
zip(
[d[0] for d in cnt.most_common(num_groups)],
[cmap[i] for i in range(num_groups)],
)
)
bounds = np.linspace(0, 1, 11)
norm = mpl.colors.BoundaryNorm(bounds, ncolors=12, extend="both")
# Calculate the color for each node using the palette
cmap_coreness = {
k: sns.light_palette(v, n_colors=12).as_hex() for k, v in cmap.items()
}
cmap_coreness_dark = {
k: sns.dark_palette(v, n_colors=12).as_hex() for k, v in cmap.items()
}
for d in colored_nodes:
node_colors[d] = cmap_coreness[c[d]][norm(x[d]) - 1]
node_edge_colors[d] = cmap_coreness_dark[c[d]][-norm(x[d])]
return node_colors, node_edge_colors
def classify_nodes(G, c, x, max_num=None):
non_residuals = [d for d in G.nodes() if (c[d] is not None) and (x[d] is not None)]
residuals = [d for d in G.nodes() if (c[d] is None) or (x[d] is None)]
# Count the number of groups
cnt = Counter([c[d] for d in non_residuals])
cvals = np.array([d[0] for d in cnt.most_common(len(cnt))])
if max_num is not None:
cvals = set(cvals[:max_num])
else:
cvals = set(cvals)
#
colored_nodes = [d for d in non_residuals if c[d] in cvals]
muted = [d for d in non_residuals if not c[d] in cvals]
# Bring core nodes to front
order = np.argsort([x[d] for d in colored_nodes])
colored_nodes = [colored_nodes[d] for d in order]
return colored_nodes, muted, residuals
def calc_node_pos(G, iterations=300, **params):
default_params = dict(
# Behavior alternatives
outboundAttractionDistribution=False, # Dissuade hubs
linLogMode=False, # NOT IMPLEMENTED
adjustSizes=False, # Prevent overlap (NOT IMPLEMENTED)
edgeWeightInfluence=1.0,
# Performance
jitterTolerance=1.0, # Tolerance
barnesHutOptimize=True,
barnesHutTheta=1.2,
multiThreaded=False, # NOT IMPLEMENTED
# Tuning
scalingRatio=2.0,
strongGravityMode=False,
gravity=1.0,
verbose=False,
)
if params is not None:
for k, v in params.items():
default_params[k] = v
forceatlas2 = ForceAtlas2(**default_params)
return forceatlas2.forceatlas2_networkx_layout(G, pos=None, iterations=iterations)
def draw(
G,
c,
x,
ax,
draw_edge=True,
font_size=0,
pos=None,
cmap=None,
max_group_num=None,
draw_nodes_kwd={},
draw_edges_kwd={"edge_color": "#adadad"},
draw_labels_kwd={},
layout_kwd={},
):
"""Plot the core-periphery structure in the networks.
:param G: Graph
:type G: networkx.Graph
    :param c: group membership; c[i] is the group of node i
    :type c: dict
    :param x: core (x[i]=1) or periphery (x[i]=0)
    :type x: dict
:param ax: axis
:type ax: matplotlib.pyplot.ax
:param draw_edge: whether to draw edges, defaults to True
:type draw_edge: bool, optional
:param font_size: font size for node labels, defaults to 0
:type font_size: int, optional
:param pos: pos[i] is the xy coordinate of node i, defaults to None
:type pos: dict, optional
    :param cmap: colormap, defaults to None
:type cmap: matplotlib.cmap, optional
:param max_group_num: Number of groups to color, defaults to None
:type max_group_num: int, optional
:param draw_nodes_kwd: Parameter for networkx.draw_networkx_nodes, defaults to {}
:type draw_nodes_kwd: dict, optional
:param draw_edges_kwd: Parameter for networkx.draw_networkx_edges, defaults to {"edge_color": "#adadad"}
:type draw_edges_kwd: dict, optional
:param draw_labels_kwd: Parameter for networkx.draw_networkx_labels, defaults to {}
:type draw_labels_kwd: dict, optional
:param layout_kwd: layout keywords, defaults to {}
:type layout_kwd: dict, optional
:return: (ax, pos)
:rtype: matplotlib.pyplot.ax, dict
"""
# Split node into residual and non-residual
colored_nodes, muted_nodes, residuals = classify_nodes(G, c, x, max_group_num)
node_colors, node_edge_colors = set_node_colors(c, x, cmap, colored_nodes)
# Set the position of nodes
if pos is None:
pos = calc_node_pos(G, **layout_kwd)
# Draw
nodes = nx.draw_networkx_nodes(
G,
pos,
node_color=[node_colors[d] for d in colored_nodes],
nodelist=colored_nodes,
ax=ax,
# zorder=3,
**draw_nodes_kwd
)
if nodes is not None:
nodes.set_zorder(3)
nodes.set_edgecolor([node_edge_colors[r] for r in colored_nodes])
draw_nodes_kwd_residual = draw_nodes_kwd.copy()
draw_nodes_kwd_residual["node_size"] = 0.1 * draw_nodes_kwd.get("node_size", 100)
nodes = nx.draw_networkx_nodes(
G,
pos,
node_color="#efefef",
nodelist=residuals,
node_shape="s",
ax=ax,
**draw_nodes_kwd_residual
)
if nodes is not None:
nodes.set_zorder(1)
nodes.set_edgecolor("#4d4d4d")
if draw_edge:
nx.draw_networkx_edges(
G.subgraph(colored_nodes + residuals), pos, ax=ax, **draw_edges_kwd
)
if font_size > 0:
nx.draw_networkx_labels(G, pos, ax=ax, font_size=font_size, **draw_labels_kwd)
ax.axis("off")
return ax, pos
def draw_interactive(G, c, x, hover_text=None, node_size=10.0, pos=None, cmap=None):
    colored_nodes, _, _ = classify_nodes(G, c, x)
    node_colors, node_edge_colors = set_node_colors(c, x, cmap, colored_nodes)
if pos is None:
pos = nx.spring_layout(G)
nodelist = [d for d in G.nodes()]
group_ids = [c[d] if c[d] is not None else "residual" for d in nodelist]
coreness = [x[d] if x[d] is not None else "residual" for d in nodelist]
node_size_list = [(x[d] + 1) if x[d] is not None else 1 / 2 for d in nodelist]
pos_x = [pos[d][0] for d in nodelist]
pos_y = [pos[d][1] for d in nodelist]
df = pd.DataFrame(
{
"x": pos_x,
"y": pos_y,
"name": nodelist,
"group_id": group_ids,
"coreness": coreness,
"node_size": node_size_list,
}
)
df["marker"] = df["group_id"].apply(
lambda s: "circle" if s != "residual" else "square"
)
df["hovertext"] = df.apply(
lambda s: "{ht}<br>Group: {group}<br>Coreness: {coreness}".format(
ht="Node %s" % s["name"]
if hover_text is None
else hover_text.get(s["name"], ""),
group=s["group_id"],
coreness=s["coreness"],
),
axis=1,
)
fig = go.Figure(
data=go.Scatter(
x=df["x"],
y=df["y"],
marker_size=df["node_size"],
marker_symbol=df["marker"],
hovertext=df["hovertext"],
hoverlabel=dict(namelength=0),
hovertemplate="%{hovertext}",
            marker={
                "color": [node_colors[d] for d in nodelist],
                "sizeref": 1.0 / node_size,
                "line": {"color": [node_edge_colors[d] for d in nodelist], "width": 1},
},
mode="markers",
),
)
fig.update_layout(
autosize=False,
width=800,
height=800,
template="plotly_white",
# layout=go.Layout(xaxis={"showgrid": False}, yaxis={"showgrid": True}),
)
return fig
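# Illustrative usage sketch (assumed example data, not part of the original
# module): color Zachary's karate club as a single group whose "coreness" is
# its normalised degree, then render it with draw().
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    G_demo = nx.karate_club_graph()
    deg = dict(G_demo.degree())
    max_deg = max(deg.values())
    c_demo = {d: 0 for d in G_demo.nodes()}
    x_demo = {d: deg[d] / max_deg for d in G_demo.nodes()}
    fig, ax = plt.subplots()
    draw(G_demo, c_demo, x_demo, ax)
    plt.show()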
|
[
"numpy.argsort",
"networkx.draw_networkx_nodes",
"networkx.draw_networkx_labels",
"numpy.arange",
"seaborn.color_palette",
"networkx.spring_layout",
"networkx.from_scipy_sparse_matrix",
"numpy.linspace",
"networkx.from_numpy_array",
"pandas.DataFrame",
"scipy.sparse.csr_matrix",
"networkx.adjacency_matrix",
"seaborn.light_palette",
"scipy.sparse.issparse",
"seaborn.dark_palette",
"fa2.ForceAtlas2",
"collections.Counter",
"collections.defaultdict",
"matplotlib.colors.BoundaryNorm"
] |
[((293, 313), 'scipy.sparse.issparse', 'sparse.issparse', (['net'], {}), '(net)\n', (308, 313), False, 'from scipy import sparse\n'), ((792, 812), 'scipy.sparse.issparse', 'sparse.issparse', (['net'], {}), '(net)\n', (807, 812), False, 'from scipy import sparse\n'), ((1070, 1102), 'collections.defaultdict', 'defaultdict', (["(lambda x: '#8d8d8d')"], {}), "(lambda x: '#8d8d8d')\n", (1081, 1102), False, 'from collections import Counter, defaultdict\n'), ((1126, 1158), 'collections.defaultdict', 'defaultdict', (["(lambda x: '#4d4d4d')"], {}), "(lambda x: '#4d4d4d')\n", (1137, 1158), False, 'from collections import Counter, defaultdict\n'), ((1170, 1208), 'collections.Counter', 'Counter', (['[c[d] for d in colored_nodes]'], {}), '([c[d] for d in colored_nodes])\n', (1177, 1208), False, 'from collections import Counter, defaultdict\n'), ((1717, 1738), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (1728, 1738), True, 'import numpy as np\n'), ((1750, 1808), 'matplotlib.colors.BoundaryNorm', 'mpl.colors.BoundaryNorm', (['bounds'], {'ncolors': '(12)', 'extend': '"""both"""'}), "(bounds, ncolors=12, extend='both')\n", (1773, 1808), True, 'import matplotlib as mpl\n'), ((2537, 2575), 'collections.Counter', 'Counter', (['[c[d] for d in non_residuals]'], {}), '([c[d] for d in non_residuals])\n', (2544, 2575), False, 'from collections import Counter, defaultdict\n'), ((2919, 2960), 'numpy.argsort', 'np.argsort', (['[x[d] for d in colored_nodes]'], {}), '([x[d] for d in colored_nodes])\n', (2929, 2960), True, 'import numpy as np\n'), ((3786, 3815), 'fa2.ForceAtlas2', 'ForceAtlas2', ([], {}), '(**default_params)\n', (3797, 3815), False, 'from fa2 import ForceAtlas2\n'), ((5833, 5968), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'node_color': '[node_colors[d] for d in colored_nodes]', 'nodelist': 'colored_nodes', 'ax': 'ax'}), '(G, pos, node_color=[node_colors[d] for d in\n colored_nodes], nodelist=colored_nodes, ax=ax, **draw_nodes_kwd)\n', (5855, 5968), True, 'import networkx as nx\n'), ((6318, 6444), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'node_color': '"""#efefef"""', 'nodelist': 'residuals', 'node_shape': '"""s"""', 'ax': 'ax'}), "(G, pos, node_color='#efefef', nodelist=residuals,\n node_shape='s', ax=ax, **draw_nodes_kwd_residual)\n", (6340, 6444), True, 'import networkx as nx\n'), ((7466, 7600), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': pos_x, 'y': pos_y, 'name': nodelist, 'group_id': group_ids,\n 'coreness': coreness, 'node_size': node_size_list}"], {}), "({'x': pos_x, 'y': pos_y, 'name': nodelist, 'group_id':\n group_ids, 'coreness': coreness, 'node_size': node_size_list})\n", (7478, 7600), True, 'import pandas as pd\n'), ((829, 861), 'networkx.from_scipy_sparse_matrix', 'nx.from_scipy_sparse_matrix', (['net'], {}), '(net)\n', (856, 861), True, 'import networkx as nx\n'), ((6768, 6846), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos'], {'ax': 'ax', 'font_size': 'font_size'}), '(G, pos, ax=ax, font_size=font_size, **draw_labels_kwd)\n', (6791, 6846), True, 'import networkx as nx\n'), ((7077, 7096), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (7093, 7096), True, 'import networkx as nx\n'), ((408, 448), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['net'], {'dtype': 'np.float64'}), '(net, dtype=np.float64)\n', (425, 448), False, 'from scipy import sparse\n'), ((450, 473), 'numpy.arange', 'np.arange', (['net.shape[0]'], {}), '(net.shape[0])\n', (459, 
473), True, 'import numpy as np\n'), ((976, 1000), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['net'], {}), '(net)\n', (995, 1000), True, 'import networkx as nx\n'), ((1901, 1934), 'seaborn.light_palette', 'sns.light_palette', (['v'], {'n_colors': '(12)'}), '(v, n_colors=12)\n', (1918, 1934), True, 'import seaborn as sns\n'), ((2013, 2045), 'seaborn.dark_palette', 'sns.dark_palette', (['v'], {'n_colors': '(12)'}), '(v, n_colors=12)\n', (2029, 2045), True, 'import seaborn as sns\n'), ((562, 586), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['net'], {}), '(net)\n', (581, 586), True, 'import networkx as nx\n'), ((696, 736), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['net'], {'dtype': 'np.float64'}), '(net, dtype=np.float64)\n', (713, 736), False, 'from scipy import sparse\n'), ((738, 761), 'numpy.arange', 'np.arange', (['net.shape[0]'], {}), '(net.shape[0])\n', (747, 761), True, 'import numpy as np\n'), ((1330, 1349), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (1347, 1349), True, 'import seaborn as sns\n'), ((1409, 1435), 'seaborn.color_palette', 'sns.color_palette', (['"""tab20"""'], {}), "('tab20')\n", (1426, 1435), True, 'import seaborn as sns\n'), ((1478, 1514), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', 'num_groups'], {}), "('hls', num_groups)\n", (1495, 1514), True, 'import seaborn as sns\n')]
|
"""
A sampler defines a method to sample random data from a certain distribution.
"""
from typing import List
import numpy as np
class BaseSampler(object):
def __init__(self):
pass
def sample(self, shape, *args):
raise NotImplementedError
class IntSampler(BaseSampler):
def __init__(self, low, high=None):
super(IntSampler, self).__init__()
if high is None:
self.low = 0
self.high = low
else:
self.low = low
self.high = high
def sample(self, shape, *args):
return np.random.randint(low=self.low, high=self.high, size=shape, dtype=np.int64)
class UniformSampler(BaseSampler):
def __init__(self, low, high):
super(UniformSampler, self).__init__()
self.low = np.array(low)
self.high = np.array(high)
assert self.low.shape == self.high.shape, 'The shape of low and high must be the same. Got low type {} and high type {}'.format(
self.low.shape, self.high.shape)
def sample(self, shape, *args):
return np.random.uniform(low=self.low, high=self.high, size=shape + self.low.shape).astype(np.float32)
class GaussianSampler(BaseSampler):
def __init__(self, mu=0.0, sigma=1.0):
super(GaussianSampler, self).__init__()
self.mu = mu
self.sigma = sigma
def sample(self, shape, *args):
return np.random.normal(self.mu, self.sigma, shape)
class GaussianMixtureSampler(BaseSampler):
""" Sample from GMM with prior probability distribution """
def __init__(self, mu: List, sigma: List, prob=None):
assert type(mu) == list and type(sigma) == list, 'mu and sigma must be list'
assert len(mu) == len(sigma), 'length of mu and sigma must be the same'
if type(prob) == list:
assert len(mu) == len(prob) and np.sum(prob) == 1., 'The sum of probability list should be 1.'
super(GaussianMixtureSampler, self).__init__()
self.mu = mu
self.sigma = sigma
self.prob = prob
def sample(self, shape, *args):
ind = np.random.choice(len(self.mu), p=self.prob)
return np.random.randn(*shape) * self.sigma[ind] + self.mu[ind]
class ConditionGaussianSampler(BaseSampler):
""" Conditional Gaussian sampler """
def __init__(self, mu: List, sigma: List):
assert type(mu) == list and type(sigma) == list, 'mu and sigma must be list'
assert len(mu) == len(sigma), 'length of mu and sigma must be the same'
super(ConditionGaussianSampler, self).__init__()
self.mu = np.expand_dims(np.array(mu), axis=1)
self.sigma = np.expand_dims(np.array(sigma), axis=1)
def sample(self, shape, *args):
ind = args[0]
return np.random.randn(*shape) * self.sigma[ind] + self.mu[ind]
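# Illustrative usage sketch (assumed shapes and parameters, not part of the
# original module): draw a batch from a two-component Gaussian mixture and a
# batch of class-conditional Gaussians.
if __name__ == '__main__':
    gmm = GaussianMixtureSampler(mu=[-2.0, 2.0], sigma=[0.5, 0.5], prob=[0.5, 0.5])
    z = gmm.sample((16, 8))  # 16 samples of dimension 8
    cond = ConditionGaussianSampler(mu=[-1.0, 0.0, 1.0], sigma=[0.1, 0.2, 0.3])
    z_cond = cond.sample((16, 8), 1)  # condition on class index 1
    print(z.shape, z_cond.shape)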
|
[
"numpy.random.normal",
"numpy.array",
"numpy.random.randint",
"numpy.sum",
"numpy.random.uniform",
"numpy.random.randn"
] |
[((582, 657), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'self.low', 'high': 'self.high', 'size': 'shape', 'dtype': 'np.int64'}), '(low=self.low, high=self.high, size=shape, dtype=np.int64)\n', (599, 657), True, 'import numpy as np\n'), ((796, 809), 'numpy.array', 'np.array', (['low'], {}), '(low)\n', (804, 809), True, 'import numpy as np\n'), ((830, 844), 'numpy.array', 'np.array', (['high'], {}), '(high)\n', (838, 844), True, 'import numpy as np\n'), ((1405, 1449), 'numpy.random.normal', 'np.random.normal', (['self.mu', 'self.sigma', 'shape'], {}), '(self.mu, self.sigma, shape)\n', (1421, 1449), True, 'import numpy as np\n'), ((2607, 2619), 'numpy.array', 'np.array', (['mu'], {}), '(mu)\n', (2615, 2619), True, 'import numpy as np\n'), ((2665, 2680), 'numpy.array', 'np.array', (['sigma'], {}), '(sigma)\n', (2673, 2680), True, 'import numpy as np\n'), ((1080, 1156), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.low', 'high': 'self.high', 'size': '(shape + self.low.shape)'}), '(low=self.low, high=self.high, size=shape + self.low.shape)\n', (1097, 1156), True, 'import numpy as np\n'), ((2159, 2182), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2174, 2182), True, 'import numpy as np\n'), ((2764, 2787), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2779, 2787), True, 'import numpy as np\n'), ((1858, 1870), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (1864, 1870), True, 'import numpy as np\n')]
|
import numpy as np
class StandardDeviation():
@staticmethod
def standardDeviation(data):
return np.std(data)
|
[
"numpy.std"
] |
[((113, 125), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (119, 125), True, 'import numpy as np\n')]
|
import numpy as np
def bowl(vs, v_ref=1.0, scale=.1):
def normal(v, loc, scale):
return 1 / np.sqrt(2 * np.pi * scale**2) * np.exp( - 0.5 * np.square(v - loc) / scale**2 )
def _bowl(v):
if np.abs(v-v_ref) > 0.05:
return 2 * np.abs(v-v_ref) - 0.095
else:
return - 0.01 * normal(v, v_ref, scale) + 0.04
return np.array([_bowl(v) for v in vs])
|
[
"numpy.abs",
"numpy.sqrt",
"numpy.square"
] |
[((216, 233), 'numpy.abs', 'np.abs', (['(v - v_ref)'], {}), '(v - v_ref)\n', (222, 233), True, 'import numpy as np\n'), ((107, 138), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * scale ** 2)'], {}), '(2 * np.pi * scale ** 2)\n', (114, 138), True, 'import numpy as np\n'), ((263, 280), 'numpy.abs', 'np.abs', (['(v - v_ref)'], {}), '(v - v_ref)\n', (269, 280), True, 'import numpy as np\n'), ((155, 173), 'numpy.square', 'np.square', (['(v - loc)'], {}), '(v - loc)\n', (164, 173), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 13:43:01 2016
@author: fergal
A series of metrics to quantify the noise in a lightcurve:
Includes:
x sgCdpp
x Marshall's noise estimate
o An FT based estimate of 6 hour artifact strength.
o A per thruster firing estimate of 6 hour artifact strength.
$Id$
$URL$
"""
__version__ = "$Id$"
__URL__ = "$URL$"
from scipy.signal import savgol_filter
import matplotlib.pyplot as mp
import numpy as np
import fft
keplerLongCadence_s = 1765.4679
keplerLongCadence_days = keplerLongCadence_s / float(86400)
def computeRollTweakAmplitude(y, nHarmonics = 3, tweakPeriod_days = .25, \
expTime_days=None, plot=False):
"""Compute strength of roll tweak artifact in K2 data with an FT approach.
Compute FT of lightcurve
Optional Inputs:
-----------------
plot
Show a diagnostic plot
Returns:
--------
float indicating strength of correction. A value of 1 means the
amplitude of the tweak is approx equal to the strength of all other
signals in the FT.
"""
if expTime_days is None:
expTime_days = keplerLongCadence_days
#computes FT with frequencies in cycles per days
ft = fft.computeFft(y, expTime_days)
#Thruster firings every 6 hours
artifactFreq_cd = 1/tweakPeriod_days #cycles per day
if plot:
mp.clf()
mp.plot(ft[:,0], 1e6*ft[:,1], 'b-')
metric = 0
nPtsForMed = 50
for i in range(1, nHarmonics+1):
wh = np.argmin( np.fabs(ft[:,0] - i*artifactFreq_cd))
med = np.median(ft[wh-nPtsForMed:wh+nPtsForMed, 1])
metric += ft[wh, 1] / med
if plot:
mp.axvline(i*artifactFreq_cd, color='m')
return metric / float(nHarmonics)
def computeSgCdpp_ppm(y, transitDuration_cadences=13, plot=False):
    """Estimates 6hr CDPP using <NAME> Cleve's Savitzky-Golay technique
An interesting estimate of the noise in a lightcurve is the scatter
after all long term trends have been removed. This is the kernel of
the idea behind the Combined Differential Photometric Precision (CDPP)
metric used in classic Kepler. <NAME> devised a much simpler
    algorithm for computing CDPP using a Savitzky-Golay detrending, which
    he called Savitzky-Golay CDPP, or SG-CDPP. We implement his algorithm
here.
Inputs:
----------
y
(1d numpy array) normalised flux to calculate noise from. Flux
should have a mean of zero and be in units of fractional amplitude.
Note: Bad data in input will skew result. Some filtering of
outliers is performed, but Nan's or Infs will not be caught.
Optional Inputs:
-----------------
transitDuration_cadences
(int) Adjust the assumed transit width, in cadences. Default is
13, which corresponds to a 6.5 hour transit in K2
plot
Show a diagnostic plot
Returns:
------------
Estimated noise in parts per million.
Notes:
-------------
Taken from
svn+ssh://murzim/repo/so/trunk/Develop/jvc/common/compute_SG_noise.m
by <NAME>
"""
#These 3 values were chosen for the original algorithm, and we don't
#change them here.
window = 101
polyorder=2
noiseNorm = 1.40
#Name change for consistency with original algorithm
cadencesPerTransit = transitDuration_cadences
if cadencesPerTransit < 4:
raise ValueError("Cadences per transit must be >= 4")
if len(y) < window:
raise ValueError("Can't compute CDPP for timeseries with fewer points than defined window (%i points)" %(window))
trend = savgol_filter(y, window_length=window, polyorder=polyorder)
detrend = y-trend
filtered = np.ones(cadencesPerTransit)/float(cadencesPerTransit)
smoothed = np.convolve(detrend, filtered, mode='same')
if plot:
mp.clf()
mp.plot(y, 'ko')
mp.plot(trend, 'r-')
mp.plot(smoothed, 'g.')
sgCdpp_ppm = noiseNorm*robustStd(smoothed, 1)*1e6
return sgCdpp_ppm
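# Illustrative usage sketch (synthetic input, not part of the original module):
# estimate the 6.5-hour CDPP of a flat lightcurve with 100 ppm white noise per
# cadence; because the metric averages over the assumed transit duration, the
# returned value comes out well below the per-cadence scatter.
def _demoSgCdpp():
    y = 100e-6 * np.random.randn(2000)  # zero-mean fractional flux
    return computeSgCdpp_ppm(y)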
def estimateScatterWithMarshallMethod(flux, plot=False):
"""Estimate the typical scatter in a lightcurve.
Uses the same method as Marshall (Mullally et al 2016 submitted)
Inputs:
----------
flux
(np 1d array). Flux to measure scatter of. Need not have
zero mean.
Optional Inputs:
-----------------
plot
Show a diagnostic plot
Returns:
------------
(float) scatter of data in the same units as in the input ``flux``
Notes:
----------
    The algorithm is reasonably sensitive to outliers. For best results,
    apply outlier rejection to your lightcurve before computing the scatter.
    NaNs and infs in the lightcurve will propagate to the return value.
"""
diff= np.diff(flux)
#Remove egregious outliers. Shouldn't make much difference
idx = sigmaClip(diff, 5)
diff = diff[~idx]
mean = np.mean(diff)
mad = np.median(np.fabs(diff-mean))
std = 1.4826*mad
if plot:
mp.clf()
mp.plot(flux, 'ko')
mp.plot(diff, 'r.')
mp.figure(2)
mp.clf()
bins = np.linspace(-3000, 3000, 61)
mp.hist(1e6*diff, bins=bins, ec="none")
mp.xlim(-3000, 3000)
mp.axvline(-1e6*float(std/np.sqrt(2)), color='r')
mp.axvline(1e6*float(std/np.sqrt(2)), color='r')
#std is the rms of the diff. std on single point
#is 1/sqrt(2) of that value,
return float(std/np.sqrt(2))
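# Illustrative check (synthetic input, not part of the original module): the
# first difference of pure white noise has sqrt(2) times the single-point
# scatter, so for a flat noisy lightcurve the estimate above should recover
# the per-point standard deviation.
def _demoMarshall():
    flux = 1e-3 * np.random.randn(5000)
    return estimateScatterWithMarshallMethod(flux)  # roughly 1e-3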
def singlePointDifferenceSigmaClip(a, nSigma=4, maxIter=1e4, initialClip=None):
"""Iteratively find and remove outliers in first derivative
If a dataset can be modeled as a constant offset + noise + outliers,
those outliers can be found and rejected with a sigma-clipping approach.
If the data contains some time-varying signal, this signal must be removed
before applying a sigma clip. This function removes the signal by applying
a single point difference.
The function computes a[i+1] - a[i], and sigma clips the result. Slowly
varying trends will have single point differences that are dominated by noise,
but outliers have strong first derivatives and will show up strongly in this
metric.
Inputs:
----------
y
(1d numpy array) Array to be cleaned
nSigma
(float) Threshold to cut at. 5 is typically a good value for
most arrays found in practice.
Optional Inputs:
-------------------
maxIter
(int) Maximum number of iterations
initialClip
(1d boolean array) If an element of initialClip is set to True,
that value is treated as a bad value in the first iteration, and
not included in the computation of the mean and std.
Returns:
------------
1d numpy array. Where set to True, the corresponding element of y
is an outlier.
"""
    #Scatter in the single point difference is root 2 times larger
#than in initial lightcurve
threshold = nSigma/np.sqrt(2)
diff1 = np.roll(a, -1) - a
diff1[-1] = 0 #Don't trust the last value because a[-1] not necessarily equal to a
idx1 = sigmaClip(diff1, nSigma, maxIter, initialClip)
diff2 = np.roll(a, 1) - a
diff2[0] = 0
idx2 = sigmaClip(diff2, nSigma, maxIter, initialClip)
flags = idx1 & idx2
#This bit of magic ensures only single point outliers are marked,
#not strong trends in the data. It insists that the previous point
#in difference time series is an outlier in the opposite direction, otherwise
#the point is considered unflagged. This prevents marking transits as bad data.
outlierIdx = flags
outlierIdx &= np.roll(idx1, 1)
outlierIdx &= (np.roll(diff1, 1) * diff1 < 0)
return outlierIdx
def sigmaClip(y, nSigma, maxIter=1e4, initialClip=None):
"""Iteratively find and remove outliers
    Find outliers by identifying all points more than **nSigma** from
    the mean value. Then recalculate the mean and std and repeat until
no more outliers found.
Inputs:
----------
y
(1d numpy array) Array to be cleaned
nSigma
(float) Threshold to cut at. 5 is typically a good value for
most arrays found in practice.
Optional Inputs:
-------------------
maxIter
(int) Maximum number of iterations
initialClip
(1d boolean array) If an element of initialClip is set to True,
that value is treated as a bad value in the first iteration, and
not included in the computation of the mean and std.
Returns:
------------
1d numpy array. Where set to True, the corresponding element of y
is an outlier.
"""
#import matplotlib.pyplot as mp
idx = initialClip
if initialClip is None:
idx = np.zeros( len(y), dtype=bool)
assert(len(idx) == len(y))
#x = np.arange(len(y))
#mp.plot(x, y, 'k.')
oldNumClipped = np.sum(idx)
for i in range(int(maxIter)):
mean = np.nanmean(y[~idx])
std = np.nanstd(y[~idx])
newIdx = np.fabs(y-mean) > nSigma*std
newIdx = np.logical_or(idx, newIdx)
newNumClipped = np.sum(newIdx)
#print "Iter %i: %i (%i) clipped points " \
#%(i, newNumClipped, oldNumClipped)
if newNumClipped == oldNumClipped:
return newIdx
oldNumClipped = newNumClipped
idx = newIdx
i+=1
return idx
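# Illustrative usage sketch (assumed data): flag values more than 4 sigma from
# the mean and keep only the unflagged ones.
def _demoSigmaClip():
    y = np.random.randn(1000)
    y[::100] += 50  # inject a few strong outliers
    bad = sigmaClip(y, nSigma=4)
    return y[~bad]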
def robustMean(y, percent):
"""Compute the mean of the percent.. 100-percent percentile points
A fast, and typically good enough estimate of the mean in the presence
of outliers.
"""
ySorted = np.sort( y[np.isfinite(y)] )
num = len(ySorted)
lwr = int( percent/100. * num)
upr = int( (100-percent)/100. * num)
return np.mean( ySorted[lwr:upr])
def robustStd(y, percent):
"""Compute a robust standard deviation with JVC's technique
A fast, and typically good enough estimate of the mean in the presence
of outliers.Cuts out 1st and 99th percentile values and computes std
of the rest. Used by computeSgCdpp() to match the behaviour of
<NAME> Cleve's original algorithm
Taken from
svn+ssh://murzim/repo/so/trunk/Develop/jvc/common/robust_std.m
"""
ySorted = np.sort( y[np.isfinite(y)] )
num = len(ySorted)
lwr = int( percent/100. * num)
upr = int( (100-percent)/100. * num)
return np.std( ySorted[lwr:upr])
|
[
"numpy.convolve",
"matplotlib.pyplot.hist",
"numpy.sqrt",
"scipy.signal.savgol_filter",
"numpy.nanmean",
"numpy.isfinite",
"fft.computeFft",
"numpy.mean",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.linspace",
"numpy.nanstd",
"numpy.ones",
"numpy.std",
"matplotlib.pyplot.xlim",
"numpy.fabs",
"numpy.median",
"numpy.roll",
"matplotlib.pyplot.clf",
"numpy.logical_or",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axvline"
] |
[((1197, 1228), 'fft.computeFft', 'fft.computeFft', (['y', 'expTime_days'], {}), '(y, expTime_days)\n', (1211, 1228), False, 'import fft\n'), ((3608, 3667), 'scipy.signal.savgol_filter', 'savgol_filter', (['y'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(y, window_length=window, polyorder=polyorder)\n', (3621, 3667), False, 'from scipy.signal import savgol_filter\n'), ((3775, 3818), 'numpy.convolve', 'np.convolve', (['detrend', 'filtered'], {'mode': '"""same"""'}), "(detrend, filtered, mode='same')\n", (3786, 3818), True, 'import numpy as np\n'), ((4759, 4772), 'numpy.diff', 'np.diff', (['flux'], {}), '(flux)\n', (4766, 4772), True, 'import numpy as np\n'), ((4900, 4913), 'numpy.mean', 'np.mean', (['diff'], {}), '(diff)\n', (4907, 4913), True, 'import numpy as np\n'), ((7666, 7682), 'numpy.roll', 'np.roll', (['idx1', '(1)'], {}), '(idx1, 1)\n', (7673, 7682), True, 'import numpy as np\n'), ((8912, 8923), 'numpy.sum', 'np.sum', (['idx'], {}), '(idx)\n', (8918, 8923), True, 'import numpy as np\n'), ((9772, 9797), 'numpy.mean', 'np.mean', (['ySorted[lwr:upr]'], {}), '(ySorted[lwr:upr])\n', (9779, 9797), True, 'import numpy as np\n'), ((10392, 10416), 'numpy.std', 'np.std', (['ySorted[lwr:upr]'], {}), '(ySorted[lwr:upr])\n', (10398, 10416), True, 'import numpy as np\n'), ((1348, 1356), 'matplotlib.pyplot.clf', 'mp.clf', ([], {}), '()\n', (1354, 1356), True, 'import matplotlib.pyplot as mp\n'), ((1365, 1410), 'matplotlib.pyplot.plot', 'mp.plot', (['ft[:, 0]', '(1000000.0 * ft[:, 1])', '"""b-"""'], {}), "(ft[:, 0], 1000000.0 * ft[:, 1], 'b-')\n", (1372, 1410), True, 'import matplotlib.pyplot as mp\n'), ((1551, 1600), 'numpy.median', 'np.median', (['ft[wh - nPtsForMed:wh + nPtsForMed, 1]'], {}), '(ft[wh - nPtsForMed:wh + nPtsForMed, 1])\n', (1560, 1600), True, 'import numpy as np\n'), ((3706, 3733), 'numpy.ones', 'np.ones', (['cadencesPerTransit'], {}), '(cadencesPerTransit)\n', (3713, 3733), True, 'import numpy as np\n'), ((3841, 3849), 'matplotlib.pyplot.clf', 'mp.clf', ([], {}), '()\n', (3847, 3849), True, 'import matplotlib.pyplot as mp\n'), ((3858, 3874), 'matplotlib.pyplot.plot', 'mp.plot', (['y', '"""ko"""'], {}), "(y, 'ko')\n", (3865, 3874), True, 'import matplotlib.pyplot as mp\n'), ((3883, 3903), 'matplotlib.pyplot.plot', 'mp.plot', (['trend', '"""r-"""'], {}), "(trend, 'r-')\n", (3890, 3903), True, 'import matplotlib.pyplot as mp\n'), ((3912, 3935), 'matplotlib.pyplot.plot', 'mp.plot', (['smoothed', '"""g."""'], {}), "(smoothed, 'g.')\n", (3919, 3935), True, 'import matplotlib.pyplot as mp\n'), ((4934, 4954), 'numpy.fabs', 'np.fabs', (['(diff - mean)'], {}), '(diff - mean)\n', (4941, 4954), True, 'import numpy as np\n'), ((4997, 5005), 'matplotlib.pyplot.clf', 'mp.clf', ([], {}), '()\n', (5003, 5005), True, 'import matplotlib.pyplot as mp\n'), ((5014, 5033), 'matplotlib.pyplot.plot', 'mp.plot', (['flux', '"""ko"""'], {}), "(flux, 'ko')\n", (5021, 5033), True, 'import matplotlib.pyplot as mp\n'), ((5042, 5061), 'matplotlib.pyplot.plot', 'mp.plot', (['diff', '"""r."""'], {}), "(diff, 'r.')\n", (5049, 5061), True, 'import matplotlib.pyplot as mp\n'), ((5070, 5082), 'matplotlib.pyplot.figure', 'mp.figure', (['(2)'], {}), '(2)\n', (5079, 5082), True, 'import matplotlib.pyplot as mp\n'), ((5091, 5099), 'matplotlib.pyplot.clf', 'mp.clf', ([], {}), '()\n', (5097, 5099), True, 'import matplotlib.pyplot as mp\n'), ((5115, 5143), 'numpy.linspace', 'np.linspace', (['(-3000)', '(3000)', '(61)'], {}), '(-3000, 3000, 61)\n', (5126, 5143), True, 'import numpy as np\n'), ((5152, 5199), 
'matplotlib.pyplot.hist', 'mp.hist', (['(1000000.0 * diff)'], {'bins': 'bins', 'ec': '"""none"""'}), "(1000000.0 * diff, bins=bins, ec='none')\n", (5159, 5199), True, 'import matplotlib.pyplot as mp\n'), ((5201, 5221), 'matplotlib.pyplot.xlim', 'mp.xlim', (['(-3000)', '(3000)'], {}), '(-3000, 3000)\n', (5208, 5221), True, 'import matplotlib.pyplot as mp\n'), ((6981, 6991), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6988, 6991), True, 'import numpy as np\n'), ((7009, 7023), 'numpy.roll', 'np.roll', (['a', '(-1)'], {}), '(a, -1)\n', (7016, 7023), True, 'import numpy as np\n'), ((7191, 7204), 'numpy.roll', 'np.roll', (['a', '(1)'], {}), '(a, 1)\n', (7198, 7204), True, 'import numpy as np\n'), ((8973, 8992), 'numpy.nanmean', 'np.nanmean', (['y[~idx]'], {}), '(y[~idx])\n', (8983, 8992), True, 'import numpy as np\n'), ((9007, 9025), 'numpy.nanstd', 'np.nanstd', (['y[~idx]'], {}), '(y[~idx])\n', (9016, 9025), True, 'import numpy as np\n'), ((9090, 9116), 'numpy.logical_or', 'np.logical_or', (['idx', 'newIdx'], {}), '(idx, newIdx)\n', (9103, 9116), True, 'import numpy as np\n'), ((9141, 9155), 'numpy.sum', 'np.sum', (['newIdx'], {}), '(newIdx)\n', (9147, 9155), True, 'import numpy as np\n'), ((1498, 1537), 'numpy.fabs', 'np.fabs', (['(ft[:, 0] - i * artifactFreq_cd)'], {}), '(ft[:, 0] - i * artifactFreq_cd)\n', (1505, 1537), True, 'import numpy as np\n'), ((1661, 1703), 'matplotlib.pyplot.axvline', 'mp.axvline', (['(i * artifactFreq_cd)'], {'color': '"""m"""'}), "(i * artifactFreq_cd, color='m')\n", (1671, 1703), True, 'import matplotlib.pyplot as mp\n'), ((5445, 5455), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5452, 5455), True, 'import numpy as np\n'), ((7702, 7719), 'numpy.roll', 'np.roll', (['diff1', '(1)'], {}), '(diff1, 1)\n', (7709, 7719), True, 'import numpy as np\n'), ((9044, 9061), 'numpy.fabs', 'np.fabs', (['(y - mean)'], {}), '(y - mean)\n', (9051, 9061), True, 'import numpy as np\n'), ((9643, 9657), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (9654, 9657), True, 'import numpy as np\n'), ((10263, 10277), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (10274, 10277), True, 'import numpy as np\n'), ((5256, 5266), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5263, 5266), True, 'import numpy as np\n'), ((5313, 5323), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5320, 5323), True, 'import numpy as np\n')]
|
# Notes from this experiment:
# 1. adapt() is way slower than np.unique -- takes forever for 1M, hangs for 10M
# 2. TF returns error if adapt is inside tf.function. adapt uses graph inside anyway
# 3. OOM in batch mode during sparse_to_dense despite setting sparse in keras
# 4. Mini-batch works but 15x(g)/20x slower than sklearn
# 5. Always replace NaNs in string cols as np.nan is float
# 6. Full graph mode lazily triggers all models together -- produce OOM
# 7. Partial graph mode sequentially execute graph-models
# TODO1: all sparse intermediates, including the outputs
# TODO2: Tune mini-batch size for best performance
import sys
import time
import numpy as np
import scipy as sp
from scipy.sparse import csr_matrix
import pandas as pd
import math
import warnings
import os
# Force to CPU (default is GPU)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.keras import layers
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
warnings.filterwarnings('ignore') #cleaner, but not recommended
def readNprep(nRows):
# Read the 1M or the 10M dataset
if nRows == 1:
print("Reading file: criteo_day21_1M")
criteo = pd.read_csv("~/datasets/criteo_day21_1M", delimiter=",", header=None)
else:
print("Reading file: criteo_day21_10M")
criteo = pd.read_csv("~/datasets/criteo_day21_10M", delimiter=",", header=None)
print(criteo.head())
# Replace NaNs with 0 for numeric and empty string for categorical
criteo = criteo.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna(''))
# Pandas infer the type of first 14 columns as float and int.
    # SystemDS reads those as STRINGS and applies a passthrough FT on those.
    # For a fair comparison, convert those here to str and later back to float
pt = [*range(0,14)]
criteo[pt] = criteo[pt].astype(str)
#print(criteo.info())
return criteo
def getCategoricalLayer(X, name, useNumpy):
# NaN handling. np.nan is a float, which leads to ValueError for str cols
X[name].fillna('', inplace=True)
if useNumpy:
vocab = np.unique(X[name].astype(np.string_))
onehot = layers.StringLookup(vocabulary=vocab, output_mode="multi_hot", num_oov_indices=0, sparse=True)
# adapt is not required if vocabulary is passed
else:
onehot = layers.StringLookup(output_mode="multi_hot", num_oov_indices=0)
df2tf = tf.convert_to_tensor(np.array(X[name], dtype=np.string_))
onehot.adapt(df2tf)
#print("#uniques in col ", name, " is ", onehot.vocabulary_size())
return onehot
def getLayers(X):
    # Pass-through transformation -- convert to float
pt = [*range(0,14)]
X[pt] = X[pt].astype(np.float64)
# Build a dictionary with symbolic input tensors w/ proper dtype
inputs = {}
for name, column in X.items():
dtype = column.dtype
if dtype == object:
dtype = tf.string
else:
dtype = tf.float64
inputs[name] = tf.keras.Input(shape=(1,), dtype=dtype, sparse=True)
    # Separate out the numeric inputs
numeric = {name:input for name,input in inputs.items()
if input.dtype==tf.float64}
# Concatenate the numeric inputs together and
# add to the list of layers as is
prepro = [layers.Concatenate()(list(numeric.values()))]
# Recode and dummycode the string inputs
for name, input in inputs.items():
if input.dtype == tf.float64:
continue
onehot = getCategoricalLayer(X, name, True) #use np.unique
encoded = onehot(input)
# Append to the same list
prepro.append(encoded)
# Concatenate all the preprocessed inputs together,
# and build a model to apply batch wise later
cat_layers = layers.Concatenate()(prepro)
print(cat_layers)
model_prep = tf.keras.Model(inputs, cat_layers)
return model_prep
def lazyGraphTransform(X, model, n, isSmall):
# This method builds a graph of all the mini-batch transformations
# by pushing the loop-slicing logic inside a tf.function.
# However, lazily triggering all the models produce OOM
X_dict = {name: tf.convert_to_tensor(np.array(value)) for name, value in X.items()}
res = batchTransform(X_dict, model, X.shape[0], isSmall)
@tf.function
def batchTransform(X, model_prep, n, isSmall):
# Batch-wise transform to avoid OOM
# 10k/1.5k: best performance within memory budget
batch_size = 10000 if isSmall==1 else 1500
beg = 0
allRes = []
while beg < n:
end = beg + batch_size
if end > n:
end = n
batch_dict = {name: X[name][beg:end] for name, value in X.items()}
X_batch = model_prep(batch_dict)
print(X_batch[:1, :]) #print the placeholder
allRes.append(X_batch)
if end == n:
break
else:
beg = end
out = tf.stack(allRes, axis=0) #fix rank
print(out.shape)
return out
def batchGraphTransform(X, model, n, isSmall):
# Batch-wise eager transform to avoid OOM
# 10k/1.5k: best performance within memory budget
batch_size = 10000 if isSmall==1 else 1500
beg = 0
while beg < n:
end = beg + batch_size
if end > n:
end = n
batch_dict = {name: np.array(value)[beg:end] for name, value in X.items()}
X_batch = transform(batch_dict, model)
# Don't stack the results to avoid OOM
print(X_batch[:1, :]) #print first 1 row
if end == n:
break
else:
beg = end
@tf.function
def transform(X_dict, model_prep):
X_prep = model_prep(X_dict)
#print(X_prep[:5, :]) #print to verify lazy execution
return X_prep
isSmall = int(sys.argv[1]) #1M vs 10M subset of Criteo
X = readNprep(isSmall)
t1 = time.time()
model = getLayers(X)
# Lazy transform triggers all models together -- produces OOM
#res = lazyGraphTransform(X, model, X.shape[0], isSmall)
# Partially lazy mode keeps the slicing loop outside of tf.function
batchGraphTransform(X, model, X.shape[0], isSmall)
print("Elapsed time for transformations using tf-keras = %s sec" % (time.time() - t1))
#np.savetxt("X_prep_sk.csv", X_prep, fmt='%1.2f', delimiter=',') #dense
#sp.sparse.save_npz("X_prep_sk.npz", X_prep) #sparse
|
[
"tensorflow.keras.layers.Concatenate",
"pandas.read_csv",
"tensorflow.keras.layers.StringLookup",
"numpy.array",
"tensorflow.keras.Input",
"tensorflow.keras.Model",
"time.time",
"warnings.filterwarnings",
"tensorflow.stack",
"numpy.set_printoptions"
] |
[((1020, 1067), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (1039, 1067), True, 'import numpy as np\n'), ((1068, 1101), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1091, 1101), False, 'import warnings\n'), ((5797, 5808), 'time.time', 'time.time', ([], {}), '()\n', (5806, 5808), False, 'import time\n'), ((3889, 3923), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'cat_layers'], {}), '(inputs, cat_layers)\n', (3903, 3923), True, 'import tensorflow as tf\n'), ((4912, 4936), 'tensorflow.stack', 'tf.stack', (['allRes'], {'axis': '(0)'}), '(allRes, axis=0)\n', (4920, 4936), True, 'import tensorflow as tf\n'), ((1271, 1340), 'pandas.read_csv', 'pd.read_csv', (['"""~/datasets/criteo_day21_1M"""'], {'delimiter': '""","""', 'header': 'None'}), "('~/datasets/criteo_day21_1M', delimiter=',', header=None)\n", (1282, 1340), True, 'import pandas as pd\n'), ((1412, 1482), 'pandas.read_csv', 'pd.read_csv', (['"""~/datasets/criteo_day21_10M"""'], {'delimiter': '""","""', 'header': 'None'}), "('~/datasets/criteo_day21_10M', delimiter=',', header=None)\n", (1423, 1482), True, 'import pandas as pd\n'), ((2246, 2344), 'tensorflow.keras.layers.StringLookup', 'layers.StringLookup', ([], {'vocabulary': 'vocab', 'output_mode': '"""multi_hot"""', 'num_oov_indices': '(0)', 'sparse': '(True)'}), "(vocabulary=vocab, output_mode='multi_hot',\n num_oov_indices=0, sparse=True)\n", (2265, 2344), False, 'from tensorflow.keras import layers\n'), ((2420, 2483), 'tensorflow.keras.layers.StringLookup', 'layers.StringLookup', ([], {'output_mode': '"""multi_hot"""', 'num_oov_indices': '(0)'}), "(output_mode='multi_hot', num_oov_indices=0)\n", (2439, 2483), False, 'from tensorflow.keras import layers\n'), ((3067, 3119), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(1,)', 'dtype': 'dtype', 'sparse': '(True)'}), '(shape=(1,), dtype=dtype, sparse=True)\n', (3081, 3119), True, 'import tensorflow as tf\n'), ((3821, 3841), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {}), '()\n', (3839, 3841), False, 'from tensorflow.keras import layers\n'), ((2519, 2554), 'numpy.array', 'np.array', (['X[name]'], {'dtype': 'np.string_'}), '(X[name], dtype=np.string_)\n', (2527, 2554), True, 'import numpy as np\n'), ((3356, 3376), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {}), '()\n', (3374, 3376), False, 'from tensorflow.keras import layers\n'), ((4227, 4242), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (4235, 4242), True, 'import numpy as np\n'), ((6135, 6146), 'time.time', 'time.time', ([], {}), '()\n', (6144, 6146), False, 'import time\n'), ((5299, 5314), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (5307, 5314), True, 'import numpy as np\n')]
|
import warnings
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, delayed
from scipy.special import expit
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array, check_random_state
from sklearn.linear_model import LogisticRegression
from tqdm import tqdm
from .multidynet import initialize_node_effects_single
from .omega_lsm import update_omega
from .deltas_lsm import update_deltas
from .lds_lsm import update_latent_positions
from .variances import update_tau_sq, update_sigma_sq
__all__ = ['DynamicNetworkLSM']
class ModelParameters(object):
def __init__(self, omega, X, X_sigma, X_cross_cov,
delta, delta_sigma,
a_tau_sq, b_tau_sq, c_sigma_sq, d_sigma_sq):
self.omega_ = omega
self.X_ = X
self.X_sigma_ = X_sigma
self.X_cross_cov_ = X_cross_cov
self.delta_ = delta
self.delta_sigma_ = delta_sigma
self.a_tau_sq_ = a_tau_sq
self.b_tau_sq_ = b_tau_sq
self.c_sigma_sq_ = c_sigma_sq
self.d_sigma_sq_ = d_sigma_sq
self.converged_ = False
self.logp_ = []
def initialize_parameters(Y, n_features, delta_var_prior,
a, b, c, d, random_state):
rng = check_random_state(random_state)
n_time_steps, n_nodes, _ = Y.shape
# omega is initialized by drawing from the prior?
omega = np.zeros((n_time_steps, n_nodes, n_nodes))
    # initialize latent space randomly
X = rng.randn(n_time_steps, n_nodes, n_features)
    # initialize to marginal covariances
sigma_init = np.eye(n_features)
X_sigma = np.tile(
sigma_init[None, None], reps=(n_time_steps, n_nodes, 1, 1))
# initialize cross-covariances
cross_init = np.eye(n_features)
X_cross_cov = np.tile(
cross_init[None, None], reps=(n_time_steps - 1, n_nodes, 1, 1))
# initialize node-effects based on a logistic regression with
# no higher order structure
delta = initialize_node_effects_single(Y)
delta_sigma = delta_var_prior * np.ones(n_nodes)
# initialize based on prior information
a_tau_sq = a
b_tau_sq = b
c_sigma_sq = c
d_sigma_sq = d
return ModelParameters(
omega=omega, X=X, X_sigma=X_sigma, X_cross_cov=X_cross_cov,
delta=delta, delta_sigma=delta_sigma,
a_tau_sq=a_tau_sq, b_tau_sq=b_tau_sq, c_sigma_sq=c_sigma_sq,
d_sigma_sq=d_sigma_sq)
def optimize_elbo(Y, n_features, delta_var_prior, tau_sq, sigma_sq, a, b, c, d,
max_iter, tol, random_state, verbose=True):
    # convergence criterion (E_q{L(Y | theta)})
loglik = -np.infty
# initialize parameters of the model
model = initialize_parameters(
Y, n_features, delta_var_prior, a, b, c, d, random_state)
for n_iter in tqdm(range(max_iter), disable=not verbose):
prev_loglik = loglik
# coordinate ascent
# omega updates
loglik = update_omega(
Y, model.omega_, model.X_, model.X_sigma_,
model.delta_, model.delta_sigma_)
# latent trajectory updates
tau_sq_prec = (
model.a_tau_sq_ / model.b_tau_sq_ if tau_sq == 'auto' else
1. / tau_sq)
sigma_sq_prec = (
model.c_sigma_sq_ / model.d_sigma_sq_ if sigma_sq == 'auto' else
1. / sigma_sq)
update_latent_positions(
Y, model.X_, model.X_sigma_, model.X_cross_cov_,
model.delta_, model.omega_, tau_sq_prec, sigma_sq_prec)
# update node random effects
update_deltas(
Y, model.X_, model.delta_, model.delta_sigma_,
model.omega_, delta_var_prior)
        # update initial variance of the latent space
if tau_sq == 'auto':
model.a_tau_sq_, model.b_tau_sq_ = update_tau_sq(
Y, model.X_, model.X_sigma_, a, b)
# update step sizes
if sigma_sq == 'auto':
model.c_sigma_sq_, model.d_sigma_sq_ = update_sigma_sq(
Y, model.X_, model.X_sigma_, model.X_cross_cov_, c, d)
model.logp_.append(loglik)
# check convergence
change = loglik - prev_loglik
if abs(change) < tol:
model.converged_ = True
model.logp_ = np.asarray(model.logp_)
break
return model
def calculate_probabilities(X, delta):
n_time_steps = X.shape[0]
n_nodes = X.shape[1]
probas = np.zeros(
(n_time_steps, n_nodes, n_nodes), dtype=np.float64)
deltas = delta.reshape(-1, 1)
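    # dyad probability at each time step: sigma(delta_i + delta_j + <X_t[i], X_t[j]>),
    # i.e. additive node effects plus the inner product of the latent positions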
for t in range(n_time_steps):
probas[t] = expit(np.add(deltas, deltas.T) + np.dot(X[t], X[t].T))
return probas
class DynamicNetworkLSM(object):
def __init__(self, n_features=2, delta_var_prior=4,
tau_sq='auto', sigma_sq='auto',
a=4.0, b=20.0, c=10, d=0.1,
n_init=1, max_iter=500, tol=1e-2,
n_jobs=-1, random_state=42):
self.n_features = n_features
self.delta_var_prior = delta_var_prior
self.tau_sq = tau_sq
self.sigma_sq = sigma_sq
self.a = a
self.b = b
self.c = c
self.d = d
self.n_init = n_init
self.max_iter = max_iter
self.tol = tol
self.n_jobs = n_jobs
self.random_state = random_state
def fit(self, Y):
"""
Parameters
----------
Y : array-like, shape (n_time_steps, n_nodes, n_nodes)
"""
Y = check_array(Y, order='C', dtype=np.float64,
ensure_2d=False, allow_nd=True, copy=False)
random_state = check_random_state(self.random_state)
# run the elbo optimization over different initializations
seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init)
verbose = True if self.n_init == 1 else False
models = Parallel(n_jobs=self.n_jobs)(delayed(optimize_elbo)(
Y, self.n_features, self.delta_var_prior,
self.tau_sq, self.sigma_sq, self.a, self.b, self.c, self.d,
self.max_iter, self.tol, seed, verbose=verbose)
for seed in seeds)
        # choose the model with the largest convergence criterion
        best_model = models[0]
        best_criteria = models[0].logp_[-1]
        for i in range(1, len(models)):
            if models[i].logp_[-1] > best_criteria:
                best_model = models[i]
                best_criteria = models[i].logp_[-1]
if not best_model.converged_:
warnings.warn('Best model did not converge. '
'Try a different random initialization, '
'or increase max_iter, tol '
'or check for degenerate data.', ConvergenceWarning)
self._set_parameters(best_model)
# calculate dyad-probabilities
self.probas_ = calculate_probabilities(
self.X_, self.delta_)
# calculate in-sample AUC
#self.auc_ = calculate_auc_layer(Y, self.probas_)
return self
def _set_parameters(self, model):
self.omega_ = model.omega_
self.X_ = model.X_
self.X_sigma_ = model.X_sigma_
self.X_cross_cov_ = model.X_cross_cov_
self.delta_ = model.delta_
self.delta_sigma_ = model.delta_sigma_
self.a_tau_sq_ = model.a_tau_sq_
self.b_tau_sq_ = model.b_tau_sq_
self.tau_sq_ = self.b_tau_sq_ / (self.a_tau_sq_ - 1)
self.c_sigma_sq_ = model.c_sigma_sq_
self.d_sigma_sq_ = model.d_sigma_sq_
self.sigma_sq_ = self.d_sigma_sq_ / (self.c_sigma_sq_ - 1)
self.logp_ = model.logp_
return self
|
[
"numpy.tile",
"numpy.eye",
"sklearn.utils.check_random_state",
"numpy.ones",
"numpy.add",
"numpy.asarray",
"numpy.iinfo",
"joblib.Parallel",
"numpy.zeros",
"numpy.dot",
"sklearn.utils.check_array",
"warnings.warn",
"joblib.delayed"
] |
[((1275, 1307), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (1293, 1307), False, 'from sklearn.utils import check_array, check_random_state\n'), ((1415, 1457), 'numpy.zeros', 'np.zeros', (['(n_time_steps, n_nodes, n_nodes)'], {}), '((n_time_steps, n_nodes, n_nodes))\n', (1423, 1457), True, 'import numpy as np\n'), ((1608, 1626), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (1614, 1626), True, 'import numpy as np\n'), ((1641, 1708), 'numpy.tile', 'np.tile', (['sigma_init[None, None]'], {'reps': '(n_time_steps, n_nodes, 1, 1)'}), '(sigma_init[None, None], reps=(n_time_steps, n_nodes, 1, 1))\n', (1648, 1708), True, 'import numpy as np\n'), ((1771, 1789), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (1777, 1789), True, 'import numpy as np\n'), ((1808, 1879), 'numpy.tile', 'np.tile', (['cross_init[None, None]'], {'reps': '(n_time_steps - 1, n_nodes, 1, 1)'}), '(cross_init[None, None], reps=(n_time_steps - 1, n_nodes, 1, 1))\n', (1815, 1879), True, 'import numpy as np\n'), ((4463, 4523), 'numpy.zeros', 'np.zeros', (['(n_time_steps, n_nodes, n_nodes)'], {'dtype': 'np.float64'}), '((n_time_steps, n_nodes, n_nodes), dtype=np.float64)\n', (4471, 4523), True, 'import numpy as np\n'), ((2070, 2086), 'numpy.ones', 'np.ones', (['n_nodes'], {}), '(n_nodes)\n', (2077, 2086), True, 'import numpy as np\n'), ((5515, 5606), 'sklearn.utils.check_array', 'check_array', (['Y'], {'order': '"""C"""', 'dtype': 'np.float64', 'ensure_2d': '(False)', 'allow_nd': '(True)', 'copy': '(False)'}), "(Y, order='C', dtype=np.float64, ensure_2d=False, allow_nd=True,\n copy=False)\n", (5526, 5606), False, 'from sklearn.utils import check_array, check_random_state\n'), ((5651, 5688), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (5669, 5688), False, 'from sklearn.utils import check_array, check_random_state\n'), ((4293, 4316), 'numpy.asarray', 'np.asarray', (['model.logp_'], {}), '(model.logp_)\n', (4303, 4316), True, 'import numpy as np\n'), ((5907, 5935), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (5915, 5935), False, 'from joblib import Parallel, delayed\n'), ((6508, 6678), 'warnings.warn', 'warnings.warn', (['"""Best model did not converge. Try a different random initialization, or increase max_iter, tol or check for degenerate data."""', 'ConvergenceWarning'], {}), "(\n 'Best model did not converge. Try a different random initialization, or increase max_iter, tol or check for degenerate data.'\n , ConvergenceWarning)\n", (6521, 6678), False, 'import warnings\n'), ((4628, 4652), 'numpy.add', 'np.add', (['deltas', 'deltas.T'], {}), '(deltas, deltas.T)\n', (4634, 4652), True, 'import numpy as np\n'), ((4655, 4675), 'numpy.dot', 'np.dot', (['X[t]', 'X[t].T'], {}), '(X[t], X[t].T)\n', (4661, 4675), True, 'import numpy as np\n'), ((5794, 5812), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (5802, 5812), True, 'import numpy as np\n'), ((5936, 5958), 'joblib.delayed', 'delayed', (['optimize_elbo'], {}), '(optimize_elbo)\n', (5943, 5958), False, 'from joblib import Parallel, delayed\n')]
|
#!/usr/bin/env python
__author__ = "<NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
__credits__ = "<NAME> -- An amazing Linear Algebra Professor"
import cvxopt
import numpy as np
class SupportVectorClassification:
"""
Support Vector Machine classification model
"""
def __init__(self):
"""
Instantiate class: SupportVectorClassification
"""
self._bias = np.array([])
self._weights = np.array([])
    def predict(self, predictors):
        """
        Predict output given a set of predictors
        :param predictors: ndarray -> used to calculate prediction
        :return: ndarray -> prediction
        """
        # Standard linear decision rule: sign of w.x + b (assumes train() has been called)
        return np.sign(np.dot(predictors, self._weights) + self._bias)
def train(self, predictors, expected_values):
"""
Train model based on list of predictors and expected value
:param predictors: list(ndarray) -> list of predictors to train model on
:param expected_values: list(float) -> list of expected values for given predictors
"""
if len(predictors) != len(expected_values):
raise Exception('Length of predictors != length of expected values')
self._generate_optimal_hyperplanes(predictors, expected_values)
def _generate_optimal_hyperplanes(self, predictors, expected_values):
"""
Find and generate optimal hyperplanes given set of predictors and expected values
:param predictors: list(ndarray) -> list of predictors to train model on
:param expected_values: list(float) -> list of expected values for given predictors
"""
m = predictors.shape[0]
k = np.array([np.dot(predictors[i], predictors[j]) for j in range(m) for i in range(m)]).reshape((m, m))
p = cvxopt.matrix(np.outer(expected_values, expected_values)*k)
q = cvxopt.matrix(-1*np.ones(m))
        # cast the integer labels to float so cvxopt builds a double ('d') matrix for solvers.qp
        equality_constraint1 = cvxopt.matrix(expected_values.astype(np.double), (1, m))
equality_constraint2 = cvxopt.matrix(0.0)
inequality_constraint1 = cvxopt.matrix(np.diag(-1*np.ones(m)))
inequality_constraint2 = cvxopt.matrix(np.zeros(m))
solution = cvxopt.solvers.qp(p, q, inequality_constraint1, inequality_constraint2,
equality_constraint1, equality_constraint2)
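        # The QP above is the hard-margin dual: maximize sum(a) - 0.5 * a^T (y y^T * K) a
        # subject to y^T a = 0 and a >= 0; solution['x'] holds the Lagrange multipliers.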
multipliers = np.ravel(solution['x'])
has_positive_multiplier = multipliers > 1e-7
sv_multipliers = multipliers[has_positive_multiplier]
support_vectors = predictors[has_positive_multiplier]
support_vectors_y = expected_values[has_positive_multiplier]
        if support_vectors.size and support_vectors_y.size and sv_multipliers.size:
self._weights = np.sum(multipliers[i]*expected_values[i]*predictors[i] for i in range(len(expected_values)))
self._bias = np.sum([expected_values[i] - np.dot(self._weights, predictors[i])
for i in range(len(predictors))])/len(predictors)
else:
pass
svm = SupportVectorClassification()
y = np.array([np.array([1]), np.array([-1]), np.array([-1]), np.array([1]), np.array([-1])])
t_data = np.array([np.array([1, 1]), np.array([2, 2]), np.array([2, 3]), np.array([0, 0]), np.array([2, 4])])
svm.train(t_data, y)
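# Example usage (hypothetical; assumes the solver found support vectors and set the weights):
# print(svm.predict(np.array([[1.5, 1.5], [0.5, 0.5]])))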
|
[
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"numpy.dot",
"cvxopt.matrix",
"numpy.ravel",
"cvxopt.solvers.qp"
] |
[((407, 419), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (415, 419), True, 'import numpy as np\n'), ((444, 456), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (452, 456), True, 'import numpy as np\n'), ((1843, 1881), 'cvxopt.matrix', 'cvxopt.matrix', (['expected_values', '(1, m)'], {}), '(expected_values, (1, m))\n', (1856, 1881), False, 'import cvxopt\n'), ((1914, 1932), 'cvxopt.matrix', 'cvxopt.matrix', (['(0.0)'], {}), '(0.0)\n', (1927, 1932), False, 'import cvxopt\n'), ((2083, 2202), 'cvxopt.solvers.qp', 'cvxopt.solvers.qp', (['p', 'q', 'inequality_constraint1', 'inequality_constraint2', 'equality_constraint1', 'equality_constraint2'], {}), '(p, q, inequality_constraint1, inequality_constraint2,\n equality_constraint1, equality_constraint2)\n', (2100, 2202), False, 'import cvxopt\n'), ((2258, 2281), 'numpy.ravel', 'np.ravel', (["solution['x']"], {}), "(solution['x'])\n", (2266, 2281), True, 'import numpy as np\n'), ((2975, 2988), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2983, 2988), True, 'import numpy as np\n'), ((2990, 3004), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (2998, 3004), True, 'import numpy as np\n'), ((3006, 3020), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (3014, 3020), True, 'import numpy as np\n'), ((3022, 3035), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3030, 3035), True, 'import numpy as np\n'), ((3037, 3051), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (3045, 3051), True, 'import numpy as np\n'), ((3073, 3089), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (3081, 3089), True, 'import numpy as np\n'), ((3091, 3107), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (3099, 3107), True, 'import numpy as np\n'), ((3109, 3125), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (3117, 3125), True, 'import numpy as np\n'), ((3127, 3143), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (3135, 3143), True, 'import numpy as np\n'), ((3145, 3161), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (3153, 3161), True, 'import numpy as np\n'), ((2051, 2062), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (2059, 2062), True, 'import numpy as np\n'), ((1725, 1767), 'numpy.outer', 'np.outer', (['expected_values', 'expected_values'], {}), '(expected_values, expected_values)\n', (1733, 1767), True, 'import numpy as np\n'), ((1800, 1810), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (1807, 1810), True, 'import numpy as np\n'), ((1991, 2001), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (1998, 2001), True, 'import numpy as np\n'), ((1608, 1644), 'numpy.dot', 'np.dot', (['predictors[i]', 'predictors[j]'], {}), '(predictors[i], predictors[j])\n', (1614, 1644), True, 'import numpy as np\n'), ((2772, 2808), 'numpy.dot', 'np.dot', (['self._weights', 'predictors[i]'], {}), '(self._weights, predictors[i])\n', (2778, 2808), True, 'import numpy as np\n')]
|
"""
MIT License
Copyright 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import signal
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from datetime import datetime
from pkg_resources import get_distribution
import numpy as np
import shutil
from hans.tools import abort
from hans.material import Material
from hans.plottools import adaptiveLimits
from hans.integrate import ConservedField
class Problem:
def __init__(self, options, disc, bc, geometry, numerics, material, surface, ic):
"""
        Collects all information about a single problem
        and contains the methods to run a simulation, based on the problem definition.
Parameters
----------
options : dict
Contains IO options.
disc : dict
Contains discretization parameters.
bc : dict
Contains boundary condition parameters.
geometry : dict
Contains geometry parameters.
numerics : dict
Contains numerics parameters.
material : dict
Contains material parameters.
surface : dict
Contains surface parameters.
        ic : dict
            Contains initial condition parameters, e.g. the type of initial
            condition and, for restarts, the netCDF file to restart from.
"""
self.options = options
self.disc = disc
self.bc = bc
self.geometry = geometry
self.numerics = numerics
self.material = material
self.surface = surface
self.ic = ic
self.sanity_checks()
def sanity_checks(self):
self.check_options()
self.check_disc()
self.check_geo()
self.check_num()
self.check_mat()
self.check_bc()
if self.ic is not None:
self.check_ic()
if self.surface is not None:
self.check_surface()
print("Sanity checks completed. Start simulation!")
print(60 * "-")
def run(self, out_dir="data", out_name=None, plot=False):
"""
Starts the simulation.
Parameters
----------
out_dir : str
Output directory (default: data).
out_name : str
NetCDF output filename (default: None)
plot : bool
On-the-fly plotting flag (default: False).
"""
# global write options
writeInterval = self.options['writeInterval']
if "maxT" in self.numerics.keys():
maxT = self.numerics["maxT"]
else:
maxT = np.inf
if "maxIt" in self.numerics.keys():
maxIt = self.numerics["maxIt"]
else:
maxIt = np.inf
if "tol" in self.numerics.keys():
tol = self.numerics["tol"]
else:
tol = 0.
# Initial conditions
q_init, t_init = self.get_initial_conditions()
# intialize solution field
self.q = ConservedField(self.disc,
self.bc,
self.geometry,
self.material,
self.numerics,
self.surface,
q_init=q_init,
t_init=t_init)
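        # q holds the conserved variables (rho, jx, jy); its communicator (q.comm)
        # presumably reflects the MPI domain decomposition used in the update loop.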
rank = self.q.comm.Get_rank()
# time stamp of simulation start time
self.tStart = datetime.now()
# Header line for screen output
if rank == 0:
print(f"{'Step':10s}\t{'Timestep':12s}\t{'Time':12s}\t{'Epsilon':12s}", flush=True)
if plot:
# on-the-fly plotting
self.plot(writeInterval)
else:
nc = self.init_netcdf(out_dir, out_name, rank)
i = 0
self._write_mode = 0
while self._write_mode == 0:
# Perform time update
self.q.update(i)
# increase time step
i += 1
# catch signals and execute signal handler
signal.signal(signal.SIGINT, self.receive_signal)
signal.signal(signal.SIGTERM, self.receive_signal)
signal.signal(signal.SIGHUP, self.receive_signal)
signal.signal(signal.SIGUSR1, self.receive_signal)
signal.signal(signal.SIGUSR2, self.receive_signal)
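            # The handler only flags a shutdown (write mode 4); the current state is
            # still written to the netCDF file below before the run terminates.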
# convergence
if i > 1 and self.q.eps < tol:
self._write_mode = 1
break
# maximum time reached
if round(self.q.time, 15) >= maxT:
self._write_mode = 2
break
# maximum number of iterations reached
if i >= maxIt:
self._write_mode = 3
break
if i % writeInterval == 0:
self.write_to_netcdf(i, nc, mode=self._write_mode)
if rank == 0:
self.write_to_stdout(i, mode=self._write_mode)
self.write_to_netcdf(i, nc, mode=self._write_mode)
if rank == 0:
self.write_to_stdout(i, mode=self._write_mode)
def get_initial_conditions(self):
"""
Return the initial field given by last frame of restart file
or as defined through inputs.
Returns
-------
np.array
Inital field of conserved variables.
tuple
Inital time and timestep
"""
if self.ic is None:
q_init = np.zeros((3, self.disc["Nx"], self.disc["Ny"]))
q_init[0] += self.material["rho0"]
t_init = (0., self.numerics["dt"])
else:
# read last frame of restart file
if self.ic["type"] == "restart":
q_init, t_init = self.read_last_frame()
elif self.ic["type"] == "perturbation":
q_init = np.zeros((3, self.disc["Nx"], self.disc["Ny"]))
q_init[0] += self.material["rho0"]
t_init = (0., self.numerics["dt"])
q_init[0, self.disc["Nx"] // 2, self.disc["Ny"] // 2] *= self.ic["factor"]
elif self.ic["type"] == "longitudinal_wave":
x = np.linspace(0 + self.disc["dx"]/2, self.disc["Lx"] - self.disc["dx"]/2, self.disc["Nx"])
y = np.linspace(0 + self.disc["dy"]/2, self.disc["Ly"] - self.disc["dy"]/2, self.disc["Ny"])
xx, yy = np.meshgrid(x, y, indexing="ij")
q_init = np.zeros((3, self.disc["Nx"], self.disc["Ny"]))
q_init[0] += self.material["rho0"]
k = 2. * np.pi / self.disc["Lx"] * self.ic["nwave"]
q_init[1] += self.ic["amp"] * np.sin(k * xx)
t_init = (0., self.numerics["dt"])
elif self.ic["type"] == "shear_wave":
x = np.linspace(0 + self.disc["dx"]/2, self.disc["Lx"] - self.disc["dx"]/2, self.disc["Nx"])
y = np.linspace(0 + self.disc["dy"]/2, self.disc["Ly"] - self.disc["dy"]/2, self.disc["Ny"])
xx, yy = np.meshgrid(x, y, indexing="ij")
q_init = np.zeros((3, self.disc["Nx"], self.disc["Ny"]))
q_init[0] += self.material["rho0"]
k = 2. * np.pi / self.disc["Lx"] * self.ic["nwave"]
q_init[2] += self.ic["amp"] * np.sin(k * xx)
t_init = (0., self.numerics["dt"])
return q_init, t_init
def read_last_frame(self):
"""
Read last frame from restart file and use as initial values for new run.
Returns
-------
np.array
Solution field at last frame, used as inital field.
tuple
Total time and timestep of last frame.
"""
file = Dataset(self.ic["file"], "r")
rho = np.array(file.variables['rho'])[-1]
jx = np.array(file.variables['jx'])[-1]
jy = np.array(file.variables['jy'])[-1]
dt = float(file.variables["dt"][-1])
t = float(file.variables["time"][-1])
q0 = np.zeros([3] + list(rho.shape))
q0[0] = rho
q0[1] = jx
q0[2] = jy
return q0, (t, dt)
def init_netcdf(self, out_dir, out_name, rank):
"""
Initialize netCDF4 file, create dimensions, variables and metadata.
Parameters
----------
out_dir : str
            Output directory.
out_name : str
Filename prefix.
rank : int
Rank of the MPI communicator
Returns
-------
netCDF4.Dataset
Initialized dataset.
"""
if rank == 0:
if not(os.path.exists(out_dir)):
os.makedirs(out_dir)
if self.ic is None or self.ic["type"] != "restart":
if rank == 0:
if out_name is None:
# default unique filename with timestamp
timestamp = datetime.now().replace(microsecond=0).strftime("%Y-%m-%d_%H%M%S")
name = self.options["name"]
outfile = f"{timestamp}_{name}.nc"
else:
# custom filename with zero padded number
tag = str(len([1 for f in os.listdir(out_dir) if f.startswith(out_name)]) + 1).zfill(4)
outfile = f"{out_name}-{tag}.nc"
self.outpath = os.path.join(out_dir, outfile)
else:
self.outpath = None
self.outpath = self.q.comm.bcast(self.outpath, root=0)
# initialize NetCDF file
parallel = False
if self.q.comm.Get_size() > 1:
parallel = True
nc = Dataset(self.outpath, 'w', parallel=parallel, format='NETCDF3_64BIT_OFFSET')
nc.restarts = 0
nc.createDimension('x', self.disc["Nx"])
nc.createDimension('y', self.disc["Ny"])
nc.createDimension('step', None)
# create unknown variable buffer as timeseries of 2D fields
var0 = nc.createVariable('rho', 'f8', ('step', 'x', 'y'))
var1 = nc.createVariable('jx', 'f8', ('step', 'x', 'y'))
var2 = nc.createVariable('jy', 'f8', ('step', 'x', 'y'))
var0.set_collective(True)
var1.set_collective(True)
var2.set_collective(True)
# create scalar variables
nc.createVariable('time', 'f8', ('step'))
nc.createVariable('mass', 'f8', ('step'))
nc.createVariable('vmax', 'f8', ('step'))
nc.createVariable('vSound', 'f8', ('step'))
nc.createVariable('dt', 'f8', ('step'))
nc.createVariable('eps', 'f8', ('step'))
nc.createVariable('ekin', 'f8', ('step'))
# write metadata
nc.setncattr(f"tStart-{nc.restarts}", self.tStart.strftime("%d/%m/%Y %H:%M:%S"))
nc.setncattr("version", get_distribution('hans').version)
disc = self.disc.copy()
bc = self.bc.copy()
categories = {"options": self.options,
"disc": disc,
"bc": bc,
"geometry": self.geometry,
"numerics": self.numerics,
"material": self.material}
if self.surface is not None:
categories["surface"] = self.surface
if self.ic is not None:
categories["ic"] = self.ic
# reset modified input dictionaries
bc["x0"] = "".join(bc["x0"])
bc["x1"] = "".join(bc["x1"])
bc["y0"] = "".join(bc["y0"])
bc["y1"] = "".join(bc["y1"])
del disc["nghost"]
del disc["pX"]
del disc["pY"]
for name, cat in categories.items():
for key, value in cat.items():
nc.setncattr(f"{name}_{key}", value)
else:
# append to existing netCDF file
parallel = False
if self.q.comm.Get_size() > 1:
parallel = True
nc = Dataset(self.ic["file"], 'a', parallel=parallel, format='NETCDF3_64BIT_OFFSET')
self.outpath = os.path.relpath(self.ic["file"])
backup_file = f"{os.path.splitext(self.ic['file'])[0]}-{nc.restarts}.nc"
# create backup
if rank == 0:
shutil.copy(self.ic["file"], backup_file)
# increase restart counter
nc.restarts += 1
# append modified attributes
nc.setncattr(f"tStart-{nc.restarts}", self.tStart.strftime("%d/%m/%Y %H:%M:%S"))
for key, value in self.numerics.items():
name = f"numerics_{key}-{nc.restarts}"
nc.setncattr(name, value)
nc.setncattr(f"ic_type-{nc.restarts}", "restart")
nc.setncattr(f"ic_file-{nc.restarts}", backup_file)
return nc
def write_to_stdout(self, i, mode):
"""
Write information about the current time step to stdout.
Parameters
----------
i : int
Current time step.
mode : int
            Writing mode (0: normal, 1: converged, 2: max time, 3: max iterations, 4: execution stopped).
"""
print(f"{i:10d}\t{self.q.dt:.6e}\t{self.q.time:.6e}\t{self.q.eps:.6e}", flush=True)
if mode == 1:
print(f"\nSolution has converged after {i:d} steps. Output written to: {self.outpath}", flush=True)
elif mode == 2:
print(f"\nNo convergence within {i: d} steps.", end=" ", flush=True)
print(f"Stopping criterion: maximum time {self.numerics['maxT']: .1e} s reached.", flush=True)
print(f"Output written to: {self.outpath}", flush=True)
elif mode == 3:
print(f"\nNo convergence within {i: d} steps.", end=" ", flush=True)
print(f"Stopping criterion: maximum number of iterations reached.", flush=True)
print(f"Output written to: {self.outpath}", flush=True)
elif mode == 4:
print(f"Execution stopped. Output written to: {self.outpath}", flush=True)
if mode > 0:
walltime = datetime.now() - self.tStart
print(f"Total wall clock time: {str(walltime).split('.')[0]}", end=" ", flush=True)
print(f"(Performance: {i/walltime.total_seconds(): .2f} steps/s", end=" ", flush=True)
print(f"on {self.q.comm.dims[0]} x {self.q.comm.dims[1]} MPI grid)", flush=True)
def write_to_netcdf(self, i, nc, mode):
"""
Append current solution field to netCDF file.
Parameters
----------
i : int
Current time step.
nc : netCDF4.Dataset
NetCDF Dataset object.
mode : int
            Writing mode (0: normal, 1: converged, 2: max time, 3: max iterations, 4: execution stopped).
"""
step = nc.variables["rho"].shape[0]
xrange, yrange = self.q.without_ghost
nc.variables['rho'][step, xrange, yrange] = self.q.inner[0]
nc.variables['jx'][step, xrange, yrange] = self.q.inner[1]
nc.variables['jy'][step, xrange, yrange] = self.q.inner[2]
nc.variables["time"][step] = self.q.time
nc.variables["mass"][step] = self.q.mass
nc.variables["vmax"][step] = self.q.vmax
nc.variables["vSound"][step] = self.q.vSound
nc.variables["dt"][step] = self.q.dt
nc.variables["eps"][step] = self.q.eps
nc.variables["ekin"][step] = self.q.ekin
if mode > 0:
nc.setncattr(f"tEnd-{nc.restarts}", datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
nc.close()
def receive_signal(self, signum, frame):
"""
        Signal handler. Catches signals sent to the process and sets the write mode to 4 (abort).
Parameters
----------
signum :
signal code
frame :
            Current stack frame (unused).
"""
if signum in [signal.SIGINT, signal.SIGTERM, signal.SIGHUP, signal.SIGUSR1]:
self._write_mode = 4
def plot(self, writeInterval):
"""
Initialize on-the-fly plotting.
Parameters
----------
writeInterval : int
Write interval for stdout in plotting mode.
"""
fig, ax = plt.subplots(2, 2, figsize=(14, 9), sharex=True)
Nx = self.disc["Nx"]
dx = self.disc["dx"]
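        # x: cell-centre coordinates along the x direction for the centerline plots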
x = np.arange(Nx) * dx + dx / 2
ax[0, 0].plot(x, self.q.centerline_x[1])
ax[0, 1].plot(x, self.q.centerline_x[2])
ax[1, 0].plot(x, self.q.centerline_x[0])
ax[1, 1].plot(x, Material(self.material).eos_pressure(self.q.centerline_x[0]))
ax[0, 0].set_title(r'$j_x$')
ax[0, 1].set_title(r'$j_y$')
ax[1, 0].set_title(r'$\rho$')
ax[1, 1].set_title(r'$p$')
ax[1, 0].set_xlabel('distance x (m)')
ax[1, 1].set_xlabel('distance x (m)')
def init():
pass
_ = animation.FuncAnimation(fig,
self.animate1D,
100000,
fargs=(fig, ax, writeInterval),
interval=1,
init_func=init,
repeat=False)
plt.show()
def animate1D(self, i, fig, ax, writeInterval):
"""
Animator function. Update solution and plots.
Parameters
----------
i : type
Current time step.
fig : matplotlib.figure
Figure object.
ax : np.array
Array containing the axes of the figure.
writeInterval : int
Write interval for stdout in plotting mode.
"""
self.q.update(i)
fig.suptitle('time = {:.2f} ns'.format(self.q.time * 1e9))
ax[0, 0].lines[0].set_ydata(self.q.centerline_x[1])
ax[0, 1].lines[0].set_ydata(self.q.centerline_x[2])
ax[1, 0].lines[0].set_ydata(self.q.centerline_x[0])
ax[1, 1].lines[0].set_ydata(Material(self.material).eos_pressure(self.q.centerline_x[0]))
ax = adaptiveLimits(ax)
if i % writeInterval == 0:
print(f"{i:10d}\t{self.q.dt:.6e}\t{self.q.time:.6e}\t{self.q.eps:.6e}", flush=True)
def check_options(self):
"""
Sanity check for I/O options input.
"""
print("Checking I/O options... ")
try:
writeInterval = int(self.options["writeInterval"])
assert writeInterval > 0
except KeyError:
print("***Output interval not given, fallback to 1000")
self.options["writeInterval"] = 1000
except AssertionError:
try:
assert writeInterval != 0
except AssertionError:
print("***Output interval is zero. fallback to 1000")
self.options["writeInterval"] = 1000
else:
print("***Output interval is negative. Converting to positive value.")
writeInterval *= -1
self.options["writeInterval"] = writeInterval
def check_disc(self):
"""
Sanity check for discretization input.
[Nx, Ny] are required, then look for [Lx, Ly] or [dx, dy] (in that order).
"""
print("Checking discretization... ")
try:
self.disc["Nx"] = int(self.disc['Nx'])
assert self.disc["Nx"] > 0
except KeyError:
print("***Number of grid cells Nx not specified. Abort.")
abort()
except AssertionError:
print("***Number of grid cells Nx must be larger than zero. Abort")
abort()
try:
self.disc["Ny"] = int(self.disc['Ny'])
assert self.disc["Ny"] > 0
except KeyError:
print("***Number of grid cells 'Ny' not specified. Abort.")
abort()
except AssertionError:
print("***Number of grid cells 'Ny' must be larger than zero. Abort")
abort()
try:
self.disc["Lx"] = float(self.disc["Lx"])
except KeyError:
try:
self.disc["dx"] = float(self.disc["dx"])
except KeyError:
print("At least two of 'Nx' 'Lx', 'dx' must be given. Abort.")
abort()
else:
self.disc["Lx"] = self.disc["dx"] * self.disc["Nx"]
else:
self.disc["dx"] = self.disc["Lx"] / self.disc["Nx"]
try:
self.disc["Ly"] = float(self.disc["Ly"])
except KeyError:
try:
self.disc["dy"] = float(self.disc["dy"])
except KeyError:
print("At least two of 'Ny' 'Ly', 'dy' must be given. Abort.")
abort()
else:
self.disc["Ly"] = self.disc["dy"] * self.disc["Ny"]
else:
self.disc["dy"] = self.disc["Ly"] / self.disc["Ny"]
def check_geo(self):
"""
Sanity check for geometry input.
"""
print("Checking geometry... ")
if self.geometry["type"] in ["journal", "journal_x", "journal_y"]:
self.geometry["CR"] = float(self.geometry["CR"])
self.geometry["eps"] = float(self.geometry["eps"])
elif self.geometry["type"] == "parabolic":
self.geometry["hmin"] = float(self.geometry['hmin'])
self.geometry["hmax"] = float(self.geometry['hmax'])
elif self.geometry["type"] == "twin_parabolic":
self.geometry["hmin"] = float(self.geometry['hmin'])
self.geometry["hmax"] = float(self.geometry['hmax'])
elif self.geometry["type"] in ["inclined", "inclined_x", "inclined_y"]:
self.geometry["h1"] = float(self.geometry['h1'])
self.geometry["h2"] = float(self.geometry['h2'])
elif self.geometry["type"] == "inclined_pocket":
self.geometry["h1"] = float(self.geometry['h1'])
self.geometry["h2"] = float(self.geometry['h2'])
self.geometry["hp"] = float(self.geometry['hp'])
self.geometry["c"] = float(self.geometry['c'])
self.geometry["l"] = float(self.geometry['l'])
self.geometry["w"] = float(self.geometry['w'])
elif self.geometry["type"] in ["half_sine", "half_sine_squared"]:
self.geometry["h0"] = float(self.geometry['h0'])
self.geometry["amp"] = float(self.geometry['amp'])
self.geometry["num"] = float(self.geometry['num'])
def check_num(self):
"""
Sanity check for numerics options.
"""
print("Checking numerics options... ")
try:
self.numerics["integrator"] = self.numerics["integrator"]
assert self.numerics["integrator"] in ["MC", "MC_bf", "MC_fb", "MC_alt", "LW", "RK3"]
except KeyError:
print("***Integrator not specified. Use default (MacCormack).")
self.numerics["integrator"] = "MC"
except AssertionError:
print(f'***Unknown integrator \'{self.numerics["integrator"]}\'. Abort.')
abort()
if self.numerics["integrator"].startswith("MC"):
try:
self.numerics["fluxLim"] = float(self.numerics["fluxLim"])
except KeyError:
pass
try:
self.numerics["stokes"] = int(self.numerics["stokes"])
except KeyError:
print("***Boolean parameter 'stokes' not given. Use default (True).")
self.numerics["stokes"] = 1
try:
self.numerics["adaptive"] = int(self.numerics["adaptive"])
except KeyError:
print("***Boolean parameter 'adaptive' not given. Use default (False).")
self.numerics["adaptive"] = 0
if self.numerics["adaptive"] == 1:
try:
self.numerics["C"] = float(self.numerics["C"])
except KeyError:
print("***CFL number not given. Use default (0.5).")
self.numerics["C"] = 0.5
try:
self.numerics["dt"] = float(self.numerics["dt"])
except KeyError:
print("***Timestep not given. Use default (1e-10).")
self.numerics["dt"] = 1e-10
stopping_criteria = 0
try:
self.numerics["tol"] = float(self.numerics["tol"])
stopping_criteria += 1
except KeyError:
pass
try:
self.numerics["maxT"] = float(self.numerics["maxT"])
stopping_criteria += 1
except KeyError:
pass
try:
self.numerics["maxIt"] = int(self.numerics["maxIt"])
stopping_criteria += 1
except KeyError:
pass
if stopping_criteria == 0:
print("***No stopping criterion given. Abort.")
abort()
if self.numerics["integrator"] == "RK3":
self.disc["nghost"] = 2
else:
self.disc["nghost"] = 1
def check_mat(self):
"""
Sanity check on material settings.
"""
print("Checking material options... ")
if self.material["EOS"] == "DH":
self.material["rho0"] = float(self.material["rho0"])
self.material["P0"] = float(self.material["P0"])
self.material["C1"] = float(self.material["C1"])
self.material["C2"] = float(self.material["C2"])
elif self.material["EOS"] == "PL":
self.material["rho0"] = float(self.material["rho0"])
self.material["P0"] = float(self.material["P0"])
self.material["alpha"] = float(self.material['alpha'])
elif self.material["EOS"] == "vdW":
self.material["M"] = float(self.material['M'])
self.material["T"] = float(self.material['T0'])
self.material["a"] = float(self.material['a'])
self.material["b"] = float(self.material['b'])
elif self.material["EOS"] == "Tait":
self.material["rho0"] = float(self.material["rho0"])
self.material["P0"] = float(self.material["P0"])
self.material["K"] = float(self.material['K'])
self.material["n"] = float(self.material['n'])
elif self.material["EOS"] == "cubic":
self.material["a"] = float(self.material['a'])
self.material["b"] = float(self.material['b'])
self.material["c"] = float(self.material['c'])
self.material["d"] = float(self.material['d'])
elif self.material["EOS"].startswith("Bayada"):
self.material["cl"] = float(self.material["cl"])
self.material["cv"] = float(self.material["cv"])
self.material["rhol"] = float(self.material["rhol"])
self.material["rhov"] = float(self.material["rhov"])
self.material["shear"] = float(self.material["shear"])
self.material["shearv"] = float(self.material["shearv"])
self.material["rhov"] = float(self.material["rhov"])
self.material["shear"] = float(self.material["shear"])
self.material["bulk"] = float(self.material["bulk"])
if "Pcav" in self.material.keys():
self.material["Pcav"] = float(self.material["Pcav"])
if "piezo" in self.material.keys():
if self.material["piezo"] == "Barus":
self.material["aB"] = float(self.material["aB"])
elif self.material["piezo"] == "Vogel":
self.material["rho0"] = float(self.material['rho0'])
self.material["g"] = float(self.material["g"])
self.material["mu_inf"] = float(self.material["mu_inf"])
self.material["phi_inf"] = float(self.material["phi_inf"])
self.material["BF"] = float(self.material["BF"])
if "thinning" in self.material.keys():
if self.material["thinning"] == "Eyring":
self.material["tau0"] = float(self.material["tau0"])
elif self.material["thinning"] == "Carreau":
self.material["relax"] = float(self.material["relax"])
self.material["a"] = float(self.material["a"])
self.material["N"] = float(self.material["N"])
elif self.material["thinning"] == "PL":
self.material["shear"] = float(self.material["shear"])
self.material["n"] = float(self.material["n"])
if "PLindex" in self.material.keys():
self.material["PLindex"] = float(self.material["PLindex"])
def check_surface(self):
"""
Sanity check for surface input.
"""
print("Checking surface parameters... ")
if "lslip" in self.surface.keys():
self.surface["lslip"] = float(self.surface["lslip"])
else:
self.surface["lslip"] = 0.
if self.surface["type"] in ["stripes", "stripes_x", "stripes_y"]:
try:
self.surface["num"] = int(self.surface["num"])
except KeyError:
self.surface["num"] = 1
try:
self.surface["sign"] = int(self.surface["sign"])
except KeyError:
self.surface["sign"] = -1
def check_ic(self):
"""
Sanity check for initial conditions input.
"""
print("Checking initial conditions... ")
if self.ic["type"] != "restart":
if self.ic["type"] == "perturbation":
self.ic["factor"] = float(self.ic["factor"])
elif self.ic["type"] in ["longitudinal_wave", "shear_wave"]:
self.ic["amp"] = float(self.ic["amp"])
if "nwave" in self.ic.keys():
self.ic["nwave"] = int(self.ic["nwave"])
else:
self.ic["nwave"] = 1
def check_bc(self):
"""
Sanity check for boundary condition input.
Parameters
----------
bc : dict
Boundary condition parameters read from yaml input file.
disc : dict
Discretization parameters.
material : dict
Material parameters.
Returns
-------
dict
Boundary condition parameters.
"""
print("Checking boundary conditions... ")
self.bc["x0"] = np.array(list(self.bc["x0"]))
self.bc["x1"] = np.array(list(self.bc["x1"]))
self.bc["y0"] = np.array(list(self.bc["y0"]))
self.bc["y1"] = np.array(list(self.bc["y1"]))
assert len(self.bc["x0"]) == 3
assert len(self.bc["x1"]) == 3
assert len(self.bc["y0"]) == 3
assert len(self.bc["y1"]) == 3
if "P" in self.bc["x0"] and "P" in self.bc["x1"]:
self.disc["pX"] = 1
else:
self.disc["pX"] = 0
if "P" in self.bc["y0"] and "P" in self.bc["y1"]:
self.disc["pY"] = 1
else:
self.disc["pY"] = 0
if "D" in self.bc["x0"]:
if "px0" in self.bc.keys():
px0 = float(self.bc["px0"])
self.bc["rhox0"] = Material(self.material).eos_density(px0)
else:
self.bc["rhox0"] = self.material["rho0"]
if "D" in self.bc["x1"]:
if "px1" in self.bc.keys():
px1 = float(self.bc["px1"])
self.bc["rhox1"] = Material(self.material).eos_density(px1)
else:
self.bc["rhox1"] = self.material["rho0"]
if "D" in self.bc["y0"]:
if "py0" in self.bc.keys():
py0 = float(self.bc["py0"])
self.bc["rhoy0"] = Material(self.material).eos_density(py0)
else:
self.bc["rhoy0"] = self.material["rho0"]
if "D" in self.bc["y1"]:
if "py1" in self.bc.keys():
py1 = float(self.bc["py1"])
self.bc["rhoy1"] = Material(self.material).eos_density(py1)
else:
self.bc["rhoy1"] = self.material["rho0"]
assert np.all((self.bc["x0"] == "P") == (self.bc["x1"] == "P")), "Inconsistent boundary conditions (x)"
assert np.all((self.bc["y0"] == "P") == (self.bc["y1"] == "P")), "Inconsistent boundary conditions (y)"
|
[
"hans.tools.abort",
"numpy.array",
"numpy.sin",
"numpy.arange",
"os.path.exists",
"os.listdir",
"netCDF4.Dataset",
"numpy.linspace",
"numpy.meshgrid",
"pkg_resources.get_distribution",
"os.path.relpath",
"os.path.splitext",
"shutil.copy",
"hans.plottools.adaptiveLimits",
"hans.material.Material",
"matplotlib.pyplot.show",
"signal.signal",
"os.makedirs",
"matplotlib.animation.FuncAnimation",
"os.path.join",
"datetime.datetime.now",
"numpy.zeros",
"hans.integrate.ConservedField",
"numpy.all",
"matplotlib.pyplot.subplots"
] |
[((3950, 4078), 'hans.integrate.ConservedField', 'ConservedField', (['self.disc', 'self.bc', 'self.geometry', 'self.material', 'self.numerics', 'self.surface'], {'q_init': 'q_init', 't_init': 't_init'}), '(self.disc, self.bc, self.geometry, self.material, self.\n numerics, self.surface, q_init=q_init, t_init=t_init)\n', (3964, 4078), False, 'from hans.integrate import ConservedField\n'), ((4406, 4420), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4418, 4420), False, 'from datetime import datetime\n'), ((8815, 8844), 'netCDF4.Dataset', 'Dataset', (["self.ic['file']", '"""r"""'], {}), "(self.ic['file'], 'r')\n", (8822, 8844), False, 'from netCDF4 import Dataset\n'), ((17398, 17446), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(14, 9)', 'sharex': '(True)'}), '(2, 2, figsize=(14, 9), sharex=True)\n', (17410, 17446), True, 'import matplotlib.pyplot as plt\n'), ((18073, 18203), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'self.animate1D', '(100000)'], {'fargs': '(fig, ax, writeInterval)', 'interval': '(1)', 'init_func': 'init', 'repeat': '(False)'}), '(fig, self.animate1D, 100000, fargs=(fig, ax,\n writeInterval), interval=1, init_func=init, repeat=False)\n', (18096, 18203), True, 'import matplotlib.animation as animation\n'), ((18425, 18435), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18433, 18435), True, 'import matplotlib.pyplot as plt\n'), ((19260, 19278), 'hans.plottools.adaptiveLimits', 'adaptiveLimits', (['ax'], {}), '(ax)\n', (19274, 19278), False, 'from hans.plottools import adaptiveLimits\n'), ((33229, 33285), 'numpy.all', 'np.all', (["((self.bc['x0'] == 'P') == (self.bc['x1'] == 'P'))"], {}), "((self.bc['x0'] == 'P') == (self.bc['x1'] == 'P'))\n", (33235, 33285), True, 'import numpy as np\n'), ((33341, 33397), 'numpy.all', 'np.all', (["((self.bc['y0'] == 'P') == (self.bc['y1'] == 'P'))"], {}), "((self.bc['y0'] == 'P') == (self.bc['y1'] == 'P'))\n", (33347, 33397), True, 'import numpy as np\n'), ((6562, 6609), 'numpy.zeros', 'np.zeros', (["(3, self.disc['Nx'], self.disc['Ny'])"], {}), "((3, self.disc['Nx'], self.disc['Ny']))\n", (6570, 6609), True, 'import numpy as np\n'), ((8860, 8891), 'numpy.array', 'np.array', (["file.variables['rho']"], {}), "(file.variables['rho'])\n", (8868, 8891), True, 'import numpy as np\n'), ((8909, 8939), 'numpy.array', 'np.array', (["file.variables['jx']"], {}), "(file.variables['jx'])\n", (8917, 8939), True, 'import numpy as np\n'), ((8957, 8987), 'numpy.array', 'np.array', (["file.variables['jy']"], {}), "(file.variables['jy'])\n", (8965, 8987), True, 'import numpy as np\n'), ((10742, 10818), 'netCDF4.Dataset', 'Dataset', (['self.outpath', '"""w"""'], {'parallel': 'parallel', 'format': '"""NETCDF3_64BIT_OFFSET"""'}), "(self.outpath, 'w', parallel=parallel, format='NETCDF3_64BIT_OFFSET')\n", (10749, 10818), False, 'from netCDF4 import Dataset\n'), ((13167, 13246), 'netCDF4.Dataset', 'Dataset', (["self.ic['file']", '"""a"""'], {'parallel': 'parallel', 'format': '"""NETCDF3_64BIT_OFFSET"""'}), "(self.ic['file'], 'a', parallel=parallel, format='NETCDF3_64BIT_OFFSET')\n", (13174, 13246), False, 'from netCDF4 import Dataset\n'), ((13274, 13306), 'os.path.relpath', 'os.path.relpath', (["self.ic['file']"], {}), "(self.ic['file'])\n", (13289, 13306), False, 'import os\n'), ((26034, 26041), 'hans.tools.abort', 'abort', ([], {}), '()\n', (26039, 26041), False, 'from hans.tools import abort\n'), ((5045, 5094), 'signal.signal', 'signal.signal', (['signal.SIGINT', 
'self.receive_signal'], {}), '(signal.SIGINT, self.receive_signal)\n', (5058, 5094), False, 'import signal\n'), ((5111, 5161), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self.receive_signal'], {}), '(signal.SIGTERM, self.receive_signal)\n', (5124, 5161), False, 'import signal\n'), ((5178, 5227), 'signal.signal', 'signal.signal', (['signal.SIGHUP', 'self.receive_signal'], {}), '(signal.SIGHUP, self.receive_signal)\n', (5191, 5227), False, 'import signal\n'), ((5244, 5294), 'signal.signal', 'signal.signal', (['signal.SIGUSR1', 'self.receive_signal'], {}), '(signal.SIGUSR1, self.receive_signal)\n', (5257, 5294), False, 'import signal\n'), ((5311, 5361), 'signal.signal', 'signal.signal', (['signal.SIGUSR2', 'self.receive_signal'], {}), '(signal.SIGUSR2, self.receive_signal)\n', (5324, 5361), False, 'import signal\n'), ((9703, 9726), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (9717, 9726), False, 'import os\n'), ((9745, 9765), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (9756, 9765), False, 'import os\n'), ((10429, 10459), 'os.path.join', 'os.path.join', (['out_dir', 'outfile'], {}), '(out_dir, outfile)\n', (10441, 10459), False, 'import os\n'), ((13464, 13505), 'shutil.copy', 'shutil.copy', (["self.ic['file']", 'backup_file'], {}), "(self.ic['file'], backup_file)\n", (13475, 13505), False, 'import shutil\n'), ((15257, 15271), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15269, 15271), False, 'from datetime import datetime\n'), ((17518, 17531), 'numpy.arange', 'np.arange', (['Nx'], {}), '(Nx)\n', (17527, 17531), True, 'import numpy as np\n'), ((20695, 20702), 'hans.tools.abort', 'abort', ([], {}), '()\n', (20700, 20702), False, 'from hans.tools import abort\n'), ((20826, 20833), 'hans.tools.abort', 'abort', ([], {}), '()\n', (20831, 20833), False, 'from hans.tools import abort\n'), ((21047, 21054), 'hans.tools.abort', 'abort', ([], {}), '()\n', (21052, 21054), False, 'from hans.tools import abort\n'), ((21180, 21187), 'hans.tools.abort', 'abort', ([], {}), '()\n', (21185, 21187), False, 'from hans.tools import abort\n'), ((24288, 24295), 'hans.tools.abort', 'abort', ([], {}), '()\n', (24293, 24295), False, 'from hans.tools import abort\n'), ((6942, 6989), 'numpy.zeros', 'np.zeros', (["(3, self.disc['Nx'], self.disc['Ny'])"], {}), "((3, self.disc['Nx'], self.disc['Ny']))\n", (6950, 6989), True, 'import numpy as np\n'), ((11968, 11992), 'pkg_resources.get_distribution', 'get_distribution', (['"""hans"""'], {}), "('hans')\n", (11984, 11992), False, 'from pkg_resources import get_distribution\n'), ((17719, 17742), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (17727, 17742), False, 'from hans.material import Material\n'), ((19184, 19207), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (19192, 19207), False, 'from hans.material import Material\n'), ((7260, 7356), 'numpy.linspace', 'np.linspace', (["(0 + self.disc['dx'] / 2)", "(self.disc['Lx'] - self.disc['dx'] / 2)", "self.disc['Nx']"], {}), "(0 + self.disc['dx'] / 2, self.disc['Lx'] - self.disc['dx'] / 2,\n self.disc['Nx'])\n", (7271, 7356), True, 'import numpy as np\n'), ((7369, 7465), 'numpy.linspace', 'np.linspace', (["(0 + self.disc['dy'] / 2)", "(self.disc['Ly'] - self.disc['dy'] / 2)", "self.disc['Ny']"], {}), "(0 + self.disc['dy'] / 2, self.disc['Ly'] - self.disc['dy'] / 2,\n self.disc['Ny'])\n", (7380, 7465), True, 'import numpy as np\n'), ((7483, 7515), 'numpy.meshgrid', 'np.meshgrid', (['x', 
'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (7494, 7515), True, 'import numpy as np\n'), ((7542, 7589), 'numpy.zeros', 'np.zeros', (["(3, self.disc['Nx'], self.disc['Ny'])"], {}), "((3, self.disc['Nx'], self.disc['Ny']))\n", (7550, 7589), True, 'import numpy as np\n'), ((13337, 13370), 'os.path.splitext', 'os.path.splitext', (["self.ic['file']"], {}), "(self.ic['file'])\n", (13353, 13370), False, 'import os\n'), ((16660, 16674), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16672, 16674), False, 'from datetime import datetime\n'), ((21478, 21485), 'hans.tools.abort', 'abort', ([], {}), '()\n', (21483, 21485), False, 'from hans.tools import abort\n'), ((21940, 21947), 'hans.tools.abort', 'abort', ([], {}), '()\n', (21945, 21947), False, 'from hans.tools import abort\n'), ((32290, 32313), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (32298, 32313), False, 'from hans.material import Material\n'), ((32559, 32582), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (32567, 32582), False, 'from hans.material import Material\n'), ((32828, 32851), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (32836, 32851), False, 'from hans.material import Material\n'), ((33097, 33120), 'hans.material.Material', 'Material', (['self.material'], {}), '(self.material)\n', (33105, 33120), False, 'from hans.material import Material\n'), ((7755, 7769), 'numpy.sin', 'np.sin', (['(k * xx)'], {}), '(k * xx)\n', (7761, 7769), True, 'import numpy as np\n'), ((7891, 7987), 'numpy.linspace', 'np.linspace', (["(0 + self.disc['dx'] / 2)", "(self.disc['Lx'] - self.disc['dx'] / 2)", "self.disc['Nx']"], {}), "(0 + self.disc['dx'] / 2, self.disc['Lx'] - self.disc['dx'] / 2,\n self.disc['Nx'])\n", (7902, 7987), True, 'import numpy as np\n'), ((8000, 8096), 'numpy.linspace', 'np.linspace', (["(0 + self.disc['dy'] / 2)", "(self.disc['Ly'] - self.disc['dy'] / 2)", "self.disc['Ny']"], {}), "(0 + self.disc['dy'] / 2, self.disc['Ly'] - self.disc['dy'] / 2,\n self.disc['Ny'])\n", (8011, 8096), True, 'import numpy as np\n'), ((8114, 8146), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (8125, 8146), True, 'import numpy as np\n'), ((8173, 8220), 'numpy.zeros', 'np.zeros', (["(3, self.disc['Nx'], self.disc['Ny'])"], {}), "((3, self.disc['Nx'], self.disc['Ny']))\n", (8181, 8220), True, 'import numpy as np\n'), ((8386, 8400), 'numpy.sin', 'np.sin', (['(k * xx)'], {}), '(k * xx)\n', (8392, 8400), True, 'import numpy as np\n'), ((9983, 9997), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9995, 9997), False, 'from datetime import datetime\n'), ((10282, 10301), 'os.listdir', 'os.listdir', (['out_dir'], {}), '(out_dir)\n', (10292, 10301), False, 'import os\n')]
|
"""
Script to make nucleosome occupancy track!
@author: <NAME>
"""
##### IMPORT MODULES #####
# import necessary python modules
#import matplotlib as mpl
#mpl.use('PS')
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import traceback
import itertools
import pysam
from pyatac.utils import shell_command,read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta
from pyatac.chunk import ChunkList
from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk
from pyatac.fragmentsizes import FragmentSizes
from pyatac.bias import PWM
def _occHelper(arg):
"""function to get occupancy for a set of bed regions
"""
(chunk, params) = arg
try:
occ = OccChunk(chunk)
occ.process(params)
out = (occ.getNucDist(),
occ.occ, [occ.peaks[i] for i in sorted(occ.peaks.keys())])
occ.removeData()
except Exception as e:
print(('Caught exception when processing:\n'+ chunk.asBed()+"\n"))
traceback.print_exc()
print()
raise e
return out
def _writeOcc(track_queue, out):
out_handle1 = open(out + '.occ.bedgraph','a')
out_handle2 = open(out + '.occ.lower_bound.bedgraph','a')
out_handle3 = open(out + '.occ.upper_bound.bedgraph','a')
try:
for track in iter(track_queue.get, 'STOP'):
track.write_track(out_handle1, vals = track.smoothed_vals)
track.write_track(out_handle2, vals = track.smoothed_lower)
track.write_track(out_handle3, vals = track.smoothed_upper)
track_queue.task_done()
except Exception as e:
print('Caught exception when writing occupancy track\n')
traceback.print_exc()
print()
raise e
out_handle1.close()
out_handle2.close()
out_handle3.close()
return True
def _writePeaks(pos_queue, out):
out_handle = open(out + '.occpeaks.bed','a')
try:
for poslist in iter(pos_queue.get, 'STOP'):
for pos in poslist:
pos.write(out_handle)
pos_queue.task_done()
except Exception as e:
print('Caught exception when writing occupancy track\n')
traceback.print_exc()
print()
raise e
out_handle.close()
return True
def run_occ(args):
"""run occupancy calling
"""
if args.fasta:
chrs = read_chrom_sizes_from_fasta(args.fasta)
else:
chrs = read_chrom_sizes_from_bam(args.bam)
pwm = PWM.open(args.pwm)
chunks = ChunkList.read(args.bed, chromDict = chrs, min_offset = args.flank + args.upper//2 + max(pwm.up,pwm.down) + args.nuc_sep//2)
chunks.slop(chrs, up = args.nuc_sep//2, down = args.nuc_sep//2)
chunks.merge()
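    # regions are padded by half the nucleosome separation and overlapping chunks merged,
    # presumably so that nucleosome calls near chunk borders are not missed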
maxQueueSize = args.cores*10
fragment_dist = FragmentMixDistribution(0, upper = args.upper)
if args.sizes is not None:
tmp = FragmentSizes.open(args.sizes)
fragment_dist.fragmentsizes = FragmentSizes(0, args.upper, vals = tmp.get(0,args.upper))
else:
fragment_dist.getFragmentSizes(args.bam, chunks)
fragment_dist.modelNFR()
fragment_dist.plotFits(args.out + '.occ_fit.pdf')
fragment_dist.fragmentsizes.save(args.out + '.fragmentsizes.txt')
params = OccupancyParameters(fragment_dist, args.upper, args.fasta, args.pwm, sep = args.nuc_sep, min_occ = args.min_occ,
flank = args.flank, bam = args.bam, ci = args.confidence_interval, step = args.step)
sets = chunks.split(items = args.cores * 5)
pool1 = mp.Pool(processes = max(1,args.cores-1))
out_handle1 = open(args.out + '.occ.bedgraph','w')
out_handle1.close()
out_handle2 = open(args.out + '.occ.lower_bound.bedgraph','w')
out_handle2.close()
out_handle3 = open(args.out + '.occ.upper_bound.bedgraph','w')
out_handle3.close()
write_queue = mp.JoinableQueue(maxsize = maxQueueSize)
write_process = mp.Process(target = _writeOcc, args=(write_queue, args.out))
write_process.start()
peaks_handle = open(args.out + '.occpeaks.bed','w')
peaks_handle.close()
peaks_queue = mp.JoinableQueue()
peaks_process = mp.Process(target = _writePeaks, args=(peaks_queue, args.out))
peaks_process.start()
nuc_dist = np.zeros(args.upper)
for j in sets:
tmp = pool1.map(_occHelper, list(zip(j,itertools.repeat(params))))
for result in tmp:
nuc_dist += result[0]
write_queue.put(result[1])
peaks_queue.put(result[2])
pool1.close()
pool1.join()
write_queue.put('STOP')
peaks_queue.put('STOP')
write_process.join()
peaks_process.join()
pysam.tabix_compress(args.out + '.occpeaks.bed', args.out + '.occpeaks.bed.gz',force = True)
shell_command('rm ' + args.out + '.occpeaks.bed')
pysam.tabix_index(args.out + '.occpeaks.bed.gz', preset = "bed", force = True)
for i in ('occ','occ.lower_bound','occ.upper_bound'):
pysam.tabix_compress(args.out + '.' + i + '.bedgraph', args.out + '.'+i+'.bedgraph.gz',force = True)
shell_command('rm ' + args.out + '.' + i + '.bedgraph')
pysam.tabix_index(args.out + '.' + i + '.bedgraph.gz', preset = "bed", force = True)
dist_out = FragmentSizes(0, args.upper, vals = nuc_dist)
dist_out.save(args.out + '.nuc_dist.txt')
print("Making figure")
#make figure
fig = plt.figure()
plt.plot(list(range(0,args.upper)),dist_out.get(0,args.upper),label = "Nucleosome Distribution")
plt.xlabel("Fragment Size")
plt.ylabel("Frequency")
fig.savefig(args.out+'.nuc_dist.pdf')
plt.close(fig)
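# Illustrative usage sketch (editorial addition, not part of nucleoatac): run_occ
# expects an argparse-style namespace; all values below are hypothetical placeholders.
#
#   from argparse import Namespace
#   run_occ(Namespace(bam='sample.bam', bed='peaks.bed', out='sample', fasta=None,
#                     pwm='pwm.txt', sizes=None, upper=251, flank=60, nuc_sep=120,
#                     min_occ=0.1, confidence_interval=0.9, step=5, cores=4))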
|
[
"pysam.tabix_compress",
"multiprocessing.JoinableQueue",
"nucleoatac.Occupancy.OccChunk",
"matplotlib.pyplot.ylabel",
"multiprocessing.Process",
"itertools.repeat",
"pyatac.utils.read_chrom_sizes_from_fasta",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"pysam.tabix_index",
"traceback.print_exc",
"nucleoatac.Occupancy.FragmentMixDistribution",
"pyatac.utils.read_chrom_sizes_from_bam",
"pyatac.utils.shell_command",
"pyatac.fragmentsizes.FragmentSizes",
"nucleoatac.Occupancy.OccupancyParameters",
"pyatac.bias.PWM.open",
"pyatac.fragmentsizes.FragmentSizes.open",
"numpy.zeros",
"matplotlib.pyplot.figure"
] |
[((2492, 2510), 'pyatac.bias.PWM.open', 'PWM.open', (['args.pwm'], {}), '(args.pwm)\n', (2500, 2510), False, 'from pyatac.bias import PWM\n'), ((2789, 2833), 'nucleoatac.Occupancy.FragmentMixDistribution', 'FragmentMixDistribution', (['(0)'], {'upper': 'args.upper'}), '(0, upper=args.upper)\n', (2812, 2833), False, 'from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk\n'), ((3242, 3437), 'nucleoatac.Occupancy.OccupancyParameters', 'OccupancyParameters', (['fragment_dist', 'args.upper', 'args.fasta', 'args.pwm'], {'sep': 'args.nuc_sep', 'min_occ': 'args.min_occ', 'flank': 'args.flank', 'bam': 'args.bam', 'ci': 'args.confidence_interval', 'step': 'args.step'}), '(fragment_dist, args.upper, args.fasta, args.pwm, sep=\n args.nuc_sep, min_occ=args.min_occ, flank=args.flank, bam=args.bam, ci=\n args.confidence_interval, step=args.step)\n', (3261, 3437), False, 'from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk\n'), ((3832, 3870), 'multiprocessing.JoinableQueue', 'mp.JoinableQueue', ([], {'maxsize': 'maxQueueSize'}), '(maxsize=maxQueueSize)\n', (3848, 3870), True, 'import multiprocessing as mp\n'), ((3893, 3951), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_writeOcc', 'args': '(write_queue, args.out)'}), '(target=_writeOcc, args=(write_queue, args.out))\n', (3903, 3951), True, 'import multiprocessing as mp\n'), ((4079, 4097), 'multiprocessing.JoinableQueue', 'mp.JoinableQueue', ([], {}), '()\n', (4095, 4097), True, 'import multiprocessing as mp\n'), ((4118, 4178), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_writePeaks', 'args': '(peaks_queue, args.out)'}), '(target=_writePeaks, args=(peaks_queue, args.out))\n', (4128, 4178), True, 'import multiprocessing as mp\n'), ((4222, 4242), 'numpy.zeros', 'np.zeros', (['args.upper'], {}), '(args.upper)\n', (4230, 4242), True, 'import numpy as np\n'), ((4626, 4721), 'pysam.tabix_compress', 'pysam.tabix_compress', (["(args.out + '.occpeaks.bed')", "(args.out + '.occpeaks.bed.gz')"], {'force': '(True)'}), "(args.out + '.occpeaks.bed', args.out +\n '.occpeaks.bed.gz', force=True)\n", (4646, 4721), False, 'import pysam\n'), ((4723, 4772), 'pyatac.utils.shell_command', 'shell_command', (["('rm ' + args.out + '.occpeaks.bed')"], {}), "('rm ' + args.out + '.occpeaks.bed')\n", (4736, 4772), False, 'from pyatac.utils import shell_command, read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta\n'), ((4777, 4851), 'pysam.tabix_index', 'pysam.tabix_index', (["(args.out + '.occpeaks.bed.gz')"], {'preset': '"""bed"""', 'force': '(True)'}), "(args.out + '.occpeaks.bed.gz', preset='bed', force=True)\n", (4794, 4851), False, 'import pysam\n'), ((5201, 5244), 'pyatac.fragmentsizes.FragmentSizes', 'FragmentSizes', (['(0)', 'args.upper'], {'vals': 'nuc_dist'}), '(0, args.upper, vals=nuc_dist)\n', (5214, 5244), False, 'from pyatac.fragmentsizes import FragmentSizes\n'), ((5348, 5360), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5358, 5360), True, 'import matplotlib.pyplot as plt\n'), ((5466, 5493), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fragment Size"""'], {}), "('Fragment Size')\n", (5476, 5493), True, 'import matplotlib.pyplot as plt\n'), ((5498, 5521), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (5508, 5521), True, 'import matplotlib.pyplot as plt\n'), ((5568, 5582), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5577, 5582), True, 'import matplotlib.pyplot as plt\n'), ((728, 743), 
'nucleoatac.Occupancy.OccChunk', 'OccChunk', (['chunk'], {}), '(chunk)\n', (736, 743), False, 'from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk\n'), ((2381, 2420), 'pyatac.utils.read_chrom_sizes_from_fasta', 'read_chrom_sizes_from_fasta', (['args.fasta'], {}), '(args.fasta)\n', (2408, 2420), False, 'from pyatac.utils import shell_command, read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta\n'), ((2446, 2481), 'pyatac.utils.read_chrom_sizes_from_bam', 'read_chrom_sizes_from_bam', (['args.bam'], {}), '(args.bam)\n', (2471, 2481), False, 'from pyatac.utils import shell_command, read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta\n'), ((2881, 2911), 'pyatac.fragmentsizes.FragmentSizes.open', 'FragmentSizes.open', (['args.sizes'], {}), '(args.sizes)\n', (2899, 2911), False, 'from pyatac.fragmentsizes import FragmentSizes\n'), ((4927, 5034), 'pysam.tabix_compress', 'pysam.tabix_compress', (["(args.out + '.' + i + '.bedgraph')", "(args.out + '.' + i + '.bedgraph.gz')"], {'force': '(True)'}), "(args.out + '.' + i + '.bedgraph', args.out + '.' + i +\n '.bedgraph.gz', force=True)\n", (4947, 5034), False, 'import pysam\n'), ((5036, 5091), 'pyatac.utils.shell_command', 'shell_command', (["('rm ' + args.out + '.' + i + '.bedgraph')"], {}), "('rm ' + args.out + '.' + i + '.bedgraph')\n", (5049, 5091), False, 'from pyatac.utils import shell_command, read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta\n'), ((5100, 5185), 'pysam.tabix_index', 'pysam.tabix_index', (["(args.out + '.' + i + '.bedgraph.gz')"], {'preset': '"""bed"""', 'force': '(True)'}), "(args.out + '.' + i + '.bedgraph.gz', preset='bed', force=True\n )\n", (5117, 5185), False, 'import pysam\n'), ((1016, 1037), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1035, 1037), False, 'import traceback\n'), ((1705, 1726), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1724, 1726), False, 'import traceback\n'), ((2196, 2217), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2215, 2217), False, 'import traceback\n'), ((4314, 4338), 'itertools.repeat', 'itertools.repeat', (['params'], {}), '(params)\n', (4330, 4338), False, 'import itertools\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import sys, os
from soma import aims, aimsalgo
import numpy
import optparse
parser = optparse.OptionParser( description='Voronoi diagram of the sulci ' \
'nodes regions, in the grey matter, and extending to the whole 3D space' )
parser.add_option( '-g', '--greywhite', dest='lgw',
help='left grey/white mask' )
parser.add_option( '-o', '--output', dest='voronoi',
help='output voronoi diagram volume' )
parser.add_option( '-f', '--folds', dest='graph',
help='sulci graph file' )
options, args = parser.parse_args()
lgw_vol_file = options.lgw
fold_graph_file = options.graph
voronoi_vol_file = options.voronoi
if lgw_vol_file is None and len( args ) > 0:
lgw_vol_file = args[0]
del args[0]
if voronoi_vol_file is None and len( args ) > 0:
voronoi_vol_file = args[0]
del args[0]
if fold_graph_file is None and len( args ) > 0:
fold_graph_file = args[0]
del args[0]
if lgw_vol_file is None or voronoi_vol_file is None \
or fold_graph_file is None or len( args ) != 0:
  parser.parse_args( [ '-h' ] )
lgw_vol = aims.read( lgw_vol_file )
fold_graph = aims.read( fold_graph_file )
LCR_label = 255
GM_label = 100
seed = - lgw_vol
voxel_size = lgw_vol.header()["voxel_size"]
def printbucket( bck, vol, value ):
c = aims.RawConverter_BucketMap_VOID_rc_ptr_Volume_S16( False, True, value )
c.printToVolume( bck._get(), vol )
seed_label_list = []
for v in fold_graph.vertices():
try:
b = v[ 'aims_ss' ]
index = v[ 'skeleton_label' ]
seed_label_list.append(int(index))
printbucket( b, seed, index )
    printbucket( b, lgw_vol, LCR_label ) # so that the CSF fills the sulci all the way to the bottom
try:
b = v[ 'aims_bottom' ]
printbucket( b, seed, index )
except:
pass
try:
b = v[ 'aims_other' ]
printbucket( b, seed, index )
except:
pass
except:
pass
f1 = aims.FastMarching()
print("Voronoi in Grey matter")
f1.doit(seed, [-LCR_label, -GM_label], seed_label_list)
voronoi_vol = f1.voronoiVol()
print("Voronoi in White matter")
f1 = aims.FastMarching()
n = numpy.array( voronoi_vol, copy=False )
n[ n == -1 ] = -100
f1.doit( voronoi_vol, [-100], seed_label_list )
# f1.doit( voronoi_vol, [-100], [ 940, 760] )
voronoi_vol = f1.voronoiVol()
aims.write( voronoi_vol, voronoi_vol_file )
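# Illustrative invocation sketch (editorial addition; script and file names are
# hypothetical placeholders):
#   python sulci_voronoi.py -g left_greywhite.nii.gz -f left_folds.arg -o left_voronoi.nii.gz
# Plain positional arguments are also accepted, in the order:
# grey/white mask, output voronoi volume, sulci graph.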
|
[
"soma.aims.write",
"soma.aims.FastMarching",
"optparse.OptionParser",
"numpy.array",
"soma.aims.RawConverter_BucketMap_VOID_rc_ptr_Volume_S16",
"soma.aims.read"
] |
[((211, 357), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'description': '"""Voronoi diagram of the sulci nodes regions, in the grey matter, and extending to the whole 3D space"""'}), "(description=\n 'Voronoi diagram of the sulci nodes regions, in the grey matter, and extending to the whole 3D space'\n )\n", (232, 357), False, 'import optparse\n'), ((1154, 1177), 'soma.aims.read', 'aims.read', (['lgw_vol_file'], {}), '(lgw_vol_file)\n', (1163, 1177), False, 'from soma import aims, aimsalgo\n'), ((1193, 1219), 'soma.aims.read', 'aims.read', (['fold_graph_file'], {}), '(fold_graph_file)\n', (1202, 1219), False, 'from soma import aims, aimsalgo\n'), ((1967, 1986), 'soma.aims.FastMarching', 'aims.FastMarching', ([], {}), '()\n', (1984, 1986), False, 'from soma import aims, aimsalgo\n'), ((2143, 2162), 'soma.aims.FastMarching', 'aims.FastMarching', ([], {}), '()\n', (2160, 2162), False, 'from soma import aims, aimsalgo\n'), ((2167, 2203), 'numpy.array', 'numpy.array', (['voronoi_vol'], {'copy': '(False)'}), '(voronoi_vol, copy=False)\n', (2178, 2203), False, 'import numpy\n'), ((2351, 2392), 'soma.aims.write', 'aims.write', (['voronoi_vol', 'voronoi_vol_file'], {}), '(voronoi_vol, voronoi_vol_file)\n', (2361, 2392), False, 'from soma import aims, aimsalgo\n'), ((1358, 1428), 'soma.aims.RawConverter_BucketMap_VOID_rc_ptr_Volume_S16', 'aims.RawConverter_BucketMap_VOID_rc_ptr_Volume_S16', (['(False)', '(True)', 'value'], {}), '(False, True, value)\n', (1408, 1428), False, 'from soma import aims, aimsalgo\n')]
|
#!/usr/bin/env python3
import numpy as np
import copy
import itertools
import sys
import ete3
from Bio import AlignIO
# import CIAlign.cropSeq as cropSeq
# from AlignmentStats import find_removed_cialign
def writeOutfile(outfile, arr, nams, rmfile=None):
'''
Writes an alignment stored in a numpy array into a FASTA file.
Parameters
----------
outfile: str
Path to FASTA file where the output should be stored
arr: np.array
Numpy array containing the cleaned alignment
    nams: list
        List of names of sequences in the input alignment
    rmfile: str
        Path to file used to log sequences and columns which have been removed
Returns
-------
None
'''
out = open(outfile, "w")
i = 0
for nam in nams:
out.write(">%s\n%s\n" % (nam, "".join(list(arr[i]))))
i += 1
out.close()
# helper function to read MSA from file into np array
def readMSA(infile, log=None, outfile_stem=None):
'''
Convert an alignment into a numpy array.
Parameters
----------
infile: string
path to input alignment file in FASTA format
log: logging.Logger
An open log file object
Returns
-------
arr: np.array
2D numpy array in the same order as fasta_dict where each row
represents a single column in the alignment and each column a
single sequence.
nams: list
List of sequence names in the same order as in the input file
'''
formatErrorMessage = "The MSA file needs to be in FASTA format."
nams = []
seqs = []
nam = ""
seq = ""
with open(infile) as input:
for line in input:
line = line.strip()
if len(line) == 0:
continue # todo: test!
if line[0] == ">":
seqs.append([s.upper() for s in seq])
nams.append(nam.upper())
seq = []
nam = line.replace(">", "")
else:
if len(nams) == 0:
if log:
log.error(formatErrorMessage)
print(formatErrorMessage)
exit()
seq += list(line)
seqs.append(np.array([s.upper() for s in seq]))
nams.append(nam.upper())
arr = np.array(seqs[1:])
return (arr, nams[1:])
def find_removed_cialign(removed_file, arr, nams, keeprows=False):
'''
Reads the "_removed.txt" file generated by CIAlign to determine
what CIAlign has removed from the original alignment.
Replaces nucleotides removed by CIAlign with "!" in the array representing
the alignment so that it is still possible to compare these alignments
with uncleaned alignments in terms of knowing which columns and pairs
of residues are aligned.
! characters are always counted as mismatches in comparisons between
alignments.
Also counts how many total characters were removed by CIAlign and
how many non-gap characters.
Parameters
----------
removed_file: str
Path to a CIAlign _removed.txt log file
arr: np.array
Numpy array containing the alignment represented as a 2D matrix, where
dimension 1 is sequences and dimension 2 is columns
nams: list
List of names in the original alignment, in the same order as in the
input and the sequence array (these should always be the same).
Returns
-------
cleanarr:
2D numpy array containing the alignment represented as a 2D matrix,
where dimension 1 is sequences and dimension 2 is columns, with
residues removed by CIAlign represented as !
Fully removed sequences are removed from this array.
cleannams:
List of names in the output alignment, with any sequences fully
removed by CIAlign removed.
'''
# Read the CIAlign _removed.txt log file
lines = [line.strip().split("\t")
for line in open(removed_file).readlines()]
removed_count_total = 0
removed_count_nongap = 0
# Make an empty dictionary
D = {x: set() for x in nams}
for line in lines:
func = line[0]
if len(line) != 1:
ids = line[-1].split(",")
else:
ids = []
ids = [id.upper() for id in ids]
# for crop_ends and remove_insertions columns are removed so keep
# track of column numbers as integers
if func in ['crop_ends', 'remove_insertions', 'other']:
ids = [int(x) for x in ids]
# crop_ends is only applied to some sequences so also
# keep track of sequence names
if func == "crop_ends":
nam = line[1].upper()
D[nam] = D[nam] | set(ids)
# no need to remove insertions from sequences which were removed
# completely later
elif func == "remove_insertions":
for nam in nams:
# nam = nam.upper()
if D[nam] != "removed":
D[nam] = D[nam] | set(ids)
# remove divergent and remove short remove the whole sequence
elif func in ["remove_divergent", "remove_short", "otherc"]:
for nam in ids:
D[nam] = "removed"
elif func == "other":
for nam in nams:
if D[nam] != "removed":
D[nam] = D[nam] | set(ids)
# make copies of the arrays (because I'm never quite sure when
# python makes links rather than copies)
cleannams = copy.copy(nams)
cleannams = np.array([x.upper() for x in cleannams])
cleanarr = copy.copy(arr)
# iterate through everything that has been changed
for nam, val in D.items():
which_nam = np.where(cleannams == nam)[0][0]
# remove the removed sequences from the array
if val == "removed":
# keep track of the number of removed positions
removed_count_total += len(cleanarr[which_nam])
# keep track of the number of removed residues
removed_count_nongap += sum(cleanarr[which_nam] != "-")
# only keep names of sequences which are not removed
cleannams = np.append(cleannams[:which_nam],
cleannams[which_nam + 1:])
# only keep the sequences which are not removed
cleanarr = np.vstack([cleanarr[:which_nam],
cleanarr[which_nam+1:]])
# remove them from the input temporarily just to keep the shapes
# the same
arr = np.vstack([arr[:which_nam], arr[which_nam+1:]])
else:
# replace column substitutions with !
which_pos = np.array(sorted(list(val)))
if len(which_pos) != 0:
cleanarr[which_nam, which_pos] = "!"
removed_count_total += np.sum(cleanarr == "!")
# sometimes gaps are removed - make these gaps in the output rather than
# !s
cleanarr[arr == "-"] = "-"
removed_count_nongap += np.sum(cleanarr == "!")
return (cleanarr, cleannams, removed_count_total, removed_count_nongap)
msa_file = sys.argv[1]
removed_file = sys.argv[2]
fake_outfile = sys.argv[3]
arr, nams = readMSA(msa_file)
arr = np.char.upper(arr)
(cleanarr, cleannams, removed_count_total, removed_count_nongap) = find_removed_cialign(removed_file, arr, nams)
writeOutfile(fake_outfile, cleanarr, cleannams)
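# Illustrative invocation sketch (editorial addition; the script and file names are
# hypothetical placeholders):
#   python mark_cialign_removed.py alignment.fasta alignment_removed.txt alignment_marked.fasta
# i.e. argv[1] = input MSA (FASTA), argv[2] = CIAlign "_removed.txt" log,
# argv[3] = output FASTA in which residues removed by CIAlign are written as '!'
# (fully removed sequences are dropped and removed gaps stay as '-').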
|
[
"numpy.char.upper",
"numpy.where",
"numpy.append",
"numpy.array",
"numpy.sum",
"numpy.vstack",
"copy.copy"
] |
[((7324, 7342), 'numpy.char.upper', 'np.char.upper', (['arr'], {}), '(arr)\n', (7337, 7342), True, 'import numpy as np\n'), ((2403, 2421), 'numpy.array', 'np.array', (['seqs[1:]'], {}), '(seqs[1:])\n', (2411, 2421), True, 'import numpy as np\n'), ((5606, 5621), 'copy.copy', 'copy.copy', (['nams'], {}), '(nams)\n', (5615, 5621), False, 'import copy\n'), ((5694, 5708), 'copy.copy', 'copy.copy', (['arr'], {}), '(arr)\n', (5703, 5708), False, 'import copy\n'), ((6938, 6961), 'numpy.sum', 'np.sum', (["(cleanarr == '!')"], {}), "(cleanarr == '!')\n", (6944, 6961), True, 'import numpy as np\n'), ((7108, 7131), 'numpy.sum', 'np.sum', (["(cleanarr == '!')"], {}), "(cleanarr == '!')\n", (7114, 7131), True, 'import numpy as np\n'), ((6269, 6328), 'numpy.append', 'np.append', (['cleannams[:which_nam]', 'cleannams[which_nam + 1:]'], {}), '(cleannams[:which_nam], cleannams[which_nam + 1:])\n', (6278, 6328), True, 'import numpy as np\n'), ((6447, 6506), 'numpy.vstack', 'np.vstack', (['[cleanarr[:which_nam], cleanarr[which_nam + 1:]]'], {}), '([cleanarr[:which_nam], cleanarr[which_nam + 1:]])\n', (6456, 6506), True, 'import numpy as np\n'), ((6657, 6706), 'numpy.vstack', 'np.vstack', (['[arr[:which_nam], arr[which_nam + 1:]]'], {}), '([arr[:which_nam], arr[which_nam + 1:]])\n', (6666, 6706), True, 'import numpy as np\n'), ((5816, 5842), 'numpy.where', 'np.where', (['(cleannams == nam)'], {}), '(cleannams == nam)\n', (5824, 5842), True, 'import numpy as np\n')]
|
"""
Contacts between nucleotides in a tetracycline aptamer
======================================================
This example reproduces a figure from the publication
*"StreAM-Tg: algorithms for analyzing coarse grained RNA dynamics based
on Markov models of connectivity-graphs"* [1]_.
The figure displays a coarse grained model of a tetracycline aptamer
and highlights interacting nucleotides based on a cutoff distance.
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>,
"StreAM-Tg: algorithms for analyzing coarse grained RNA dynamics based
on Markov models of connectivity-graphs."
Algorithms Mol Biol 12 (2017).
"""
# Code source: <NAME>
# License: CC0
import numpy as np
import biotite.structure as struc
import biotite.structure.io.mmtf as mmtf
import biotite.database.rcsb as rcsb
import ammolite
PNG_SIZE = (800, 800)
########################################################################
mmtf_file = mmtf.MMTFFile.read(rcsb.fetch("3EGZ", "mmtf"))
structure = mmtf.get_structure(mmtf_file, model=1)
aptamer = structure[struc.filter_nucleotides(structure)]
# Coarse graining: Represent each nucleotide using its C3' atom
aptamer = aptamer[aptamer.atom_name == "C3'"]
# Connect consecutive nucleotides
indices = np.arange(aptamer.array_length())
aptamer.bonds = struc.BondList(
aptamer.array_length(),
np.stack((indices[:-1], indices[1:]), axis=-1)
)
pymol_obj = ammolite.PyMOLObject.from_structure(aptamer)
pymol_obj.show("sticks")
pymol_obj.show("spheres")
pymol_obj.color("black")
ammolite.cmd.set("stick_color", "red")
ammolite.cmd.set("stick_radius", 0.5)
ammolite.cmd.set("sphere_scale", 1.0)
ammolite.cmd.set("sphere_quality", 4)
# Adjust camera
pymol_obj.orient()
pymol_obj.zoom(buffer=10)
ammolite.cmd.rotate("z", 90)
ammolite.show(PNG_SIZE)
########################################################################
CUTOFF = 13
# Find contacts within cutoff distance
adjacency_matrix = struc.CellList(aptamer, CUTOFF) \
.create_adjacency_matrix(CUTOFF)
for i, j in zip(*np.where(adjacency_matrix)):
pymol_obj.distance("", i, j, show_label=False, gap=0)
ammolite.cmd.set("dash_color", "firebrick")
# Add black outlines
ammolite.cmd.bg_color("white")
ammolite.cmd.set("ray_trace_mode", 1)
ammolite.cmd.set("ray_trace_disco_factor", 0.5)
ammolite.show(PNG_SIZE)
# sphinx_gallery_thumbnail_number = 2
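# Editorial note: the adjacency matrix returned by create_adjacency_matrix() is
# symmetric, so each contact is visited twice as (i, j) and (j, i) in the loop above;
# to draw every pair only once one could filter the indices, e.g.
#   pairs = [(i, j) for i, j in zip(*np.where(adjacency_matrix)) if i < j]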
|
[
"ammolite.PyMOLObject.from_structure",
"biotite.database.rcsb.fetch",
"numpy.where",
"biotite.structure.io.mmtf.get_structure",
"numpy.stack",
"biotite.structure.CellList",
"ammolite.show",
"biotite.structure.filter_nucleotides",
"ammolite.cmd.set",
"ammolite.cmd.rotate",
"ammolite.cmd.bg_color"
] |
[((998, 1036), 'biotite.structure.io.mmtf.get_structure', 'mmtf.get_structure', (['mmtf_file'], {'model': '(1)'}), '(mmtf_file, model=1)\n', (1016, 1036), True, 'import biotite.structure.io.mmtf as mmtf\n'), ((1409, 1453), 'ammolite.PyMOLObject.from_structure', 'ammolite.PyMOLObject.from_structure', (['aptamer'], {}), '(aptamer)\n', (1444, 1453), False, 'import ammolite\n'), ((1530, 1568), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""stick_color"""', '"""red"""'], {}), "('stick_color', 'red')\n", (1546, 1568), False, 'import ammolite\n'), ((1569, 1606), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""stick_radius"""', '(0.5)'], {}), "('stick_radius', 0.5)\n", (1585, 1606), False, 'import ammolite\n'), ((1607, 1644), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""sphere_scale"""', '(1.0)'], {}), "('sphere_scale', 1.0)\n", (1623, 1644), False, 'import ammolite\n'), ((1645, 1682), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""sphere_quality"""', '(4)'], {}), "('sphere_quality', 4)\n", (1661, 1682), False, 'import ammolite\n'), ((1745, 1773), 'ammolite.cmd.rotate', 'ammolite.cmd.rotate', (['"""z"""', '(90)'], {}), "('z', 90)\n", (1764, 1773), False, 'import ammolite\n'), ((1774, 1797), 'ammolite.show', 'ammolite.show', (['PNG_SIZE'], {}), '(PNG_SIZE)\n', (1787, 1797), False, 'import ammolite\n'), ((2135, 2178), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""dash_color"""', '"""firebrick"""'], {}), "('dash_color', 'firebrick')\n", (2151, 2178), False, 'import ammolite\n'), ((2201, 2231), 'ammolite.cmd.bg_color', 'ammolite.cmd.bg_color', (['"""white"""'], {}), "('white')\n", (2222, 2231), False, 'import ammolite\n'), ((2232, 2269), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""ray_trace_mode"""', '(1)'], {}), "('ray_trace_mode', 1)\n", (2248, 2269), False, 'import ammolite\n'), ((2270, 2317), 'ammolite.cmd.set', 'ammolite.cmd.set', (['"""ray_trace_disco_factor"""', '(0.5)'], {}), "('ray_trace_disco_factor', 0.5)\n", (2286, 2317), False, 'import ammolite\n'), ((2319, 2342), 'ammolite.show', 'ammolite.show', (['PNG_SIZE'], {}), '(PNG_SIZE)\n', (2332, 2342), False, 'import ammolite\n'), ((958, 984), 'biotite.database.rcsb.fetch', 'rcsb.fetch', (['"""3EGZ"""', '"""mmtf"""'], {}), "('3EGZ', 'mmtf')\n", (968, 984), True, 'import biotite.database.rcsb as rcsb\n'), ((1057, 1092), 'biotite.structure.filter_nucleotides', 'struc.filter_nucleotides', (['structure'], {}), '(structure)\n', (1081, 1092), True, 'import biotite.structure as struc\n'), ((1347, 1393), 'numpy.stack', 'np.stack', (['(indices[:-1], indices[1:])'], {'axis': '(-1)'}), '((indices[:-1], indices[1:]), axis=-1)\n', (1355, 1393), True, 'import numpy as np\n'), ((1944, 1975), 'biotite.structure.CellList', 'struc.CellList', (['aptamer', 'CUTOFF'], {}), '(aptamer, CUTOFF)\n', (1958, 1975), True, 'import biotite.structure as struc\n'), ((2047, 2073), 'numpy.where', 'np.where', (['adjacency_matrix'], {}), '(adjacency_matrix)\n', (2055, 2073), True, 'import numpy as np\n')]
|
import numpy as np
class DOM:
"""
    Object representing a discretized observation model. Composed primarily of the
    DOM.edges and DOM.chi vectors, which hold the discretization bin edges and the
    state-dependent emission probabilities, respectively.
"""
def __init__(self):
self.k = None
self.n_bins = None
self.edges = None
self.classes = None
self.chi = None
self.type = 'DOM'
self.n_params = None
def set_params(self, config):
"""
Set relevant parameters for DOM object.
Args:
config (dict): Parameters to set.
"""
params = {'n_bins', 'edges', 'classes', 'chi', 'n_params'}
self.__dict__.update((param, np.array(value)) for param, value in config.items() if param in params)
def initialize(self, k, stats):
"""
Initialize DOM parameters according to dataset properties.
Args:
k (int): Number of components to use
stats (dict): Dictionary of dataset sets, generated by Dataset.compute_stats()
"""
k = k + 5
qbin_sizes = 0.5 / k # Quantile sizes
qbin_edges = 0.25 + qbin_sizes*np.arange(0, k+1) # Edge locations (in quantile terms)
bin_edges = np.interp(qbin_edges, stats['quantile_basis'], stats['quantiles'])
self.k = k
self.n_bins = k + 2
self.classes = list(range(1, self.n_bins + 2))
self.edges = [-np.Inf] + [edge for edge in bin_edges] + [np.Inf]
self.chi = np.zeros((2, self.n_bins + 1))
dist = np.linspace(2, 1, self.n_bins) # Bins captured by observations
scaled_dist = 0.9 * dist / dist.sum() # Scaling by 0.9 to allow for 0.1 emission prob of NaN
self.chi[1, :-1] = scaled_dist # Paired emission dist
self.chi[0, :-1] = np.flip(scaled_dist) # Unpaired emission dist
self.chi[1, -1] = 0.1 # NaN observations
self.chi[0, -1] = 0.1 # NaN observations
self.n_params = 2*(self.n_bins-2)
def discretize(self, transcript):
"""
Compute the DOM class for all nucleotides in an RNA and save the resulting vector
to Transcript.obs_dom.
"""
# np.searchsorted is identical to the digitize call here, but marginally faster (especially
# for a large number of bins and/or a large number of RNAs).
# transcript.obs_dom = np.digitize(transcript.obs, bins=self.edges)
transcript.obs_dom = np.searchsorted(self.edges, transcript.obs, side='left')
def compute_emissions(self, transcript, reference=False):
"""
Compute emission probabilities according to the discretized observation model.
This amounts to simply accessing the correct indices of the DOM pdf matrix, chi.
Args:
transcript (src.patteRNA.Transcript.Transcript): Transcript to process
reference (bool): Whether or not it's a reference transcript
"""
if reference:
pass
transcript.B = self.chi[:, transcript.obs_dom-1]
@staticmethod
def post_process(transcript):
pass # No post-processing needed for DOM model
def m_step(self, transcript):
"""
        Compute pseudo-counts en route to updating model parameters according to a maximum-likelihood approach.
Args:
transcript (Transcript): Transcript to process
Returns:
params (dict): Partial pseudo-counts
"""
chi_0 = np.fromiter((transcript.gamma[0, transcript.obs_dom == dom_class].sum()
for dom_class in self.classes), float)
chi_1 = np.fromiter((transcript.gamma[1, transcript.obs_dom == dom_class].sum()
for dom_class in self.classes), float)
params = {'chi': np.vstack((chi_0, chi_1)),
'chi_norm': np.sum(transcript.gamma, axis=1)}
return params
def update_from_pseudocounts(self, pseudocounts, nan=False):
"""
        Update model parameters from transcript-level pseudo-counts.
Args:
pseudocounts (dict): Dictionary of total pseudo-counts
nan (bool): Whether or not to treat NaNs as informative
"""
self.chi = pseudocounts['chi'] / pseudocounts['chi_norm'][:, None]
self.scale_chi(nan=nan)
def scale_chi(self, nan=False):
"""
Scale chi vector to a probability distribution.
Args:
nan (bool): Whether or not to treat NaNs as informative
"""
if nan:
self.chi[:, :] = self.chi[:, :] / np.sum(self.chi[:, :], axis=1)[:, np.newaxis]
else:
self.chi[:, :-1] = 0.9 * self.chi[:, :-1] / np.sum(self.chi[:, :-1], axis=1)[:, np.newaxis]
self.chi[:, -1] = 0.1 # NaN observations
def snapshot(self):
"""
Returns a text summary of model parameters.
"""
text = ""
text += "{}:\n{}\n".format('chi', np.array2string(self.chi))
return text
def serialize(self):
"""
Return a dictionary containing all of the parameters needed to describe the emission model.
"""
return {'type': self.type,
'n_bins': self.n_bins,
'classes': self.classes,
'edges': self.edges,
'chi': self.chi.tolist(),
'n_params': self.n_params}
def reset(self):
"""
Reset DOM object to un-initialized state.
"""
self.edges = None
self.chi = None
self.k = None
self.n_bins = None
self.classes = None
self.n_params = None
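# Illustrative sketch (editorial addition, not part of patteRNA): minimal standalone
# use of the DOM class with made-up statistics standing in for Dataset.compute_stats().
#
#   basis = np.linspace(0, 1, 1001)
#   stats = {'quantile_basis': basis,
#            'quantiles': np.quantile(np.random.randn(10000), basis)}  # hypothetical data
#   dom = DOM()
#   dom.initialize(k=2, stats=stats)
#   print(dom.snapshot())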
|
[
"numpy.flip",
"numpy.searchsorted",
"numpy.array2string",
"numpy.sum",
"numpy.zeros",
"numpy.linspace",
"numpy.array",
"numpy.vstack",
"numpy.interp",
"numpy.arange"
] |
[((1282, 1348), 'numpy.interp', 'np.interp', (['qbin_edges', "stats['quantile_basis']", "stats['quantiles']"], {}), "(qbin_edges, stats['quantile_basis'], stats['quantiles'])\n", (1291, 1348), True, 'import numpy as np\n'), ((1544, 1574), 'numpy.zeros', 'np.zeros', (['(2, self.n_bins + 1)'], {}), '((2, self.n_bins + 1))\n', (1552, 1574), True, 'import numpy as np\n'), ((1591, 1621), 'numpy.linspace', 'np.linspace', (['(2)', '(1)', 'self.n_bins'], {}), '(2, 1, self.n_bins)\n', (1602, 1621), True, 'import numpy as np\n'), ((1847, 1867), 'numpy.flip', 'np.flip', (['scaled_dist'], {}), '(scaled_dist)\n', (1854, 1867), True, 'import numpy as np\n'), ((2497, 2553), 'numpy.searchsorted', 'np.searchsorted', (['self.edges', 'transcript.obs'], {'side': '"""left"""'}), "(self.edges, transcript.obs, side='left')\n", (2512, 2553), True, 'import numpy as np\n'), ((3846, 3871), 'numpy.vstack', 'np.vstack', (['(chi_0, chi_1)'], {}), '((chi_0, chi_1))\n', (3855, 3871), True, 'import numpy as np\n'), ((3903, 3935), 'numpy.sum', 'np.sum', (['transcript.gamma'], {'axis': '(1)'}), '(transcript.gamma, axis=1)\n', (3909, 3935), True, 'import numpy as np\n'), ((5020, 5045), 'numpy.array2string', 'np.array2string', (['self.chi'], {}), '(self.chi)\n', (5035, 5045), True, 'import numpy as np\n'), ((1205, 1224), 'numpy.arange', 'np.arange', (['(0)', '(k + 1)'], {}), '(0, k + 1)\n', (1214, 1224), True, 'import numpy as np\n'), ((743, 758), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (751, 758), True, 'import numpy as np\n'), ((4640, 4670), 'numpy.sum', 'np.sum', (['self.chi[:, :]'], {'axis': '(1)'}), '(self.chi[:, :], axis=1)\n', (4646, 4670), True, 'import numpy as np\n'), ((4756, 4788), 'numpy.sum', 'np.sum', (['self.chi[:, :-1]'], {'axis': '(1)'}), '(self.chi[:, :-1], axis=1)\n', (4762, 4788), True, 'import numpy as np\n')]
|
"""Calculate the partial derivatives of the source coordinates
Description:
------------
Calculate the partial derivatives of the source coordinates.
This is done according to equations (2.47) - (2.50) in Teke [2]_.
References:
-----------
.. [1] <NAME>. and <NAME>. (eds.), IERS Conventions (2010), IERS Technical Note No. 36, BKG (2010).
http://www.iers.org/IERS/EN/Publications/TechnicalNotes/tn36.html
.. [2] <NAME>, Sub-daily parameter estimation in VLBI data analysis.
https://geo.tuwien.ac.at/fileadmin/editors/GM/GM87_teke.pdf
"""
# External library imports
import numpy as np
# Midgard imports
from midgard.dev import plugins
# Where imports
from where.lib import config
from where import apriori
from where.lib import log
# Name of parameter
PARAMETER = __name__.split(".")[-1]
@plugins.register
def src_dir(dset):
"""Calculate the partial derivative of the source coordinates
Args:
dset: A Dataset containing model data.
Returns:
Tuple: Array of partial derivatives, list of their names, and their unit
"""
column_names = ["ra", "dec"]
sources = np.asarray(dset.unique("source"))
icrf = apriori.get("crf", time=dset.time)
# Remove sources that should be fixed
fix_idx = np.zeros(len(sources))
for group in config.tech[PARAMETER].fix_sources.list:
fix_idx = np.logical_or(
[icrf[src].meta[group] if group in icrf[src].meta else src == group for src in sources], fix_idx
)
for group in config.tech[PARAMETER].except_sources.list:
except_idx = np.array([icrf[src].meta[group] if group in icrf[src].meta else src == group for src in sources])
fix_idx = np.logical_and(np.logical_not(except_idx), fix_idx)
sources = sources[np.logical_not(fix_idx)]
# Calculate partials
partials = np.zeros((dset.num_obs, len(sources) * 2))
baseline = (dset.site_pos_2.gcrs.pos - dset.site_pos_1.gcrs.pos).mat
dK_dra = dset.src_dir.dsrc_dra[:, None, :]
dK_ddec = dset.src_dir.dsrc_ddec[:, None, :]
all_partials = np.hstack((-dK_dra @ baseline, -dK_ddec @ baseline))[:, :, 0]
for idx, src in enumerate(sources):
src_idx = dset.filter(source=src)
partials[src_idx, idx * 2 : idx * 2 + 2] = all_partials[src_idx]
column_names = [s + "_" + name for s in sources for name in column_names]
return partials, column_names, "meter"
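# Editorial note: the returned partials array holds two columns per estimated source
# (right ascension, then declination), and column_names follows the same order as
# "<source>_ra", "<source>_dec" (e.g. "0059+581_ra", "0059+581_dec" for a
# hypothetical source name).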
|
[
"numpy.hstack",
"numpy.logical_not",
"where.apriori.get",
"numpy.logical_or",
"numpy.array"
] |
[((1179, 1213), 'where.apriori.get', 'apriori.get', (['"""crf"""'], {'time': 'dset.time'}), "('crf', time=dset.time)\n", (1190, 1213), False, 'from where import apriori\n'), ((1371, 1488), 'numpy.logical_or', 'np.logical_or', (['[(icrf[src].meta[group] if group in icrf[src].meta else src == group) for\n src in sources]', 'fix_idx'], {}), '([(icrf[src].meta[group] if group in icrf[src].meta else src ==\n group) for src in sources], fix_idx)\n', (1384, 1488), True, 'import numpy as np\n'), ((1588, 1691), 'numpy.array', 'np.array', (['[(icrf[src].meta[group] if group in icrf[src].meta else src == group) for\n src in sources]'], {}), '([(icrf[src].meta[group] if group in icrf[src].meta else src ==\n group) for src in sources])\n', (1596, 1691), True, 'import numpy as np\n'), ((1779, 1802), 'numpy.logical_not', 'np.logical_not', (['fix_idx'], {}), '(fix_idx)\n', (1793, 1802), True, 'import numpy as np\n'), ((2076, 2128), 'numpy.hstack', 'np.hstack', (['(-dK_dra @ baseline, -dK_ddec @ baseline)'], {}), '((-dK_dra @ baseline, -dK_ddec @ baseline))\n', (2085, 2128), True, 'import numpy as np\n'), ((1719, 1745), 'numpy.logical_not', 'np.logical_not', (['except_idx'], {}), '(except_idx)\n', (1733, 1745), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# fsl_ents.py - Extract ICA component time courses from a MELODIC directory.
#
# Author: <NAME> <<EMAIL>>
#
"""This module defines the ``fsl_ents`` script, for extracting component
time series from a MELODIC ``.ica`` directory.
"""
import os.path as op
import sys
import argparse
import warnings
import numpy as np
# See atlasq.py for explanation
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import fsl.data.fixlabels as fixlabels
import fsl.data.melodicanalysis as melanalysis
DTYPE = np.float64
name = "fsl_ents"
desc = 'Extract component time series from a MELODIC .ica directory'
usage = """
{name}: {desc}
Usage:
{name} <.ica directory> [-o outfile] <fixfile>
{name} <.ica directory> [-o outfile] <component> [<component> ...]
{name} <.ica directory> [-o outfile] [-c conffile] [-c conffile] <fixfile>
{name} <.ica directory> [-o outfile] [-c conffile] [-c conffile] <component> [<component> ...]
""".format(name=name, desc=desc).strip() # noqa
helps = {
'outfile' :
'File to save time series to',
'overwrite' :
'Overwrite output file if it exists',
'icadir' :
'.ica directory to extract time series from.',
'component' :
'Component number or FIX/AROMA file specifying components to extract.',
'confound' :
'Extra files to append to output file.',
}
def parseArgs(args):
"""Parses command line arguments.
:arg args: Sequence of command line arguments.
:returns: An ``argparse.Namespace`` object containing parsed arguments.
"""
if len(args) == 0:
print(usage)
sys.exit(0)
parser = argparse.ArgumentParser(prog=name,
usage=usage,
description=desc)
parser.add_argument('-o', '--outfile',
help=helps['outfile'],
default='confound_timeseries.txt')
parser.add_argument('-ow', '--overwrite',
action='store_true',
help=helps['overwrite'])
parser.add_argument('-c', '--conffile',
action='append',
help=helps['confound'])
parser.add_argument('icadir',
help=helps['icadir'])
parser.add_argument('components',
nargs='+',
help=helps['component'])
args = parser.parse_args(args)
# Error if ica directory does not exist
if not op.exists(args.icadir):
print('ICA directory {} does not exist'.format(args.icadir))
sys.exit(1)
# Error if output exists, but overwrite not specified
if op.exists(args.outfile) and not args.overwrite:
print('Output file {} already exists and --overwrite not '
'specified'.format(args.outfile))
sys.exit(1)
# Convert components into integers,
# or absolute file paths, and error
# if any are not one of these.
for i, c in enumerate(args.components):
if op.exists(c):
args.components[i] = op.abspath(c)
else:
try:
args.components[i] = int(c)
except ValueError:
                print('Bad component: {}. Components must either be component '
                      'indices (starting from 1), or paths to FIX/AROMA '
                      'files.'.format(c))
sys.exit(1)
# Convert confound files to absolute
# paths, error if any do not exist.
if args.conffile is None:
args.conffile = []
for i, cf in enumerate(args.conffile):
if not op.exists(cf):
print('Confound file does not exist: {}'.format(cf))
sys.exit(1)
args.conffile[i] = op.abspath(cf)
args.outfile = op.abspath(args.outfile)
args.icadir = op.abspath(args.icadir)
return args
def genComponentIndexList(comps, ncomps):
"""Turns the given sequence of integers and file paths into a list
of 0-based component indices.
:arg comps: Sequence containing 1-based component indices, and/or paths
to FIX/AROMA label text files.
:arg ncomps: Number of components in the input data - indices larger than
this will be ignored.
:returns: List of 0-based component indices.
"""
allcomps = []
for c in comps:
if isinstance(c, int):
ccomps = [c]
else:
ccomps = fixlabels.loadLabelFile(c, returnIndices=True)[2]
allcomps.extend([c - 1 for c in ccomps])
if any([c < 0 or c >= ncomps for c in allcomps]):
raise ValueError('Invalid component indices: {}'.format(allcomps))
return list(sorted(set(allcomps)))
def loadConfoundFiles(conffiles, npts):
"""Loads the given confound files, and copies them all into a single 2D
``(npoints, nconfounds)`` matrix.
:arg conffiles: Sequence of paths to files containing confound time series
(where each row corresponds to a time point, and each
column corresponds to a single confound).
:arg npts: Expected number of time points
:returns: A ``(npoints, nconfounds)`` ``numpy`` matrix.
"""
matrices = []
for cfile in conffiles:
mat = np.loadtxt(cfile, dtype=DTYPE)
if len(mat.shape) == 1:
mat = np.atleast_2d(mat).T
if mat.shape[0] != npts:
raise ValueError('Confound file {} does not have correct number '
'of points (expected {}, has {})'.format(
cfile, npts, mat.shape[0]))
matrices.append(mat)
ncols = sum([m.shape[1] for m in matrices])
confounds = np.zeros((npts, ncols), dtype=DTYPE)
coli = 0
for mat in matrices:
matcols = mat.shape[1]
confounds[:, coli:coli + matcols] = mat
coli = coli + matcols
return confounds
def main(argv=None):
"""Entry point for the ``fsl_ents`` script.
Identifies component time series to extract, extracts them, loads extra
confound files, and saves them out to a file.
"""
if argv is None:
argv = sys.argv[1:]
args = parseArgs(argv)
try:
ts = melanalysis.getComponentTimeSeries(args.icadir)
npts, ncomps = ts.shape
confs = loadConfoundFiles(args.conffile, npts)
comps = genComponentIndexList(args.components, ncomps)
ts = ts[:, comps]
except Exception as e:
print(e)
sys.exit(1)
ts = np.hstack((ts, confs))
np.savetxt(args.outfile, ts, fmt='%10.5f')
if __name__ == '__main__':
sys.exit(main())
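# Illustrative invocations (editorial addition; directory, file names and component
# indices are hypothetical placeholders):
#   fsl_ents filtered_func_data.ica -o confounds.txt 1 4 7
#   fsl_ents filtered_func_data.ica -o confounds.txt -c mc.par fix4melview_labels.txt
# Component numbers are 1-based on the command line and converted to 0-based indices
# by genComponentIndexList().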
|
[
"os.path.exists",
"numpy.atleast_2d",
"argparse.ArgumentParser",
"numpy.hstack",
"warnings.catch_warnings",
"numpy.zeros",
"fsl.data.fixlabels.loadLabelFile",
"numpy.savetxt",
"sys.exit",
"os.path.abspath",
"numpy.loadtxt",
"warnings.filterwarnings",
"fsl.data.melodicanalysis.getComponentTimeSeries"
] |
[((415, 440), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (438, 440), False, 'import warnings\n'), ((446, 503), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (469, 503), False, 'import warnings\n'), ((1718, 1783), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': 'name', 'usage': 'usage', 'description': 'desc'}), '(prog=name, usage=usage, description=desc)\n', (1741, 1783), False, 'import argparse\n'), ((3852, 3876), 'os.path.abspath', 'op.abspath', (['args.outfile'], {}), '(args.outfile)\n', (3862, 3876), True, 'import os.path as op\n'), ((3896, 3919), 'os.path.abspath', 'op.abspath', (['args.icadir'], {}), '(args.icadir)\n', (3906, 3919), True, 'import os.path as op\n'), ((5800, 5836), 'numpy.zeros', 'np.zeros', (['(npts, ncols)'], {'dtype': 'DTYPE'}), '((npts, ncols), dtype=DTYPE)\n', (5808, 5836), True, 'import numpy as np\n'), ((6703, 6725), 'numpy.hstack', 'np.hstack', (['(ts, confs)'], {}), '((ts, confs))\n', (6712, 6725), True, 'import numpy as np\n'), ((6730, 6772), 'numpy.savetxt', 'np.savetxt', (['args.outfile', 'ts'], {'fmt': '"""%10.5f"""'}), "(args.outfile, ts, fmt='%10.5f')\n", (6740, 6772), True, 'import numpy as np\n'), ((1692, 1703), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1700, 1703), False, 'import sys\n'), ((2575, 2597), 'os.path.exists', 'op.exists', (['args.icadir'], {}), '(args.icadir)\n', (2584, 2597), True, 'import os.path as op\n'), ((2676, 2687), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2684, 2687), False, 'import sys\n'), ((2754, 2777), 'os.path.exists', 'op.exists', (['args.outfile'], {}), '(args.outfile)\n', (2763, 2777), True, 'import os.path as op\n'), ((2925, 2936), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2933, 2936), False, 'import sys\n'), ((3108, 3120), 'os.path.exists', 'op.exists', (['c'], {}), '(c)\n', (3117, 3120), True, 'import os.path as op\n'), ((3817, 3831), 'os.path.abspath', 'op.abspath', (['cf'], {}), '(cf)\n', (3827, 3831), True, 'import os.path as op\n'), ((5354, 5384), 'numpy.loadtxt', 'np.loadtxt', (['cfile'], {'dtype': 'DTYPE'}), '(cfile, dtype=DTYPE)\n', (5364, 5384), True, 'import numpy as np\n'), ((6380, 6427), 'fsl.data.melodicanalysis.getComponentTimeSeries', 'melanalysis.getComponentTimeSeries', (['args.icadir'], {}), '(args.icadir)\n', (6414, 6427), True, 'import fsl.data.melodicanalysis as melanalysis\n'), ((3155, 3168), 'os.path.abspath', 'op.abspath', (['c'], {}), '(c)\n', (3165, 3168), True, 'import os.path as op\n'), ((3686, 3699), 'os.path.exists', 'op.exists', (['cf'], {}), '(cf)\n', (3695, 3699), True, 'import os.path as op\n'), ((3778, 3789), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3786, 3789), False, 'import sys\n'), ((6681, 6692), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6689, 6692), False, 'import sys\n'), ((4522, 4568), 'fsl.data.fixlabels.loadLabelFile', 'fixlabels.loadLabelFile', (['c'], {'returnIndices': '(True)'}), '(c, returnIndices=True)\n', (4545, 4568), True, 'import fsl.data.fixlabels as fixlabels\n'), ((5436, 5454), 'numpy.atleast_2d', 'np.atleast_2d', (['mat'], {}), '(mat)\n', (5449, 5454), True, 'import numpy as np\n'), ((3477, 3488), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3485, 3488), False, 'import sys\n')]
|
import collections
import io
import numpy as np
import tensorflow as tf
hidden_dim = 1000
input_size = 28 * 28
output_size = 10
train_data_file = "/home/harper/dataset/mnist/train-images.idx3-ubyte"
train_label_file = "/home/harper/dataset/mnist/train-labels.idx1-ubyte"
test_data_file = "/home/harper/dataset/mnist/t10k-images.idx3-ubyte"
test_label_file = "/home/harper/dataset/mnist/t10k-labels.idx1-ubyte"
Datasets = collections.namedtuple("Datasets", ['train', 'test'])
class Dataset(object):
def __init__(self, data, label):
self.data = data
self.label = label
self.size = self.data.shape[0]
perm = np.random.permutation(self.size)
self.data = self.data[perm]
self.label = self.label[perm]
self.start = 0
def next_batch(self, batch_size):
if self.start == self.size:
perm = np.random.permutation(self.size)
self.data = self.data[perm]
self.label = self.label[perm]
self.start = 0
start = self.start
end = min(start + batch_size, self.size)
self.start = end
return [self.data[start:end], self.label[start:end]]
def read_data(file):
with io.open(file, 'rb') as stream:
magic = stream.read(4)
num_record = np.frombuffer(stream.read(4), np.dtype(np.uint32).newbyteorder(">"))[0]
raw = stream.read(input_size * num_record)
flat = np.frombuffer(raw, np.uint8).astype(np.float32) / 255
result = flat.reshape([-1, input_size])
return result
def read_label(file):
with io.open(file, 'rb') as stream:
magic = stream.read(4)
num_record = np.frombuffer(stream.read(4), np.dtype(np.uint32).newbyteorder(">"))[0]
raw = stream.read(num_record)
return np.frombuffer(raw, np.uint8).astype(np.int32)
def read_datasets():
train_data = read_data(train_data_file)
train_label = read_label(train_label_file)
test_data = read_data(test_data_file)
test_label = read_label(test_label_file)
return Datasets(train=Dataset(train_data, train_label),
test=Dataset(test_data, test_label))
mnist = read_datasets()
x = tf.placeholder(tf.float32, [None, input_size], name="x")
label = tf.placeholder(tf.int64, [None], name="label")
with tf.name_scope("layer1"):
w1 = tf.Variable(tf.truncated_normal([input_size, hidden_dim], stddev=0.01), name="w1")
b1 = tf.Variable(tf.zeros([hidden_dim]), name="b1")
layer1_out = tf.nn.relu(tf.matmul(x, w1) + b1, "l1o")
with tf.name_scope("layer2"):
w2 = tf.Variable(tf.truncated_normal([hidden_dim, output_size], stddev=0.01), name="w2")
b2 = tf.Variable(tf.zeros([output_size]), name="b2")
layer2_out = tf.matmul(layer1_out, w2) + b2
with tf.name_scope("loss"):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=layer2_out,
name="cross_entropy")
cross_entropy = tf.reduce_mean(cross_entropy)
with tf.name_scope("sgd"):
train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
with tf.name_scope("accuracy"):
prediction = tf.argmax(layer2_out, 1, name="prediction")
correct_prediction = tf.equal(prediction, label)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.name_scope("summary"):
tf.summary.histogram('b1', b1)
tf.summary.histogram('w1', w1)
tf.summary.histogram('w2', w2)
tf.summary.histogram('b2', b2)
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter("/home/harper/tftemp")
train_writer.add_graph(tf.get_default_graph())
builder = tf.saved_model.builder.SavedModelBuilder("/home/harper/mnistmodel")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(5000):
batch = mnist.train.next_batch(50)
if batch is None:
break
if i % 100 == 0:
train_accuracy, merged_summary = sess.run([accuracy, merged], feed_dict={x: batch[0], label: batch[1]})
train_writer.add_summary(merged_summary, i)
print('step %d, training accuracy %g' % (i, train_accuracy))
print(
'test accuracy %g' % accuracy.eval(feed_dict={x: mnist.test.data, label: mnist.test.label}))
_, merged_summary = sess.run([train_step, merged], feed_dict={x: batch[0], label: batch[1]})
train_writer.add_summary(merged_summary, i)
print(
'test accuracy %g' % accuracy.eval(feed_dict={x: mnist.test.data, label: mnist.test.label}))
# Build Signature to save to model
signature = tf.saved_model.signature_def_utils.build_signature_def(
inputs={
'input': tf.saved_model.utils.build_tensor_info(x)
},
outputs={
'output': tf.saved_model.utils.build_tensor_info(prediction)
},
method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
)
builder.add_meta_graph_and_variables(sess,
[tf.saved_model.tag_constants.SERVING],
signature_def_map={
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature})
builder.save()
train_writer.close()
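# Illustrative sketch (editorial addition): the SavedModel exported above could later be
# restored for inference with the TF1 loader; the tensor names follow the name_scope /
# name arguments used in this script.
#
#   with tf.Session(graph=tf.Graph()) as sess:
#       tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
#                                  "/home/harper/mnistmodel")
#       preds = sess.run("accuracy/prediction:0",
#                        feed_dict={"x:0": mnist.test.data[:5]})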
|
[
"tensorflow.equal",
"io.open",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.matmul",
"numpy.frombuffer",
"tensorflow.summary.scalar",
"tensorflow.train.AdamOptimizer",
"numpy.dtype",
"tensorflow.zeros",
"tensorflow.get_default_graph",
"numpy.random.permutation",
"collections.namedtuple",
"tensorflow.summary.merge_all",
"tensorflow.summary.histogram",
"tensorflow.summary.FileWriter",
"tensorflow.truncated_normal",
"tensorflow.saved_model.utils.build_tensor_info",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow.name_scope",
"tensorflow.saved_model.builder.SavedModelBuilder"
] |
[((425, 478), 'collections.namedtuple', 'collections.namedtuple', (['"""Datasets"""', "['train', 'test']"], {}), "('Datasets', ['train', 'test'])\n", (447, 478), False, 'import collections\n'), ((2192, 2248), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, input_size]'], {'name': '"""x"""'}), "(tf.float32, [None, input_size], name='x')\n", (2206, 2248), True, 'import tensorflow as tf\n'), ((2257, 2303), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None]'], {'name': '"""label"""'}), "(tf.int64, [None], name='label')\n", (2271, 2303), True, 'import tensorflow as tf\n'), ((3004, 3033), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (3018, 3033), True, 'import tensorflow as tf\n'), ((3576, 3598), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3596, 3598), True, 'import tensorflow as tf\n'), ((3615, 3659), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""/home/harper/tftemp"""'], {}), "('/home/harper/tftemp')\n", (3636, 3659), True, 'import tensorflow as tf\n'), ((3717, 3784), 'tensorflow.saved_model.builder.SavedModelBuilder', 'tf.saved_model.builder.SavedModelBuilder', (['"""/home/harper/mnistmodel"""'], {}), "('/home/harper/mnistmodel')\n", (3757, 3784), True, 'import tensorflow as tf\n'), ((2310, 2333), 'tensorflow.name_scope', 'tf.name_scope', (['"""layer1"""'], {}), "('layer1')\n", (2323, 2333), True, 'import tensorflow as tf\n'), ((2547, 2570), 'tensorflow.name_scope', 'tf.name_scope', (['"""layer2"""'], {}), "('layer2')\n", (2560, 2570), True, 'import tensorflow as tf\n'), ((2775, 2796), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (2788, 2796), True, 'import tensorflow as tf\n'), ((2818, 2924), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'label', 'logits': 'layer2_out', 'name': '"""cross_entropy"""'}), "(labels=label, logits=\n layer2_out, name='cross_entropy')\n", (2864, 2924), True, 'import tensorflow as tf\n'), ((3040, 3060), 'tensorflow.name_scope', 'tf.name_scope', (['"""sgd"""'], {}), "('sgd')\n", (3053, 3060), True, 'import tensorflow as tf\n'), ((3138, 3163), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (3151, 3163), True, 'import tensorflow as tf\n'), ((3182, 3225), 'tensorflow.argmax', 'tf.argmax', (['layer2_out', '(1)'], {'name': '"""prediction"""'}), "(layer2_out, 1, name='prediction')\n", (3191, 3225), True, 'import tensorflow as tf\n'), ((3251, 3278), 'tensorflow.equal', 'tf.equal', (['prediction', 'label'], {}), '(prediction, label)\n', (3259, 3278), True, 'import tensorflow as tf\n'), ((3356, 3380), 'tensorflow.name_scope', 'tf.name_scope', (['"""summary"""'], {}), "('summary')\n", (3369, 3380), True, 'import tensorflow as tf\n'), ((3386, 3416), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""b1"""', 'b1'], {}), "('b1', b1)\n", (3406, 3416), True, 'import tensorflow as tf\n'), ((3421, 3451), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""w1"""', 'w1'], {}), "('w1', w1)\n", (3441, 3451), True, 'import tensorflow as tf\n'), ((3456, 3486), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""w2"""', 'w2'], {}), "('w2', w2)\n", (3476, 3486), True, 'import tensorflow as tf\n'), ((3491, 3521), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""b2"""', 'b2'], {}), "('b2', b2)\n", (3511, 3521), True, 'import tensorflow as tf\n'), ((3526, 3565), 
'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (3543, 3565), True, 'import tensorflow as tf\n'), ((3683, 3705), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3703, 3705), True, 'import tensorflow as tf\n'), ((3791, 3803), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3801, 3803), True, 'import tensorflow as tf\n'), ((647, 679), 'numpy.random.permutation', 'np.random.permutation', (['self.size'], {}), '(self.size)\n', (668, 679), True, 'import numpy as np\n'), ((1208, 1227), 'io.open', 'io.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (1215, 1227), False, 'import io\n'), ((1588, 1607), 'io.open', 'io.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (1595, 1607), False, 'import io\n'), ((2356, 2414), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[input_size, hidden_dim]'], {'stddev': '(0.01)'}), '([input_size, hidden_dim], stddev=0.01)\n', (2375, 2414), True, 'import tensorflow as tf\n'), ((2448, 2470), 'tensorflow.zeros', 'tf.zeros', (['[hidden_dim]'], {}), '([hidden_dim])\n', (2456, 2470), True, 'import tensorflow as tf\n'), ((2593, 2652), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[hidden_dim, output_size]'], {'stddev': '(0.01)'}), '([hidden_dim, output_size], stddev=0.01)\n', (2612, 2652), True, 'import tensorflow as tf\n'), ((2686, 2709), 'tensorflow.zeros', 'tf.zeros', (['[output_size]'], {}), '([output_size])\n', (2694, 2709), True, 'import tensorflow as tf\n'), ((2739, 2764), 'tensorflow.matmul', 'tf.matmul', (['layer1_out', 'w2'], {}), '(layer1_out, w2)\n', (2748, 2764), True, 'import tensorflow as tf\n'), ((3309, 3348), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (3316, 3348), True, 'import tensorflow as tf\n'), ((3826, 3859), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3857, 3859), True, 'import tensorflow as tf\n'), ((872, 904), 'numpy.random.permutation', 'np.random.permutation', (['self.size'], {}), '(self.size)\n', (893, 904), True, 'import numpy as np\n'), ((2511, 2527), 'tensorflow.matmul', 'tf.matmul', (['x', 'w1'], {}), '(x, w1)\n', (2520, 2527), True, 'import tensorflow as tf\n'), ((3079, 3108), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.001)'], {}), '(0.001)\n', (3101, 3108), True, 'import tensorflow as tf\n'), ((1796, 1824), 'numpy.frombuffer', 'np.frombuffer', (['raw', 'np.uint8'], {}), '(raw, np.uint8)\n', (1809, 1824), True, 'import numpy as np\n'), ((4788, 4829), 'tensorflow.saved_model.utils.build_tensor_info', 'tf.saved_model.utils.build_tensor_info', (['x'], {}), '(x)\n', (4826, 4829), True, 'import tensorflow as tf\n'), ((4881, 4931), 'tensorflow.saved_model.utils.build_tensor_info', 'tf.saved_model.utils.build_tensor_info', (['prediction'], {}), '(prediction)\n', (4919, 4931), True, 'import tensorflow as tf\n'), ((1431, 1459), 'numpy.frombuffer', 'np.frombuffer', (['raw', 'np.uint8'], {}), '(raw, np.uint8)\n', (1444, 1459), True, 'import numpy as np\n'), ((1322, 1341), 'numpy.dtype', 'np.dtype', (['np.uint32'], {}), '(np.uint32)\n', (1330, 1341), True, 'import numpy as np\n'), ((1701, 1720), 'numpy.dtype', 'np.dtype', (['np.uint32'], {}), '(np.uint32)\n', (1709, 1720), True, 'import numpy as np\n')]
|
import json
import tempfile
from collections import OrderedDict
import os
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from utils import BoxList
from utils.pycocotools_rotation import Rotation_COCOeval
def evaluate(dataset, predictions, result_file, score_threshold=None, epoch=0):
coco_results = {}
coco_results['bbox'] = make_coco_detection(predictions, dataset, score_threshold)
results = COCOResult('bbox')
path = os.path.join(result_file, str(epoch)+'_result.json')
res = evaluate_predictions_on_coco(dataset.coco, coco_results['bbox'], path, 'bbox')
results.update(res)
# with tempfile.NamedTemporaryFile() as f:
# path = f.name
# res = evaluate_predictions_on_coco(
# dataset.coco, coco_results['bbox'], path, 'bbox'
# )
# results.update(res)
print(results)
return results.results
def evaluate_predictions_on_coco(coco_gt, results, result_file, iou_type):
with open(result_file, 'w') as f:
json.dump(results, f)
coco_dt = coco_gt.loadRes(str(result_file)) if results else COCO()
coco_eval = Rotation_COCOeval(coco_gt, coco_dt, iou_type)
coco_eval.params.iouThrs = np.linspace(.25, 0.95, int(np.round((0.95 - .25) / .05)) + 1, endpoint=True)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
score_threshold = compute_thresholds_for_classes(coco_eval)
return coco_eval
def compute_thresholds_for_classes(coco_eval):
precision = coco_eval.eval['precision']
precision = precision[0, :, :, 0, -1]
scores = coco_eval.eval['scores']
scores = scores[0, :, :, 0, -1]
recall = np.linspace(0, 1, num=precision.shape[0])
recall = recall[:, None]
f1 = (2 * precision * recall) / (np.maximum(precision + recall, 1e-6))
max_f1 = f1.max(0)
max_f1_id = f1.argmax(0)
scores = scores[max_f1_id, range(len(max_f1_id))]
print('Maximum f1 for classes:')
print(list(max_f1))
print('Score thresholds for classes')
print(list(scores))
print('')
return scores
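# Illustrative note (added for clarity, not part of the original extraction): each
# threshold returned above is the detection score at which the per-class F1,
#   F1 = 2 * precision * recall / (precision + recall),
# is maximal over the COCO recall grid. For instance, a recall point of 0.6 with
# precision 0.8 reached at score 0.35 has F1 = 2*0.8*0.6/1.4 ~= 0.686; the score of
# whichever recall point maximises F1 becomes that class's suggested threshold.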
def make_coco_detection(predictions, dataset, score_threshold=None):
coco_results = []
for id, pred in enumerate(predictions):
orig_id = dataset.id2img[id]
if len(pred) == 0:
continue
img_meta = dataset.get_image_meta(id)
pred_resize = map_to_origin_image(img_meta, pred, flipmode='no', resize_mode='letterbox')
boxes = pred_resize.bbox.tolist()
scores = pred_resize.get_field('scores').tolist()
labels = pred_resize.get_field('labels').tolist()
labels = [dataset.id2category[i] for i in labels]
if score_threshold is None:
score_threshold = [0]*len(dataset.id2category)
coco_results.extend(
[
{
'image_id': orig_id,
'category_id': labels[k],
'bbox': box,
'score': scores[k],
}
for k, box in enumerate(boxes)
if scores[k] > score_threshold[labels[k] - 1]
]
)
return coco_results
class COCOResult:
METRICS = {
'bbox': ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl'],
'segm': ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl'],
'box_proposal': [
'AR@100',
'ARs@100',
'ARm@100',
'ARl@100',
'AR@1000',
'ARs@1000',
'ARm@1000',
'ARl@1000',
],
'keypoints': ['AP', 'AP50', 'AP75', 'APm', 'APl'],
}
def __init__(self, *iou_types):
allowed_types = ("box_proposal", "bbox", "segm", "keypoints")
assert all(iou_type in allowed_types for iou_type in iou_types)
results = OrderedDict()
for iou_type in iou_types:
results[iou_type] = OrderedDict(
[(metric, -1) for metric in COCOResult.METRICS[iou_type]]
)
self.results = results
def update(self, coco_eval):
if coco_eval is None:
return
assert isinstance(coco_eval, COCOeval)
s = coco_eval.stats
iou_type = coco_eval.params.iouType
res = self.results[iou_type]
metrics = COCOResult.METRICS[iou_type]
for idx, metric in enumerate(metrics):
res[metric] = s[idx]
def __repr__(self):
return repr(self.results)
def map_to_origin_image(img_meta, pred, flipmode='no', resize_mode='letterbox'):
'''
img_meta: "id": int, "width": int, "height": int,"file_name": str,
pred: boxlist object
flipmode:'h':Horizontal flip,'v':vertical flip 'no': no flip
resize_mode: 'letterbox' , 'wrap'
'''
assert pred.mode == 'xyxyxyxy'
if flipmode == 'h':
pred = pred.transpose(0)
elif flipmode == 'v':
pred = pred.transpose(1)
elif flipmode == 'no':
pass
else:
raise Exception("unspported flip mode, 'h', 'v' or 'no' ")
width = img_meta['width']
height = img_meta['height']
resized_width, resized_height = pred.size
if resize_mode == 'letterbox':
if width > height:
scale = resized_width / width
size = (resized_width, int(scale * height))
else:
scale = resized_height / height
size = (int(width * scale), resized_height)
pred_resize = BoxList(pred.bbox, size, mode='xyxyxyxy')
pred_resize._copy_extra_fields(pred)
pred_resize = pred_resize.clip_to_image(remove_empty=True)
pred_resize = pred_resize.resize((width, height))
pred_resize = pred_resize.clip_to_image(remove_empty=True)
#pred_resize = pred_resize.convert('xywh')
elif resize_mode == 'wrap':
pred_resize = pred.resize((width, height))
pred_resize = pred_resize.convert('xyxyxyxy')
pred_resize = pred_resize.clip_to_image(remove_empty=True)
else:
raise Exception("unspported reisze mode, either 'letterbox' or 'wrap' ")
return pred_resize
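# Worked example of the 'letterbox' branch (hypothetical sizes, illustration only):
# predictions made on a 640x640 letterboxed input for an original 1280x720 image have
# width > height, so scale = 640 / 1280 = 0.5 and the un-padded content occupies
# (640, int(0.5 * 720)) = (640, 360); boxes are clipped to that region, resized back
# to the original (1280, 720), and clipped once more before being returned.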
|
[
"collections.OrderedDict",
"numpy.round",
"utils.BoxList",
"pycocotools.coco.COCO",
"numpy.linspace",
"numpy.maximum",
"json.dump"
] |
[((1758, 1799), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'precision.shape[0]'}), '(0, 1, num=precision.shape[0])\n', (1769, 1799), True, 'import numpy as np\n'), ((1082, 1103), 'json.dump', 'json.dump', (['results', 'f'], {}), '(results, f)\n', (1091, 1103), False, 'import json\n'), ((1171, 1177), 'pycocotools.coco.COCO', 'COCO', ([], {}), '()\n', (1175, 1177), False, 'from pycocotools.coco import COCO\n'), ((1870, 1907), 'numpy.maximum', 'np.maximum', (['(precision + recall)', '(1e-06)'], {}), '(precision + recall, 1e-06)\n', (1880, 1907), True, 'import numpy as np\n'), ((3970, 3983), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3981, 3983), False, 'from collections import OrderedDict\n'), ((5638, 5679), 'utils.BoxList', 'BoxList', (['pred.bbox', 'size'], {'mode': '"""xyxyxyxy"""'}), "(pred.bbox, size, mode='xyxyxyxy')\n", (5645, 5679), False, 'from utils import BoxList\n'), ((4053, 4123), 'collections.OrderedDict', 'OrderedDict', (['[(metric, -1) for metric in COCOResult.METRICS[iou_type]]'], {}), '([(metric, -1) for metric in COCOResult.METRICS[iou_type]])\n', (4064, 4123), False, 'from collections import OrderedDict\n'), ((1302, 1332), 'numpy.round', 'np.round', (['((0.95 - 0.25) / 0.05)'], {}), '((0.95 - 0.25) / 0.05)\n', (1310, 1332), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import os
import linecache
import numpy as np
from collections import OrderedDict
from CP2K_kit.tools import call
from CP2K_kit.tools import data_op
from CP2K_kit.tools import file_tools
from CP2K_kit.tools import log_info
from CP2K_kit.tools import read_input
from CP2K_kit.tools import traj_info
from CP2K_kit.deepff import load_data
from CP2K_kit.deepff import gen_lammps_task
def get_sys_num(exe_dir):
'''
get_sys_num: get the number of systems
Args:
exe_dir: string
      exe_dir is the directory where the shell script will be executed.
Returns:
sys_num: int
sys_num is the number of systems.
'''
cmd = "ls | grep %s" % ('sys_')
sys_num = len(call.call_returns_shell(exe_dir, cmd))
return sys_num
def get_data_num(exe_dir):
'''
get_data_num: get the number of data
Args:
exe_dir: string
      exe_dir is the directory where the shell script will be executed.
Returns:
data_num: int
data_num is the number of data.
'''
cmd = "ls | grep %s" % ('data_')
data_num = len(call.call_returns_shell(exe_dir, cmd))
return data_num
def get_task_num(exe_dir, get_task_dir=False):
'''
get_task_num: get the number of tasks in a system
Args:
exe_dir: string
      exe_dir is the directory where the shell script will be executed.
Returns:
task_num: int
task_num is the number of tasks in a system.
'''
cmd = "ls | grep %s" % ('task_')
task_dir = call.call_returns_shell(exe_dir, cmd)
task_num = len(task_dir)
if get_task_dir:
return task_num, task_dir
else:
return task_num
def get_lmp_model_num(exe_dir):
'''
get_lmp_model_num: get the number of models in lammps directory.
Args:
exe_dir: string
      exe_dir is the directory where the shell script will be executed.
Returns:
model_num: int
model_num is the number of models in lammps directory.
'''
cmd = "ls | grep %s" % ("'model_[0-9]'")
model_num = len(call.call_returns_shell(exe_dir, cmd))
return model_num
def get_deepmd_model_num(exe_dir):
'''
get_deepmd_model_num: get the number of models in deepmd directory.
Args:
exe_dir: string
      exe_dir is the directory where the shell script will be executed.
Returns:
model_num: int
model_num is the number of models in deepmd directory.
'''
model_num = len(call.call_returns_shell(exe_dir, "ls -ll |awk '/^d/ {print $NF}'"))
return model_num
def get_traj_num(exe_dir):
'''
get_traj_num: get the number of frames
Args:
exe_dir: string
      exe_dir is the directory where the shell script will be executed.
Returns:
traj_num: int
traj_num is the number of frames.
'''
cmd = "ls | grep %s" % ('traj_')
traj_num = len(call.call_returns_shell(exe_dir, cmd))
return traj_num
def dump_input(work_dir, inp_file, f_key):
'''
dump_input: dump deepff input file, it will call read_input module.
Args:
work_dir: string
work_dir is the working directory of CP2K_kit.
inp_file: string
inp_file is the deepff input file
f_key: 1-d string list
f_key is fixed to: ['deepmd', 'lammps', 'cp2k', 'model_devi', 'environ']
Returns :
deepmd_dic: dictionary
deepmd_dic contains keywords used in deepmd.
lammps_dic: dictionary
      lammps_dic contains keywords used in lammps.
cp2k_dic: dictionary
cp2k_dic contains keywords used in cp2k.
model_devi_dic: dictionary
model_devi contains keywords used in model_devi.
environ_dic: dictionary
environ_dic contains keywords used in environment.
'''
job_type_param = read_input.dump_info(work_dir, inp_file, f_key)
deepmd_dic = job_type_param[0]
lammps_dic = job_type_param[1]
cp2k_dic = job_type_param[2]
active_learn_dic = job_type_param[3]
environ_dic = job_type_param[4]
return deepmd_dic, lammps_dic, cp2k_dic, active_learn_dic, environ_dic
def get_atoms_type(deepmd_dic):
'''
get_atoms_type: get atoms type for total systems
Args:
deepmd_dic: dictionary
deepmd_dic contains keywords used in deepmd.
Returns:
final_atoms_type: list
final_atoms_type is the atoms type for all systems.
Example: ['O', 'H']
'''
import linecache
atoms_type = []
train_dic = deepmd_dic['training']
for key in train_dic:
if ( 'system' in key ):
traj_coord_file = train_dic[key]['traj_coord_file']
line_num = file_tools.grep_line_num("'PDB file'", traj_coord_file, os.getcwd())
if ( line_num == 0 ):
coord_file_type = 'coord_xyz'
else:
coord_file_type = 'coord_pdb'
atoms_num, pre_base_block, end_base_block, pre_base, frames_num, each, start_id, end_id, time_step = \
traj_info.get_traj_info(traj_coord_file, coord_file_type)
atoms = []
for i in range(atoms_num):
line_i = linecache.getline(traj_coord_file, pre_base+pre_base_block+i+1)
line_i_split = data_op.split_str(line_i, ' ', '\n')
if ( coord_file_type == 'coord_xyz' ):
atoms.append(line_i_split[0])
elif ( coord_file_type == 'coord_pdb' ):
atoms.append(line_i_split[len(line_i_split)-1])
linecache.clearcache()
atoms_type.append(data_op.list_replicate(atoms))
tot_atoms_type = data_op.list_reshape(atoms_type)
final_atoms_type = data_op.list_replicate(tot_atoms_type)
return final_atoms_type
def dump_init_data(work_dir, deepmd_dic, train_stress, tot_atoms_type_dic):
'''
dump_init_data: load initial training data.
Args:
work_dir: string
      work_dir is the working directory of CP2K_kit.
deepmd_dic: dictionary
deepmd_dic contains keywords used in deepmd.
train_stress: bool
train_stress is whether we need to dump stress.
tot_atoms_type_dic: dictionary
tot_atoms_type_dic is the atoms type dictionary.
Returns:
init_train_data: 1-d string list
init_train_data contains initial training data directories.
init_data_num : int
init_data_num is the number of data for initial training.
'''
init_train_data_dir = ''.join((work_dir, '/init_train_data'))
if ( not os.path.exists(init_train_data_dir) ):
cmd = "mkdir %s" % ('init_train_data')
call.call_simple_shell(work_dir, cmd)
i = 0
init_train_data = []
init_data_num = 0
train_dic = deepmd_dic['training']
shuffle_data = train_dic['shuffle_data']
for key in train_dic:
if ( 'system' in key):
save_dir = ''.join((work_dir, '/init_train_data/data_', str(i)))
if ( not os.path.exists(save_dir) ):
cmd = "mkdir %s" % (save_dir)
call.call_simple_shell(work_dir, cmd)
init_train_data.append(save_dir)
cmd = "ls | grep %s" %("'set.'")
set_dir_name = call.call_returns_shell(save_dir, cmd)
choosed_num = train_dic[key]['choosed_frame_num']
data_num = []
if ( len(set_dir_name) > 0 ):
for set_dir in set_dir_name:
data_num_part = []
set_dir_abs = ''.join((save_dir, '/', set_dir))
coord_npy_file = ''.join((set_dir_abs, '/coord.npy'))
force_npy_file = ''.join((set_dir_abs, '/force.npy'))
box_npy_file = ''.join((set_dir_abs, '/box.npy'))
energy_npy_file = ''.join((set_dir_abs, '/energy.npy'))
if ( all(os.path.exists(npy_file) for npy_file in [coord_npy_file, force_npy_file, box_npy_file, energy_npy_file]) ):
for npy_file in [coord_npy_file, force_npy_file, box_npy_file, energy_npy_file]:
data_num_part.append(len(np.load(npy_file)))
else:
data_num_part = [0,0,0,0]
virial_npy_file = ''.join((set_dir_abs, '/virial.npy'))
if ( os.path.exists(virial_npy_file) ):
data_num_part.append(len(np.load(virial_npy_file)))
data_num.append(data_num_part)
else:
data_num = [[0,0,0,0]]
data_num = data_op.add_2d_list(data_num)
if ( all(j == choosed_num for j in data_num) ):
if ( len(set_dir_name) == 1 ):
init_data_num_part = choosed_num
else:
final_set_dir_abs = ''.join((save_dir, '/', set_dir_name[len(set_dir_name)-1]))
final_energy_npy_file = ''.join((final_set_dir_abs, '/energy.npy'))
init_data_num_part = choosed_num-len(np.load(final_energy_npy_file))
else:
traj_type = train_dic[key]['traj_type']
start = train_dic[key]['start_frame']
end = train_dic[key]['end_frame']
parts = train_dic[key]['set_parts']
if ( traj_type == 'md' ):
traj_coord_file = train_dic[key]['traj_coord_file']
traj_frc_file = train_dic[key]['traj_frc_file']
traj_cell_file = train_dic[key]['traj_cell_file']
traj_stress_file = train_dic[key]['traj_stress_file']
load_data.load_data_from_dir(traj_coord_file, traj_frc_file, traj_cell_file, traj_stress_file, \
train_stress, work_dir, save_dir, start, end, choosed_num, tot_atoms_type_dic)
elif ( traj_type == 'mtd' ):
data_dir = train_dic[key]['data_dir']
task_dir_prefix = train_dic[key]['task_dir_prefix']
proj_name = train_dic[key]['proj_name']
out_file_name = train_dic[key]['out_file_name']
choosed_index = data_op.gen_list(start, end, 1)
choosed_index_array = np.array(choosed_index)
np.random.shuffle(choosed_index_array)
choosed_index = list(choosed_index_array[0:choosed_num])
load_data.load_data_from_sepfile(data_dir, save_dir, task_dir_prefix, proj_name, tot_atoms_type_dic, \
sorted(choosed_index), out_file_name)
energy_array, coord_array, frc_array, box_array, virial_array = load_data.read_raw_data(save_dir)
init_data_num_part, init_test_data_num_part = load_data.raw_data_to_set(parts, shuffle_data, save_dir, energy_array, \
coord_array, frc_array, box_array, virial_array)
init_data_num = init_data_num+init_data_num_part
i = i+1
if ( 'set_data_dir' in train_dic.keys() ):
init_train_data.append(os.path.abspath(train_dic['set_data_dir']))
energy_npy_file = ''.join((os.path.abspath(train_dic['set_data_dir']), '/set.000/energy.npy'))
set_data_num = len(np.load(energy_npy_file))
init_data_num = init_data_num+set_data_num
return init_train_data, init_data_num
def check_deepff_run(work_dir, iter_id):
'''
check_deepff_run: check the running state of deepff
Args:
work_dir: string
      work_dir is the working directory.
iter_id: int
iter_id is current iteration number.
Returns:
failure_model: 1-d int list
failure_model is the id of failure models.
'''
train_dir = ''.join((work_dir, '/iter_', str(iter_id), '/01.train'))
model_num = get_deepmd_model_num(train_dir)
failure_model = []
for i in range(model_num):
model_dir = ''.join((train_dir, '/', str(i)))
lcurve_file = ''.join((model_dir, '/lcurve.out'))
whole_line_num = len(open(lcurve_file).readlines())
choosed_line_num = int(0.1*whole_line_num)
start_line = whole_line_num-choosed_line_num
force_trn = []
for j in range(choosed_line_num):
line = linecache.getline(lcurve_file, start_line+j+1)
line_split = data_op.split_str(line, ' ')
if ( data_op.eval_str(line_split[0]) == 1 and len(line_split) >= 8 ):
force_trn.append(float(line_split[6]))
linecache.clearcache()
force_max = max(force_trn)
force_min = min(force_trn)
force_avg = np.mean(np.array(force_trn))
if ( ((force_max-force_min) >= 0.04 and force_max >= 0.08) or force_avg >= 0.08 ):
failure_model.append(i)
return failure_model
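# Worked example of the rejection rule above (hypothetical numbers): if the force
# errors in the last 10% of lcurve.out have min = 0.04 and max = 0.09, the spread
# (0.05) is >= 0.04 and the maximum is >= 0.08, so that model is flagged as failed;
# a model is likewise flagged whenever its average force error alone reaches 0.08.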
def get_md_sys_info(lmp_dic, tot_atoms_type_dic):
'''
get_md_sys_info: get the system information for lammps md.
Args:
lmp_dic: dictionary
lmp_dic contains parameters for lammps.
tot_atoms_type_dic: dictionary
tot_atoms_type_dic is the atoms type dictionary.
Returns:
sys_num: int
sys_num is the number of systems.
atoms_type_multi_sys: 2-d dictionary, dim = (num of lammps systems) * (num of atom types)
atoms_type_multi_sys is the atoms type for multi-systems.
example: {0:{'O':1,'H':2,'N':3},1:{'O':1,'S':2,'N':3}}
atoms_num_tot: dictionary
atoms_num_tot contains number of atoms for different systems.
Example: {0:3, 1:3}
use_mtd_tot: bool
      use_mtd_tot is whether metadynamics is used for the whole set of systems.
'''
atoms_type_multi_sys = []
atoms_num_tot = []
use_mtd_tot = []
sys_num = 0
for key in lmp_dic:
if 'system' in key:
sys_num = sys_num + 1
for i in range(sys_num):
sys = 'system' + str(i)
box_file_name = lmp_dic[sys]['box']
coord_file_name = lmp_dic[sys]['coord']
use_mtd = lmp_dic[sys]['use_mtd']
tri_cell_vec, atoms, x, y, z = gen_lammps_task.get_box_coord(box_file_name, coord_file_name)
atoms_type = data_op.list_replicate(atoms)
atoms_type_dic = OrderedDict()
for j in atoms_type:
if j in tot_atoms_type_dic.keys():
atoms_type_dic[j] = tot_atoms_type_dic[j]+1
else:
log_info.log_error('Input error: %s atom type in system %d is not trained, please check deepff/lammps/system' %(j, i))
exit()
atoms_type_multi_sys.append(atoms_type_dic)
atoms_num_tot.append(len(atoms))
use_mtd_tot.append(use_mtd)
return sys_num, atoms_type_multi_sys, atoms_num_tot, use_mtd_tot
|
[
"CP2K_kit.deepff.gen_lammps_task.get_box_coord",
"CP2K_kit.tools.data_op.list_replicate",
"numpy.array",
"CP2K_kit.tools.data_op.gen_list",
"CP2K_kit.tools.data_op.add_2d_list",
"os.path.exists",
"CP2K_kit.deepff.load_data.load_data_from_dir",
"CP2K_kit.tools.data_op.eval_str",
"CP2K_kit.tools.data_op.split_str",
"linecache.getline",
"CP2K_kit.tools.read_input.dump_info",
"collections.OrderedDict",
"CP2K_kit.tools.traj_info.get_traj_info",
"CP2K_kit.tools.data_op.list_reshape",
"CP2K_kit.deepff.load_data.read_raw_data",
"linecache.clearcache",
"CP2K_kit.deepff.load_data.raw_data_to_set",
"os.getcwd",
"CP2K_kit.tools.call.call_returns_shell",
"CP2K_kit.tools.call.call_simple_shell",
"os.path.abspath",
"numpy.load",
"numpy.random.shuffle"
] |
[((1413, 1450), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (1436, 1450), False, 'from CP2K_kit.tools import call\n'), ((3556, 3603), 'CP2K_kit.tools.read_input.dump_info', 'read_input.dump_info', (['work_dir', 'inp_file', 'f_key'], {}), '(work_dir, inp_file, f_key)\n', (3576, 3603), False, 'from CP2K_kit.tools import read_input\n'), ((5204, 5236), 'CP2K_kit.tools.data_op.list_reshape', 'data_op.list_reshape', (['atoms_type'], {}), '(atoms_type)\n', (5224, 5236), False, 'from CP2K_kit.tools import data_op\n'), ((5258, 5296), 'CP2K_kit.tools.data_op.list_replicate', 'data_op.list_replicate', (['tot_atoms_type'], {}), '(tot_atoms_type)\n', (5280, 5296), False, 'from CP2K_kit.tools import data_op\n'), ((664, 701), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (687, 701), False, 'from CP2K_kit.tools import call\n'), ((1017, 1054), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (1040, 1054), False, 'from CP2K_kit.tools import call\n'), ((1918, 1955), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (1941, 1955), False, 'from CP2K_kit.tools import call\n'), ((2302, 2368), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', '"""ls -ll |awk \'/^d/ {print $NF}\'"""'], {}), '(exe_dir, "ls -ll |awk \'/^d/ {print $NF}\'")\n', (2325, 2368), False, 'from CP2K_kit.tools import call\n'), ((2690, 2727), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['exe_dir', 'cmd'], {}), '(exe_dir, cmd)\n', (2713, 2727), False, 'from CP2K_kit.tools import call\n'), ((6062, 6097), 'os.path.exists', 'os.path.exists', (['init_train_data_dir'], {}), '(init_train_data_dir)\n', (6076, 6097), False, 'import os\n'), ((6148, 6185), 'CP2K_kit.tools.call.call_simple_shell', 'call.call_simple_shell', (['work_dir', 'cmd'], {}), '(work_dir, cmd)\n', (6170, 6185), False, 'from CP2K_kit.tools import call\n'), ((11439, 11461), 'linecache.clearcache', 'linecache.clearcache', ([], {}), '()\n', (11459, 11461), False, 'import linecache\n'), ((12874, 12935), 'CP2K_kit.deepff.gen_lammps_task.get_box_coord', 'gen_lammps_task.get_box_coord', (['box_file_name', 'coord_file_name'], {}), '(box_file_name, coord_file_name)\n', (12903, 12935), False, 'from CP2K_kit.deepff import gen_lammps_task\n'), ((12953, 12982), 'CP2K_kit.tools.data_op.list_replicate', 'data_op.list_replicate', (['atoms'], {}), '(atoms)\n', (12975, 12982), False, 'from CP2K_kit.tools import data_op\n'), ((13004, 13017), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13015, 13017), False, 'from collections import OrderedDict\n'), ((4657, 4714), 'CP2K_kit.tools.traj_info.get_traj_info', 'traj_info.get_traj_info', (['traj_coord_file', 'coord_file_type'], {}), '(traj_coord_file, coord_file_type)\n', (4680, 4714), False, 'from CP2K_kit.tools import traj_info\n'), ((5106, 5128), 'linecache.clearcache', 'linecache.clearcache', ([], {}), '()\n', (5126, 5128), False, 'import linecache\n'), ((6666, 6704), 'CP2K_kit.tools.call.call_returns_shell', 'call.call_returns_shell', (['save_dir', 'cmd'], {}), '(save_dir, cmd)\n', (6689, 6704), False, 'from CP2K_kit.tools import call\n'), ((7810, 7839), 'CP2K_kit.tools.data_op.add_2d_list', 'data_op.add_2d_list', (['data_num'], {}), '(data_num)\n', (7829, 7839), False, 'from CP2K_kit.tools import data_op\n'), 
((10115, 10157), 'os.path.abspath', 'os.path.abspath', (["train_dic['set_data_dir']"], {}), "(train_dic['set_data_dir'])\n", (10130, 10157), False, 'import os\n'), ((10281, 10305), 'numpy.load', 'np.load', (['energy_npy_file'], {}), '(energy_npy_file)\n', (10288, 10305), True, 'import numpy as np\n'), ((11217, 11267), 'linecache.getline', 'linecache.getline', (['lcurve_file', '(start_line + j + 1)'], {}), '(lcurve_file, start_line + j + 1)\n', (11234, 11267), False, 'import linecache\n'), ((11283, 11311), 'CP2K_kit.tools.data_op.split_str', 'data_op.split_str', (['line', '""" """'], {}), "(line, ' ')\n", (11300, 11311), False, 'from CP2K_kit.tools import data_op\n'), ((11548, 11567), 'numpy.array', 'np.array', (['force_trn'], {}), '(force_trn)\n', (11556, 11567), True, 'import numpy as np\n'), ((4413, 4424), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4422, 4424), False, 'import os\n'), ((4782, 4851), 'linecache.getline', 'linecache.getline', (['traj_coord_file', '(pre_base + pre_base_block + i + 1)'], {}), '(traj_coord_file, pre_base + pre_base_block + i + 1)\n', (4799, 4851), False, 'import linecache\n'), ((4869, 4905), 'CP2K_kit.tools.data_op.split_str', 'data_op.split_str', (['line_i', '""" """', '"""\n"""'], {}), "(line_i, ' ', '\\n')\n", (4886, 4905), False, 'from CP2K_kit.tools import data_op\n'), ((5153, 5182), 'CP2K_kit.tools.data_op.list_replicate', 'data_op.list_replicate', (['atoms'], {}), '(atoms)\n', (5175, 5182), False, 'from CP2K_kit.tools import data_op\n'), ((6455, 6479), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (6469, 6479), False, 'import os\n'), ((6529, 6566), 'CP2K_kit.tools.call.call_simple_shell', 'call.call_simple_shell', (['work_dir', 'cmd'], {}), '(work_dir, cmd)\n', (6551, 6566), False, 'from CP2K_kit.tools import call\n'), ((9683, 9716), 'CP2K_kit.deepff.load_data.read_raw_data', 'load_data.read_raw_data', (['save_dir'], {}), '(save_dir)\n', (9706, 9716), False, 'from CP2K_kit.deepff import load_data\n'), ((9771, 9894), 'CP2K_kit.deepff.load_data.raw_data_to_set', 'load_data.raw_data_to_set', (['parts', 'shuffle_data', 'save_dir', 'energy_array', 'coord_array', 'frc_array', 'box_array', 'virial_array'], {}), '(parts, shuffle_data, save_dir, energy_array,\n coord_array, frc_array, box_array, virial_array)\n', (9796, 9894), False, 'from CP2K_kit.deepff import load_data\n'), ((10190, 10232), 'os.path.abspath', 'os.path.abspath', (["train_dic['set_data_dir']"], {}), "(train_dic['set_data_dir'])\n", (10205, 10232), False, 'import os\n'), ((7610, 7641), 'os.path.exists', 'os.path.exists', (['virial_npy_file'], {}), '(virial_npy_file)\n', (7624, 7641), False, 'import os\n'), ((8717, 8898), 'CP2K_kit.deepff.load_data.load_data_from_dir', 'load_data.load_data_from_dir', (['traj_coord_file', 'traj_frc_file', 'traj_cell_file', 'traj_stress_file', 'train_stress', 'work_dir', 'save_dir', 'start', 'end', 'choosed_num', 'tot_atoms_type_dic'], {}), '(traj_coord_file, traj_frc_file, traj_cell_file,\n traj_stress_file, train_stress, work_dir, save_dir, start, end,\n choosed_num, tot_atoms_type_dic)\n', (8745, 8898), False, 'from CP2K_kit.deepff import load_data\n'), ((11323, 11354), 'CP2K_kit.tools.data_op.eval_str', 'data_op.eval_str', (['line_split[0]'], {}), '(line_split[0])\n', (11339, 11354), False, 'from CP2K_kit.tools import data_op\n'), ((9213, 9244), 'CP2K_kit.tools.data_op.gen_list', 'data_op.gen_list', (['start', 'end', '(1)'], {}), '(start, end, 1)\n', (9229, 9244), False, 'from CP2K_kit.tools import data_op\n'), ((9277, 9300), 
'numpy.array', 'np.array', (['choosed_index'], {}), '(choosed_index)\n', (9285, 9300), True, 'import numpy as np\n'), ((9311, 9349), 'numpy.random.shuffle', 'np.random.shuffle', (['choosed_index_array'], {}), '(choosed_index_array)\n', (9328, 9349), True, 'import numpy as np\n'), ((7214, 7238), 'os.path.exists', 'os.path.exists', (['npy_file'], {}), '(npy_file)\n', (7228, 7238), False, 'import os\n'), ((8205, 8235), 'numpy.load', 'np.load', (['final_energy_npy_file'], {}), '(final_energy_npy_file)\n', (8212, 8235), True, 'import numpy as np\n'), ((7682, 7706), 'numpy.load', 'np.load', (['virial_npy_file'], {}), '(virial_npy_file)\n', (7689, 7706), True, 'import numpy as np\n'), ((7455, 7472), 'numpy.load', 'np.load', (['npy_file'], {}), '(npy_file)\n', (7462, 7472), True, 'import numpy as np\n')]
|
import time
import numpy as np
import utils.measurement_subs as measurement_subs
import utils.socket_subs as socket_subs
from .do_fridge_sweep import do_fridge_sweep
from .do_device_sweep import do_device_sweep
def device_fridge_2d(
graph_proc, rpg, data_file,
read_inst, sweep_inst=[], set_inst=[],
set_value=[], pre_value=[], finish_value=[],
fridge_sweep="B", fridge_set=0.0,
device_start=0.0, device_stop=1.0, device_step=0.1, device_finish=0.0,
device_mid=[],
fridge_start=0.0, fridge_stop=1.0, fridge_rate=0.1,
delay=0, sample=1,
timeout=-1, wait=0.0,
comment="No comment!", network_dir="Z:\\DATA",
persist=True, x_custom=[]
):
"""2D data acquisition either by sweeping a device parameter
    or by sweeping a fridge parameter
The program decides which of these to do depending on if the
the variable "sweep_inst" is assigned.
i.e. if "sweep_inst" is assigned the device is swept and the
fridge parameter is stepped.
If the device is being swept the variable "fridge_rate" is the size
of successive steps of either T or B.
If the fridge is being swept the first set_inst is stepped by the
"device_step"
For the case of successive B sweeps the fridge will be swept
forwards and backwards
e.g. Vg = -60 V B = -9 --> +9 T
Vg = -50 V B = +9 --> -9 T
etc ...
    Note that in this case the first "set_value" will be overwritten;
    therefore a dummy value, e.g. 0.0, should be given if there
    are additional set_inst.
"""
if sweep_inst:
sweep_device = True
else:
sweep_device = False
if fridge_sweep == "B":
b_sweep = True
else:
b_sweep = False
if not finish_value:
finish_value = list(set_value)
# We step over the x variable and sweep over the y
if sweep_device:
x_vec = np.hstack((np.arange(fridge_start, fridge_stop, fridge_rate), fridge_stop))
y_start = device_start
y_stop = device_stop
y_step = device_step
else:
x_vec = np.hstack((np.arange(device_start, device_stop, device_step), device_stop))
y_start = fridge_start
y_stop = fridge_stop
y_step = fridge_rate
    if x_custom:
x_vec = x_custom
if sweep_device:
y_len = len(measurement_subs.generate_device_sweep(
device_start, device_stop, device_step, mid=device_mid))
else:
        y_len = int(abs(y_start - y_stop) / y_step) + 1  # integer length needed to size z_array
num_of_inst = len(read_inst)
plot_2d_window = [None] * num_of_inst
view_box = [None] * num_of_inst
image_view = [None] * num_of_inst
z_array = [np.zeros((len(x_vec), y_len)) for i in range(num_of_inst)]
if sweep_device:
for i in range(num_of_inst):
plot_2d_window[i] = rpg.QtGui.QMainWindow()
plot_2d_window[i].resize(500, 500)
view_box[i] = rpg.ViewBox(invertY=True)
image_view[i] = rpg.ImageView(view=rpg.PlotItem(viewBox=view_box[i]))
plot_2d_window[i].setCentralWidget(image_view[i])
plot_2d_window[i].setWindowTitle("read_inst %d" % i)
plot_2d_window[i].show()
view_box[i].setAspectLocked(False)
y_scale = y_step
        x_scale = (x_vec[-2] - x_vec[0]) / float(len(x_vec) - 1)
for j in range(num_of_inst):
image_view[j].setImage(z_array[j], scale=(x_scale, y_scale), pos=(x_vec[0], y_start))
for i, v in enumerate(x_vec):
if sweep_device:
# sweep the device and fix T or B
if b_sweep:
data_list = do_device_sweep(
graph_proc, rpg, data_file,
sweep_inst, read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value, b_set=v, persist=False,
sweep_start=device_start, sweep_stop=device_stop, sweep_step=device_step,
sweep_finish=device_finish, sweep_mid=device_mid,
delay=delay, sample=sample, t_set=fridge_set,
timeout=timeout, wait=wait, return_data=True, make_plot=False,
comment=comment, network_dir=network_dir
)
else:
data_list = do_device_sweep(
graph_proc, rpg, data_file,
sweep_inst, read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value, b_set=fridge_set, persist=True,
sweep_start=device_start, sweep_stop=device_stop, sweep_step=device_step,
sweep_mid=device_mid,
delay=delay, sample=sample, t_set=v,
timeout=timeout, wait=wait, return_data=True, make_plot=False,
comment=comment, network_dir=network_dir
)
else:
set_value[0] = v
if i == len(x_vec) - 1:
finish_value[0] = 0.0
else:
finish_value[0] = x_vec[i + 1]
# Fix the device and sweep T or B
if b_sweep:
data_list = do_fridge_sweep(
graph_proc, rpg, data_file,
read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value,
fridge_sweep="B", fridge_set=fridge_set,
sweep_start=fridge_start, sweep_stop=fridge_stop,
sweep_rate=fridge_rate, sweep_finish=fridge_stop,
persist=False,
delay=delay, sample=sample,
timeout=timeout, wait=wait,
return_data=True,
comment=comment, network_dir=network_dir)
tmp_sweep = [fridge_start, fridge_stop]
fridge_start = tmp_sweep[1]
fridge_stop = tmp_sweep[0]
else:
data_list = do_fridge_sweep(
graph_proc, rpg, data_file,
read_inst, set_inst=set_inst, set_value=set_value,
finish_value=finish_value, pre_value=pre_value,
fridge_sweep="T", fridge_set=fridge_set,
sweep_start=fridge_start, sweep_stop=fridge_stop,
sweep_rate=fridge_rate, sweep_finish=fridge_stop,
persist=True,
delay=delay, sample=sample,
timeout=timeout, wait=wait,
return_data=True,
comment=comment, network_dir=network_dir)
if sweep_device:
for j in range(num_of_inst):
z_array[j][i, :] = data_list[j + 1]
image_view[j].setImage(z_array[j], pos=(x_vec[0], y_start), scale=(x_scale, y_scale))
m_client = socket_subs.SockClient('localhost', 18861)
time.sleep(2)
measurement_subs.socket_write(m_client, "SET 0.0 0")
time.sleep(2)
m_client.close()
time.sleep(2)
return
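# Minimal usage sketch (hypothetical instruments and values, illustration only): with
# 'sweep_inst' given, the device is swept at each successive field step; 'lockin' and
# 'gate' below stand for instrument objects created elsewhere.
#
#   device_fridge_2d(graph_proc, rpg, 'data.dat',
#                    read_inst=[lockin], sweep_inst=[gate], set_inst=[],
#                    fridge_sweep="B", fridge_set=0.3,
#                    device_start=-60.0, device_stop=10.0, device_step=0.5,
#                    fridge_start=0.0, fridge_stop=9.0, fridge_rate=0.5)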
|
[
"numpy.arange",
"time.sleep",
"utils.socket_subs.SockClient",
"utils.measurement_subs.generate_device_sweep",
"utils.measurement_subs.socket_write"
] |
[((7004, 7046), 'utils.socket_subs.SockClient', 'socket_subs.SockClient', (['"""localhost"""', '(18861)'], {}), "('localhost', 18861)\n", (7026, 7046), True, 'import utils.socket_subs as socket_subs\n'), ((7051, 7064), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7061, 7064), False, 'import time\n'), ((7069, 7121), 'utils.measurement_subs.socket_write', 'measurement_subs.socket_write', (['m_client', '"""SET 0.0 0"""'], {}), "(m_client, 'SET 0.0 0')\n", (7098, 7121), True, 'import utils.measurement_subs as measurement_subs\n'), ((7126, 7139), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7136, 7139), False, 'import time\n'), ((7166, 7179), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (7176, 7179), False, 'import time\n'), ((1944, 1993), 'numpy.arange', 'np.arange', (['fridge_start', 'fridge_stop', 'fridge_rate'], {}), '(fridge_start, fridge_stop, fridge_rate)\n', (1953, 1993), True, 'import numpy as np\n'), ((2135, 2184), 'numpy.arange', 'np.arange', (['device_start', 'device_stop', 'device_step'], {}), '(device_start, device_stop, device_step)\n', (2144, 2184), True, 'import numpy as np\n'), ((2390, 2488), 'utils.measurement_subs.generate_device_sweep', 'measurement_subs.generate_device_sweep', (['device_start', 'device_stop', 'device_step'], {'mid': 'device_mid'}), '(device_start, device_stop,\n device_step, mid=device_mid)\n', (2428, 2488), True, 'import utils.measurement_subs as measurement_subs\n')]
|
import re
import numpy as np
import sympy as sp
import random as rd
from functools import reduce
NORMAL_VECTOR_ID = 'hyperplane_normal_vector_%s_%i'
NUM_NORMAL_VECS_ID = 'num_normal_vectors_%s'
CHAMBER_ID = 'chamber_%s_%s'
FVECTOR_ID = 'feature_vector_%s'
FVEC_ID_EX = re.compile(r'feature_vector_([\S]*)')
class HyperplaneHasher():
def __init__(self, kvstore, name, normal_vectors=None):
"""'name' is a string used for cribbing names of things to be stored
in the KeyValueStore instance 'kvstore'. 'normal_vectors' is
        either a list of 1-dimensional numpy arrays, all of the same length (the rank),
or else of type None. In the latter case, normal vectors are assumed to
exist in 'kvstore', and are named NORMAL_VECTOR_ID % ('name', i),
where i is an integer."""
self.kvstore = kvstore
self.name = name
if normal_vectors is None:
self.num_normal_vectors = kvstore.get_int(
NUM_NORMAL_VECS_ID % name)
self.normal_vectors = [kvstore.get_vector(NORMAL_VECTOR_ID % (name, i))
for i in range(self.num_normal_vectors)]
else:
self.normal_vectors = normal_vectors
self.num_normal_vectors = len(normal_vectors)
self.rank = len(self.normal_vectors[0])
def _compute_num_chambers(self):
"""Computes the number of chambers defined by the hyperplanes
corresponding to the normal vectors."""
d = self.rank
n = self.num_normal_vectors
raw_cfs = sp.binomial_coefficients_list(n)
cfs = np.array([(-1)**i * raw_cfs[i] for i in range(n + 1)])
powers = np.array([max(entry, 0)
for entry in [d - k for k in range(n + 1)]])
ys = np.array([-1] * len(powers))
return (-1)**d * sum(cfs * (ys**powers))
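    # Worked example (illustration only): the hyperplanes all pass through the
    # origin, so three normal vectors of rank 2 (three distinct lines in the plane)
    # give 6 chambers, and three of rank 3 (e.g. the coordinate planes) give the
    # 8 octants; both values agree with the formula evaluated above.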
@classmethod
def _flip_digit(cls, binary_string, i):
"""Given a string 'binary_string' of length n, each letter of
which is either '0' or '1', and an integer 0 <= i <= n-1, returns
the binary_string in which the i-th letter is flipped."""
for letter in binary_string:
if letter not in ['0', '1']:
raise ValueError(
"""Input string contains characters other than '0' and '1'.""")
if i > len(binary_string) - 1 or i < 0:
raise ValueError(
"""Argument 'i' outside range 0 <= i <= len(binary_string) - 1.""")
else:
flip_dict = {'0': '1', '1': '0'}
letters = [letter for letter in binary_string]
letters[i] = flip_dict[binary_string[i]]
return ''.join(letters)
@classmethod
def _hamming_distance(cls, bstring_1, bstring_2):
"""Given two strings of equal length, composed of only 0s and 1s, computes the
Hamming Distance between them: the number of places at which they differ."""
for pair in zip(bstring_1, bstring_2):
if not set(pair).issubset(set(['0', '1'])):
raise ValueError(
"""Input strings contain characters other than '0' and '1'.""")
if len(bstring_1) != len(bstring_2):
raise ValueError("""Lengths of input strings disagree.""")
else:
total = 0
for i in range(len(bstring_1)):
if bstring_1[i] != bstring_2[i]:
total += 1
return total
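    # Doctest-style illustration (added, not in the original source):
    #   >>> HyperplaneHasher._hamming_distance('0110', '1110')
    #   1
    #   >>> HyperplaneHasher._hamming_distance('0000', '1111')
    #   4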
def _hamming_distance_i(self, chamber_id, i):
"""Given a chamber_id 'chamber_id' and an integer 0 <= i <= self.rank - 1,
returns the alphabetically sorted list of all chamber_ids having Hamming Distance
equal to i from 'chamber_id'."""
for letter in chamber_id:
if letter not in ['0', '1']:
raise ValueError(
"""Input string contains characters other than '0' and '1'.""")
if i < 0 or i > self.num_normal_vectors - 1:
raise ValueError(
"""Argument 'i' outside range 0 <= i <= len(binary_string) - 1.""")
if len(chamber_id) != self.num_normal_vectors:
raise ValueError("""len(chamber_id) != self.num_normal_vectors.""")
else:
result = []
cids = self._all_binary_strings()
for cid in cids:
if self._hamming_distance(chamber_id, cid) == i:
result.append(cid)
return result
def _all_binary_strings(self):
"""Returns a list of all binary strings of length
self.num_normal_vectors."""
n = self.num_normal_vectors
strings = [np.binary_repr(i) for i in range(2**n)]
return ['0' * (n - len(entry)) + entry for entry in strings]
@classmethod
def _random_vectors(cls, num, rank):
"""This class method return a list of length 'num' or
vectors (numpy arrays) of rank 'rank'. Both arguments
are assumed to be positive integers."""
vec_list = [
np.array([rd.random() - 0.5 for i in range(rank)]) for j in range(num)]
return vec_list
def label_chamber(self, chamber_id, label):
"""Appends the string 'label' to the set with key
'chamber_id' in self.kvstore, if such exists. If not, then
a new singleton set {'label'} is created in self.kvstore
with key 'chamber_id'. The method is idempotent."""
full_chamber_id = CHAMBER_ID % (self.name, chamber_id)
full_label_id = FVECTOR_ID % label
self.kvstore.add_to_set(full_chamber_id, full_label_id)
def bulk_label_chamber(self, chamber_ids, labels):
"""The arguments 'chamber_ids' and 'labels' must be lists of strings
of equal length, else ValueError is raised. This method produces the same result
as calling self.label_chamber(ch_id, label) for all pairs (ch_id, label) in
chamber_ids x labels, but may be faster if self.kvstore is an instance of
class DynamoDBAdapter."""
chamber_ids = [CHAMBER_ID %
(self.name, chamber_id) for chamber_id in chamber_ids]
labels = [FVECTOR_ID % label for label in labels]
self.kvstore.bulk_add_to_set(chamber_ids, labels)
def unlabel_chamber(self, chamber_id, label):
"""Removes 'label' from the set corresponding to 'chamber_id'.
Raises KeyError if 'label' is not an element of the
corresponding set."""
full_chamber_id = CHAMBER_ID % (self.name, chamber_id)
full_label_id = FVECTOR_ID % label
self.kvstore.remove_from_set(full_chamber_id, full_label_id)
def chamber_labels(self, chamber_id):
"""Returns the set of labels corresponding
to key chamber_id. Returns empty set if
chamber_id is unknown."""
try:
full_chamber_id = CHAMBER_ID % (self.name, chamber_id)
result = set([FVEC_ID_EX.findall(entry)[0] for entry in self.kvstore.get_set(
full_chamber_id) if len(FVEC_ID_EX.findall(entry)) > 0])
return result
except KeyError:
return set()
def get_chamber_id(self, vector):
"""Returns the chamber_id of the chamber to which
vector belongs. Throws a ValueError if rank(vector) differs
from the ranks of the normal vectors. The binary digits
of the chamber_id for vectors are computed in the order
given by the output of the get_normal_vectors() method."""
if len(vector) != self.rank:
raise ValueError("""len(vector) != self.rank""")
else:
PMZO = {1: 1, -1: 0}
signs = [int(np.sign(np.dot(vector, nvec)))
for nvec in self.normal_vectors]
chamber_id = ''.join([str(PMZO[entry]) for entry in signs])
return chamber_id
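    # Worked example (illustration only): with normal vectors [(1, 0), (0, 1)] the
    # vector (0.3, -2.0) has dot products +0.3 and -2.0, i.e. signs (+, -), giving
    # chamber_id '10'. A vector lying exactly on a hyperplane (zero dot product) is
    # not covered by the sign-to-digit map PMZO above.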
def get_chamber_ids(self):
"""Returns the set of all chamber ids."""
chamber_id_prefix = 'chamber_%s' % self.name
chamber_id_ex = re.compile(r'%s_([\S]*)' % chamber_id_prefix)
chamber_ids = [''.join(chamber_id_ex.findall(entry))
for entry in self.kvstore.get_set_ids()]
return set([entry for entry in chamber_ids if len(entry) > 0])
def adjacent_chamber_ids(self, chamber_id):
"""Returns the set of ids of all chambers directly adjacent
to the chamber corresponding to 'chamber_id'."""
results = set([chamber_id])
for i in range(len(chamber_id)):
results.add(self._flip_digit(chamber_id, i))
results = sorted(results)
return results
def proximal_chamber_ids(self, chamber_id, num_labels):
"""This method returns the smallest list of chamber ids proximal to
the string 'chamber_id', such that the union of the corresponding chambers
contains at least 'num_labels' labels, assumed to be a positive integer.
The list is sorted by ascending distance.
NOTE: A set S of chambers is _proximal_ to a given chamber C if
(i) C is in S, and (ii) D in S implies all chambers nearer to
C than D are also in S. Here, the distance between two chambers
is given by the alphabetical distance of their ids."""
total = 0
pids = []
for i in range(self.num_normal_vectors):
if total >= num_labels:
break
hdi = self._hamming_distance_i(chamber_id, i)
for j in range(len(hdi)):
if total >= num_labels:
break
next_id = hdi[j]
total += len(self.chamber_labels(next_id))
pids.append(next_id)
if total >= num_labels:
break
return pids
def proximal_chamber_labels(self, chamber_id, num_labels):
"""Finds the smallest set of proximal chambers containing
at least 'num_labels' labels, assumed to be a positive integer,
and returns the set of all labels from this."""
pcids = self.proximal_chamber_ids(chamber_id, num_labels)
labels_list = [self.chamber_labels(cid) for cid in pcids]
labels = reduce(lambda x, y: x.union(y), labels_list)
return labels
def get_normal_vectors(self):
"""Returns the list of normal vectors."""
return self.normal_vectors
|
[
"re.compile",
"numpy.binary_repr",
"numpy.dot",
"sympy.binomial_coefficients_list",
"random.random"
] |
[((270, 307), 're.compile', 're.compile', (['"""feature_vector_([\\\\S]*)"""'], {}), "('feature_vector_([\\\\S]*)')\n", (280, 307), False, 'import re\n'), ((1551, 1583), 'sympy.binomial_coefficients_list', 'sp.binomial_coefficients_list', (['n'], {}), '(n)\n', (1580, 1583), True, 'import sympy as sp\n'), ((7996, 8041), 're.compile', 're.compile', (["('%s_([\\\\S]*)' % chamber_id_prefix)"], {}), "('%s_([\\\\S]*)' % chamber_id_prefix)\n", (8006, 8041), False, 'import re\n'), ((4648, 4665), 'numpy.binary_repr', 'np.binary_repr', (['i'], {}), '(i)\n', (4662, 4665), True, 'import numpy as np\n'), ((5031, 5042), 'random.random', 'rd.random', ([], {}), '()\n', (5040, 5042), True, 'import random as rd\n'), ((7658, 7678), 'numpy.dot', 'np.dot', (['vector', 'nvec'], {}), '(vector, nvec)\n', (7664, 7678), True, 'import numpy as np\n')]
|
"""
Set of programs and tools to read the outputs from RH (Han's version)
"""
import os
import sys
import io
import xdrlib
import numpy as np
class Rhout:
"""
Reads outputs from RH.
Currently the reading the following output files is supported:
- input.out
- geometry.out
- atmos.out
- spectrum.out (no Stokes)
- spectrum_XX (no Stokes, from solveray)
- brs.out
- J.dat
- opacity.out (no Stokes)
These output files are NOT supported:
- Atom (atom, collrate, damping, pops, radrate)
- Flux
- metals
- molecule
Parameters
----------
fdir : str, optional
Directory with output files.
verbose : str, optional
If True, will print more details.
Notes
-----
In general, the way to read all the XDR files should be:
Modify read_xdr_file so that it returns only xdata.
Then, on each read_xxx, read the necessary header variables,
rewind (xdata.set_position(0)), then read the variables in order.
    This allows the flexibility of derived datatypes, and appending to a dictionary
    (e.g. as in read_atmosphere for all the elements, etc.). It also allows one to
    read directly into an attribute of the class (with setattr(self, 'aa', <data>))
"""
def __init__(self, fdir='.', verbose=True):
''' Reads all the output data from a RH run.'''
self.verbose = verbose
self.fdir = fdir
self.read_input('{0}/input.out'.format(fdir))
self.read_geometry('{0}/geometry.out'.format(fdir))
self.read_atmosphere('{0}/atmos.out'.format(fdir))
self.read_spectrum('{0}/spectrum.out'.format(fdir))
if os.path.isfile('{0}/spectrum_1.00'.format(fdir)):
self.read_ray('{0}/spectrum_1.00'.format(fdir))
def read_input(self, infile='input.out'):
''' Reads RH input.out file. '''
data = read_xdr_file(infile)
self.input = {}
input_vars = [('magneto_optical', 'i'), ('PRD_angle_dep', 'i'),
('XRD', 'i'), ('start_solution', 'i'),
('stokes_mode', 'i'), ('metallicity', 'd'),
('backgr_pol', 'i'), ('big_endian', 'i')]
for v in input_vars:
self.input[v[0]] = read_xdr_var(data, v[1:])
close_xdr(data, infile, verbose=self.verbose)
def read_geometry(self, infile='geometry.out'):
''' Reads RH geometry.out file. '''
data = read_xdr_file(infile)
self.geometry = {}
geom_type = ['ONE_D_PLANE', 'TWO_D_PLANE',
'SPHERICAL_SYMMETRIC', 'THREE_D_PLANE']
type = read_xdr_var(data, ('i',))
if type not in list(range(4)):
raise ValueError('read_geometry: invalid geometry type {0} in {1}'.
format(type, infile))
nrays = read_xdr_var(data, ('i',))
self.nrays = nrays
self.geometry_type = geom_type[type]
# read some parameters and define structure to be read
if self.geometry_type == 'ONE_D_PLANE':
ndep = read_xdr_var(data, ('i',))
self.ndep = ndep
geom_vars = [('xmu', 'd', (nrays,)), ('wmu', 'd', (nrays,)),
('height', 'd', (ndep,)), ('cmass', 'd', (ndep,)),
('tau500', 'd', (ndep,)), ('vz', 'd', (ndep,))]
elif self.geometry_type == 'TWO_D_PLANE':
nx = read_xdr_var(data, ('i',))
nz = read_xdr_var(data, ('i',))
self.nx = nx
self.nz = nz
geom_vars = [('angleSet', 'i'), ('xmu', 'd', (nrays,)),
('ymu', 'd', (nrays,)), ('wmu', 'd', (nrays,)),
('x', 'd', (nx,)), ('z', 'd', (nz,)),
('vx', 'd', (nx, nz)), ('vz', 'd', (nx, nz))]
elif self.geometry_type == 'THREE_D_PLANE':
nx = read_xdr_var(data, ('i',))
ny = read_xdr_var(data, ('i',))
nz = read_xdr_var(data, ('i',))
self.nx = nx
self.ny = ny
self.nz = nz
geom_vars = [('angleSet', 'i'), ('xmu', 'd', (nrays,)),
('ymu', 'd', (nrays,)), ('wmu', 'd', (nrays,)),
('dx', 'd'), ('dy', 'd'),
('z', 'd', (nz,)), ('vx', 'd', (nx, ny, nz)),
('vy', 'd', (nx, ny, nz)), ('vz', 'd', (nx, ny, nz))]
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
nradius = read_xdr_var(data, ('i',))
ncore = read_xdr_var(data, ('i',))
self.nradius = nradius
self.ncore = ncore
geom_vars = [('radius', 'd'), ('xmu', 'd', (nrays,)),
('wmu', 'd', (nrays,)), ('r', 'd', (nradius,)),
('cmass', 'd', (nradius,)), ('tau500', 'd', (nradius,)),
('vr', 'd', (nradius,))]
# read data
for v in geom_vars:
self.geometry[v[0]] = read_xdr_var(data, v[1:])
close_xdr(data, infile, verbose=self.verbose)
def read_atmosphere(self, infile='atmos.out'):
''' Reads RH atmos.out file '''
if not hasattr(self, 'geometry'):
em = ('read_atmosphere: geometry data not loaded, '
'call read_geometry() first!')
raise ValueError(em)
data = read_xdr_file(infile)
self.atmos = {}
nhydr = read_xdr_var(data, ('i',))
nelem = read_xdr_var(data, ('i',))
self.atmos['nhydr'] = nhydr
self.atmos['nelem'] = nelem
# read some parameters and define structure to be read
if self.geometry_type == 'ONE_D_PLANE':
ndep = self.ndep
atmos_vars = [('moving', 'i'), ('T', 'd', (ndep,)),
('n_elec', 'd', (ndep,)), ('vturb', 'd', (ndep,)),
('nh', 'd', (ndep, nhydr)), ('id', 's')]
elif self.geometry_type == 'TWO_D_PLANE':
nx, nz = self.nx, self.nz
atmos_vars = [('moving', 'i'), ('T', 'd', (nx, nz)),
('n_elec', 'd', (nx, nz)), ('vturb', 'd', (nx, nz)),
('nh', 'd', (nx, nz, nhydr)), ('id', 's')]
elif self.geometry_type == 'THREE_D_PLANE':
nx, ny, nz = self.nx, self.ny, self.nz
atmos_vars = [('moving', 'i'), ('T', 'd', (nx, ny, nz)),
('n_elec', 'd', (nx, ny, nz)
), ('vturb', 'd', (nx, ny, nz)),
('nh', 'd', (nx, ny, nz, nhydr)), ('id', 's')]
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
nradius = self.nradius
atmos_vars = [('moving', 'i'), ('T', 'd', (nradius,)),
('n_elec', 'd', (nradius,)), ('vturb', 'd', (nradius,)),
('nh', 'd', (nradius, nhydr)), ('id', 's')]
# read data
for v in atmos_vars:
self.atmos[v[0]] = read_xdr_var(data, v[1:])
# read elements into nested dictionaries
self.elements = {}
for v in range(nelem):
el = read_xdr_var(data, ('s',)).strip()
weight = read_xdr_var(data, ('d',))
abund = read_xdr_var(data, ('d',))
self.elements[el] = {'weight': weight, 'abund': abund}
# read stokes data, if present
self.stokes = False
if self.geometry_type != 'SPHERICAL_SYMMETRIC':
try:
stokes = read_xdr_var(data, ('i',))
            except (EOFError, IOError):
if self.verbose:
print('(WWW) read_atmos: no Stokes data in atmos.out,'
' skipping.')
return
self.stokes = True
ss = self.atmos['T'].shape
stokes_vars = [('B', 'd', ss), ('gamma_B', 'd', ss),
('chi_B', 'd', ss)]
for v in stokes_vars:
self.atmos[v[0]] = read_xdr_var(data, v[1:])
close_xdr(data, infile, verbose=self.verbose)
def read_spectrum(self, infile='spectrum.out'):
''' Reads RH spectrum.out file '''
if not hasattr(self, 'geometry'):
em = ('read_spectrum: geometry data not loaded, '
'call read_geometry() first!')
raise ValueError(em)
if not hasattr(self, 'atmos'):
em = ('read_spectrum: atmos data not loaded, '
'call read_atmos() first!')
raise ValueError(em)
data = read_xdr_file(infile)
profs = {}
self.spec = {}
nspect = read_xdr_var(data, ('i',))
self.spec['nspect'] = nspect
nrays = self.nrays
self.wave = read_xdr_var(data, ('d', (nspect,)))
if self.geometry_type == 'ONE_D_PLANE':
ishape = (nrays, nspect)
elif self.geometry_type == 'TWO_D_PLANE':
ishape = (self.nx, nrays, nspect)
elif self.geometry_type == 'THREE_D_PLANE':
ishape = (self.nx, self.ny, nrays, nspect)
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
ishape = (nrays, nspect)
self.imu = read_xdr_var(data, ('d', ishape))
self.spec['vacuum_to_air'] = read_xdr_var(data, ('i',))
self.spec['air_limit'] = read_xdr_var(data, ('d',))
if self.stokes:
self.stokes_Q = read_xdr_var(data, ('d', ishape))
self.stokes_U = read_xdr_var(data, ('d', ishape))
self.stokes_V = read_xdr_var(data, ('d', ishape))
close_xdr(data, infile, verbose=self.verbose)
# read as_rn, if it exists
if os.path.isfile('asrs.out'):
data = read_xdr_file('asrs.out')
if self.atmos['moving'] or self.stokes or self.input['PRD_angle_dep']:
self.spec['as_rn'] = read_xdr_var(data, ('i', (nrays, nspect)))
else:
self.spec['as_rn'] = read_xdr_var(data, ('i', (nspect,)))
close_xdr(data, 'asrs.out', verbose=self.verbose)
def read_ray(self, infile='spectrum_1.00'):
''' Reads spectra for single ray files (e.g. mu=1). '''
if not hasattr(self, 'geometry'):
em = ('read_spectrum: geometry data not loaded,'
' call read_geometry() first!')
raise ValueError(em)
if not hasattr(self, 'spec'):
em = ('read_spectrum: spectral data not loaded, '
'call read_spectrum() first!')
raise ValueError(em)
data = read_xdr_file(infile)
nspect = self.spec['nspect']
self.ray = {}
if self.geometry_type == 'ONE_D_PLANE':
self.muz = read_xdr_var(data, ('d',))
ishape = (nspect,)
sshape = (self.ndep,)
elif self.geometry_type == 'TWO_D_PLANE':
self.mux = read_xdr_var(data, ('d',))
self.muz = read_xdr_var(data, ('d',))
ishape = (self.nx, nspect)
sshape = (self.nx, self.nz)
elif self.geometry_type == 'THREE_D_PLANE':
self.mux = read_xdr_var(data, ('d',))
self.muy = read_xdr_var(data, ('d',))
ishape = (self.nx, self.ny, nspect)
sshape = (self.nx, self.ny, self.nz)
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
self.muz = read_xdr_var(data, ('d',))
ishape = (nspect,)
sshape = (self.nradius,)
# read intensity
self.int = read_xdr_var(data, ('d', ishape))
# read absorption and source function if written
ns = read_xdr_var(data, ('i',))
if ns > 0:
nshape = (ns,) + sshape
self.ray['chi'] = np.zeros(nshape, dtype='d')
self.ray['S'] = np.zeros(nshape, dtype='d')
self.ray['wave_idx'] = np.zeros(ns, dtype='l')
for i in range(ns):
self.ray['wave_idx'][i] = read_xdr_var(data, ('i',))
self.ray['chi'][i] = read_xdr_var(data, ('d', sshape))
self.ray['S'][i] = read_xdr_var(data, ('d', sshape))
if self.stokes:
self.ray_stokes_Q = read_xdr_var(data, ('d', ishape))
self.ray_stokes_U = read_xdr_var(data, ('d', ishape))
self.ray_stokes_V = read_xdr_var(data, ('d', ishape))
close_xdr(data, infile, verbose=self.verbose)
def read_brs(self, infile='brs.out'):
''' Reads the file with the background opacity record settings,
in the old (xdr) format. '''
if not hasattr(self, 'geometry'):
em = ('read_brs: geometry data not loaded, call read_geometry()'
' first!')
raise ValueError(em)
if not hasattr(self, 'spec'):
em = ('read_brs: spectrum data not loaded, call read_spectrum()'
' first!')
raise ValueError(em)
data = read_xdr_file(infile)
atmosID = read_xdr_var(data, ('s',)).strip()
nspace = read_xdr_var(data, ('i',))
nspect = read_xdr_var(data, ('i',))
if nspect != self.spec['nspect']:
em = ('(EEE) read_brs: nspect in file different from atmos. '
'Aborting.')
raise ValueError(em)
self.brs = {}
if self.atmos['moving'] or self.stokes:
ishape = (2, self.nrays, nspect)
else:
ishape = (nspect,)
self.brs['hasline'] = read_xdr_var(
data, ('i', (nspect,))).astype('Bool')
self.brs['ispolarized'] = read_xdr_var(
data, ('i', (nspect,))).astype('Bool')
self.brs['backgrrecno'] = read_xdr_var(data, ('i', ishape))
close_xdr(data, infile, verbose=self.verbose)
def read_j(self, infile='J.dat'):
''' Reads the mean radiation field, for all wavelengths. '''
if not hasattr(self, 'geometry'):
em = 'read_j: geometry data not loaded, call read_geometry() first!'
raise ValueError(em)
if not hasattr(self, 'spec'):
            em = 'read_j: spectrum data not loaded, call read_spectrum() first!'
raise ValueError(em)
data_file = open(infile, 'r')
nspect = self.spec['nspect']
if self.geometry_type == 'ONE_D_PLANE':
rec_len = self.ndep * 8
ishape = (nspect, self.ndep)
elif self.geometry_type == 'TWO_D_PLANE':
rec_len = (self.nx * self.nz) * 8
ishape = (nspect, self.nx, self.nz)
elif self.geometry_type == 'THREE_D_PLANE':
rec_len = (self.nx * self.ny * self.nz) * 8
ishape = (nspect, self.nx, self.ny, self.nz)
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
rec_len = self.nradius * 8
ishape = (nspect, self.nradius)
self.J = np.zeros(ishape)
for i in range(nspect):
# point background file to position and read
data_file.seek(i * rec_len)
self.J[i] = read_file_var(data_file, ('d', ishape[1:]))
data_file.close()
def read_opacity(self, infile_line='opacity.out', infile_bg='background.dat',
imu=0):
''' Reads RH atmos.out file '''
if not hasattr(self, 'geometry'):
em = ('read_opacity: geometry data not loaded,'
' call read_geometry() first!')
raise ValueError(em)
if not hasattr(self, 'spec'):
em = ('read_opacity: spectrum data not loaded,'
' call read_spec() first!')
raise ValueError(em)
        if not hasattr(self, 'brs'):
self.read_brs()
data_line = read_xdr_file(infile_line)
file_bg = open(infile_bg, 'r')
nspect = self.spec['nspect']
if self.geometry_type == 'ONE_D_PLANE':
as_rec_len = 2 * self.ndep * 8
bg_rec_len = self.ndep * 8
ishape = (nspect, self.ndep)
elif self.geometry_type == 'TWO_D_PLANE':
as_rec_len = 2 * (self.nx * self.nz) * 8
bg_rec_len = (self.nx * self.nz) * 8
ishape = (nspect, self.nx, self.nz)
elif self.geometry_type == 'THREE_D_PLANE':
as_rec_len = 2 * (self.nx * self.ny * self.nz) * 8
bg_rec_len = (self.nx * self.ny * self.nz) * 8
ishape = (nspect, self.nx, self.ny, self.nz)
elif self.geometry_type == 'SPHERICAL_SYMMETRIC':
as_rec_len = 2 * self.nradius * 8
bg_rec_len = self.nradius * 8
ishape = (nspect, self.nradius)
# create arrays
chi_as = np.zeros(ishape)
eta_as = np.zeros(ishape)
chi_c = np.zeros(ishape)
eta_c = np.zeros(ishape)
scatt = np.zeros(ishape)
# NOTE: this will not work when a line is polarised.
# For those cases these arrays must be read per wavelength, and will
# have different sizes for different wavelengths.
if np.sum(self.brs['ispolarized']):
em = ('read_opacity: Polarized line(s) detected, cannot continue'
' with opacity extraction')
raise ValueError(em)
# get record numbers
if self.atmos['moving'] or self.stokes or self.input['PRD_angle_dep']:
as_index = self.spec['as_rn'][imu] * as_rec_len
bg_index = self.brs['backgrrecno'][1, imu] * bg_rec_len
else:
as_index = self.spec['as_rn'] * as_rec_len
bg_index = self.brs['backgrrecno'] * bg_rec_len
# Read arrays
for i in range(nspect):
if as_index[i] >= 0: # avoid non-active set lines
# point xdr buffer to position and read
data_line.set_position(as_index[i])
chi_as[i] = read_xdr_var(data_line, ('d', ishape[1:]))
eta_as[i] = read_xdr_var(data_line, ('d', ishape[1:]))
# point background file to position and read
file_bg.seek(bg_index[i])
chi_c[i] = read_file_var(file_bg, ('d', ishape[1:]))
eta_c[i] = read_file_var(file_bg, ('d', ishape[1:]))
scatt[i] = read_file_var(file_bg, ('d', ishape[1:]))
self.chi_as = chi_as
self.eta_as = eta_as
self.chi_c = chi_c
self.eta_c = eta_c
self.scatt = scatt
close_xdr(data_line, infile_line, verbose=False)
file_bg.close()
def get_contrib_imu(self, imu, type='total', op_file='opacity.out',
bg_file='background.dat', j_file='J.dat'):
''' Calculates the contribution function for intensity, for a
particular ray, defined by imu.
        type can be: \'total\', \'line\', or \'continuum\'
The units of self.contribi are J m^-2 s^-1 Hz^-1 sr^-1 km^-1
NOTE: This only calculates the contribution function for
the quadrature rays (ie, often not for disk-centre)
For rays calculated with solve ray, one must use
get_contrib_ray
'''
type = type.lower()
if not hasattr(self, 'geometry'):
em = ('get_contrib_imu: geometry data not loaded,'
' call read_geometry() first!')
raise ValueError(em)
if not hasattr(self, 'spec'):
em = ('get_contrib_imu: spectrum data not loaded,'
' call read_spec() first!')
raise ValueError(em)
self.read_opacity(infile_line=op_file, infile_bg=bg_file, imu=imu)
self.read_j(infile=j_file)
mu = self.geometry['xmu'][imu]
# Calculate optical depth
ab = (self.chi_c + self.chi_as)
self.tau = get_tau(self.geometry['height'], mu, ab)
# Calculate source function
if type == 'total':
self.S = (self.eta_as + self.eta_c + self.J * self.scatt) / ab
elif type == 'line':
self.S = self.eta_as / ab
elif type == 'continuum':
self.S = (self.eta_c + self.J * self.scatt) / ab
else:
raise ValueError('get_contrib_imu: invalid type!')
# Calculate contribution function
self.contribi = get_contrib(
self.geometry['height'], mu, self.tau, self.S)
return
def get_contrib_ray(self, inray='ray.input', rayfile='spectrum_1.00'):
''' Calculates the contribution function for intensity, for a
particular ray
The units of self.contrib are J m^-2 s^-1 Hz^-1 sr^-1 km^-1
'''
inray = self.fdir + '/' + inray
rayfile = self.fdir + '/' + rayfile
if not hasattr(self, 'ray'):
self.read_ray(infile=rayfile)
if 'wave_idx' not in list(self.ray.keys()):
em = ('get_contrib_ray: no chi/source function written to '
'ray file, aborting.')
raise ValueError(em)
# read mu from ray.input file
mu = np.loadtxt(inray, dtype='f')[0]
if not (0 <= mu <= 1.):
em = 'get_contrib_ray: invalid mu read: %f' % mu
raise ValueError(em)
idx = self.ray['wave_idx']
# Calculate optical depth
self.tau = get_tau(self.geometry['height'], mu, self.ray['chi'])
# Calculate contribution function
self.contrib = get_contrib(self.geometry['height'], mu, self.tau,
self.ray['S'])
return
class RhAtmos:
"""
Reads input atmosphere from RH. Currently only 2D format supported.
Parameters
----------
format : str, optional
Atmosphere format. Currently only '2D' (default) supported.
filename : str, optional
File to read.
verbose : str, optional
If True, will print more details.
"""
def __init__(self, format="2D", filename=None, verbose=True):
''' Reads RH input atmospheres. '''
self.verbose = verbose
if format.lower() == "2d":
if filename is not None:
self.read_atmos2d(filename)
else:
raise NotImplementedError("Format %s not yet supported" % format)
def read_atmos2d(self, filename):
"""
Reads input 2D atmosphere
"""
data = read_xdr_file(filename)
self.nx = read_xdr_var(data, ('i',))
self.nz = read_xdr_var(data, ('i',))
self.nhydr = read_xdr_var(data, ('i',))
self.hboundary = read_xdr_var(data, ('i',))
self.bvalue = read_xdr_var(data, ('i', (2, )))
nx, nz, nhydr = self.nx, self.nz, self.nhydr
atmos_vars = [('dx', 'd', (nx,)), ('z', 'd', (nz,)),
('T', 'd', (nx, nz)), ('ne', 'd', (nx, nz)),
('vturb', 'd', (nx, nz)), ('vx', 'd', (nx, nz)),
('vz', 'd', (nx, nz)), ('nh', 'd', (nx, nz, nhydr))
]
for v in atmos_vars:
setattr(self, v[0], read_xdr_var(data, v[1:]))
def write_atmos2d(self, filename, dx, z, T, ne, vturb, vx, vz, nh,
hboundary, bvalue):
nx, nz = T.shape
nhydr = nh.shape[-1]
assert T.shape == ne.shape
assert ne.shape == vturb.shape
assert vturb.shape == nh.shape[:-1]
assert dx.shape[0] == nx
assert z.shape[0] == nz
# Pack as double
p = xdrlib.Packer()
p.pack_int(nx)
p.pack_int(nz)
p.pack_int(nhydr)
p.pack_int(hboundary)
p.pack_int(bvalue[0])
p.pack_int(bvalue[1])
p.pack_farray(nx, dx.ravel().astype('d'), p.pack_double)
p.pack_farray(nz, z.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, T.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, ne.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, vturb.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, vx.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz, vz.ravel().astype('d'), p.pack_double)
p.pack_farray(nx * nz * nhydr, nh.T.ravel().astype('d'), p.pack_double)
# Write to file
f = open(filename, 'wb')
f.write(p.get_buffer())
f.close()
#############################################################################
# TOOLS
#############################################################################
class EmptyData:
def __init__(self):
pass
def read_xdr_file(filename): # ,var,cl=None,verbose=False):
"""
Reads data from XDR file.
Because of the way xdrlib works, this reads the whole file to
memory at once. Avoid with very large files.
Parameters
----------
filename : string
File to read.
Returns
-------
result : xdrlib.Unpacker object
"""
try:
f = io.open(filename, 'rb')
data = f.read()
f.close()
except IOError as e:
raise IOError(
'read_xdr_file: problem reading {0}: {1}'.format(filename, e))
# return XDR data
return xdrlib.Unpacker(data)
def close_xdr(buf, ofile='', verbose=False):
"""
Closes the xdrlib.Unpacker object, gives warning if not all data read.
Parameters
----------
buf : xdrlib.Unpacker object
data object.
ofile : string, optional
Original file from which data was read.
verbose : bool, optional
Whether to print warning or not.
"""
try:
buf.done()
except: # .done() will raise error if data remaining
if verbose:
print(('(WWW) close_xdr: {0} not all data read!'.format(ofile)))
def read_xdr_var(buf, var):
"""
Reads a single variable/array from a xdrlib.Unpack buffer.
Parameters
----------
buf: xdrlib.Unpack object
Data buffer.
var: tuple with (type[,shape]), where type is 'f', 'd', 'i', 'ui',
or 's'. Shape is optional, and if true is shape of array.
Type and shape of variable to read
Returns
-------
out : int/float or array
Resulting variable.
"""
assert len(var) > 0
if var[0] not in ['f', 'd', 'i', 'ui', 's']:
raise ValueError('read_xdr_var: data type'
' {0} not currently supported'.format(var[0]))
fdict = {'f': buf.unpack_float,
'd': buf.unpack_double,
'i': buf.unpack_int,
'ui': buf.unpack_uint,
's': buf.unpack_string}
func = fdict[var[0]]
# Single or array?
if len(var) == 1:
# this is because RH seems to write the size of the string twice
if var[0] == 's':
buf.unpack_int()
out = func()
else:
nitems = np.prod(var[1])
out = np.array(buf.unpack_farray(nitems, func)).reshape(var[1][::-1])
# invert order of indices, to match IDL's
out = np.transpose(out, list(range(len(var[1])))[::-1])
return out
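# Editor's note: hedged usage sketch, not part of the original module. It assumes an XDR
# file laid out as one int followed by three doubles; the file name is illustrative.
# buf = read_xdr_file('example.out')
# n = read_xdr_var(buf, ('i',))            # single integer
# arr = read_xdr_var(buf, ('d', (3,)))     # array of 3 doubles
# close_xdr(buf, 'example.out', verbose=True)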
def read_file_var(buf, var):
''' Reads a single variable/array from a file buffer.
IN:
buf: open file object
var: tuple with (type[,shape]), where type is 'f', 'd', 'i', 'ui',
or 's'. Shape is optional, and if true is shape of array.
OUT:
variable/array
'''
assert len(var) > 0
if len(var) == 1:
out = np.fromfile(buf, dtype=var, count=1)
elif len(var) == 2:
out = np.fromfile(buf, dtype=var[0], count=var[1][0])
else:
nitems = np.prod(var[1])
out = np.array(np.fromfile(buf, dtype=var[0], count=nitems)).\
reshape(var[1][::-1])
out = np.transpose(out, list(range(len(var[1])))[::-1])
return out
def get_tau(x, mu, chi):
''' Calculates the optical depth, given x (height), mu (cos[theta]) and
chi, absorption coefficient. Chi can be n-dimensional, as long as
last index is depth.
'''
# With scipy, this could be done in one line with
# scipy.integrate.quadrature.cumtrapz, but we are avoiding scipy to keep
# these tools more independent
if len(x) != chi.shape[-1]:
raise ValueError('get_tau: x and chi have different sizes!')
path = x / mu
npts = len(x)
# bring depth to first index, to allow n-d algebra
chi_t = np.transpose(chi)
tau = np.zeros(chi_t.shape)
for i in range(1, npts):
tau[i] = tau[i - 1] + 0.5 * \
(chi_t[i - 1] + chi_t[i]) * (path[i - 1] - path[i])
return tau.T
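# Editor's note: hedged usage sketch, not part of the original module. Heights are assumed
# in metres, ordered top to bottom; chi is a constant absorption coefficient in m^-1.
# height = np.linspace(2e6, 0, 50)
# chi = np.full(50, 1e-7)
# tau = get_tau(height, 1.0, chi)          # optical depth along a vertical ray (mu = 1)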
def get_contrib(z, mu, tau_in, S):
''' Calculates contribution function using x, mu, tau, and the source
function. '''
# Tau truncated at 100 (large enough to be useless)
tau = tau_in.copy()
tau[tau_in > 100.] = 100.
# Calculate dtau (transpose to keep n-D generic form), and dx
dtau = np.zeros(tau_in.shape[::-1])
tt = np.transpose(tau_in)
dtau[1:] = tt[1:] - tt[:-1]
dtau = np.transpose(dtau)
dx = np.zeros(z.shape)
dx[1:] = (z[1:] - z[:-1]) / mu
dx[0] = dx[1]
# Calculate contribution function
contrib = S * np.exp(-tau) * (- dtau / dx) / mu
# convert from m^-1 to km^-1, units are now: J m^-2 s^-1 Hz^-1 sr^-1 km^-1
contrib *= 1.e3
return contrib
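# Editor's note: hedged usage sketch, not part of the original module, continuing the toy
# column above with a flat (purely illustrative) source function.
# S = np.ones(50)
# cf = get_contrib(height, 1.0, tau, S)    # units: J m^-2 s^-1 Hz^-1 sr^-1 km^-1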
def write_B(outfile, Bx, By, Bz):
''' Writes a RH magnetic field file. Input B arrays can be any rank, as
they will be flattened before write. Bx, By, Bz units should be T.'''
if (Bx.shape != By.shape) or (By.shape != Bz.shape):
raise TypeError('writeB: B arrays have different shapes!')
n = np.prod(Bx.shape)
# Convert into spherical coordinates
B = np.sqrt(Bx**2 + By**2 + Bz**2)
gamma_B = np.arccos(Bz / B)
chi_B = np.arctan(By / Bx)
# Pack as double
p = xdrlib.Packer()
p.pack_farray(n, B.ravel().astype('d'), p.pack_double)
p.pack_farray(n, gamma_B.ravel().astype('d'), p.pack_double)
p.pack_farray(n, chi_B.ravel().astype('d'), p.pack_double)
# Write to file
f = open(outfile, 'wb')
f.write(p.get_buffer())
f.close()
return
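# Editor's note: hedged usage sketch, not part of the original module. Field values and
# the output file name are illustrative; units are tesla, as required by write_B.
# Bx = np.full(100, 1e-4)
# By = np.full(100, 1e-4)
# Bz = np.full(100, 1e-3)
# write_B('example.B', Bx, By, Bz)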
|
[
"xdrlib.Unpacker",
"numpy.prod",
"numpy.fromfile",
"numpy.sqrt",
"numpy.arccos",
"io.open",
"os.path.isfile",
"xdrlib.Packer",
"numpy.zeros",
"numpy.sum",
"numpy.exp",
"numpy.loadtxt",
"numpy.transpose",
"numpy.arctan"
] |
[((24930, 24951), 'xdrlib.Unpacker', 'xdrlib.Unpacker', (['data'], {}), '(data)\n', (24945, 24951), False, 'import xdrlib\n'), ((28119, 28136), 'numpy.transpose', 'np.transpose', (['chi'], {}), '(chi)\n', (28131, 28136), True, 'import numpy as np\n'), ((28147, 28168), 'numpy.zeros', 'np.zeros', (['chi_t.shape'], {}), '(chi_t.shape)\n', (28155, 28168), True, 'import numpy as np\n'), ((28637, 28665), 'numpy.zeros', 'np.zeros', (['tau_in.shape[::-1]'], {}), '(tau_in.shape[::-1])\n', (28645, 28665), True, 'import numpy as np\n'), ((28675, 28695), 'numpy.transpose', 'np.transpose', (['tau_in'], {}), '(tau_in)\n', (28687, 28695), True, 'import numpy as np\n'), ((28739, 28757), 'numpy.transpose', 'np.transpose', (['dtau'], {}), '(dtau)\n', (28751, 28757), True, 'import numpy as np\n'), ((28767, 28784), 'numpy.zeros', 'np.zeros', (['z.shape'], {}), '(z.shape)\n', (28775, 28784), True, 'import numpy as np\n'), ((29368, 29385), 'numpy.prod', 'np.prod', (['Bx.shape'], {}), '(Bx.shape)\n', (29375, 29385), True, 'import numpy as np\n'), ((29435, 29471), 'numpy.sqrt', 'np.sqrt', (['(Bx ** 2 + By ** 2 + Bz ** 2)'], {}), '(Bx ** 2 + By ** 2 + Bz ** 2)\n', (29442, 29471), True, 'import numpy as np\n'), ((29480, 29497), 'numpy.arccos', 'np.arccos', (['(Bz / B)'], {}), '(Bz / B)\n', (29489, 29497), True, 'import numpy as np\n'), ((29510, 29528), 'numpy.arctan', 'np.arctan', (['(By / Bx)'], {}), '(By / Bx)\n', (29519, 29528), True, 'import numpy as np\n'), ((29558, 29573), 'xdrlib.Packer', 'xdrlib.Packer', ([], {}), '()\n', (29571, 29573), False, 'import xdrlib\n'), ((9632, 9658), 'os.path.isfile', 'os.path.isfile', (['"""asrs.out"""'], {}), "('asrs.out')\n", (9646, 9658), False, 'import os\n'), ((14763, 14779), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (14771, 14779), True, 'import numpy as np\n'), ((16544, 16560), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16552, 16560), True, 'import numpy as np\n'), ((16578, 16594), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16586, 16594), True, 'import numpy as np\n'), ((16611, 16627), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16619, 16627), True, 'import numpy as np\n'), ((16644, 16660), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16652, 16660), True, 'import numpy as np\n'), ((16677, 16693), 'numpy.zeros', 'np.zeros', (['ishape'], {}), '(ishape)\n', (16685, 16693), True, 'import numpy as np\n'), ((16913, 16944), 'numpy.sum', 'np.sum', (["self.brs['ispolarized']"], {}), "(self.brs['ispolarized'])\n", (16919, 16944), True, 'import numpy as np\n'), ((23258, 23273), 'xdrlib.Packer', 'xdrlib.Packer', ([], {}), '()\n', (23271, 23273), False, 'import xdrlib\n'), ((24708, 24731), 'io.open', 'io.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (24715, 24731), False, 'import io\n'), ((26588, 26603), 'numpy.prod', 'np.prod', (['var[1]'], {}), '(var[1])\n', (26595, 26603), True, 'import numpy as np\n'), ((27185, 27221), 'numpy.fromfile', 'np.fromfile', (['buf'], {'dtype': 'var', 'count': '(1)'}), '(buf, dtype=var, count=1)\n', (27196, 27221), True, 'import numpy as np\n'), ((11676, 11703), 'numpy.zeros', 'np.zeros', (['nshape'], {'dtype': '"""d"""'}), "(nshape, dtype='d')\n", (11684, 11703), True, 'import numpy as np\n'), ((11732, 11759), 'numpy.zeros', 'np.zeros', (['nshape'], {'dtype': '"""d"""'}), "(nshape, dtype='d')\n", (11740, 11759), True, 'import numpy as np\n'), ((11795, 11818), 'numpy.zeros', 'np.zeros', (['ns'], {'dtype': '"""l"""'}), "(ns, dtype='l')\n", (11803, 11818), 
True, 'import numpy as np\n'), ((20869, 20897), 'numpy.loadtxt', 'np.loadtxt', (['inray'], {'dtype': '"""f"""'}), "(inray, dtype='f')\n", (20879, 20897), True, 'import numpy as np\n'), ((27260, 27307), 'numpy.fromfile', 'np.fromfile', (['buf'], {'dtype': 'var[0]', 'count': 'var[1][0]'}), '(buf, dtype=var[0], count=var[1][0])\n', (27271, 27307), True, 'import numpy as np\n'), ((27335, 27350), 'numpy.prod', 'np.prod', (['var[1]'], {}), '(var[1])\n', (27342, 27350), True, 'import numpy as np\n'), ((28894, 28906), 'numpy.exp', 'np.exp', (['(-tau)'], {}), '(-tau)\n', (28900, 28906), True, 'import numpy as np\n'), ((27374, 27418), 'numpy.fromfile', 'np.fromfile', (['buf'], {'dtype': 'var[0]', 'count': 'nitems'}), '(buf, dtype=var[0], count=nitems)\n', (27385, 27418), True, 'import numpy as np\n')]
|
import logging, tqdm
import numpy as np
import rawpy
import colour_demosaicing as cd
import HDRutils.io as io
from HDRutils.utils import *
logger = logging.getLogger(__name__)
def merge(files, do_align=False, demosaic_first=True, normalize=False, color_space='sRGB',
wb=None, saturation_percent=0.98, black_level=0, bayer_pattern='RGGB',
exp=None, gain=None, aperture=None, estimate_exp='gfxdisp', cam='default',
perc=10, outlier='cerman'):
"""
	Merge multiple SDR images into a single HDR image after demosaicing. This is a wrapper
function that extracts metadata and calls the appropriate function.
	:files: Filenames containing the input images
	:do_align: Align by estimating homography
:demosaic_first: Order of operations
:color_space: Output color-space. Pick 1 of [sRGB, raw, Adobe, XYZ]
:normalize: Output pixels lie between 0 and 1
:wb: White-balance values after merging. Pick from [None, camera] or supply 3 values.
:saturation_percent: Saturation offset from reported white-point
:black_level: Camera's black level
	:bayer_pattern: Color filter array pattern of the camera
:exp: Exposure time (in seconds) required when metadata is not present
:gain: Camera gain (ISO/100) required when metadata is not present
:aperture: Aperture required when metadata is not present
:estimate_exp: Estimate exposure times by solving a system. Pick 1 of ['gfxdisp','cerman']
:cam: Camera noise model for exposure estimation
:perc: Estimate exposures using min-variance rows
:outlier: Iterative outlier removal. Pick 1 of [None, 'cerman', 'ransac']
:return: Merged FP32 HDR image
"""
data = get_metadata(files, color_space, saturation_percent, black_level, exp, gain, aperture)
if estimate_exp:
		# TODO: Handle image stacks with varying gain and aperture
assert len(set(data['gain'])) == 1 and len(set(data['aperture'])) == 1
if do_align:
# TODO: Perform exposure alignment after homography (adds additional overhead since
# images need to be demosaiced)
logger.warning('Exposure alignment is done before homography, may cause it to fail')
Y = np.array([io.imread(f, libraw=False) for f in files], dtype=np.float32)
exif_exp = data['exp']
estimate = np.ones(data['N'], dtype=bool)
for i in range(data['N']):
# Skip images where > 90% of the pixels are saturated
if (Y[i] >= data['saturation_point']).sum() > 0.9*Y[i].size:
logger.warning(f'Skipping exposure estimation for file {files[i]} due to saturation')
estimate[i] = False
data['exp'][estimate] = estimate_exposures(Y[estimate], data['exp'][estimate], data,
estimate_exp, cam=cam, outlier=outlier)
if demosaic_first:
HDR, num_sat = imread_demosaic_merge(files, data, do_align, saturation_percent)
else:
HDR, num_sat = imread_merge_demosaic(files, data, do_align, bayer_pattern)
if num_sat > 0:
logger.warning(f'{num_sat/(data["h"]*data["w"]):.3f}% of pixels (n={num_sat}) are ' \
'saturated in the shortest exposure. The values for these pixels will ' \
'be inaccurate.')
if wb == 'camera':
wb = data['white_balance'][:3]
if wb is not None:
assert len(wb) == 3, 'Provide list [R G B] corresponding to white patch in the image'
HDR = HDR * np.array(wb)[None,None,:]
if HDR.min() < 0:
logger.info('Clipping negative pixels.')
HDR[HDR < 0] = 0
if normalize:
HDR = HDR / HDR.max()
return HDR.astype(np.float32)
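# Editor's note: hedged usage sketch, not part of the original module. File names are
# illustrative; exposure time, gain and aperture are read from the files' metadata when present.
# files = ['exposure_1.dng', 'exposure_2.dng', 'exposure_3.dng']
# hdr = merge(files, demosaic_first=True, color_space='sRGB')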
def imread_demosaic_merge(files, metadata, do_align, sat_percent):
"""
First postprocess using libraw and then merge RGB images. This function merges in an online
way and can handle a large number of inputs with little memory.
"""
assert metadata['raw_format'], 'Libraw unsupported, use merge(..., demosaic_first=False)'
logger.info('Demosaicing before merging.')
# Check for saturation in shortest exposure
shortest_exposure = np.argmin(metadata['exp'] * metadata['gain'] * metadata['aperture'])
logger.info(f'Shortest exposure is {shortest_exposure}')
if do_align:
ref_idx = np.argsort(metadata['exp'] * metadata['gain']
* metadata['aperture'])[len(files)//2]
ref_img = io.imread(files[ref_idx]) / metadata['exp'][ref_idx] \
/ metadata['gain'][ref_idx] \
/ metadata['aperture'][ref_idx]
num_saturated = 0
num, denom = np.zeros((2, metadata['h'], metadata['w'], 3))
for i, f in enumerate(tqdm.tqdm(files, leave=False)):
raw = rawpy.imread(f)
img = io.imread_libraw(raw, color_space=metadata['color_space'])
saturation_point_img = sat_percent * (2**(8*img.dtype.itemsize) - 1)
if do_align and i != ref_idx:
scaled_img = img / metadata['exp'][i] \
/ metadata['gain'][i] \
/ metadata['aperture'][i]
img = align(ref_img, scaled_img, img)
# Ignore saturated pixels in all but shortest exposure
if i == shortest_exposure:
unsaturated = np.ones_like(img, dtype=bool)
num_sat = np.count_nonzero(np.logical_not(
get_unsaturated(raw.raw_image_visible, metadata['saturation_point'],
img, saturation_point_img))) / 3
else:
unsaturated = get_unsaturated(raw.raw_image_visible, metadata['saturation_point'],
img, saturation_point_img)
X_times_t = img / metadata['gain'][i] / metadata['aperture'][i]
denom[unsaturated] += metadata['exp'][i]
num[unsaturated] += X_times_t[unsaturated]
HDR = num / denom
return HDR, num_sat
def imread_merge_demosaic(files, metadata, do_align, pattern):
"""
Merge RAW images before demosaicing. This function merges in an online
way and can handle a large number of inputs with little memory.
"""
if do_align:
ref_idx = np.argsort(metadata['exp'] * metadata['gain']
* metadata['aperture'])[len(files)//2]
ref_img = io.imread(files[ref_idx]).astype(np.float32)
if metadata['raw_format']:
ref_img = cd.demosaicing_CFA_Bayer_bilinear(ref_img, pattern=pattern)
ref_img = ref_img / metadata['exp'][ref_idx] \
/ metadata['gain'][ref_idx] \
/ metadata['aperture'][ref_idx]
logger.info('Merging before demosaicing.')
# More transforms available here:
# http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
if metadata['color_space'] == 'raw':
color_mat = np.eye(3)
else:
assert metadata['raw_format'], \
'Only RAW color_space supported. Use merge(..., color_space=\'raw\')'
raw = rawpy.imread(files[0])
assert (raw.rgb_xyz_matrix[-1] == 0).all()
native2xyz = np.linalg.inv(raw.rgb_xyz_matrix[:-1])
if metadata['color_space'] == 'xyz':
xyz2out = np.eye(3)
elif metadata['color_space'] == 'srgb':
xyz2out = np.array([[3.2406, -1.5372, -0.4986],
[-0.9689, 1.8758, 0.0415],
[0.0557, -0.2040, 1.0570]])
elif metadata['color_space'] == 'adobe':
xyz2out = np.array([[2.0413690, -0.5649464, -0.3446944],
[-0.9692660, 1.8760108, 0.0415560],
[0.0134474, -0.1183897, 1.0154096]])
else:
logger.warning('Unsupported color-space, switching to camara raw.')
native2xyz = np.eye(3)
xyz2out = np.eye(3)
color_mat = (xyz2out @ native2xyz).transpose()
# Check for saturation in shortest exposure
shortest_exposure = np.argmin(metadata['exp'] * metadata['gain'] * metadata['aperture'])
logger.info(f'Shortest exposure is {shortest_exposure}')
num_saturated = 0
num, denom = np.zeros((2, metadata['h'], metadata['w']))
black_frame = np.tile(metadata['black_level'].reshape(2, 2),
(metadata['h']//2, metadata['w']//2))
for i, f in enumerate(tqdm.tqdm(files, leave=False)):
img = io.imread(f, libraw=False).astype(np.float32)
if do_align and i != ref_idx:
i_img = io.imread(f).astype(np.float32)
if metadata['raw_format']:
i_img = cd.demosaicing_CFA_Bayer_bilinear(i_img, pattern=pattern)
i_img = i_img / metadata['exp'][i] \
/ metadata['gain'][i] \
/ metadata['aperture'][i]
img = align(ref_img, i_img, img)
# Ignore saturated pixels in all but shortest exposure
if i == shortest_exposure:
unsaturated = np.ones_like(img, dtype=bool)
num_sat = np.count_nonzero(np.logical_not(get_unsaturated(
img, metadata['saturation_point'])))
else:
unsaturated = get_unsaturated(img, metadata['saturation_point'])
# Subtract black level for linearity
img -= black_frame
X_times_t = img / metadata['gain'][i] / metadata['aperture'][i]
denom[unsaturated] += metadata['exp'][i]
num[unsaturated] += X_times_t[unsaturated]
HDR_bayer = num / denom
# Libraw does not support 32-bit values. Use colour-demosaicing instead:
# https://colour-demosaicing.readthedocs.io/en/latest/manual.html
logger.info('Running bilinear demosaicing')
HDR = cd.demosaicing_CFA_Bayer_bilinear(HDR_bayer, pattern=pattern)
# Convert to output color-space
logger.info(f'Using color matrix: {color_mat}')
HDR = HDR @ color_mat
return HDR, num_sat
|
[
"logging.getLogger",
"numpy.ones_like",
"numpy.eye",
"HDRutils.io.imread",
"numpy.ones",
"tqdm.tqdm",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"HDRutils.io.imread_libraw",
"numpy.argmin",
"rawpy.imread",
"colour_demosaicing.demosaicing_CFA_Bayer_bilinear"
] |
[((151, 178), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (168, 178), False, 'import logging, tqdm\n'), ((3835, 3903), 'numpy.argmin', 'np.argmin', (["(metadata['exp'] * metadata['gain'] * metadata['aperture'])"], {}), "(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n", (3844, 3903), True, 'import numpy as np\n'), ((4267, 4313), 'numpy.zeros', 'np.zeros', (["(2, metadata['h'], metadata['w'], 3)"], {}), "((2, metadata['h'], metadata['w'], 3))\n", (4275, 4313), True, 'import numpy as np\n'), ((7072, 7140), 'numpy.argmin', 'np.argmin', (["(metadata['exp'] * metadata['gain'] * metadata['aperture'])"], {}), "(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n", (7081, 7140), True, 'import numpy as np\n'), ((7233, 7276), 'numpy.zeros', 'np.zeros', (["(2, metadata['h'], metadata['w'])"], {}), "((2, metadata['h'], metadata['w']))\n", (7241, 7276), True, 'import numpy as np\n'), ((8564, 8625), 'colour_demosaicing.demosaicing_CFA_Bayer_bilinear', 'cd.demosaicing_CFA_Bayer_bilinear', (['HDR_bayer'], {'pattern': 'pattern'}), '(HDR_bayer, pattern=pattern)\n', (8597, 8625), True, 'import colour_demosaicing as cd\n'), ((2203, 2233), 'numpy.ones', 'np.ones', (["data['N']"], {'dtype': 'bool'}), "(data['N'], dtype=bool)\n", (2210, 2233), True, 'import numpy as np\n'), ((4337, 4366), 'tqdm.tqdm', 'tqdm.tqdm', (['files'], {'leave': '(False)'}), '(files, leave=False)\n', (4346, 4366), False, 'import logging, tqdm\n'), ((4377, 4392), 'rawpy.imread', 'rawpy.imread', (['f'], {}), '(f)\n', (4389, 4392), False, 'import rawpy\n'), ((4401, 4459), 'HDRutils.io.imread_libraw', 'io.imread_libraw', (['raw'], {'color_space': "metadata['color_space']"}), "(raw, color_space=metadata['color_space'])\n", (4417, 4459), True, 'import HDRutils.io as io\n'), ((6154, 6163), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6160, 6163), True, 'import numpy as np\n'), ((6287, 6309), 'rawpy.imread', 'rawpy.imread', (['files[0]'], {}), '(files[0])\n', (6299, 6309), False, 'import rawpy\n'), ((6370, 6408), 'numpy.linalg.inv', 'np.linalg.inv', (['raw.rgb_xyz_matrix[:-1]'], {}), '(raw.rgb_xyz_matrix[:-1])\n', (6383, 6408), True, 'import numpy as np\n'), ((7408, 7437), 'tqdm.tqdm', 'tqdm.tqdm', (['files'], {'leave': '(False)'}), '(files, leave=False)\n', (7417, 7437), False, 'import logging, tqdm\n'), ((3989, 4058), 'numpy.argsort', 'np.argsort', (["(metadata['exp'] * metadata['gain'] * metadata['aperture'])"], {}), "(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n", (3999, 4058), True, 'import numpy as np\n'), ((4817, 4846), 'numpy.ones_like', 'np.ones_like', (['img'], {'dtype': 'bool'}), '(img, dtype=bool)\n', (4829, 4846), True, 'import numpy as np\n'), ((5574, 5643), 'numpy.argsort', 'np.argsort', (["(metadata['exp'] * metadata['gain'] * metadata['aperture'])"], {}), "(metadata['exp'] * metadata['gain'] * metadata['aperture'])\n", (5584, 5643), True, 'import numpy as np\n'), ((5766, 5825), 'colour_demosaicing.demosaicing_CFA_Bayer_bilinear', 'cd.demosaicing_CFA_Bayer_bilinear', (['ref_img'], {'pattern': 'pattern'}), '(ref_img, pattern=pattern)\n', (5799, 5825), True, 'import colour_demosaicing as cd\n'), ((6462, 6471), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6468, 6471), True, 'import numpy as np\n'), ((7915, 7944), 'numpy.ones_like', 'np.ones_like', (['img'], {'dtype': 'bool'}), '(img, dtype=bool)\n', (7927, 7944), True, 'import numpy as np\n'), ((2103, 2129), 'HDRutils.io.imread', 'io.imread', (['f'], {'libraw': '(False)'}), '(f, libraw=False)\n', (2112, 
2129), True, 'import HDRutils.io as io\n'), ((3217, 3229), 'numpy.array', 'np.array', (['wb'], {}), '(wb)\n', (3225, 3229), True, 'import numpy as np\n'), ((5679, 5704), 'HDRutils.io.imread', 'io.imread', (['files[ref_idx]'], {}), '(files[ref_idx])\n', (5688, 5704), True, 'import HDRutils.io as io\n'), ((6527, 6622), 'numpy.array', 'np.array', (['[[3.2406, -1.5372, -0.4986], [-0.9689, 1.8758, 0.0415], [0.0557, -0.204, 1.057]\n ]'], {}), '([[3.2406, -1.5372, -0.4986], [-0.9689, 1.8758, 0.0415], [0.0557, -\n 0.204, 1.057]])\n', (6535, 6622), True, 'import numpy as np\n'), ((7448, 7474), 'HDRutils.io.imread', 'io.imread', (['f'], {'libraw': '(False)'}), '(f, libraw=False)\n', (7457, 7474), True, 'import HDRutils.io as io\n'), ((7611, 7668), 'colour_demosaicing.demosaicing_CFA_Bayer_bilinear', 'cd.demosaicing_CFA_Bayer_bilinear', (['i_img'], {'pattern': 'pattern'}), '(i_img, pattern=pattern)\n', (7644, 7668), True, 'import colour_demosaicing as cd\n'), ((4094, 4119), 'HDRutils.io.imread', 'io.imread', (['files[ref_idx]'], {}), '(files[ref_idx])\n', (4103, 4119), True, 'import HDRutils.io as io\n'), ((6692, 6813), 'numpy.array', 'np.array', (['[[2.041369, -0.5649464, -0.3446944], [-0.969266, 1.8760108, 0.041556], [\n 0.0134474, -0.1183897, 1.0154096]]'], {}), '([[2.041369, -0.5649464, -0.3446944], [-0.969266, 1.8760108, \n 0.041556], [0.0134474, -0.1183897, 1.0154096]])\n', (6700, 6813), True, 'import numpy as np\n'), ((6923, 6932), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6929, 6932), True, 'import numpy as np\n'), ((6946, 6955), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6952, 6955), True, 'import numpy as np\n'), ((7537, 7549), 'HDRutils.io.imread', 'io.imread', (['f'], {}), '(f)\n', (7546, 7549), True, 'import HDRutils.io as io\n')]
|
import pickle
import numpy as np
import sys
def eigen(num, split_num, layer_num):
prefix = 'min_'
layer_num = int(layer_num)
num = str(num)
#cur = [8, 8, 8, 8, 16, 16, 24, 24, 24, 24, 24, 24, 32, 32]
#cur = [10, 12, 13, 13, 21, 29, 35, 37, 35, 25, 28, 28, 37, 32]
#cur = [12, 12, 18, 17, 28, 54, 55, 45, 40, 25, 28, 28, 37, 32]
#cur = [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16]
cur = [22, 23, (20, 2), 25, (22, 2), 25, (24, 2), 20, 18, 19, 19, 20, (18, 2), 20]
cur = [27, 39, (22, 2), 39, (37, 2), 40, (30, 2), 20, 18, 21, 21, 21, (19, 2), 20]
cur = [29, 74, (24, 2), 54, (50, 2), 64, (42, 2), 21, 18, 24, 21, 21, (19, 2), 20]
cur = [33, 132, (28, 2), 69, (59, 2), 104, (53, 2), 21, 18, 24, 21, 21, (19, 2), 20]
cur = [33, 209, (34, 2), 90, (72, 2), 160, (64, 2), 21, 18, 24, 21, 21, (19, 2), 20]
cur[2] = cur[2][0]
cur[4] = cur[4][0]
cur[6] = cur[6][0]
cur[12] = cur[12][0]
cur = [4,4,4,4]
cur = [4, 7, 5, 4]
cur = [10, 12, 21, 11]
cur = [11, 18, 29, 12]
cur = [11, 18, 29, 12]
cur = [11, 30, 38, 12]
print(cur)
cur = pickle.load(open('cifar10max' + str(num) + '.pkl', 'rb'))
curt = []
DD = 0
layer_num = len(cur)
'''
for i in cur:
if i != 'M':
curt.append(i)
for i in range(layer_num):
#w = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
try:
w = pickle.load(open('eigenm/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
#w1 = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
print(w)
#print(w.shape)
except:
DD = DD + 1
continue
if i == DD:
W = w
else:
W = np.concatenate([W, w], 0)
'''
prefix = 'max_'
r = [0.116849326, 0.038422294, 0.02061177, 0.02997986, 0.014377874, 0.0062844744, 0.012592447, 0.006363712, 0.008475702, 0.02377023, 0.038945824, 0.03370137, 0.03196905, 0.06754288]
r = np.ones([14])
#r = pickle.load(open('cifar10max' + str(num) + 'mag.pkl','rb'))
for i in range(layer_num):
#w = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
try:
w = pickle.load(open('eigenmax/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
#print(np.mean(w))
w *= np.sqrt(r[i])
print(np.mean(w))
#w1 = pickle.load(open('eigen/' + prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
#print(w.shape)
except:
DD = DD + 1
continue
if i == DD:
W = w
else:
W = np.concatenate([W, -w], 0)
st = np.argsort(W)
L = W.shape[0]
t = int(0.15 * L)
thre = W[st[t]]
SP = {}
VP = {}
SP1 = {}
VP1 = {}
DL = []
dp = []
prefix = sys.argv[3] + '_'
for i in range(layer_num):
if i == 0:
k = 3
else:
k = 1
try:
w = pickle.load(open('eigenmax/' +prefix+ 'A_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
v = pickle.load(open('eigenmax/' +prefix+ 'V_' + str(i) + '_' + num + '_.pkl', 'rb'), encoding='latin1')
w *= np.sqrt(r[i])
except:
print(i)
l = int(0.1 * curt[i])
D = np.random.randint(0, curt[i], size=[int(0.1 * curt[i]), 1])
SP[i] = D
VD = np.zeros([1, 1, 1])
#VP[i] = np.reshape(v, [v.shape[0], -1, k, k])
VP[i] = np.zeros([curt[i], curt[i-1], 1, 1])
DL.append(l)
continue
if prefix == 'max_':
ic = -1
else:
ic = 1
D = np.argwhere((ic * w) < thre)
l = D.shape[0]
SP[i] = np.squeeze(D)
#SP1[i] = np.random.randint(0, curt[i], size=[D.shape[0], 1])
VD = v[D].astype(float)
VP[i] = np.reshape(v, [v.shape[0], -1, k, k])
#VP1[i] = np.zeros_like(VD)
dp.append(l)
DL.append(l)
print(SP[i].shape)
print(VP[i].shape)
print(cur[i])
pickle.dump(SP, open('eigenmax/' + num + prefix + 'global.pkl', 'wb'))
pickle.dump(VP, open('eigenmax/' + num + prefix + 'globalv.pkl', 'wb'))
print(DL)
DL = np.array(DL)
ct = 0
DDL = []
for i in cur:
if i == 'M':
DDL.append('M')
continue
else:
DDL.append(int(i + DL[ct]))
ct += 1
for i in range(len(cur)):
cur[i] = int(DDL[i])
#print(DL)
print(DDL)
print(cur)
pickle.dump(DL, open('maxcfg' + str(int(num) + 1) + '.pkl', 'wb'))
#print(SP)
eigen(sys.argv[1], 1, sys.argv[2])
|
[
"numpy.mean",
"numpy.reshape",
"numpy.ones",
"numpy.sqrt",
"numpy.squeeze",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.argwhere",
"numpy.concatenate"
] |
[((2128, 2141), 'numpy.ones', 'np.ones', (['[14]'], {}), '([14])\n', (2135, 2141), True, 'import numpy as np\n'), ((2887, 2900), 'numpy.argsort', 'np.argsort', (['W'], {}), '(W)\n', (2897, 2900), True, 'import numpy as np\n'), ((4491, 4503), 'numpy.array', 'np.array', (['DL'], {}), '(DL)\n', (4499, 4503), True, 'import numpy as np\n'), ((3908, 3934), 'numpy.argwhere', 'np.argwhere', (['(ic * w < thre)'], {}), '(ic * w < thre)\n', (3919, 3934), True, 'import numpy as np\n'), ((3989, 4002), 'numpy.squeeze', 'np.squeeze', (['D'], {}), '(D)\n', (3999, 4002), True, 'import numpy as np\n'), ((4121, 4158), 'numpy.reshape', 'np.reshape', (['v', '[v.shape[0], -1, k, k]'], {}), '(v, [v.shape[0], -1, k, k])\n', (4131, 4158), True, 'import numpy as np\n'), ((2533, 2546), 'numpy.sqrt', 'np.sqrt', (['r[i]'], {}), '(r[i])\n', (2540, 2546), True, 'import numpy as np\n'), ((2851, 2877), 'numpy.concatenate', 'np.concatenate', (['[W, -w]', '(0)'], {}), '([W, -w], 0)\n', (2865, 2877), True, 'import numpy as np\n'), ((3431, 3444), 'numpy.sqrt', 'np.sqrt', (['r[i]'], {}), '(r[i])\n', (3438, 3444), True, 'import numpy as np\n'), ((2565, 2575), 'numpy.mean', 'np.mean', (['w'], {}), '(w)\n', (2572, 2575), True, 'import numpy as np\n'), ((3632, 3651), 'numpy.zeros', 'np.zeros', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (3640, 3651), True, 'import numpy as np\n'), ((3731, 3769), 'numpy.zeros', 'np.zeros', (['[curt[i], curt[i - 1], 1, 1]'], {}), '([curt[i], curt[i - 1], 1, 1])\n', (3739, 3769), True, 'import numpy as np\n')]
|
import numpy as np
import open3d as o3d
import pickle
import torch
import ipdb
st = ipdb.set_trace
def apply_4x4(RT, xyz):
B, N, _ = list(xyz.shape)
ones = torch.ones_like(xyz[:,:,0:1])
xyz1 = torch.cat([xyz, ones], 2)
xyz1_t = torch.transpose(xyz1, 1, 2)
# this is B x 4 x N
xyz2_t = torch.matmul(RT, xyz1_t)
xyz2 = torch.transpose(xyz2_t, 1, 2)
xyz2 = xyz2[:,:,:3]
return xyz2
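# Editor's note: hedged usage sketch, not part of the original script. An identity
# transform is applied to a random batch of points; shapes are (B, 4, 4) and (B, N, 3).
# RT = torch.eye(4).unsqueeze(0)
# pts = torch.rand(1, 100, 3)
# pts_out = apply_4x4(RT, pts)             # equals pts for the identity transform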
def make_pcd(pts):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pts[:, :3])
# if the dim is greater than 3 I expect the color
if pts.shape[1] == 6:
pcd.colors = o3d.utility.Vector3dVector(pts[:, 3:] / 255.\
if pts[:, 3:].max() > 1. else pts[:, 3:])
return pcd
mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=1, origin=[0, 0, 0])
pcd_list = [mesh_frame]
# for i in range(10):
# path = f"/Users/shamitlal/Desktop/temp/convocc/pointcloud_0{i}.npz"
# pcd = np.load(path)['points']
# st()
# pcd = make_pcd(pcd)
# pcd_list.append(pcd)
path = f"/Users/shamitlal/Desktop/temp/convocc/pointcloud.npz"
pcd = np.load(path)['points']
print("max: ", np.max(pcd, axis=0))
print("min: ", np.min(pcd, axis=0))
pcd = make_pcd(pcd)
pcd_list.append(pcd)
print("Pcd list len is: ", len(pcd_list))
o3d.visualization.draw_geometries(pcd_list)
# Visualize inside points
# path = f"/Users/shamitlal/Desktop/temp/convocc/points.npz"
# pcd = np.load(path)
# occ = np.unpackbits(pcd['occupancies'])
# pcd = pcd['points']
# occ_pts_idx = np.where(occ==1)[0]
# pcd = pcd[occ_pts_idx]
# print("max: ", np.max(pcd, axis=0))
# print("min: ", np.min(pcd, axis=0))
# pcd = make_pcd(pcd)
# pcd_list.append(pcd)
# # Visualize actual pointcloud
# path = f"/Users/shamitlal/Desktop/temp/convocc/pointcloud.npz"
# pcd = np.load(path)['points']
# print("max: ", np.max(pcd, axis=0))
# print("min: ", np.min(pcd, axis=0))
# pcd = make_pcd(pcd)
# pcd_list.append(pcd)
# print("Pcd list len is: ", len(pcd_list))
o3d.visualization.draw_geometries(pcd_list)
#Visualize pydisco shapenet data
# path = f"/Users/shamitlal/Desktop/temp/convocc/02958343_c48a804986a819b4bda733a39f84326d.p"
# pfile = pickle.load(open(path, "rb"))
# xyz_camXs = torch.tensor(pfile['xyz_camXs_raw'])
# origin_T_camXs = torch.tensor(pfile['origin_T_camXs_raw'])
# xyz_origin = apply_4x4(origin_T_camXs, xyz_camXs)
# pcd = xyz_origin.reshape(-1, 3)
# x, y, z = torch.abs(pcd[:,0]), torch.abs(pcd[:,1]), torch.abs(pcd[:,2])
# cond1 = (x<10)
# cond2 = (y<10)
# cond3 = (z<10)
# cond = cond1 & cond2 & cond3
# pcd = pcd[cond]
# pcd_list.append(make_pcd(pcd))
# o3d.visualization.draw_geometries(pcd_list)
# st()
# aa=1
|
[
"torch.ones_like",
"torch.transpose",
"numpy.max",
"open3d.utility.Vector3dVector",
"open3d.visualization.draw_geometries",
"torch.matmul",
"open3d.geometry.PointCloud",
"numpy.min",
"open3d.geometry.TriangleMesh.create_coordinate_frame",
"numpy.load",
"torch.cat"
] |
[((760, 835), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {'size': '(1)', 'origin': '[0, 0, 0]'}), '(size=1, origin=[0, 0, 0])\n', (809, 835), True, 'import open3d as o3d\n'), ((1315, 1358), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['pcd_list'], {}), '(pcd_list)\n', (1348, 1358), True, 'import open3d as o3d\n'), ((2012, 2055), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['pcd_list'], {}), '(pcd_list)\n', (2045, 2055), True, 'import open3d as o3d\n'), ((167, 198), 'torch.ones_like', 'torch.ones_like', (['xyz[:, :, 0:1]'], {}), '(xyz[:, :, 0:1])\n', (182, 198), False, 'import torch\n'), ((208, 233), 'torch.cat', 'torch.cat', (['[xyz, ones]', '(2)'], {}), '([xyz, ones], 2)\n', (217, 233), False, 'import torch\n'), ((247, 274), 'torch.transpose', 'torch.transpose', (['xyz1', '(1)', '(2)'], {}), '(xyz1, 1, 2)\n', (262, 274), False, 'import torch\n'), ((312, 336), 'torch.matmul', 'torch.matmul', (['RT', 'xyz1_t'], {}), '(RT, xyz1_t)\n', (324, 336), False, 'import torch\n'), ((348, 377), 'torch.transpose', 'torch.transpose', (['xyz2_t', '(1)', '(2)'], {}), '(xyz2_t, 1, 2)\n', (363, 377), False, 'import torch\n'), ((448, 473), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (471, 473), True, 'import open3d as o3d\n'), ((491, 529), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pts[:, :3]'], {}), '(pts[:, :3])\n', (517, 529), True, 'import open3d as o3d\n'), ((1136, 1149), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1143, 1149), True, 'import numpy as np\n'), ((1175, 1194), 'numpy.max', 'np.max', (['pcd'], {'axis': '(0)'}), '(pcd, axis=0)\n', (1181, 1194), True, 'import numpy as np\n'), ((1211, 1230), 'numpy.min', 'np.min', (['pcd'], {'axis': '(0)'}), '(pcd, axis=0)\n', (1217, 1230), True, 'import numpy as np\n')]
|
"""
Helper functions for image processing
The color space conversion functions are modified from functions of the
Python package scikit-image, https://github.com/scikit-image/scikit-image.
scikit-image has the following license.
Copyright (C) 2019, the scikit-image team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of skimage nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from PIL import Image
import numpy as np
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics import pairwise_distances
import requests
from io import BytesIO
import os
from warnings import warn
from dotenv import load_dotenv
load_dotenv()
sls_stage = os.getenv("SLS_STAGE")
if sls_stage == 'local':
import plotly.graph_objects as go
default_k = 4
xyz_ref_white = np.asarray((0.95047, 1.0, 1.08883))
xyz_from_rgb = np.array(
[
[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227],
]
)
rgb_from_xyz = np.linalg.inv(xyz_from_rgb)
def rgb2xyz(rgb_arr):
"""
    Convert color from RGB to CIE 1931 XYZ
Parameters
----------
rgb_arr: ndarray
Color in RGB
Returns
------
xyz_arr: ndarray
Color in CIE 1931 XYZ
"""
xyz_arr = np.copy(rgb_arr)
mask = xyz_arr > 0.04045
xyz_arr[mask] = np.power((xyz_arr[mask] + 0.055) / 1.055, 2.4)
xyz_arr[~mask] /= 12.92
return xyz_arr @ np.transpose(xyz_from_rgb)
def xyz2lab(xyz_arr):
"""
    Convert color from CIE 1931 XYZ to CIE 1976 L*a*b*
Parameters
----------
xyz_arr: ndarray
Color in CIE 1931 XYZ
Returns
------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
"""
lab_arr = np.copy(xyz_arr) / xyz_ref_white
mask = lab_arr > 0.008856
lab_arr[mask] = np.cbrt(lab_arr[mask])
lab_arr[~mask] = 7.787 * lab_arr[~mask] + 16.0 / 116.0
x, y, z = lab_arr[:, 0], lab_arr[:, 1], lab_arr[:, 2]
L = (116.0 * y) - 16.0
a = 500.0 * (x - y)
b = 200.0 * (y - z)
return np.transpose(np.asarray((L, a, b)))
def lab2xyz(lab_arr):
"""
    Convert color from CIE 1976 L*a*b* to CIE 1931 XYZ
Parameters
----------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
Returns
------
xyz_arr: ndarray
Color in CIE 1931 XYZ
"""
L, a, b = lab_arr[:, 0], lab_arr[:, 1], lab_arr[:, 2]
y = (L + 16.0) / 116.0
x = (a / 500.0) + y
z = y - (b / 200.0)
if np.any(z < 0):
invalid = np.nonzero(z < 0)
warn(
"Color data out of range: Z < 0 in %s pixels" % invalid[0].size,
stacklevel=2,
)
z[invalid] = 0
xyz_arr = np.transpose(np.asarray((x, y, z)))
mask = xyz_arr > 0.2068966
xyz_arr[mask] = np.power(xyz_arr[mask], 3.0)
xyz_arr[~mask] = (xyz_arr[~mask] - 16.0 / 116.0) / 7.787
# rescale to the reference white (illuminant)
xyz_arr *= xyz_ref_white
return xyz_arr
def xyz2rgb(xyz_arr):
"""
    Convert color from CIE 1931 XYZ to RGB
Parameters
----------
xyz_arr: ndarray
Color in CIE 1931 XYZ
Returns
------
rgb_arr: ndarray
Color in RGB
"""
rgb_arr = xyz_arr @ np.transpose(rgb_from_xyz)
mask = rgb_arr > 0.0031308
rgb_arr[mask] = 1.055 * np.power(rgb_arr[mask], 1 / 2.4) - 0.055
rgb_arr[~mask] *= 12.92
rgb_arr = np.clip(rgb_arr, 0, 1)
return rgb_arr
def rgb2lab(rgb_arr):
"""
    Convert color from RGB to CIE 1976 L*a*b*
Parameters
----------
rgb_arr: ndarray
Color in RGB
Returns
-------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
"""
return xyz2lab(rgb2xyz(rgb_arr))
def lab2rgb(lab_arr):
"""
    Convert color from CIE 1976 L*a*b* to RGB
Parameters
----------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
Returns
------
rgb_arr: ndarray
Color in RGB
"""
return xyz2rgb(lab2xyz(lab_arr))
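# Editor's note: hedged usage sketch, not part of the original module. Channel values are
# expected in [0, 1]; lab2rgb approximately inverts rgb2lab.
# rgb = np.array([[0.2, 0.4, 0.6]])
# lab = rgb2lab(rgb)                       # L in [0, 100], a and b signed
# rgb_back = lab2rgb(lab)                  # close to the original rgb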
def get_lab_data(im):
"""
    Convert an image's RGB pixels to CIE 1976 L*a*b*
Parameters
----------
im: Image
Image to create palette
Returns
------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
"""
img_size = 150, 150
im.thumbnail(img_size)
pixel_rgb = np.asarray(im)
# Range of RGB in Pillow is [0, 255], that in skimage is [0, 1]
pixel_lab = rgb2lab(pixel_rgb.reshape(-1, pixel_rgb.shape[-1]) / 255)
return pixel_lab.reshape(-1, pixel_lab.shape[-1])
def make_img(colors, counts):
"""
Create image from colors
Parameters
----------
colors: ndarray
Color in RGB
counts: ndarray
Number of data points in each color cluster
Returns
------
img: Image
Generated image
"""
img_size = 512, 512
n_clusters = len(colors)
lengths = (
((counts / np.sum(counts)) + (1.0 / n_clusters)) / 2.0 * img_size[0]
).astype(np.uint16)
# Ensure sum of lengths equals img_size[0]
lengths[0] = lengths[0] + (img_size[0] - np.sum(lengths))
pixel_group = np.array(
[np.tile(colors[i], (lengths[i], img_size[1], 1)) for i in range(n_clusters)]
)
pixel_rgb = np.transpose(np.concatenate(pixel_group), (1, 0, 2))
return Image.fromarray(pixel_rgb, mode="RGB")
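# Editor's note: hedged usage sketch, not part of the original module. A three-colour
# swatch is built; counts set the relative stripe widths and the file name is illustrative.
# colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.uint8)
# counts = np.array([50, 30, 20])
# make_img(colors, counts).save('palette.png')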
def get_hex_string(rgb_arr):
"""
    Convert RGB color to HEX values
Parameters
----------
rgb_arr: ndarray
Color in RGB
Returns
------
hex_names: str
HEX values of color
"""
def int2hex(integer):
hex_string = hex(integer)[2:]
if len(hex_string) < 2:
return "0" + hex_string
return hex_string
return "".join(np.vectorize(int2hex)(rgb_arr)).upper()
def cluster_kmeans(data, n_clusters):
"""
Partition data with k-means clustering
Parameters
----------
data: ndarray
Data points
n_clusters: int
Number of clusters
Returns
------
centers: ndarray
Clusters centers
labels: ndarray
Center label of every data point
"""
kmeans = KMeans(n_clusters)
labels = kmeans.fit_predict(data)
centers = kmeans.cluster_centers_
return centers, labels
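# Editor's note: hedged usage sketch, not part of the original module; `im` is assumed to
# be a PIL Image already opened by the caller.
# data = get_lab_data(im)
# centers, labels = cluster_kmeans(data, 4)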
def compute_medoid(data):
"""
Get medoid of data
Parameters
----------
data: ndarray
Data points
Returns
------
medoid: ndarray
Medoid
"""
dist_mat = pairwise_distances(data)
return data[np.argmin(dist_mat.sum(axis=0))]
def cluster_agglo(data, n_clusters):
"""
Partition data with agglomerative clustering
Parameters
----------
data: ndarray
Data points
n_clusters: int
Number of clusters
Returns
------
centers: ndarray
Clusters centers
labels: ndarray
Center label of every data point
"""
ac = AgglomerativeClustering(n_clusters)
labels = ac.fit_predict(data)
print("Completed agglomerative clustering")
centers = np.empty([n_clusters, 3])
for i in range(n_clusters):
centers[i] = compute_medoid(data[labels == i])
return centers, labels
def get_cluster(centers, labels):
"""
Sort cluster centers and count number of labels
Parameters
----------
centers: ndarray
Clusters centers
labels: ndarray
Center label of every data point
Returns
------
sort_centers: ndarray
Clusters centers sorted by number of label descending
sort_labels: ndarray
Sorted center label of every data point
sort_counts: ndarray
Number of data points of sorted centers
"""
_, counts = np.unique(labels, return_counts=True)
sort_idx = (-counts).argsort()
sort_labels = np.vectorize(lambda i: list(sort_idx).index(i))(labels)
return centers[sort_idx], sort_labels, counts[sort_idx]
def get_palette(im, k):
"""
Create a palette from an image
Parameters
----------
im: Image
Image to create palette from
k: int
        Number of palette colors
If None or k is outside of the range [2, 10], uses default_k as k
Returns
------
im_output: Image
Image of palette colors
hex_colors: ndarray
Palette colors in HEX values
"""
if k is None:
k = default_k
elif k < 2 or k > 10:
k = default_k
data = get_lab_data(im)
print("Get {} clusters".format(k))
centers, labels = cluster_agglo(data, k)
sorted_centers, _, counts = get_cluster(centers, labels)
# Range of RGB in Pillow is [0, 255], that in skimage is [0, 1]
centers_rgb = (255 * lab2rgb(sorted_centers)).astype(np.uint8)
print("Clusters are")
print(centers_rgb)
return (
make_img(centers_rgb, counts),
np.apply_along_axis(get_hex_string, 1, centers_rgb),
)
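# Editor's note: hedged usage sketch, not part of the original module. The URL and output
# file name are illustrative.
# im = get_image_from_url('https://example.com/photo.jpg')
# palette_img, hex_colors = get_palette(im, 5)
# palette_img.save('palette.png')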
def get_palette_plot(im, k):
"""
Create a palette from an image and plot clusters in 3D
Parameters
----------
img: Image
Image to create palette.
k: int
        Number of palette colors.
If None or k is outside of the range [2, 10], uses default_k as k
Returns
------
im_output: Image
Image of palette colors
hex_colors: ndarray
Palette colors in HEX values
"""
if k is None:
k = default_k
elif k < 2 or k > 10:
k = default_k
data = get_lab_data(im)
print("Get {} clusters".format(k))
centers, labels = cluster_agglo(data, k)
sorted_centers, sorted_labels, counts = get_cluster(centers, labels)
# Range of RGB in Pillow is [0, 255], that in skimage is [0, 1]
centers_rgb = (255 * lab2rgb(sorted_centers)).astype(np.uint8)
print("Clusters in RGB are")
print(centers_rgb)
centers_hex = np.apply_along_axis(get_hex_string, 1, centers_rgb)
plot_3d(data, sorted_labels, centers_hex)
return (
make_img(centers_rgb, counts),
centers_hex,
)
def plot_3d(data, labels, centers_hex):
"""
Plot clustered data in 3D
Parameters
----------
data: ndarray
Data points
labels: ndarray
Labels of every data point
centers_hex: ndarray
Color in HEX values
"""
l, a, b = np.transpose(data)
fig = go.Figure(
data=[
go.Scatter3d(
x=a,
y=b,
z=l,
mode="markers",
marker={
"size": 3,
"color": np.vectorize(lambda hex: "#" + hex)(centers_hex)[labels],
"opacity": 0.1,
},
)
]
)
fig.update_layout(
scene={"xaxis_title": "a", "yaxis_title": "b", "zaxis_title": "L",}
)
fig.show()
def get_image_from_url(url):
"""
Download and create image
Parameters
----------
url: str
Image link
Returns
------
im: Image
Image downloaded
"""
print("Get image from ", url)
response = requests.get(url)
return Image.open(BytesIO(response.content))
|
[
"numpy.clip",
"io.BytesIO",
"numpy.array",
"sklearn.cluster.AgglomerativeClustering",
"numpy.asarray",
"dotenv.load_dotenv",
"numpy.empty",
"numpy.concatenate",
"numpy.tile",
"numpy.any",
"requests.get",
"numpy.nonzero",
"numpy.transpose",
"numpy.vectorize",
"sklearn.cluster.KMeans",
"numpy.copy",
"PIL.Image.fromarray",
"numpy.unique",
"os.getenv",
"numpy.power",
"sklearn.metrics.pairwise_distances",
"numpy.sum",
"numpy.linalg.inv",
"numpy.apply_along_axis",
"numpy.cbrt"
] |
[((1918, 1931), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1929, 1931), False, 'from dotenv import load_dotenv\n'), ((1945, 1967), 'os.getenv', 'os.getenv', (['"""SLS_STAGE"""'], {}), "('SLS_STAGE')\n", (1954, 1967), False, 'import os\n'), ((2064, 2099), 'numpy.asarray', 'np.asarray', (['(0.95047, 1.0, 1.08883)'], {}), '((0.95047, 1.0, 1.08883))\n', (2074, 2099), True, 'import numpy as np\n'), ((2116, 2225), 'numpy.array', 'np.array', (['[[0.412453, 0.35758, 0.180423], [0.212671, 0.71516, 0.072169], [0.019334, \n 0.119193, 0.950227]]'], {}), '([[0.412453, 0.35758, 0.180423], [0.212671, 0.71516, 0.072169], [\n 0.019334, 0.119193, 0.950227]])\n', (2124, 2225), True, 'import numpy as np\n'), ((2276, 2303), 'numpy.linalg.inv', 'np.linalg.inv', (['xyz_from_rgb'], {}), '(xyz_from_rgb)\n', (2289, 2303), True, 'import numpy as np\n'), ((2558, 2574), 'numpy.copy', 'np.copy', (['rgb_arr'], {}), '(rgb_arr)\n', (2565, 2574), True, 'import numpy as np\n'), ((2624, 2670), 'numpy.power', 'np.power', (['((xyz_arr[mask] + 0.055) / 1.055)', '(2.4)'], {}), '((xyz_arr[mask] + 0.055) / 1.055, 2.4)\n', (2632, 2670), True, 'import numpy as np\n'), ((3108, 3130), 'numpy.cbrt', 'np.cbrt', (['lab_arr[mask]'], {}), '(lab_arr[mask])\n', (3115, 3130), True, 'import numpy as np\n'), ((3774, 3787), 'numpy.any', 'np.any', (['(z < 0)'], {}), '(z < 0)\n', (3780, 3787), True, 'import numpy as np\n'), ((4076, 4104), 'numpy.power', 'np.power', (['xyz_arr[mask]', '(3.0)'], {}), '(xyz_arr[mask], 3.0)\n', (4084, 4104), True, 'import numpy as np\n'), ((4697, 4719), 'numpy.clip', 'np.clip', (['rgb_arr', '(0)', '(1)'], {}), '(rgb_arr, 0, 1)\n', (4704, 4719), True, 'import numpy as np\n'), ((5623, 5637), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (5633, 5637), True, 'import numpy as np\n'), ((6602, 6640), 'PIL.Image.fromarray', 'Image.fromarray', (['pixel_rgb'], {'mode': '"""RGB"""'}), "(pixel_rgb, mode='RGB')\n", (6617, 6640), False, 'from PIL import Image\n'), ((7454, 7472), 'sklearn.cluster.KMeans', 'KMeans', (['n_clusters'], {}), '(n_clusters)\n', (7460, 7472), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((7791, 7815), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['data'], {}), '(data)\n', (7809, 7815), False, 'from sklearn.metrics import pairwise_distances\n'), ((8230, 8265), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', (['n_clusters'], {}), '(n_clusters)\n', (8253, 8265), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((8362, 8387), 'numpy.empty', 'np.empty', (['[n_clusters, 3]'], {}), '([n_clusters, 3])\n', (8370, 8387), True, 'import numpy as np\n'), ((9023, 9060), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (9032, 9060), True, 'import numpy as np\n'), ((11152, 11203), 'numpy.apply_along_axis', 'np.apply_along_axis', (['get_hex_string', '(1)', 'centers_rgb'], {}), '(get_hex_string, 1, centers_rgb)\n', (11171, 11203), True, 'import numpy as np\n'), ((11614, 11632), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (11626, 11632), True, 'import numpy as np\n'), ((12399, 12416), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (12411, 12416), False, 'import requests\n'), ((2720, 2746), 'numpy.transpose', 'np.transpose', (['xyz_from_rgb'], {}), '(xyz_from_rgb)\n', (2732, 2746), True, 'import numpy as np\n'), ((3025, 3041), 'numpy.copy', 'np.copy', (['xyz_arr'], {}), '(xyz_arr)\n', (3032, 3041), True, 'import numpy as np\n'), 
((3347, 3368), 'numpy.asarray', 'np.asarray', (['(L, a, b)'], {}), '((L, a, b))\n', (3357, 3368), True, 'import numpy as np\n'), ((3807, 3824), 'numpy.nonzero', 'np.nonzero', (['(z < 0)'], {}), '(z < 0)\n', (3817, 3824), True, 'import numpy as np\n'), ((4002, 4023), 'numpy.asarray', 'np.asarray', (['(x, y, z)'], {}), '((x, y, z))\n', (4012, 4023), True, 'import numpy as np\n'), ((4528, 4554), 'numpy.transpose', 'np.transpose', (['rgb_from_xyz'], {}), '(rgb_from_xyz)\n', (4540, 4554), True, 'import numpy as np\n'), ((6551, 6578), 'numpy.concatenate', 'np.concatenate', (['pixel_group'], {}), '(pixel_group)\n', (6565, 6578), True, 'import numpy as np\n'), ((10159, 10210), 'numpy.apply_along_axis', 'np.apply_along_axis', (['get_hex_string', '(1)', 'centers_rgb'], {}), '(get_hex_string, 1, centers_rgb)\n', (10178, 10210), True, 'import numpy as np\n'), ((12439, 12464), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (12446, 12464), False, 'from io import BytesIO\n'), ((4614, 4646), 'numpy.power', 'np.power', (['rgb_arr[mask]', '(1 / 2.4)'], {}), '(rgb_arr[mask], 1 / 2.4)\n', (4622, 4646), True, 'import numpy as np\n'), ((6385, 6400), 'numpy.sum', 'np.sum', (['lengths'], {}), '(lengths)\n', (6391, 6400), True, 'import numpy as np\n'), ((6439, 6487), 'numpy.tile', 'np.tile', (['colors[i]', '(lengths[i], img_size[1], 1)'], {}), '(colors[i], (lengths[i], img_size[1], 1))\n', (6446, 6487), True, 'import numpy as np\n'), ((7050, 7071), 'numpy.vectorize', 'np.vectorize', (['int2hex'], {}), '(int2hex)\n', (7062, 7071), True, 'import numpy as np\n'), ((6211, 6225), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (6217, 6225), True, 'import numpy as np\n'), ((11875, 11910), 'numpy.vectorize', 'np.vectorize', (["(lambda hex: '#' + hex)"], {}), "(lambda hex: '#' + hex)\n", (11887, 11910), True, 'import numpy as np\n')]
|
"""
TopView is the main Widget with the related ControllerTopView Class
There are several SliceView windows (sagittal, coronal, possibly tilted etc...) that each have
a SliceController object
The underlying data model object is an ibllib.atlas.AllenAtlas object
TopView(QMainWindow)
ControllerTopView(PgImageController)
SliceView(QWidget)
SliceController(PgImageController)
"""
from dataclasses import dataclass, field
from pathlib import Path
import numpy as np
from PyQt5 import QtWidgets, uic
from PyQt5.QtGui import QTransform
import pyqtgraph as pg
import matplotlib.cm
from ibllib.atlas import AllenAtlas
import qt
class TopView(QtWidgets.QMainWindow):
"""
Main Window of the application.
    This is a top view of the brain with two movable lines that allow selecting sagittal and
    coronal slices.
"""
@staticmethod
def _instances():
app = QtWidgets.QApplication.instance()
return [w for w in app.topLevelWidgets() if isinstance(w, TopView)]
@staticmethod
def _get_or_create(title=None, **kwargs):
av = next(filter(lambda e: e.isVisible() and e.windowTitle() == title,
TopView._instances()), None)
if av is None:
av = TopView(**kwargs)
av.setWindowTitle(title)
return av
def __init__(self, **kwargs):
super(TopView, self).__init__()
self.ctrl = ControllerTopView(self, **kwargs)
self.ctrl.image_layers = [ImageLayer()]
uic.loadUi(Path(__file__).parent.joinpath('topview.ui'), self)
self.plotItem_topview.setAspectLocked(True)
self.plotItem_topview.addItem(self.ctrl.imageItem)
# setup one horizontal and one vertical line that can be moved
line_kwargs = {'movable': True, 'pen': pg.mkPen((0, 255, 0), width=3)}
self.line_coronal = pg.InfiniteLine(angle=0, pos=0, **line_kwargs)
self.line_sagittal = pg.InfiniteLine(angle=90, pos=0, **line_kwargs)
self.line_coronal.sigDragged.connect(self._refresh_coronal) # sigPositionChangeFinished
self.line_sagittal.sigDragged.connect(self._refresh_sagittal)
self.plotItem_topview.addItem(self.line_coronal)
self.plotItem_topview.addItem(self.line_sagittal)
# connect signals and slots: mouse moved
s = self.plotItem_topview.getViewBox().scene()
self.proxy = pg.SignalProxy(s.sigMouseMoved, rateLimit=60, slot=self.mouseMoveEvent)
# combobox for the atlas remapping choices
self.comboBox_mappings.addItems(self.ctrl.atlas.regions.mappings.keys())
self.comboBox_mappings.currentIndexChanged.connect(self._refresh)
# slider for transparency between image and labels
self.slider_alpha.sliderMoved.connect(self.slider_alpha_move)
self.ctrl.set_top()
def add_scatter_feature(self, data):
self.ctrl.scatter_data = data / 1e6
self.ctrl.scatter_data_ind = self.ctrl.atlas.bc.xyz2i(self.ctrl.scatter_data)
self.ctrl.fig_coronal.add_scatter()
self.ctrl.fig_sagittal.add_scatter()
self.line_coronal.sigDragged.connect(
lambda: self.ctrl.set_scatter(self.ctrl.fig_coronal, self.line_coronal.value()))
self.line_sagittal.sigDragged.connect(
lambda: self.ctrl.set_scatter(self.ctrl.fig_sagittal, self.line_sagittal.value()))
self.ctrl.set_scatter(self.ctrl.fig_coronal)
self.ctrl.set_scatter(self.ctrl.fig_sagittal)
def add_image_layer(self, **kwargs):
"""
:param pg_kwargs: pyqtgraph setImage arguments: {'levels': None, 'lut': None,
'opacity': 1.0}
:param slice_kwargs: ibllib.atlas.slice arguments: {'volume': 'image', 'mode': 'clip'}
:return:
"""
self.ctrl.fig_sagittal.add_image_layer(**kwargs)
self.ctrl.fig_coronal.add_image_layer(**kwargs)
def add_regions_feature(self, values, cmap, opacity=1.0):
self.ctrl.values = values
        # create cmap look-up table
colormap = matplotlib.cm.get_cmap(cmap)
colormap._init()
lut = (colormap._lut * 255).view(np.ndarray)
lut = np.insert(lut, 0, [0, 0, 0, 0], axis=0)
self.add_image_layer(pg_kwargs={'lut': lut, 'opacity': opacity}, slice_kwargs={
'volume': 'value', 'region_values': values, 'mode': 'clip'})
self._refresh()
def slider_alpha_move(self):
annotation_alpha = self.slider_alpha.value() / 100
self.ctrl.fig_coronal.ctrl.image_layers[0].pg_kwargs['opacity'] = 1 - annotation_alpha
self.ctrl.fig_sagittal.ctrl.image_layers[0].pg_kwargs['opacity'] = 1 - annotation_alpha
self.ctrl.fig_coronal.ctrl.image_layers[1].pg_kwargs['opacity'] = annotation_alpha
self.ctrl.fig_sagittal.ctrl.image_layers[1].pg_kwargs['opacity'] = annotation_alpha
self._refresh()
def mouseMoveEvent(self, scenepos):
if isinstance(scenepos, tuple):
scenepos = scenepos[0]
else:
return
pass
# qpoint = self.imageItem.mapFromScene(scenepos)
def _refresh(self):
self._refresh_sagittal()
self._refresh_coronal()
def _refresh_coronal(self):
self.ctrl.set_slice(self.ctrl.fig_coronal, self.line_coronal.value(),
mapping=self.comboBox_mappings.currentText())
def _refresh_sagittal(self):
self.ctrl.set_slice(self.ctrl.fig_sagittal, self.line_sagittal.value(),
mapping=self.comboBox_mappings.currentText())
class SliceView(QtWidgets.QWidget):
"""
Window containing a volume slice
"""
def __init__(self, topview: TopView, waxis, haxis, daxis):
super(SliceView, self).__init__()
self.topview = topview
self.ctrl = SliceController(self, waxis, haxis, daxis)
uic.loadUi(Path(__file__).parent.joinpath('sliceview.ui'), self)
self.add_image_layer(slice_kwargs={'volume': 'image', 'mode': 'clip'},
pg_kwargs={'opacity': 0.8})
self.add_image_layer(slice_kwargs={'volume': 'annotation', 'mode': 'clip'},
pg_kwargs={'opacity': 0.2})
# init the image display
self.plotItem_slice.setAspectLocked(True)
# connect signals and slots
s = self.plotItem_slice.getViewBox().scene()
self.proxy = pg.SignalProxy(s.sigMouseMoved, rateLimit=60, slot=self.mouseMoveEvent)
s.sigMouseClicked.connect(self.mouseClick)
def add_scatter(self):
self.scatterItem = pg.ScatterPlotItem()
self.plotItem_slice.addItem(self.scatterItem)
def add_image_layer(self, **kwargs):
"""
:param pg_kwargs: pyqtgraph setImage arguments: {'levels': None, 'lut': None,
'opacity': 1.0}
:param slice_kwargs: ibllib.atlas.slice arguments: {'volume': 'image', 'mode': 'clip'}
:return:
"""
il = ImageLayer(**kwargs)
self.ctrl.image_layers.append(il)
self.plotItem_slice.addItem(il.image_item)
def closeEvent(self, event):
self.destroy()
def keyPressEvent(self, e):
pass
def mouseClick(self, event):
if not event.double():
return
def mouseMoveEvent(self, scenepos):
if isinstance(scenepos, tuple):
scenepos = scenepos[0]
else:
return
qpoint = self.ctrl.image_layers[0].image_item.mapFromScene(scenepos)
iw, ih, w, h, v, region = self.ctrl.cursor2xyamp(qpoint)
self.label_x.setText(f"{w:.4f}")
self.label_y.setText(f"{h:.4f}")
self.label_ix.setText(f"{iw:.0f}")
self.label_iy.setText(f"{ih:.0f}")
if isinstance(v, np.ndarray):
self.label_v.setText(str(v))
else:
self.label_v.setText(f"{v:.4f}")
if region is None:
self.label_region.setText("")
self.label_acronym.setText("")
else:
self.label_region.setText(region['name'][0])
self.label_acronym.setText(region['acronym'][0])
def replace_image_layer(self, index, **kwargs):
        if index and len(self.ctrl.image_layers) >= index:
            il = self.ctrl.image_layers.pop(index)
self.plotItem_slice.removeItem(il.image_item)
self.add_image_layer(**kwargs)
class PgImageController:
"""
Abstract class that implements mapping from axes to voxels for any window.
Not instantiated directly.
"""
def __init__(self, win, res=25):
self.qwidget = win
self.transform = None # affine transform image indices 2 data domain
self.image_layers = []
def cursor2xyamp(self, qpoint):
"""Used for the mouse hover function over image display"""
iw, ih = self.cursor2ind(qpoint)
v = self.im[iw, ih]
w, h, _ = np.matmul(self.transform, np.array([iw, ih, 1]))
return iw, ih, w, h, v
def cursor2ind(self, qpoint):
""" image coordinates over the image display"""
iw = np.max((0, np.min((int(np.floor(qpoint.x())), self.nw - 1))))
ih = np.max((0, np.min((int(np.round(qpoint.y())), self.nh - 1))))
return iw, ih
@property
def imageItem(self):
"""returns the first image item"""
return self.image_layers[0].image_item
def set_image(self, pg_image_item, im, dw, dh, w0, h0, **pg_kwargs):
"""
:param im:
:param dw:
:param dh:
:param w0:
:param h0:
        :param pg_kwargs: pg.ImageItem.setImage() parameters: levels=None, lut=None, opacity=1
:return:
"""
self.im = im
self.nw, self.nh = self.im.shape[0:2]
pg_image_item.setImage(self.im, **pg_kwargs)
transform = [dw, 0., 0., 0., dh, 0., w0, h0, 1.]
self.transform = np.array(transform).reshape((3, 3)).T
pg_image_item.setTransform(QTransform(*transform))
def set_points(self, x=None, y=None):
# at the moment brush and size are fixed! These need to be arguments
# For the colour need to convert the colour to QtGui.QColor
self.qwidget.scatterItem.setData(x=x, y=y, brush='b', size=5)
class ControllerTopView(PgImageController):
"""
TopView ControllerTopView
"""
def __init__(self, qmain: TopView, res: int = 25, volume='image', brainmap='Allen'):
super(ControllerTopView, self).__init__(qmain)
self.volume = volume
self.atlas = AllenAtlas(res, brainmap=brainmap)
self.fig_top = self.qwidget = qmain
# Setup Coronal slice: width: ml, height: dv, depth: ap
self.fig_coronal = SliceView(qmain, waxis=0, haxis=2, daxis=1)
self.fig_coronal.setWindowTitle('Coronal Slice')
self.set_slice(self.fig_coronal)
self.fig_coronal.show()
# Setup Sagittal slice: width: ap, height: dv, depth: ml
self.fig_sagittal = SliceView(qmain, waxis=1, haxis=2, daxis=0)
self.fig_sagittal.setWindowTitle('Sagittal Slice')
self.set_slice(self.fig_sagittal)
self.fig_sagittal.show()
def set_slice(self, fig, coord=0, mapping="Allen"):
waxis, haxis, daxis = (fig.ctrl.waxis, fig.ctrl.haxis, fig.ctrl.daxis)
# construct the transform matrix image 2 ibl coordinates
dw = self.atlas.bc.dxyz[waxis]
dh = self.atlas.bc.dxyz[haxis]
wl = self.atlas.bc.lim(waxis) - dw / 2
hl = self.atlas.bc.lim(haxis) - dh / 2
# the ImageLayer object carries slice kwargs and pyqtgraph ImageSet kwargs
# reversed order so the self.im is set with the base layer
for layer in reversed(fig.ctrl.image_layers):
_slice = self.atlas.slice(coord, axis=daxis, mapping=mapping, **layer.slice_kwargs)
fig.ctrl.set_image(layer.image_item, _slice, dw, dh, wl[0], hl[0], **layer.pg_kwargs)
fig.ctrl.slice_coord = coord
def set_top(self):
img = self.atlas.top.transpose()
img[np.isnan(img)] = np.nanmin(img) # img has dims ml, ap
dw, dh = (self.atlas.bc.dxyz[0], self.atlas.bc.dxyz[1])
wl, hl = (self.atlas.bc.xlim, self.atlas.bc.ylim)
self.set_image(self.image_layers[0].image_item, img, dw, dh, wl[0], hl[0])
def set_scatter(self, fig, coord=0):
waxis = fig.ctrl.waxis
# dealing with coronal slice
if waxis == 0:
idx = np.where(self.scatter_data_ind[:, 1] == self.atlas.bc.y2i(coord))[0]
x = self.scatter_data[idx, 0]
y = self.scatter_data[idx, 2]
else:
idx = np.where(self.scatter_data_ind[:, 0] == self.atlas.bc.x2i(coord))[0]
x = self.scatter_data[idx, 1]
y = self.scatter_data[idx, 2]
fig.ctrl.set_points(x, y)
def set_volume(self, volume):
self.volume = volume
class SliceController(PgImageController):
def __init__(self, fig, waxis=None, haxis=None, daxis=None):
"""
:param waxis: brain atlas axis corresponding to display abscissa (coronal: 0, sagittal: 1)
:param haxis: brain atlas axis corresponding to display ordinate (coronal: 2, sagittal: 2)
        :param daxis: brain atlas axis corresponding to the slice depth (coronal: 1, sagittal: 0)
"""
super(SliceController, self).__init__(fig)
self.waxis = waxis
self.haxis = haxis
self.daxis = daxis
def cursor2xyamp(self, qpoint):
"""
Extends the superclass method to also get the brain region from the model
:param qpoint:
:return:
"""
iw, ih, w, h, v = super(SliceController, self).cursor2xyamp(qpoint)
ba = self.qwidget.topview.ctrl.atlas
xyz = np.zeros(3)
xyz[np.array([self.waxis, self.haxis, self.daxis])] = [w, h, self.slice_coord]
mapping = self.qwidget.topview.comboBox_mappings.currentText()
try:
region = ba.regions.get(ba.get_labels(xyz, mapping=mapping))
except ValueError:
region = None
return iw, ih, w, h, v, region
@dataclass
class ImageLayer:
"""
Class for keeping track of image layers.
:param image_item
:param pg_kwargs: pyqtgraph setImage arguments: {'levels': None, 'lut': None, 'opacity': 1.0}
:param slice_kwargs: ibllib.atlas.slice arguments: {'volume': 'image', 'mode': 'clip'}
:param
"""
image_item: pg.ImageItem = field(default_factory=pg.ImageItem)
pg_kwargs: dict = field(default_factory=lambda: {})
slice_kwargs: dict = field(default_factory=lambda: {'volume': 'image', 'mode': 'clip'})
def view(res=25, title=None, brainmap='Allen'):
"""
"""
qt.create_app()
av = TopView._get_or_create(title=title, res=res, brainmap=brainmap)
av.show()
return av
|
[
"numpy.insert",
"PyQt5.QtWidgets.QApplication.instance",
"PyQt5.QtGui.QTransform",
"matplotlib.cm.get_cmap",
"pathlib.Path",
"pyqtgraph.ScatterPlotItem",
"pyqtgraph.InfiniteLine",
"numpy.array",
"ibllib.atlas.AllenAtlas",
"numpy.zeros",
"numpy.isnan",
"pyqtgraph.mkPen",
"numpy.nanmin",
"qt.create_app",
"dataclasses.field",
"pyqtgraph.SignalProxy"
] |
[((14373, 14408), 'dataclasses.field', 'field', ([], {'default_factory': 'pg.ImageItem'}), '(default_factory=pg.ImageItem)\n', (14378, 14408), False, 'from dataclasses import dataclass, field\n'), ((14431, 14465), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : {})'}), '(default_factory=lambda : {})\n', (14436, 14465), False, 'from dataclasses import dataclass, field\n'), ((14490, 14557), 'dataclasses.field', 'field', ([], {'default_factory': "(lambda : {'volume': 'image', 'mode': 'clip'})"}), "(default_factory=lambda : {'volume': 'image', 'mode': 'clip'})\n", (14495, 14557), False, 'from dataclasses import dataclass, field\n'), ((14627, 14642), 'qt.create_app', 'qt.create_app', ([], {}), '()\n', (14640, 14642), False, 'import qt\n'), ((894, 927), 'PyQt5.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (925, 927), False, 'from PyQt5 import QtWidgets, uic\n'), ((1852, 1898), 'pyqtgraph.InfiniteLine', 'pg.InfiniteLine', ([], {'angle': '(0)', 'pos': '(0)'}), '(angle=0, pos=0, **line_kwargs)\n', (1867, 1898), True, 'import pyqtgraph as pg\n'), ((1928, 1975), 'pyqtgraph.InfiniteLine', 'pg.InfiniteLine', ([], {'angle': '(90)', 'pos': '(0)'}), '(angle=90, pos=0, **line_kwargs)\n', (1943, 1975), True, 'import pyqtgraph as pg\n'), ((2383, 2454), 'pyqtgraph.SignalProxy', 'pg.SignalProxy', (['s.sigMouseMoved'], {'rateLimit': '(60)', 'slot': 'self.mouseMoveEvent'}), '(s.sigMouseMoved, rateLimit=60, slot=self.mouseMoveEvent)\n', (2397, 2454), True, 'import pyqtgraph as pg\n'), ((4019, 4047), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (4041, 4047), False, 'import matplotlib\n'), ((4140, 4179), 'numpy.insert', 'np.insert', (['lut', '(0)', '[0, 0, 0, 0]'], {'axis': '(0)'}), '(lut, 0, [0, 0, 0, 0], axis=0)\n', (4149, 4179), True, 'import numpy as np\n'), ((6372, 6443), 'pyqtgraph.SignalProxy', 'pg.SignalProxy', (['s.sigMouseMoved'], {'rateLimit': '(60)', 'slot': 'self.mouseMoveEvent'}), '(s.sigMouseMoved, rateLimit=60, slot=self.mouseMoveEvent)\n', (6386, 6443), True, 'import pyqtgraph as pg\n'), ((6550, 6570), 'pyqtgraph.ScatterPlotItem', 'pg.ScatterPlotItem', ([], {}), '()\n', (6568, 6570), True, 'import pyqtgraph as pg\n'), ((10451, 10485), 'ibllib.atlas.AllenAtlas', 'AllenAtlas', (['res'], {'brainmap': 'brainmap'}), '(res, brainmap=brainmap)\n', (10461, 10485), False, 'from ibllib.atlas import AllenAtlas\n'), ((11968, 11982), 'numpy.nanmin', 'np.nanmin', (['img'], {}), '(img)\n', (11977, 11982), True, 'import numpy as np\n'), ((13680, 13691), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (13688, 13691), True, 'import numpy as np\n'), ((1792, 1822), 'pyqtgraph.mkPen', 'pg.mkPen', (['(0, 255, 0)'], {'width': '(3)'}), '((0, 255, 0), width=3)\n', (1800, 1822), True, 'import pyqtgraph as pg\n'), ((8858, 8879), 'numpy.array', 'np.array', (['[iw, ih, 1]'], {}), '([iw, ih, 1])\n', (8866, 8879), True, 'import numpy as np\n'), ((9883, 9905), 'PyQt5.QtGui.QTransform', 'QTransform', (['*transform'], {}), '(*transform)\n', (9893, 9905), False, 'from PyQt5.QtGui import QTransform\n'), ((11951, 11964), 'numpy.isnan', 'np.isnan', (['img'], {}), '(img)\n', (11959, 11964), True, 'import numpy as np\n'), ((13704, 13750), 'numpy.array', 'np.array', (['[self.waxis, self.haxis, self.daxis]'], {}), '([self.waxis, self.haxis, self.daxis])\n', (13712, 13750), True, 'import numpy as np\n'), ((9810, 9829), 'numpy.array', 'np.array', (['transform'], {}), '(transform)\n', (9818, 9829), True, 'import numpy as np\n'), ((1511, 1525), 
'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1515, 1525), False, 'from pathlib import Path\n'), ((5848, 5862), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5852, 5862), False, 'from pathlib import Path\n')]
|
import os
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
import logging
from pandas import DataFrame
from common.gen_samples import *
from common.data_plotter import *
from aad.aad_globals import *
from aad.aad_support import *
from aad.forest_description import *
from aad.anomaly_dataset_support import *
# from percept.percept import *
"""
pythonw -m aad.plot_anomalies_rectangle
"""
def get_x_tau(x, w, tau):
v = x.dot(w)
ranked = np.argsort(-v)
tau_id = ranked[int(tau * len(v))]
return tau_id, x[tau_id]
def plot_anomalies_ifor(outdir, plot=False, plot_legends=False):
u_theta = np.pi * 4. / 4 + np.pi * 5 / 180
x, y = get_sphere_samples([(50, 0, np.pi * 4. / 4, np.pi * 4. / 4 + np.pi * 2 / 4),
(15, 1, u_theta - np.pi * 5 / 180, u_theta + np.pi * 5 / 180),
(15, 1, np.pi * 6. / 4 - np.pi * 1.5 / 180, np.pi * 6. / 4)])
n, d = x.shape
id_nomls = np.where(y == 0)[0]
id_anoms = np.where(y == 1)[0]
n_anoms = len(id_anoms)
x_nomls, y_nomls = x[id_nomls, :], y[id_nomls]
x_anoms, y_anoms = x[id_anoms, :], y[id_anoms]
if plot:
axis_fontsize = 16
line_colors = ["blue", "red", "red"]
line_types = ["--", "--", "-"]
line_widths = [2, 2, 2]
lines = list()
line_labels = list()
tau = n_anoms * 1. / n # multiplying by a factor to move the plane lower
w = normalize(np.ones(2))
r = np.array([np.min(x[:, 0]), np.max(x[:, 0])])
tau_id, x_tau = get_x_tau(x, w, tau)
q_tau = w.dot(x_tau)
# plot the true weight vector
u = interpolate_2D_line_by_point_and_vec(np.array([-1., 1.]), [0., 0.],
[np.cos(u_theta + np.pi * 1 / 4), np.sin(u_theta + np.pi * 1 / 4)])
lines.append(u)
line_labels.append(r"True weights ${\bf u}$")
zd = interpolate_2D_line_by_point_and_vec(np.array([-1., 1.0]), [0., 0.], w)
lines.append(zd)
line_labels.append(r"Uniform weights ${\bf w}_{unif}$")
zw = interpolate_2D_line_by_slope_and_intercept(np.array([-1., 1.]), -w[0] / w[1], q_tau / w[1])
lines.append(zw)
line_labels.append(r"hyperplane $\perp$ ${\bf w}_{unif}$")
pdffile = os.path.join(outdir, "anomalies_in_ifor.pdf")
dp = DataPlotter(pdfpath=pdffile, rows=1, cols=1)
pl = dp.get_next_plot()
pl.set_aspect('equal')
# plt.xlabel('x', fontsize=axis_fontsize)
# plt.ylabel('y', fontsize=axis_fontsize)
plt.xticks([])
plt.yticks([])
plt.xlim([-1.05, 1.05])
plt.ylim([-1.05, 1.05])
pl.scatter(x_nomls[:, 0], x_nomls[:, 1], s=45, c="blue", marker="+", label="Nominal")
pl.scatter(x_anoms[:, 0], x_anoms[:, 1], s=45, c="red", marker="+", label="Anomaly")
for i, line in enumerate(lines):
color = "blue" if line_colors is None else line_colors[i]
pl.plot(line[:, 0], line[:, 1], line_types[i], color=color, linewidth=line_widths[i],
label=line_labels[i] if plot_legends else None)
plt.axhline(0, linestyle="--", color="lightgrey")
plt.axvline(0, linestyle="--", color="lightgrey")
if plot_legends:
pl.legend(loc='lower right', prop={'size': 12})
dp.close()
return x, y
def plot_anomalies_rect(outdir, plot=False, plot_legends=False):
x_nomls = rnd.uniform(0., 1., 500)
x_nomls = np.reshape(x_nomls, newshape=(250, -1))
anom_mu = (0.83, 0.95)
u_theta = np.arctan(0.9 / 0.8)
anom_score_dist = MVNParams(
mu=np.array([anom_mu[0], anom_mu[1]]),
mcorr=np.array([
[1, -0.5],
[0, 1.0]]),
dvar=np.array([0.002, 0.0005])
)
n_anoms = 30
x_anoms = generate_dependent_normal_samples(n_anoms,
anom_score_dist.mu,
anom_score_dist.mcorr,
anom_score_dist.dvar)
x = np.vstack([x_nomls, x_anoms])
y = np.array(np.zeros(x_nomls.shape[0], dtype=int))
y = np.append(y, np.ones(x_anoms.shape[0], dtype=int))
if plot:
n, d = x.shape
        # tau is computed assuming that the anomalies occupy tau-proportion
        # of the data
tau = n_anoms * 1.3 / n # multiplying by a factor to move the plane lower
w = normalize(np.ones(2))
r = np.array([np.min(x[:, 0]), np.max(x[:, 0])])
line_colors = ["blue", "red", "red"]
line_types = ["--", "--", "-"]
line_widths = [2, 2, 2]
lines = list()
line_labels = list()
tau_id, x_tau = get_x_tau(x, w, tau)
q_tau = w.dot(x_tau)
# plot the true weight vector
u = interpolate_2D_line_by_point_and_vec(np.array([0., 1.]), [0., 0.],
[np.cos(u_theta), np.sin(u_theta)])
lines.append(u)
line_labels.append(r"True weights ${\bf u}$")
zd = interpolate_2D_line_by_point_and_vec(np.array([0., 1.0]), [0., 0.], w)
lines.append(zd)
line_labels.append(r"Uniform weights ${\bf w}_{unif}$")
zw = interpolate_2D_line_by_slope_and_intercept(np.array([0., 1.05]), -w[0] / w[1], q_tau / w[1])
lines.append(zw)
line_labels.append(r"hyperplane $\perp$ ${\bf w}_{unif}$")
axis_fontsize = 16
pdffile = os.path.join(outdir, "anomalies_in_rect.pdf")
dp = DataPlotter(pdfpath=pdffile, rows=1, cols=1)
pl = dp.get_next_plot()
pl.set_aspect('equal')
# plt.xlabel('x', fontsize=axis_fontsize)
# plt.ylabel('y', fontsize=axis_fontsize)
plt.xticks([])
plt.yticks([])
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
pl.scatter(x_nomls[:, 0], x_nomls[:, 1], s=45, c="blue", marker="+", label="Nominal")
pl.scatter(x_anoms[:, 0], x_anoms[:, 1], s=45, c="red", marker="+", label="Anomaly")
for i, line in enumerate(lines):
color = "blue" if line_colors is None else line_colors[i]
pl.plot(line[:, 0], line[:, 1], line_types[i], color=color, linewidth=line_widths[i],
label=line_labels[i] if plot_legends else None)
if plot_legends:
pl.legend(loc='lower right', prop={'size': 12})
dp.close()
return x, y
if __name__ == "__main__":
logger = logging.getLogger(__name__)
args = get_command_args(debug=True, debug_args=["--debug",
"--plot",
"--log_file=temp/plot_anomalies_rectangle.log"])
# print "log file: %s" % args.log_file
configure_logger(args)
rnd.seed(42)
outdir = "./temp/illustration"
dir_create(outdir)
# plot isolation forest score distribution illustration
# plot_anomalies_ifor(outdir, plot=True, plot_legends=False)
plot_anomalies_rect(outdir, plot=True, plot_legends=False)
|
[
"logging.getLogger",
"numpy.argsort",
"numpy.array",
"numpy.sin",
"numpy.reshape",
"numpy.where",
"numpy.max",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.yticks",
"numpy.vstack",
"numpy.random.seed",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.arctan",
"numpy.ones",
"matplotlib.pyplot.xticks",
"numpy.cos",
"matplotlib.pyplot.xlim",
"os.path.join",
"numpy.zeros",
"numpy.random.uniform",
"matplotlib.pyplot.axvline"
] |
[((481, 495), 'numpy.argsort', 'np.argsort', (['(-v)'], {}), '(-v)\n', (491, 495), True, 'import numpy as np\n'), ((3499, 3525), 'numpy.random.uniform', 'rnd.uniform', (['(0.0)', '(1.0)', '(500)'], {}), '(0.0, 1.0, 500)\n', (3510, 3525), True, 'import numpy.random as rnd\n'), ((3538, 3577), 'numpy.reshape', 'np.reshape', (['x_nomls'], {'newshape': '(250, -1)'}), '(x_nomls, newshape=(250, -1))\n', (3548, 3577), True, 'import numpy as np\n'), ((3620, 3640), 'numpy.arctan', 'np.arctan', (['(0.9 / 0.8)'], {}), '(0.9 / 0.8)\n', (3629, 3640), True, 'import numpy as np\n'), ((4131, 4160), 'numpy.vstack', 'np.vstack', (['[x_nomls, x_anoms]'], {}), '([x_nomls, x_anoms])\n', (4140, 4160), True, 'import numpy as np\n'), ((6545, 6572), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6562, 6572), False, 'import logging\n'), ((6875, 6887), 'numpy.random.seed', 'rnd.seed', (['(42)'], {}), '(42)\n', (6883, 6887), True, 'import numpy.random as rnd\n'), ((988, 1004), 'numpy.where', 'np.where', (['(y == 0)'], {}), '(y == 0)\n', (996, 1004), True, 'import numpy as np\n'), ((1023, 1039), 'numpy.where', 'np.where', (['(y == 1)'], {}), '(y == 1)\n', (1031, 1039), True, 'import numpy as np\n'), ((2339, 2384), 'os.path.join', 'os.path.join', (['outdir', '"""anomalies_in_ifor.pdf"""'], {}), "(outdir, 'anomalies_in_ifor.pdf')\n", (2351, 2384), False, 'import os\n'), ((2614, 2628), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2624, 2628), True, 'import matplotlib.pyplot as plt\n'), ((2637, 2651), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2647, 2651), True, 'import matplotlib.pyplot as plt\n'), ((2660, 2683), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-1.05, 1.05]'], {}), '([-1.05, 1.05])\n', (2668, 2683), True, 'import matplotlib.pyplot as plt\n'), ((2692, 2715), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-1.05, 1.05]'], {}), '([-1.05, 1.05])\n', (2700, 2715), True, 'import matplotlib.pyplot as plt\n'), ((3189, 3238), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'linestyle': '"""--"""', 'color': '"""lightgrey"""'}), "(0, linestyle='--', color='lightgrey')\n", (3200, 3238), True, 'import matplotlib.pyplot as plt\n'), ((3247, 3296), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(0)'], {'linestyle': '"""--"""', 'color': '"""lightgrey"""'}), "(0, linestyle='--', color='lightgrey')\n", (3258, 3296), True, 'import matplotlib.pyplot as plt\n'), ((4178, 4215), 'numpy.zeros', 'np.zeros', (['x_nomls.shape[0]'], {'dtype': 'int'}), '(x_nomls.shape[0], dtype=int)\n', (4186, 4215), True, 'import numpy as np\n'), ((4238, 4274), 'numpy.ones', 'np.ones', (['x_anoms.shape[0]'], {'dtype': 'int'}), '(x_anoms.shape[0], dtype=int)\n', (4245, 4274), True, 'import numpy as np\n'), ((5539, 5584), 'os.path.join', 'os.path.join', (['outdir', '"""anomalies_in_rect.pdf"""'], {}), "(outdir, 'anomalies_in_rect.pdf')\n", (5551, 5584), False, 'import os\n'), ((5814, 5828), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5824, 5828), True, 'import matplotlib.pyplot as plt\n'), ((5837, 5851), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5847, 5851), True, 'import matplotlib.pyplot as plt\n'), ((5860, 5883), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (5868, 5883), True, 'import matplotlib.pyplot as plt\n'), ((5892, 5915), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (5900, 5915), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1499), 
'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1496, 1499), True, 'import numpy as np\n'), ((1721, 1742), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (1729, 1742), True, 'import numpy as np\n'), ((1998, 2019), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (2006, 2019), True, 'import numpy as np\n'), ((2179, 2200), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (2187, 2200), True, 'import numpy as np\n'), ((3686, 3720), 'numpy.array', 'np.array', (['[anom_mu[0], anom_mu[1]]'], {}), '([anom_mu[0], anom_mu[1]])\n', (3694, 3720), True, 'import numpy as np\n'), ((3736, 3767), 'numpy.array', 'np.array', (['[[1, -0.5], [0, 1.0]]'], {}), '([[1, -0.5], [0, 1.0]])\n', (3744, 3767), True, 'import numpy as np\n'), ((3807, 3832), 'numpy.array', 'np.array', (['[0.002, 0.0005]'], {}), '([0.002, 0.0005])\n', (3815, 3832), True, 'import numpy as np\n'), ((4526, 4536), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4533, 4536), True, 'import numpy as np\n'), ((4927, 4947), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (4935, 4947), True, 'import numpy as np\n'), ((5171, 5191), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (5179, 5191), True, 'import numpy as np\n'), ((5351, 5372), 'numpy.array', 'np.array', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (5359, 5372), True, 'import numpy as np\n'), ((1523, 1538), 'numpy.min', 'np.min', (['x[:, 0]'], {}), '(x[:, 0])\n', (1529, 1538), True, 'import numpy as np\n'), ((1540, 1555), 'numpy.max', 'np.max', (['x[:, 0]'], {}), '(x[:, 0])\n', (1546, 1555), True, 'import numpy as np\n'), ((1802, 1833), 'numpy.cos', 'np.cos', (['(u_theta + np.pi * 1 / 4)'], {}), '(u_theta + np.pi * 1 / 4)\n', (1808, 1833), True, 'import numpy as np\n'), ((1835, 1866), 'numpy.sin', 'np.sin', (['(u_theta + np.pi * 1 / 4)'], {}), '(u_theta + np.pi * 1 / 4)\n', (1841, 1866), True, 'import numpy as np\n'), ((4560, 4575), 'numpy.min', 'np.min', (['x[:, 0]'], {}), '(x[:, 0])\n', (4566, 4575), True, 'import numpy as np\n'), ((4577, 4592), 'numpy.max', 'np.max', (['x[:, 0]'], {}), '(x[:, 0])\n', (4583, 4592), True, 'import numpy as np\n'), ((5007, 5022), 'numpy.cos', 'np.cos', (['u_theta'], {}), '(u_theta)\n', (5013, 5022), True, 'import numpy as np\n'), ((5024, 5039), 'numpy.sin', 'np.sin', (['u_theta'], {}), '(u_theta)\n', (5030, 5039), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Barycenters
===========
This example shows three methods to compute barycenters of time series.
For an overview over the available methods see the :mod:`tslearn.barycenters`
module.
*tslearn* provides three methods for calculating barycenters for a given set of
time series:
* *Euclidean barycenter* is simply the arithmetic mean for
each individual point in time, minimizing the summed euclidean distance
for each of them. As can be seen below, it is very different from the
DTW-based methods and may often be inappropriate. However, it is the
fastest of the methods shown.
* *DTW Barycenter Averaging (DBA)* is an iteratively refined barycenter,
starting out with a (potentially) bad candidate and improving it
until convergence criteria are met. The optimization can be accomplished
with (a) expectation-maximization [1] and (b) stochastic subgradient
descent [2]. Empirically, the latter "is [often] more stable and finds better
solutions in shorter time" [2].
* *Soft-DTW barycenter* uses a differentiable loss function to iteratively
find a barycenter [3]. The method itself and the parameter
:math:`\\gamma=1.0` is described in more detail in the section on
:ref:`DTW<dtw>`. There is also a dedicated
:ref:`example<sphx_glr_auto_examples_plot_barycenter_interpolate.py>`
available.
[1] <NAME>, <NAME> & <NAME>. A global averaging method for
dynamic time warping, with applications to clustering. Pattern Recognition,
Elsevier, 2011, Vol. 44, Num. 3, pp. 678-693.
[2] <NAME> & <NAME>. Nonsmooth Analysis and Subgradient Methods for
Averaging in Dynamic Time Warping Spaces. Pattern Recognition, 74, 340-358.
[3] <NAME> & <NAME>. Soft-DTW: a Differentiable Loss Function for
Time-Series. ICML 2017.
"""
# Author: <NAME>, <NAME>
# License: BSD 3 clause
import numpy
import matplotlib.pyplot as plt
from tslearn.barycenters import \
euclidean_barycenter, \
dtw_barycenter_averaging, \
dtw_barycenter_averaging_subgradient, \
softdtw_barycenter
from tslearn.datasets import CachedDatasets
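# Minimal call sketch (assumes X is an array of shape (n_ts, sz, d), like the one
# loaded below from the CachedDatasets "Trace" set); each call returns a barycenter
# of shape (sz, d):
#   bar = euclidean_barycenter(X)
#   bar = dtw_barycenter_averaging(X, max_iter=50, tol=1e-3)
#   bar = softdtw_barycenter(X, gamma=1.0, max_iter=50, tol=1e-3)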
# fetch the example data set
numpy.random.seed(0)
X_train, y_train, _, _ = CachedDatasets().load_dataset("Trace")
X = X_train[y_train == 2]
length_of_sequence = X.shape[1]
def plot_helper(barycenter):
# plot all points of the data set
for series in X:
plt.plot(series.ravel(), "k-", alpha=.2)
# plot the given barycenter of them
plt.plot(barycenter.ravel(), "r-", linewidth=2)
# plot the four variants with the same number of iterations and a tolerance of
# 1e-3 where applicable
ax1 = plt.subplot(4, 1, 1)
plt.title("Euclidean barycenter")
plot_helper(euclidean_barycenter(X))
plt.subplot(4, 1, 2, sharex=ax1)
plt.title("DBA (vectorized version of Petitjean's EM)")
plot_helper(dtw_barycenter_averaging(X, max_iter=50, tol=1e-3))
plt.subplot(4, 1, 3, sharex=ax1)
plt.title("DBA (subgradient descent approach)")
plot_helper(dtw_barycenter_averaging_subgradient(X, max_iter=50, tol=1e-3))
plt.subplot(4, 1, 4, sharex=ax1)
plt.title(r"Soft-DTW barycenter ($\gamma$=1.0)")
plot_helper(softdtw_barycenter(X, gamma=1., max_iter=50, tol=1e-3))
# clip the axes for better readability
ax1.set_xlim([0, length_of_sequence])
# show the plot(s)
plt.tight_layout()
plt.show()
|
[
"tslearn.barycenters.dtw_barycenter_averaging",
"tslearn.barycenters.dtw_barycenter_averaging_subgradient",
"tslearn.barycenters.euclidean_barycenter",
"tslearn.datasets.CachedDatasets",
"numpy.random.seed",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"tslearn.barycenters.softdtw_barycenter",
"matplotlib.pyplot.show"
] |
[((2102, 2122), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (2119, 2122), False, 'import numpy\n'), ((2587, 2607), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (2598, 2607), True, 'import matplotlib.pyplot as plt\n'), ((2608, 2641), 'matplotlib.pyplot.title', 'plt.title', (['"""Euclidean barycenter"""'], {}), "('Euclidean barycenter')\n", (2617, 2641), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2712), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(2)'], {'sharex': 'ax1'}), '(4, 1, 2, sharex=ax1)\n', (2691, 2712), True, 'import matplotlib.pyplot as plt\n'), ((2713, 2768), 'matplotlib.pyplot.title', 'plt.title', (['"""DBA (vectorized version of Petitjean\'s EM)"""'], {}), '("DBA (vectorized version of Petitjean\'s EM)")\n', (2722, 2768), True, 'import matplotlib.pyplot as plt\n'), ((2834, 2866), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(3)'], {'sharex': 'ax1'}), '(4, 1, 3, sharex=ax1)\n', (2845, 2866), True, 'import matplotlib.pyplot as plt\n'), ((2867, 2914), 'matplotlib.pyplot.title', 'plt.title', (['"""DBA (subgradient descent approach)"""'], {}), "('DBA (subgradient descent approach)')\n", (2876, 2914), True, 'import matplotlib.pyplot as plt\n'), ((2992, 3024), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(4)'], {'sharex': 'ax1'}), '(4, 1, 4, sharex=ax1)\n', (3003, 3024), True, 'import matplotlib.pyplot as plt\n'), ((3025, 3073), 'matplotlib.pyplot.title', 'plt.title', (['"""Soft-DTW barycenter ($\\\\gamma$=1.0)"""'], {}), "('Soft-DTW barycenter ($\\\\gamma$=1.0)')\n", (3034, 3073), True, 'import matplotlib.pyplot as plt\n'), ((3239, 3257), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3255, 3257), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3268), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3266, 3268), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2677), 'tslearn.barycenters.euclidean_barycenter', 'euclidean_barycenter', (['X'], {}), '(X)\n', (2674, 2677), False, 'from tslearn.barycenters import euclidean_barycenter, dtw_barycenter_averaging, dtw_barycenter_averaging_subgradient, softdtw_barycenter\n'), ((2781, 2832), 'tslearn.barycenters.dtw_barycenter_averaging', 'dtw_barycenter_averaging', (['X'], {'max_iter': '(50)', 'tol': '(0.001)'}), '(X, max_iter=50, tol=0.001)\n', (2805, 2832), False, 'from tslearn.barycenters import euclidean_barycenter, dtw_barycenter_averaging, dtw_barycenter_averaging_subgradient, softdtw_barycenter\n'), ((2927, 2990), 'tslearn.barycenters.dtw_barycenter_averaging_subgradient', 'dtw_barycenter_averaging_subgradient', (['X'], {'max_iter': '(50)', 'tol': '(0.001)'}), '(X, max_iter=50, tol=0.001)\n', (2963, 2990), False, 'from tslearn.barycenters import euclidean_barycenter, dtw_barycenter_averaging, dtw_barycenter_averaging_subgradient, softdtw_barycenter\n'), ((3085, 3141), 'tslearn.barycenters.softdtw_barycenter', 'softdtw_barycenter', (['X'], {'gamma': '(1.0)', 'max_iter': '(50)', 'tol': '(0.001)'}), '(X, gamma=1.0, max_iter=50, tol=0.001)\n', (3103, 3141), False, 'from tslearn.barycenters import euclidean_barycenter, dtw_barycenter_averaging, dtw_barycenter_averaging_subgradient, softdtw_barycenter\n'), ((2148, 2164), 'tslearn.datasets.CachedDatasets', 'CachedDatasets', ([], {}), '()\n', (2162, 2164), False, 'from tslearn.datasets import CachedDatasets\n')]
|
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from ..utils import resolution
from ..doctools import document
from .stat import stat
@document
class stat_boxplot(stat):
"""
Compute boxplot statistics
{usage}
Parameters
----------
{common_parameters}
coef : float, optional (default: 1.5)
Length of the whiskers as a multiple of the Interquartile
Range.
See Also
--------
plotnine.geoms.geom_boxplot
"""
_aesthetics_doc = """
{aesthetics_table}
.. rubric:: Options for computed aesthetics
::
'width' # width of boxplot
'lower' # lower hinge, 25% quantile
'middle' # median, 50% quantile
'upper' # upper hinge, 75% quantile
'notchlower' # lower edge of notch, computed as;
# :py:`median - 1.58 * IQR / sqrt(n)`
'notchupper' # upper edge of notch, computed as;
# :py:`median + 1.58 * IQR / sqrt(n)`
'ymin' # lower whisker, computed as; smallest observation
# greater than or equal to lower hinge - 1.5 * IQR
'ymax' # upper whisker, computed as; largest observation
# less than or equal to upper hinge + 1.5 * IQR
Calculated aesthetics are accessed using the `after_stat` function.
e.g. :py:`after_stat('width')`.
"""
REQUIRED_AES = {'x', 'y'}
NON_MISSING_AES = {'weight'}
DEFAULT_PARAMS = {'geom': 'boxplot', 'position': 'dodge',
'na_rm': False, 'coef': 1.5, 'width': None}
CREATES = {'lower', 'upper', 'middle', 'ymin', 'ymax',
'outliers', 'notchupper', 'notchlower', 'width',
'relvarwidth'}
def setup_params(self, data):
if self.params['width'] is None:
self.params['width'] = resolution(data['x'], False) * 0.75
return self.params
@classmethod
def compute_group(cls, data, scales, **params):
y = data['y'].to_numpy()
weights = data.get('weight', None)
total_weight = len(y) if weights is None else np.sum(weights)
res = weighted_boxplot_stats(y, weights=weights, whis=params['coef'])
if len(np.unique(data['x'])) > 1:
width = np.ptp(data['x']) * 0.9
else:
width = params['width']
if pdtypes.is_categorical_dtype(data['x']):
x = data['x'].iloc[0]
else:
x = np.mean([data['x'].min(), data['x'].max()])
d = {
'ymin': res['whislo'],
'lower': res['q1'],
'middle': [res['med']],
'upper': res['q3'],
'ymax': res['whishi'],
'outliers': [res['fliers']],
'notchupper': res['cihi'],
'notchlower': res['cilo'],
'x': x,
'width': width,
'relvarwidth': np.sqrt(total_weight)
}
return pd.DataFrame(d)
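# Typical use of this stat (illustrative; assumes a DataFrame `df` with columns
# 'x' and 'y'): geom_boxplot defaults to stat_boxplot, so
#   from plotnine import ggplot, aes, geom_boxplot
#   ggplot(df, aes('factor(x)', 'y')) + geom_boxplot()
# runs compute_group() above once per x group.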
def weighted_percentile(a, q, weights=None):
"""
Compute the weighted q-th percentile of data
Parameters
----------
a : array_like
Input that can be converted into an array.
q : array_like[float]
        Percentile or sequence of percentiles to compute. Must be in
        the range [0, 100]
weights : array_like
Weights associated with the input values.
"""
# Calculate and interpolate weighted percentiles
# method derived from https://en.wikipedia.org/wiki/Percentile
# using numpy's standard C = 1
if weights is None:
weights = np.ones(len(a))
weights = np.asarray(weights)
q = np.asarray(q)
C = 1
idx_s = np.argsort(a)
a_s = a[idx_s]
w_n = weights[idx_s]
S_N = np.sum(weights)
S_n = np.cumsum(w_n)
p_n = (S_n - C * w_n) / (S_N + (1 - 2 * C) * w_n)
pcts = np.interp(q / 100.0, p_n, a_s)
return pcts
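# Worked example (values are illustrative, not from the original module): with
# a = np.array([1., 2., 3., 4.]) and unit weights, the 50th percentile
# interpolates to 2.5, matching np.percentile(a, 50); with
# weights = np.array([1., 1., 1., 5.]) the heavy weight on 4 pulls it up to ~3.3.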
def weighted_boxplot_stats(x, weights=None, whis=1.5):
"""
Calculate weighted boxplot plot statistics
Parameters
----------
x : array_like
Data
weights : array_like, optional
Weights associated with the data.
whis : float, optional (default: 1.5)
Position of the whiskers beyond the interquartile range.
The data beyond the whisker are considered outliers.
If a float, the lower whisker is at the lowest datum above
``Q1 - whis*(Q3-Q1)``, and the upper whisker at the highest
datum below ``Q3 + whis*(Q3-Q1)``, where Q1 and Q3 are the
first and third quartiles. The default value of
``whis = 1.5`` corresponds to Tukey's original definition of
boxplots.
Notes
-----
    This method is adapted from Matplotlib's boxplot_stats. The key difference
    is the use of a weighted percentile calculation, followed by linear
    interpolation to map weighted percentiles back to the data.
"""
if weights is None:
q1, med, q3 = np.percentile(x, (25, 50, 75))
n = len(x)
else:
q1, med, q3 = weighted_percentile(x, (25, 50, 75), weights)
n = np.sum(weights)
iqr = q3 - q1
mean = np.average(x, weights=weights)
cilo = med - 1.58 * iqr / np.sqrt(n)
cihi = med + 1.58 * iqr / np.sqrt(n)
# low extreme
loval = q1 - whis * iqr
lox = x[x >= loval]
if len(lox) == 0 or np.min(lox) > q1:
whislo = q1
else:
whislo = np.min(lox)
# high extreme
hival = q3 + whis * iqr
hix = x[x <= hival]
if len(hix) == 0 or np.max(hix) < q3:
whishi = q3
else:
whishi = np.max(hix)
bpstats = {
'fliers': x[(x < whislo) | (x > whishi)],
'mean': mean,
'med': med,
'q1': q1,
'q3': q3,
'iqr': iqr,
'whislo': whislo,
'whishi': whishi,
'cilo': cilo,
'cihi': cihi,
}
return bpstats
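# Illustrative check (values are not from the original module): for
# x = np.arange(1., 101.) with no weights this gives med=50.5, q1=25.75,
# q3=75.25, whiskers at 1 and 100 (no point lies beyond 1.5*IQR from the
# hinges) and an empty 'fliers' array.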
|
[
"numpy.ptp",
"numpy.sqrt",
"numpy.unique",
"numpy.average",
"numpy.asarray",
"numpy.max",
"numpy.argsort",
"numpy.sum",
"pandas.api.types.is_categorical_dtype",
"numpy.percentile",
"numpy.interp",
"numpy.min",
"pandas.DataFrame",
"numpy.cumsum"
] |
[((3580, 3599), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (3590, 3599), True, 'import numpy as np\n'), ((3608, 3621), 'numpy.asarray', 'np.asarray', (['q'], {}), '(q)\n', (3618, 3621), True, 'import numpy as np\n'), ((3645, 3658), 'numpy.argsort', 'np.argsort', (['a'], {}), '(a)\n', (3655, 3658), True, 'import numpy as np\n'), ((3713, 3728), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3719, 3728), True, 'import numpy as np\n'), ((3739, 3753), 'numpy.cumsum', 'np.cumsum', (['w_n'], {}), '(w_n)\n', (3748, 3753), True, 'import numpy as np\n'), ((3819, 3849), 'numpy.interp', 'np.interp', (['(q / 100.0)', 'p_n', 'a_s'], {}), '(q / 100.0, p_n, a_s)\n', (3828, 3849), True, 'import numpy as np\n'), ((5101, 5131), 'numpy.average', 'np.average', (['x'], {'weights': 'weights'}), '(x, weights=weights)\n', (5111, 5131), True, 'import numpy as np\n'), ((2350, 2389), 'pandas.api.types.is_categorical_dtype', 'pdtypes.is_categorical_dtype', (["data['x']"], {}), "(data['x'])\n", (2378, 2389), True, 'import pandas.api.types as pdtypes\n'), ((2925, 2940), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (2937, 2940), True, 'import pandas as pd\n'), ((4915, 4945), 'numpy.percentile', 'np.percentile', (['x', '(25, 50, 75)'], {}), '(x, (25, 50, 75))\n', (4928, 4945), True, 'import numpy as np\n'), ((5055, 5070), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (5061, 5070), True, 'import numpy as np\n'), ((5374, 5385), 'numpy.min', 'np.min', (['lox'], {}), '(lox)\n', (5380, 5385), True, 'import numpy as np\n'), ((5547, 5558), 'numpy.max', 'np.max', (['hix'], {}), '(hix)\n', (5553, 5558), True, 'import numpy as np\n'), ((2107, 2122), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (2113, 2122), True, 'import numpy as np\n'), ((2878, 2899), 'numpy.sqrt', 'np.sqrt', (['total_weight'], {}), '(total_weight)\n', (2885, 2899), True, 'import numpy as np\n'), ((5162, 5172), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (5169, 5172), True, 'import numpy as np\n'), ((5203, 5213), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (5210, 5213), True, 'import numpy as np\n'), ((5309, 5320), 'numpy.min', 'np.min', (['lox'], {}), '(lox)\n', (5315, 5320), True, 'import numpy as np\n'), ((5482, 5493), 'numpy.max', 'np.max', (['hix'], {}), '(hix)\n', (5488, 5493), True, 'import numpy as np\n'), ((2217, 2237), 'numpy.unique', 'np.unique', (["data['x']"], {}), "(data['x'])\n", (2226, 2237), True, 'import numpy as np\n'), ((2264, 2281), 'numpy.ptp', 'np.ptp', (["data['x']"], {}), "(data['x'])\n", (2270, 2281), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import math
import numpy as np
import matplotlib.pyplot as plt
#Constants
GNa = 120 #Maximal conductance(Na+) mS/cm2
Gk = 36 #Maximal conductance(K+) mS/cm2
Gleak = 0.3 #Maximal conductance(leak) mS/cm2
cm = 1 #Cell capacitance uF/cm2
delta = 0.01 #Axon conductivity
ENa = 50 #Nernst potential (Na+) mV
Ek = -77 #Nernst potential (K+) mV
Eleak = -54.4 #Nernst potential (leak) mV
#Simulation Parameters
simulation_time = 25.0 #ms
domain_length = 4 #cm
dt = 0.001 #ms
dx = 0.1 #cm
x = np.arange(0,domain_length,dx)
time = np.arange(0,simulation_time,dt)
#Convenience variables
a1 = delta*dt/(dx*dx*cm) #V(i-1,t)
a2 = 1 - 2*delta*dt/(dx*dx*cm) #V(i,t)
a3 = delta*dt/(dx*dx*cm) #V(i+1,t)
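#These coefficients implement the FTCS (forward-time, centred-space) update of
#the cable equation cm*dV/dt = delta*d2V/dx2 - Iion, i.e.
#V[i,n+1] = a1*V[i-1,n] + a2*V[i,n] + a3*V[i+1,n] - dt*Iion/cm
#The scheme is stable while delta*dt/(cm*dx*dx) <= 1/2 (here 0.01*0.001/0.01 = 1e-3)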
#Solution matrix
V = np.zeros((len(x),len(time)))
M = np.zeros((len(x),len(time)))
N = np.zeros((len(x),len(time)))
H = np.zeros((len(x),len(time)))
V_initial = -70 #mV
#Initial condition
#When t=0, V=-70mV M=N=H=0
V[:,0] = V_initial
M[:,0] = 0
N[:,0] = 0
H[:,0] = 0
#time loop
for n in range(0,len(time)-1):
#loop over space
for i in range(1,len(x)-1):
if n*dt <= 0.9 and n*dt >= 0.5 and i*dx <= 0.3:
Istim = -25 #uA/cm2
else:
Istim = 0
#Convenience variables
INa = GNa*math.pow(M[i,n],3)*H[i,n]*(V[i,n]-ENa)
Ik = Gk*math.pow(N[i,n],4)*(V[i,n]-Ek)
Ileak = Gleak*(V[i,n]-Eleak)
Iion = INa + Ik + Ileak + Istim
#FTCS
V[i,n+1] = a1*V[i-1,n] + a2*V[i,n] + a3*V[i+1,n] - dt*Iion/cm
#Gating variables:M
aM = (40+V[i,n])/(1-math.exp(-0.1*(40+V[i,n])))
bM = 0.108*math.exp(-V[i,n]/18)
Minf = aM/(aM+bM)
tM = 1/(aM+bM)
M[i,n+1] = M[i,n] + dt*(Minf-M[i,n])/tM
#Gating variables:H
aH = 0.0027*math.exp(-V[i,n]/20)
bH = 1/(1+math.exp(-0.1*(35-V[i,n])))
Hinf = aH/(aH+bH)
tH = 1/(aH+bH)
H[i,n+1] = H[i,n] + dt*(Hinf-H[i,n])/tH
#Gating variables:N
aN = 0.01*(55+V[i,n])/(1-math.exp(-0.1*(55+V[i,n])))
bN = 0.055*math.exp(-V[i,n]/80)
Ninf = aN/(aN+bN)
tN = 1/(aN+bN)
N[i,n+1] = N[i,n] + dt*(Ninf-N[i,n])/tN
    #No flux boundary condition at both ends
V[0,n+1] = V[1,n+1]
V[len(x)-1,n+1] = V[len(x)-2,n+1]
#Conduction velocity
Max1 = np.argmax(V[0,:])
Max2 = np.argmax(V[len(x)-1,:])
print(domain_length/((Max2-Max1)*dt))
#Plot V versus time for the first and last node of the axon.
plt.figure(1)
plt.clf()
plt.plot(time,V[0,:],'r-',time,V[len(x)-1,:],'b-')
plt.show()
|
[
"math.pow",
"matplotlib.pyplot.clf",
"numpy.argmax",
"matplotlib.pyplot.figure",
"math.exp",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((518, 549), 'numpy.arange', 'np.arange', (['(0)', 'domain_length', 'dx'], {}), '(0, domain_length, dx)\n', (527, 549), True, 'import numpy as np\n'), ((555, 588), 'numpy.arange', 'np.arange', (['(0)', 'simulation_time', 'dt'], {}), '(0, simulation_time, dt)\n', (564, 588), True, 'import numpy as np\n'), ((2350, 2368), 'numpy.argmax', 'np.argmax', (['V[0, :]'], {}), '(V[0, :])\n', (2359, 2368), True, 'import numpy as np\n'), ((2502, 2515), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2512, 2515), True, 'import matplotlib.pyplot as plt\n'), ((2516, 2525), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2523, 2525), True, 'import matplotlib.pyplot as plt\n'), ((2577, 2587), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2585, 2587), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1665), 'math.exp', 'math.exp', (['(-V[i, n] / 18)'], {}), '(-V[i, n] / 18)\n', (1650, 1665), False, 'import math\n'), ((1809, 1832), 'math.exp', 'math.exp', (['(-V[i, n] / 20)'], {}), '(-V[i, n] / 20)\n', (1817, 1832), False, 'import math\n'), ((2085, 2108), 'math.exp', 'math.exp', (['(-V[i, n] / 80)'], {}), '(-V[i, n] / 80)\n', (2093, 2108), False, 'import math\n'), ((1337, 1357), 'math.pow', 'math.pow', (['N[i, n]', '(4)'], {}), '(N[i, n], 4)\n', (1345, 1357), False, 'import math\n'), ((1595, 1626), 'math.exp', 'math.exp', (['(-0.1 * (40 + V[i, n]))'], {}), '(-0.1 * (40 + V[i, n]))\n', (1603, 1626), False, 'import math\n'), ((1850, 1881), 'math.exp', 'math.exp', (['(-0.1 * (35 - V[i, n]))'], {}), '(-0.1 * (35 - V[i, n]))\n', (1858, 1881), False, 'import math\n'), ((2038, 2069), 'math.exp', 'math.exp', (['(-0.1 * (55 + V[i, n]))'], {}), '(-0.1 * (55 + V[i, n]))\n', (2046, 2069), False, 'import math\n'), ((1282, 1302), 'math.pow', 'math.pow', (['M[i, n]', '(3)'], {}), '(M[i, n], 3)\n', (1290, 1302), False, 'import math\n')]
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
def entropy(p):
q = 1. - p
return -p * np.log(p) - q * np.log(q)
def jsd(p, q):
return [entropy(p / 2. + q / 2.) - entropy(p) / 2. - entropy(q) / 2.]
def jsd_grad(go, o, pq_list):
p, q = pq_list
m = (p + q) / 2.
return [np.log(p * (1 - m) / (1 - p) / m) / 2. * go, None]
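# Reference formulas being checked (Bernoulli case), with m = (p + q) / 2:
#   JSD(p, q) = H(m) - (H(p) + H(q)) / 2
#   dJSD/dp   = 0.5 * log(p * (1 - m) / ((1 - p) * m))
# where H is the binary entropy implemented by entropy() above, so JSD(p, p) = 0.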
class TestJSDOps(hu.HypothesisTestCase):
@given(n=st.integers(10, 100), **hu.gcs_cpu_only)
def test_bernoulli_jsd(self, n, gc, dc):
p = np.random.rand(n).astype(np.float32)
q = np.random.rand(n).astype(np.float32)
op = core.CreateOperator("BernoulliJSD", ["p", "q"], ["l"])
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[p, q],
reference=jsd,
output_to_grad='l',
grad_reference=jsd_grad,
)
|
[
"numpy.random.rand",
"numpy.log",
"hypothesis.strategies.integers",
"caffe2.python.core.CreateOperator"
] |
[((1536, 1590), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""BernoulliJSD"""', "['p', 'q']", "['l']"], {}), "('BernoulliJSD', ['p', 'q'], ['l'])\n", (1555, 1590), False, 'from caffe2.python import core\n'), ((1031, 1040), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (1037, 1040), True, 'import numpy as np\n'), ((1047, 1056), 'numpy.log', 'np.log', (['q'], {}), '(q)\n', (1053, 1056), True, 'import numpy as np\n'), ((1339, 1359), 'hypothesis.strategies.integers', 'st.integers', (['(10)', '(100)'], {}), '(10, 100)\n', (1350, 1359), True, 'import hypothesis.strategies as st\n'), ((1232, 1265), 'numpy.log', 'np.log', (['(p * (1 - m) / (1 - p) / m)'], {}), '(p * (1 - m) / (1 - p) / m)\n', (1238, 1265), True, 'import numpy as np\n'), ((1437, 1454), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (1451, 1454), True, 'import numpy as np\n'), ((1486, 1503), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (1500, 1503), True, 'import numpy as np\n')]
|
from collections import deque
import networkx as nx
import numpy as np
def random_subtree(T, alpha, beta, subtree_mark):
""" Random subtree of T according to Algorithm X in [1].
Args:
alpha (float): probability of continuing to a neighbor
        beta (float): probability of a non-empty subtree
T (NetworkX graph): the tree of which the subtree is taken
Returns:
A subtree of T
References:
[1] <NAME>., <NAME>. Pavlenko Bayesian structure learning in graphical models using sequential Monte Carlo.
"""
# Take empty subtree with prob beta
empty = np.random.multinomial(1, [beta, 1-beta]).argmax()
subtree_edges = []
subtree_nodes = []
if empty == 1:
separators = {}
subtree = nx.Graph()
return (subtree, [], [], {}, separators, 1-beta)
# Take non-empty subtree
n = T.order()
w = 0.0
visited = set() # cliques
q = deque([])
start = np.random.randint(n) # then n means new component
separators = {}
#start_node = T.nodes()[start] # nx < 2.x
start_node = list(T.nodes())[start] # nx > 2.x
q.append(start_node)
subtree_adjlist = {start_node: []}
while len(q) > 0:
node = q.popleft()
visited.add(node)
subtree_nodes.append(node)
#T.node[node]["subnode"] = subtree_mark
for neig in T.neighbors(node):
b = np.random.multinomial(1, [1-alpha, alpha]).argmax()
if neig not in visited:
if b == 1:
subtree_edges.append((node, neig))
subtree_adjlist[node].append(neig)
subtree_adjlist[neig] = [node]
q.append(neig)
# Add separator
sep = neig & node
if not sep in separators:
separators[sep] = []
separators[sep].append((neig, node))
else:
w += 1
# subtree = T.subgraph(subtree_nodes)
# assert subtree_nodes in T.nodes()
subtree = None
v = len(subtree_nodes)
probtree = beta * v * np.power(alpha, v-1) / np.float(n)
probtree *= np.power(1-alpha, w)
return (subtree, subtree_nodes, subtree_edges, subtree_adjlist, separators, probtree)
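# Usage sketch (the inputs below are assumptions, not from the original module):
# the tree nodes must support set intersection (e.g. frozensets of clique members,
# as in a junction tree), since separators are computed via `neig & node`:
#   T = nx.Graph()
#   T.add_edge(frozenset({1, 2}), frozenset({2, 3}))
#   _, nodes, edges, adjlist, seps, prob = random_subtree(T, 0.5, 0.9, subtree_mark=1)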
def pdf(subtree, T, alpha, beta):
""" Returns the probability of the subtree subtree generated by
random_subtree(T, alpha, beta).
Args:
T (NetworkX graph): A tree
subtree (NetworkX graph): a subtree of T drawn by the subtree kernel
alpha (float): Subtree kernel parameter
beta (float): Subtree kernel parameter
Returns:
float
"""
p = subtree.order()
if p == 0:
return 1.0 - beta
forest = T.subgraph(set(T.nodes()) - set(subtree.nodes()))
#components = nx.connected_components(forest)
components = forest.connected_components()
w = float(len(list(components)))
v = float(subtree.order())
alpha = float(alpha)
beta = float(beta)
n = float(T.order())
prob = beta * v * np.power(alpha, v-1) * np.power(1-alpha, w) / n
return prob
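# In other words, a non-empty subtree with v nodes whose removal leaves w
# connected components in T is drawn with probability
#   P(subtree) = beta * v * alpha**(v - 1) * (1 - alpha)**w / n
# and the empty subtree with probability 1 - beta (handled above).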
|
[
"numpy.float",
"collections.deque",
"numpy.power",
"networkx.Graph",
"numpy.random.multinomial",
"numpy.random.randint"
] |
[((937, 946), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (942, 946), False, 'from collections import deque\n'), ((959, 979), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (976, 979), True, 'import numpy as np\n'), ((2194, 2216), 'numpy.power', 'np.power', (['(1 - alpha)', 'w'], {}), '(1 - alpha, w)\n', (2202, 2216), True, 'import numpy as np\n'), ((770, 780), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (778, 780), True, 'import networkx as nx\n'), ((2166, 2177), 'numpy.float', 'np.float', (['n'], {}), '(n)\n', (2174, 2177), True, 'import numpy as np\n'), ((612, 654), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', '[beta, 1 - beta]'], {}), '(1, [beta, 1 - beta])\n', (633, 654), True, 'import numpy as np\n'), ((2143, 2165), 'numpy.power', 'np.power', (['alpha', '(v - 1)'], {}), '(alpha, v - 1)\n', (2151, 2165), True, 'import numpy as np\n'), ((3110, 3132), 'numpy.power', 'np.power', (['(1 - alpha)', 'w'], {}), '(1 - alpha, w)\n', (3118, 3132), True, 'import numpy as np\n'), ((3087, 3109), 'numpy.power', 'np.power', (['alpha', '(v - 1)'], {}), '(alpha, v - 1)\n', (3095, 3109), True, 'import numpy as np\n'), ((1405, 1449), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', '[1 - alpha, alpha]'], {}), '(1, [1 - alpha, alpha])\n', (1426, 1449), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Constants parameter functions
DRS Import Rules:
- only from apero.lang and apero.core.constants
Created on 2019-01-17 at 15:24
@author: cook
"""
from collections import OrderedDict
import copy
import numpy as np
import os
import pkg_resources
import shutil
import sys
from typing import Union, List, Type
from pathlib import Path
from apero.core.constants import constant_functions
from apero.lang import drs_exceptions
# =============================================================================
# Define variables
# =============================================================================
# Define script name
__NAME__ = 'param_functions.py'
# Define package name
PACKAGE = 'apero'
# Define relative path to 'const' sub-package
CONST_PATH = './core/instruments/'
CORE_PATH = './core/instruments/default/'
# Define config/constant/keyword scripts to open
SCRIPTS = ['default_config.py', 'default_constants.py', 'default_keywords.py']
USCRIPTS = ['user_config.ini', 'user_constants.ini', 'user_keywords.ini']
PSEUDO_CONST_FILE = 'pseudo_const.py'
PSEUDO_CONST_CLASS = 'PseudoConstants'
# get the Drs Exceptions
ArgumentError = drs_exceptions.ArgumentError
ArgumentWarning = drs_exceptions.ArgumentWarning
DRSError = drs_exceptions.DrsError
DRSWarning = drs_exceptions.DrsWarning
TextError = drs_exceptions.TextError
TextWarning = drs_exceptions.TextWarning
ConfigError = drs_exceptions.ConfigError
ConfigWarning = drs_exceptions.ConfigWarning
# get the logger
BLOG = drs_exceptions.basiclogger
# relative folder cache
REL_CACHE = dict()
CONFIG_CACHE = dict()
PCONFIG_CACHE = dict()
# cache some settings
SETTINGS_CACHE_KEYS = ['DRS_DEBUG', 'ALLOW_BREAKPOINTS']
SETTINGS_CACHE = dict()
# =============================================================================
# Define Custom classes
# =============================================================================
# case insensitive dictionary
class CaseInsensitiveDict(dict):
"""
Custom dictionary with string keys that are case insensitive
"""
def __init__(self, *arg, **kw):
"""
Construct the case insensitive dictionary class
:param arg: arguments passed to dict
:param kw: keyword arguments passed to dict
"""
# set function name
_ = display_func(None, '__init__', __NAME__, 'CaseInsensitiveDict')
# super from dict
super(CaseInsensitiveDict, self).__init__(*arg, **kw)
# force keys to be capitals (internally)
self.__capitalise_keys__()
def __getitem__(self, key: str) -> object:
"""
Method used to get the value of an item using "key"
used as x.__getitem__(y) <==> x[y]
where key is case insensitive
:param key: string, the key for the value returned (case insensitive)
:type key: str
:return value: object, the value stored at position "key"
"""
# set function name
_ = display_func(None, '__getitem__', __NAME__, 'CaseInsensitiveDict')
# make key capitals
key = _capitalise_key(key)
# return from supers dictionary storage
return super(CaseInsensitiveDict, self).__getitem__(key)
def __setitem__(self, key: str, value: object, source: str = None):
"""
Sets an item wrapper for self[key] = value
:param key: string, the key to set for the parameter
:param value: object, the object to set (as in dictionary) for the
parameter
:param source: string, the source for the parameter
:type key: str
:type value: object
:type source: str
:return: None
"""
# set function name
_ = display_func(None, '__setitem__', __NAME__, 'CaseInsensitiveDict')
# capitalise string keys
key = _capitalise_key(key)
# then do the normal dictionary setting
super(CaseInsensitiveDict, self).__setitem__(key, value)
def __contains__(self, key: str) -> bool:
"""
Method to find whether CaseInsensitiveDict instance has key="key"
used with the "in" operator
if key exists in CaseInsensitiveDict True is returned else False
is returned
:param key: string, "key" to look for in CaseInsensitiveDict instance
:type key: str
:return bool: True if CaseInsensitiveDict instance has a key "key",
else False
:rtype: bool
"""
# set function name
_ = display_func(None, '__contains__', __NAME__, 'CaseInsensitiveDict')
# capitalize key first
key = _capitalise_key(key)
# return True if key in keys else return False
return super(CaseInsensitiveDict, self).__contains__(key)
def __delitem__(self, key: str):
"""
Deletes the "key" from CaseInsensitiveDict instance, case insensitive
:param key: string, the key to delete from ParamDict instance,
case insensitive
:type key: str
:return None:
"""
# set function name
_ = display_func(None, '__delitem__', __NAME__, 'CaseInsensitiveDict')
# capitalize key first
key = _capitalise_key(key)
# delete key from keys
super(CaseInsensitiveDict, self).__delitem__(key)
def get(self, key: str, default: Union[None, object] = None):
"""
Overrides the dictionary get function
If "key" is in CaseInsensitiveDict instance then returns this value,
else returns "default" (if default returned source is set to None)
key is case insensitive
:param key: string, the key to search for in ParamDict instance
case insensitive
:param default: object or None, if key not in ParamDict instance this
object is returned
:type key: str
:type default: Union[None, object]
:return value: if key in ParamDict instance this value is returned else
the default value is returned (None if undefined)
"""
# set function name
_ = display_func(None, 'get', __NAME__, 'CaseInsensitiveDict')
# capitalise string keys
key = _capitalise_key(key)
# if we have the key return the value
if key in self.keys():
return self.__getitem__(key)
# else return the default key (None if not defined)
else:
return default
def __capitalise_keys__(self):
"""
        Capitalizes all keys in the dictionary (used to make lookups case
        insensitive), only if keys entered are strings
:return None:
"""
# set function name
_ = display_func(None, '__capitalise_keys__', __NAME__,
'CaseInsensitiveDict')
# make keys a list
keys = list(self.keys())
# loop around key in keys
for key in keys:
# check if key is a string
if type(key) == str:
# get value
value = super(CaseInsensitiveDict, self).__getitem__(key)
# delete old key
super(CaseInsensitiveDict, self).__delitem__(key)
# if it is a string set it to upper case
key = key.upper()
# set the new key
super(CaseInsensitiveDict, self).__setitem__(key, value)
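# Illustrative sketch (editor-added, not executed here): assuming the class
# above, string keys behave case-insensitively:
#     d = CaseInsensitiveDict()
#     d['MyKey'] = 1
#     assert d['mykey'] == 1
#     assert 'MYKEY' in d
#     assert d.get('missing', default=0) == 0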
class ListCaseInsensitiveDict(CaseInsensitiveDict):
def __getitem__(self, key: str) -> list:
"""
Method used to get the value of an item using "key"
used as x.__getitem__(y) <==> x[y]
where key is case insensitive
:param key: string, the key for the value returned (case insensitive)
:type key: str
:return value: list, the value stored at position "key"
"""
# set function name
_ = display_func(None, '__getitem__', __NAME__,
'ListCaseInsensitiveDict')
# return from supers dictionary storage
# noinspection PyTypeChecker
return list(super(ListCaseInsensitiveDict, self).__getitem__(key))
def __setitem__(self, key: str, value: list, source: str = None):
"""
Sets an item wrapper for self[key] = value
:param key: string, the key to set for the parameter
:param value: object, the object to set (as in dictionary) for the
parameter
:param source: string, the source for the parameter
:type key: str
:type value: list
:type source: str
:return: None
"""
# set function name
_ = display_func(None, '__setitem__', __NAME__,
'ListCaseInsensitiveDict')
# then do the normal dictionary setting
super(ListCaseInsensitiveDict, self).__setitem__(key, list(value))
class ParamDict(CaseInsensitiveDict):
"""
    Custom dictionary to retain the source of a parameter (added via
    set_source, retrieved via get_source). String keys are case insensitive.
"""
def __init__(self, *arg, **kw):
"""
Constructor for parameter dictionary, calls dict.__init__
i.e. the same as running dict(*arg, *kw)
:param arg: arguments passed to CaseInsensitiveDict
:param kw: keyword arguments passed to CaseInsensitiveDict
"""
# set function name
_ = display_func(None, '__init__', __NAME__, 'ParamDict')
# storage for the sources
self.sources = CaseInsensitiveDict()
# storage for the source history
self.source_history = ListCaseInsensitiveDict()
# storage for the instances
self.instances = CaseInsensitiveDict()
# the print format
self.pfmt = '\t{0:30s}{1:45s} # {2}'
# the print format for list items
self.pfmt_ns = '\t{1:45s}'
# whether the parameter dictionary is locked for editing
self.locked = False
# get text entry from constants (manual database)
self.textentry = constant_functions.DisplayText()
# run the super class (CaseInsensitiveDict <-- dict)
super(ParamDict, self).__init__(*arg, **kw)
def __getitem__(self, key: str) -> object:
"""
Method used to get the value of an item using "key"
used as x.__getitem__(y) <==> x[y]
where key is case insensitive
:param key: string, the key for the value returned (case insensitive)
:type key: str
:return value: object, the value stored at position "key"
:raises ConfigError: if key not found
"""
# set function name
_ = display_func(None, '__getitem__', __NAME__, 'ParamDict')
# try to get item from super
try:
return super(ParamDict, self).__getitem__(key)
except KeyError:
# log that parameter was not found in parameter dictionary
emsg = self.textentry('00-003-00024', args=[key])
raise ConfigError(emsg, level='error')
def __setitem__(self, key: str, value: object,
source: Union[None, str] = None,
instance: Union[None, object] = None):
"""
Sets an item wrapper for self[key] = value
:param key: string, the key to set for the parameter
:param value: object, the object to set (as in dictionary) for the
parameter
:param source: string, the source for the parameter
:type key: str
:type source: Union[None, str]
:type instance: Union[None, object]
:return: None
:raises ConfigError: if parameter dictionary is locked
"""
global SETTINGS_CACHE
# set function name
_ = display_func(None, '__setitem__', __NAME__, 'ParamDict')
# deal with parameter dictionary being locked
if self.locked:
# log that parameter dictionary is locked so we cannot set key
raise ConfigError(self.textentry('00-003-00025', args=[key, value]))
        # if we don't have the key in sources set it regardless
if key not in self.sources:
self.sources[key] = source
self.instances[key] = instance
# if we do have the key only set it if source is not None
elif source is not None:
self.sources[key] = source
self.instances[key] = instance
# if setting in cached settings add
if key in SETTINGS_CACHE_KEYS:
SETTINGS_CACHE[key] = copy.deepcopy(value)
# then do the normal dictionary setting
super(ParamDict, self).__setitem__(key, value)
def __contains__(self, key: str) -> bool:
"""
Method to find whether ParamDict instance has key="key"
used with the "in" operator
if key exists in ParamDict True is returned else False is returned
:param key: string, "key" to look for in ParamDict instance
:return bool: True if ParamDict instance has a key "key", else False
"""
# set function name
_ = display_func(None, '__contains__', __NAME__, 'ParamDict')
# run contains command from super
return super(ParamDict, self).__contains__(key)
def __delitem__(self, key: str):
"""
Deletes the "key" from ParamDict instance, case insensitive
:param key: string, the key to delete from ParamDict instance,
case insensitive
:return None:
"""
# set function name
_ = display_func(None, '__delitem__', __NAME__, 'ParamDict')
# delete item using super
super(ParamDict, self).__delitem__(key)
def __repr__(self):
"""
        Get the official string representation for this instance
:return: return the string representation
:rtype: str
"""
# set function name
_ = display_func(None, '__repr__', __NAME__, 'ParamDict')
# get string from string print
return self._string_print()
def __str__(self) -> str:
"""
Get the informal string representation for this instance
:return: return the string representation
:rtype: str
"""
# set function name
        _ = display_func(None, '__str__', __NAME__, 'ParamDict')
# get string from string print
return self._string_print()
def set(self, key: str, value: object,
source: Union[None, str] = None,
instance: Union[None, object] = None):
"""
Set an item even if params is locked
:param key: str, the key to set
:param value: object, the value of the key to set
:param source: str, the source of the value/key to set
:param instance: object, the instance of the value/key to set
:type key: str
:type source: str
:type instance: object
:return: None
"""
# set function name
_ = display_func(None, 'set', __NAME__, 'ParamDict')
        # if we don't have the key in sources set it regardless
if key not in self.sources:
self.sources[key] = source
self.instances[key] = instance
# if we do have the key only set it if source is not None
elif source is not None:
self.sources[key] = source
self.instances[key] = instance
# then do the normal dictionary setting
super(ParamDict, self).__setitem__(key, value)
def lock(self):
"""
Locks the parameter dictionary
:return:
"""
# set function name
_ = display_func(None, 'lock', __NAME__, 'ParamDict')
# set locked to True
self.locked = True
def unlock(self):
"""
Unlocks the parameter dictionary
:return:
"""
# set function name
_ = display_func(None, 'unlock', __NAME__, 'ParamDict')
# set locked to False
self.locked = False
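    # Illustrative sketch (editor-added, hypothetical usage): once a ParamDict
    # is locked, normal item assignment raises ConfigError, but set() is still
    # allowed:
    #     params = ParamDict()
    #     params['A'] = 1
    #     params.lock()
    #     params.set('B', 2, source='example')   # allowed despite the lock
    #     params.unlock()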
def get(self, key: str, default: Union[None, object] = None) -> object:
"""
Overrides the dictionary get function
If "key" is in ParamDict instance then returns this value, else
returns "default" (if default returned source is set to None)
key is case insensitive
:param key: string, the key to search for in ParamDict instance
case insensitive
:param default: object or None, if key not in ParamDict instance this
object is returned
:type key: str
:return value: if key in ParamDict instance this value is returned else
the default value is returned (None if undefined)
"""
# set function name
_ = display_func(None, 'get', __NAME__, 'ParamDict')
# if we have the key return the value
if key in self.keys():
return self.__getitem__(key)
# else return the default key (None if not defined)
else:
self.sources[key] = None
return default
def set_source(self, key: str, source: str):
"""
Set a key to have sources[key] = source
raises a ConfigError if key not found
:param key: string, the main dictionary string
:param source: string, the source to set
:type key: str
:type source: str
:return None:
:raises ConfigError: if key not found
"""
# set function name
_ = display_func(None, 'set_source', __NAME__, 'ParamDict')
# capitalise
key = _capitalise_key(key)
# don't put full path for sources in package
source = _check_mod_source(source)
# only add if key is in main dictionary
if key in self.keys():
self.sources[key] = source
# add to history
if key in self.source_history:
self.source_history[key].append(source)
else:
self.source_history[key] = [source]
else:
# log error: source cannot be added for key
emsg = self.textentry('00-003-00026', args=[key])
raise ConfigError(emsg, level='error')
def set_instance(self, key: str, instance: object):
"""
Set a key to have instance[key] = instance
raise a Config Error if key not found
:param key: str, the key to add
:param instance: object, the instance to store (normally Const/Keyword)
:type key: str
:return None:
:raises ConfigError: if key not found
"""
# set function name
_ = display_func(None, 'set_instance', __NAME__, 'ParamDict')
# capitalise
key = _capitalise_key(key)
# only add if key is in main dictionary
if key in self.keys():
self.instances[key] = instance
else:
# log error: instance cannot be added for key
emsg = self.textentry('00-003-00027', args=[key])
raise ConfigError(emsg, level='error')
def append_source(self, key: str, source: str):
"""
Adds source to the source of key (appends if exists)
i.e. sources[key] = oldsource + source
:param key: string, the main dictionary string
:param source: string, the source to set
:type key: str
:type source: str
:return None:
"""
# set function name
_ = display_func(None, 'append_source', __NAME__, 'ParamDict')
# capitalise
key = _capitalise_key(key)
# if key exists append source to it
if key in self.keys() and key in list(self.sources.keys()):
self.sources[key] += ' {0}'.format(source)
else:
self.set_source(key, source)
def set_sources(self, keys: List[str],
sources: Union[str, List[str], dict]):
"""
Set a list of keys sources
raises a ConfigError if key not found
:param keys: list of strings, the list of keys to add sources for
:param sources: string or list of strings or dictionary of strings,
the source or sources to add,
if a dictionary source = sources[key] for key = keys[i]
if list source = sources[i] for keys[i]
if string all sources with these keys will = source
:type keys: list
:type sources: Union[str, list, dict]
:return None:
"""
# set function name
_ = display_func(None, 'set_sources', __NAME__, 'ParamDict')
# loop around each key in keys
for k_it in range(len(keys)):
# assign the key from k_it
key = keys[k_it]
# capitalise
key = _capitalise_key(key)
# Get source for this iteration
if type(sources) == list:
source = sources[k_it]
elif type(sources) == dict:
source = sources[key]
else:
source = str(sources)
# set source
self.set_source(key, source)
def set_instances(self, keys: List[str],
instances: Union[object, list, dict]):
"""
Set a list of keys sources
raises a ConfigError if key not found
:param keys: list of strings, the list of keys to add sources for
:param instances: object or list of objects or dictionary of objects,
the source or sources to add,
if a dictionary source = sources[key] for key = keys[i]
if list source = sources[i] for keys[i]
if object all sources with these keys will = source
:type keys: list
:type instances: Union[object, list, dict]
:return None:
"""
# set function name
_ = display_func(None, 'set_instances', __NAME__, 'ParamDict')
# loop around each key in keys
for k_it in range(len(keys)):
# assign the key from k_it
key = keys[k_it]
# capitalise
key = _capitalise_key(key)
# Get source for this iteration
if type(instances) == list:
instance = instances[k_it]
elif type(instances) == dict:
instance = instances[key]
else:
instance = instances
# set source
self.set_instance(key, instance)
def append_sources(self, keys: str, sources: Union[str, List[str], dict]):
"""
Adds list of keys sources (appends if exists)
raises a ConfigError if key not found
:param keys: list of strings, the list of keys to add sources for
:param sources: string or list of strings or dictionary of strings,
the source or sources to add,
if a dictionary source = sources[key] for key = keys[i]
if list source = sources[i] for keys[i]
if string all sources with these keys will = source
:type keys: list
:type sources: Union[str, List[str], dict]
:return None:
"""
# set function name
_ = display_func(None, 'append_sources', __NAME__, 'ParamDict')
# loop around each key in keys
for k_it in range(len(keys)):
# assign the key from k_it
key = keys[k_it]
# capitalise
key = _capitalise_key(key)
# Get source for this iteration
if type(sources) == list:
source = sources[k_it]
elif type(sources) == dict:
source = sources[key]
else:
source = str(sources)
# append key
self.append_source(key, source)
def set_all_sources(self, source: str):
"""
Set all keys in dictionary to this source
:param source: string, all keys will be set to this source
:type source: str
:return None:
"""
# set function name
_ = display_func(None, 'set_all_sources', __NAME__, 'ParamDict')
# loop around each key in keys
for key in self.keys():
# capitalise
key = _capitalise_key(key)
# set key
self.sources[key] = source
def append_all_sources(self, source: str):
"""
Sets all sources to this "source" value
:param source: string, the source to set
:type source: str
:return None:
"""
# set function name
_ = display_func(None, 'append_all_sources', __NAME__, 'ParamDict')
# loop around each key in keys
for key in self.keys():
# capitalise
key = _capitalise_key(key)
# set key
self.sources[key] += ' {0}'.format(source)
def get_source(self, key: str) -> str:
"""
Get a source from the parameter dictionary (must be set)
raises a ConfigError if key not found
:param key: string, the key to find (must be set)
:return source: string, the source of the parameter
"""
# set function name
_ = display_func(None, 'get_source', __NAME__, 'ParamDict')
# capitalise
key = _capitalise_key(key)
# if key in keys and sources then return source
if key in self.keys() and key in self.sources.keys():
return str(self.sources[key])
# else raise a Config Error
else:
# log error: no source set for key
emsg = self.textentry('00-003-00028', args=[key])
raise ConfigError(emsg, level='error')
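    # Illustrative sketch (editor-added, hypothetical usage): every key can
    # carry a source string recording where it was defined:
    #     params = ParamDict()
    #     params['EXPTIME'] = 10.0
    #     params.set_source('EXPTIME', 'user_config.ini')
    #     params.get_source('EXPTIME')   # -> 'user_config.ini'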
def get_instance(self, key: str) -> object:
"""
Get a source from the parameter dictionary (must be set)
raises a ConfigError if key not found
:param key: string, the key to find (must be set)
:return source: string, the source of the parameter
"""
# set function name
_ = display_func(None, 'get_instance', __NAME__, 'ParamDict')
# capitalise
key = _capitalise_key(key)
# if key in keys and sources then return source
if key in self.keys() and key in self.instances.keys():
return self.instances[key]
# else raise a Config Error
else:
emsg = self.textentry('00-003-00029', args=[key])
raise ConfigError(emsg, level='error')
def source_keys(self) -> List[str]:
"""
Get a dict_keys for the sources for this parameter dictionary
order the same as self.keys()
:return sources: values of sources dictionary
"""
# set function name
_ = display_func(None, 'source_keys', __NAME__, 'ParamDict')
# return all keys in source dictionary
return list(self.sources.keys())
def source_values(self) -> List[object]:
"""
Get a dict_values for the sources for this parameter dictionary
order the same as self.keys()
:return sources: values of sources dictionary
"""
# set function name
_ = display_func(None, 'source_values', __NAME__, 'ParamDict')
# return all values in source dictionary
return list(self.sources.values())
def startswith(self, substring: str) -> List[str]:
"""
Return all keys that start with this substring
:param substring: string, the prefix that the keys start with
:type substring: str
:return keys: list of strings, the keys with this substring at the start
"""
# set function name
_ = display_func(None, 'startswith', __NAME__, 'ParamDict')
# define return list
return_keys = []
# loop around keys
for key in self.keys():
# make sure key is string
if type(key) != str:
continue
# if first
if str(key).startswith(substring.upper()):
return_keys.append(key)
# return keys
return return_keys
def contains(self, substring: str) -> List[str]:
"""
Return all keys that contain this substring
:param substring: string, the sub-string to look for in all keys
:type substring: str
:return keys: list of strings, the keys which contain this substring
"""
# set function name
_ = display_func(None, 'contains', __NAME__, 'ParamDict')
# define return list
return_keys = []
# loop around keys
for key in self.keys():
# make sure key is string
if type(key) != str:
continue
# if first
if substring.upper() in key:
return_keys.append(key)
# return keys
return return_keys
def endswith(self, substring: str) -> List[str]:
"""
Return all keys that end with this substring
:param substring: string, the suffix that the keys ends with
:type substring: str
:return keys: list of strings, the keys with this substring at the end
"""
# set function name
_ = display_func(None, 'endswith', __NAME__, 'ParamDict')
# define return list
return_keys = []
# loop around keys
for key in self.keys():
# make sure key is string
if type(key) != str:
continue
# if first
if str(key).endswith(substring.upper()):
return_keys.append(key)
# return keys
return return_keys
def copy(self):
"""
Copy a parameter dictionary (deep copy parameters)
:return: the copy of the parameter dictionary
:rtype: ParamDict
"""
# set function name
_ = display_func(None, 'copy', __NAME__, 'ParamDict')
# make new copy of param dict
pp = ParamDict()
keys = list(self.keys())
values = list(self.values())
# loop around keys and add to new copy
for k_it, key in enumerate(keys):
value = values[k_it]
# try to deep copy parameter
if isinstance(value, ParamDict):
pp[key] = value.copy()
else:
# noinspection PyBroadException
try:
pp[key] = copy.deepcopy(value)
except Exception as _:
pp[key] = type(value)(value)
# copy source
if key in self.sources:
pp.set_source(key, str(self.sources[key]))
else:
pp.set_source(key, 'Unknown')
# copy source history
if key in self.source_history:
pp.source_history[key] = list(self.source_history[key])
else:
pp.source_history[key] = []
# copy instance
if key in self.instances:
pp.set_instance(key, self.instances[key])
else:
pp.set_instance(key, None)
# return new param dict filled
return pp
def merge(self, paramdict, overwrite: bool = True):
"""
Merge another parameter dictionary with this one
:param paramdict: ParamDict, another parameter dictionary to merge
with this one
:param overwrite: bool, if True (default) allows overwriting of
parameters, else skips ones already present
:type paramdict: ParamDict
:type overwrite: bool
:return: None
"""
# set function name
_ = display_func(None, 'merge', __NAME__, 'ParamDict')
# add param dict to self
for key in paramdict:
# deal with no overwriting
            if not overwrite and key in self.keys():
continue
# copy source
if key in paramdict.sources:
ksource = paramdict.sources[key]
else:
ksource = None
# copy instance
if key in paramdict.instances:
kinst = paramdict.instances[key]
else:
kinst = None
# add to self
self.set(key, paramdict[key], ksource, kinst)
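    # Illustrative sketch (editor-added, hypothetical usage): merge copies
    # keys, sources and instances from another ParamDict into this one:
    #     p1, p2 = ParamDict(A=1), ParamDict(A=2, B=3)
    #     p1.merge(p2)                   # p1['A'] == 2, p1['B'] == 3
    #     p1.merge(p2, overwrite=False)  # existing keys are left untouched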
def _string_print(self) -> str:
"""
Constructs a string representation of the instance
:return: a string representation of the instance
:rtype: str
"""
# set function name
_ = display_func(None, '_string_print', __NAME__, 'ParamDict')
# get keys and values
keys = list(self.keys())
values = list(self.values())
# string storage
return_string = 'ParamDict:\n'
strvalues = []
# loop around each key in keys
for k_it, key in enumerate(keys):
# get this iterations values
value = values[k_it]
# deal with no source
if key not in self.sources:
self.sources[key] = 'None'
# print value
if type(value) in [list, np.ndarray]:
sargs = [key, list(value), self.sources[key], self.pfmt]
strvalues += _string_repr_list(*sargs)
elif type(value) in [dict, OrderedDict, ParamDict]:
strvalue = list(value.keys()).__repr__()[:40]
sargs = [key + '[DICT]', strvalue, self.sources[key]]
strvalues += [self.pfmt.format(*sargs)]
else:
strvalue = str(value)[:40]
sargs = [key + ':', strvalue, self.sources[key]]
strvalues += [self.pfmt.format(*sargs)]
# combine list into single string
for string_value in strvalues:
return_string += '\n {0}'.format(string_value)
# return string
return return_string + '\n'
def listp(self, key: str, separator: str = ',',
dtype: Union[None, Type] = None) -> list:
"""
Turn a string list parameter (separated with `separator`) into a list
of objects (of data type `dtype`)
i.e. ParamDict['MYPARAM'] = '1, 2, 3, 4'
             x = ParamDict.listp('MYPARAM', dtype=int)
gives:
x = list([1, 2, 3, 4])
:param key: str, the key that contains a string list
:param separator: str, the character that separates
:param dtype: type, the type to cast the list element to
:return: the list of values extracted from the string for `key`
:rtype: list
"""
# set function name
_ = display_func(None, 'listp', __NAME__, 'ParamDict')
# if key is present attempt str-->list
if key in self.keys():
item = self.__getitem__(key)
else:
# log error: parameter not found in parameter dict (via listp)
emsg = self.textentry('00-003-00030', args=[key])
raise ConfigError(emsg, level='error')
# convert string
if key in self.keys() and isinstance(item, str):
return _map_listparameter(str(item), separator=separator,
dtype=dtype)
elif isinstance(item, list):
return item
else:
# log error: parameter not found in parameter dict (via listp)
emsg = self.textentry('00-003-00032', args=[key])
raise ConfigError(emsg, level='error')
def dictp(self, key: str, dtype: Union[str, None] = None) -> dict:
"""
Turn a string dictionary parameter into a python dictionary
of objects (of data type `dtype`)
i.e. ParamDict['MYPARAM'] = '{"varA":1, "varB":2}'
             x = ParamDict.dictp('MYPARAM', dtype=int)
gives:
x = dict(varA=1, varB=2)
Note string dictionary must be in the {"key":value} format
:param key: str, the key that contains a string list
:param dtype: type, the type to cast the list element to
:return: the list of values extracted from the string for `key`
:rtype: dict
"""
# set function name
_ = display_func(None, 'dictp', __NAME__, 'ParamDict')
# if key is present attempt str-->dict
if key in self.keys():
item = self.__getitem__(key)
else:
# log error message: parameter not found in param dict (via dictp)
emsg = self.textentry('00-003-00031', args=[key])
raise ConfigError(emsg.format(key), level='error')
# convert string
if isinstance(item, str):
return _map_dictparameter(str(item), dtype=dtype)
elif isinstance(item, dict):
return item
else:
# log error message: parameter not found in param dict (via dictp)
emsg = self.textentry('00-003-00033', args=[key])
raise ConfigError(emsg.format(key), level='error')
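    # Illustrative sketch (editor-added, hypothetical usage): listp/dictp turn
    # string-encoded parameters into python lists/dicts:
    #     params = ParamDict()
    #     params['FIBER_TYPES'] = 'AB, A, B, C'
    #     params['THRESHOLDS'] = '{"low": 1, "high": 5}'
    #     params.listp('FIBER_TYPES', dtype=str)   # -> ['AB', 'A', 'B', 'C']
    #     params.dictp('THRESHOLDS', dtype=int)    # -> {'low': 1, 'high': 5}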
def get_instanceof(self, lookup: object, nameattr: str = 'name') -> dict:
"""
Get all instances of object instance lookup
i.e. perform isinstance(object, lookup)
:param lookup: object, the instance to lookup
:param nameattr: str, the attribute in instance that we will return
as the key
:return: a dictionary of keys/value pairs where each value is an
instance that belongs to instance of `lookup`
:rtype: dict
"""
# set function name
_ = display_func(None, 'get_instanceof', __NAME__, 'ParamDict')
# output storage
keydict = dict()
# loop around all keys
for key in list(self.instances.keys()):
# get the instance for this key
instance = self.instances[key]
# skip None
if instance is None:
continue
# else check instance type
if isinstance(instance, type(lookup)):
if hasattr(instance, nameattr):
name = getattr(instance, nameattr)
keydict[name] = instance
else:
continue
# return keyworddict
return keydict
def info(self, key: str):
"""
Display the information related to a specific key
:param key: str, the key to display information about
:type key: str
:return: None
"""
# set function name
_ = display_func(None, 'info', __NAME__, 'ParamDict')
# deal with key not existing
if key not in self.keys():
print(self.textentry('40-000-00001', args=[key]))
return
# print key title
print(self.textentry('40-000-00002', args=[key]))
# print value stats
value = self.__getitem__(key)
# print the data type
print(self.textentry('40-000-00003', args=[type(value).__name__]))
# deal with lists and numpy array
if isinstance(value, (list, np.ndarray)):
sargs = [key, list(value), None, self.pfmt_ns]
wargs = [np.nanmin(value), np.nanmax(value),
np.sum(np.isnan(value)) > 0, _string_repr_list(*sargs)]
print(self.textentry('40-000-00004', args=wargs))
# deal with dictionaries
elif isinstance(value, (dict, OrderedDict, ParamDict)):
strvalue = list(value.keys()).__repr__()[:40]
sargs = [key + '[DICT]', strvalue, None]
wargs = [len(list(value.keys())), self.pfmt_ns.format(*sargs)]
print(self.textentry('40-000-00005', args=wargs))
# deal with everything else
else:
strvalue = str(value)[:40]
sargs = [key + ':', strvalue, None]
wargs = [self.pfmt_ns.format(*sargs)]
print(self.textentry('40-000-00006', args=wargs))
# add source info
if key in self.sources:
print(self.textentry('40-000-00007', args=[self.sources[key]]))
# add instances info
if key in self.instances:
print(self.textentry('40-000-00008', args=[self.instances[key]]))
def history(self, key: str):
"""
Display the history of where key was defined (using source)
:param key: str, the key to print history of
:type key: str
:return: None
"""
# set function name
_ = display_func(None, 'history', __NAME__, 'ParamDict')
# if history found then print it
if key in self.source_history:
# print title: History for key
print(self.textentry('40-000-00009', args=[key]))
# loop around history and print row by row
for it, entry in enumerate(self.source_history[key]):
print('{0}: {1}'.format(it + 1, entry))
# else display that there was not history found
else:
print(self.textentry('40-000-00010', args=[key]))
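# Illustrative sketch (editor-added, hypothetical usage): info() and history()
# are purely diagnostic helpers for an existing parameter dictionary:
#     params = load_config()         # assuming a valid configuration
#     params.info('DRS_DEBUG')       # prints type, value and source of the key
#     params.history('DRS_DEBUG')    # prints every source that set the key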
# =============================================================================
# Define functions
# =============================================================================
def update_paramdicts(*args, **kwargs):
# set function name (cannot break here --> no access to inputs)
_ = display_func(None, 'update_paramdicts', __NAME__)
# get key from kwargs
key = kwargs.get('key', None)
# get value from kwargs
value = kwargs.get('value', None)
# get source from kwargs
source = kwargs.get('source', None)
# get instance from kwargs
instance = kwargs.get('instance', None)
# loop through param dicts
for arg in args:
if isinstance(arg, ParamDict):
arg.set(key, value=value, source=source, instance=instance)
def load_config(instrument=None, from_file=True, cache=True):
global CONFIG_CACHE
# set function name (cannot break here --> no access to inputs)
_ = display_func(None, 'load_config', __NAME__)
# check config cache
if instrument in CONFIG_CACHE and cache:
return CONFIG_CACHE[instrument].copy()
# deal with instrument set to 'None'
if isinstance(instrument, str):
if instrument.upper() == 'NONE':
instrument = None
# get instrument sub-package constants files
modules = get_module_names(instrument)
# get constants from modules
try:
keys, values, sources, instances = _load_from_module(modules, True)
except ConfigError:
sys.exit(1)
params = ParamDict(zip(keys, values))
# Set the source
params.set_sources(keys=keys, sources=sources)
# add to params
for it in range(len(keys)):
# set instance (Const/Keyword instance)
params.set_instance(keys[it], instances[it])
# get constants from user config files
if from_file:
# get instrument user config files
files = _get_file_names(params, instrument)
try:
keys, values, sources, instances = _load_from_file(files, modules)
except ConfigError:
sys.exit(1)
# add to params
for it in range(len(keys)):
# set value
params[keys[it]] = values[it]
# set instance (Const/Keyword instance)
params.set_instance(keys[it], instances[it])
params.set_sources(keys=keys, sources=sources)
# save sources to params
params = _save_config_params(params)
# cache these params
if cache:
CONFIG_CACHE[instrument] = params.copy()
# return the parameter dictionary
return params
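# Illustrative sketch (editor-added; the instrument name is an assumption):
#     params = load_config(instrument='SPIROU')
#     params = load_config(instrument='SPIROU', cache=False)   # bypass cache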
def load_pconfig(instrument=None):
global PCONFIG_CACHE
# set function name (cannot break here --> no access to inputs)
func_name = display_func(None, 'load_pconfig', __NAME__)
# check cache
if instrument in PCONFIG_CACHE:
return PCONFIG_CACHE[instrument]
# deal with instrument set to 'None'
if isinstance(instrument, str):
if instrument.upper() == 'NONE':
instrument = None
# get instrument sub-package constants files
modules = get_module_names(instrument, mod_list=[PSEUDO_CONST_FILE])
# import module
mod = constant_functions.import_module(func_name, modules[0])
# check that we have class and import it
if hasattr(mod, PSEUDO_CONST_CLASS):
psconst = getattr(mod, PSEUDO_CONST_CLASS)
# else raise error
else:
emsg = 'Module "{0}" is required to have class "{1}"'
ConfigError(emsg.format(modules[0], PSEUDO_CONST_CLASS))
sys.exit(1)
# get instance of PseudoClass
pconfig = psconst(instrument=instrument)
# update cache
PCONFIG_CACHE[instrument] = pconfig
return pconfig
def get_config_all():
# set function name (cannot break here --> no access to inputs)
_ = display_func(None, 'get_config_all', __NAME__)
# get module names
modules = get_module_names(None)
# loop around modules and print our __all__ statement
for module in modules:
# generate a list of all functions in a module
rawlist = constant_functions.generate_consts(module)[0]
# print to std-out
print('=' * 50)
print('MODULE: {0}'.format(module))
print('=' * 50)
print('')
print('__all__ = [\'{0}\']'.format('\', \''.join(rawlist)))
print('')
def get_file_names(instrument=None, file_list=None, instrument_path=None,
default_path=None):
# set function name (cannot break here --> no access to inputs)
func_name = display_func(None, 'get_file_names', __NAME__)
# get core path
core_path = get_relative_folder(PACKAGE, default_path)
# get constants package path
if instrument is not None:
const_path = get_relative_folder(PACKAGE, instrument_path)
# get the directories within const_path
filelist = np.sort(os.listdir(const_path))
directories = []
for filename in filelist:
            if os.path.isdir(os.path.join(const_path, filename)):
directories.append(filename)
else:
const_path = None
# get the directories within const_path
filelist = np.sort(os.listdir(core_path))
directories = []
for filename in filelist:
            if os.path.isdir(os.path.join(core_path, filename)):
directories.append(filename)
# construct module import name
if instrument is None:
filepath = os.path.join(core_path, '')
else:
filepath = os.path.join(const_path, instrument.lower())
# get module names
paths = []
for filename in file_list:
# get file path
fpath = os.path.join(filepath, filename)
# append if path exists
if not os.path.exists(fpath):
emsgs = ['DevError: Filepath "{0}" does not exist.'
''.format(fpath),
'\tfunction = {0}'.format(func_name)]
raise ConfigError(emsgs, level='error')
# append mods
paths.append(fpath)
# make sure we found something
if len(paths) == 0:
emsgs = ['DevError: No files found',
'\tfunction = {0}'.format(func_name)]
raise ConfigError(emsgs, level='error')
# return modules
return paths
def get_module_names(instrument=None, mod_list=None, instrument_path=None,
default_path=None, path=True):
# set function name (cannot break here --> no access to inputs)
    func_name = display_func(None, 'get_module_names', __NAME__)
# deal with no module list
if mod_list is None:
mod_list = SCRIPTS
# deal with no path
if instrument_path is None:
instrument_path = CONST_PATH
if default_path is None:
default_path = CORE_PATH
# get constants package path
const_path = get_relative_folder(PACKAGE, instrument_path)
core_path = get_relative_folder(PACKAGE, default_path)
# get the directories within const_path
filelist = np.sort(os.listdir(const_path))
directories = []
for filename in filelist:
        if os.path.isdir(os.path.join(const_path, filename)):
directories.append(filename)
# construct sub-module name
relpath = os.path.normpath(instrument_path).replace('.', '')
relpath = relpath.replace(os.sep, '.').strip('.')
corepath = os.path.normpath(default_path).replace('.', '')
corepath = corepath.replace(os.sep, '.').strip('.')
# construct module import name
if instrument is None:
modpath = '{0}.{1}'.format(PACKAGE, corepath)
filepath = os.path.join(core_path, '')
else:
modpath = '{0}.{1}.{2}'.format(PACKAGE, relpath, instrument.lower())
filepath = os.path.join(const_path, instrument.lower())
# get module names
mods, paths = [], []
for script in mod_list:
# make sure script doesn't end with .py
mscript = script.split('.')[0]
# get mod path
mod = '{0}.{1}'.format(modpath, mscript)
# get file path
fpath = os.path.join(filepath, script)
# append if path exists
found = True
if not os.path.exists(fpath):
if not fpath.endswith('.py'):
fpath += '.py'
if not os.path.exists(fpath):
found = False
else:
found = False
# deal with no file found
if not found:
emsgs = ['DevError: Const mod path "{0}" does not exist.'
''.format(mod),
'\tpath = {0}'.format(fpath),
'\tfunction = {0}'.format(func_name)]
raise ConfigError(emsgs, level='error')
# append mods
mods.append(mod)
paths.append(fpath)
# make sure we found something
if len(mods) == 0:
emsgs = ['DevError: No config dirs found',
'\tfunction = {0}'.format(func_name)]
raise ConfigError(emsgs, level='error')
if len(mods) != len(mod_list):
        emsgs = ['DevError: Const mod scripts missing, found=[{0}]'
''.format(','.join(mods)),
'\tfunction = {0}'.format(func_name)]
raise ConfigError(emsgs, level='error')
# return modules
if path:
return paths
else:
return mods
def print_error(error):
# set function name (cannot break here --> no access to inputs)
_ = display_func(None, 'print_error', __NAME__)
# print the configuration file
print('\n')
print('=' * 70)
print(' Configuration file {0}:'.format(error.level))
print('=' * 70, '\n')
# get the error string
estring = error.message
# if error string is not a list assume it is a string and push it into
# a single element list
if type(estring) is not list:
estring = [estring]
# loop around error strings (now must be a list of strings)
for emsg in estring:
# replace new line with new line + tab
emsg = emsg.replace('\n', '\n\t')
# print to std-out
print('\t' + emsg)
# print a gap between this and next lines
print('=' * 70, '\n')
def break_point(params=None, allow=None, level=2):
# set function name (cannot break inside break function)
_ = str(__NAME__) + '.break_point()'
# if we don't have parameters load them from config file
if params is None:
params = load_config()
# force to True
params['ALLOW_BREAKPOINTS'] = True
# if allow is not set
if allow is None:
allow = params['ALLOW_BREAKPOINTS']
# if still not allowed the return
if not allow:
return
# copy pdbrc
_copy_pdb_rc(params, level=level)
# catch bdb quit
# noinspection PyPep8
try:
_execute_ipdb()
except Exception:
emsg = 'USER[00-000-00000]: Debugger breakpoint exit.'
raise drs_exceptions.DebugExit(emsg)
finally:
# delete pdbrc
_remove_pdb_rc(params)
# noinspection PyUnusedLocal
def catch_sigint(signal_received, frame):
# set function name (cannot break here --> no access to inputs)
_ = display_func(None, 'catch_sigint', __NAME__)
# raise Keyboard Interrupt
raise KeyboardInterrupt('\nSIGINT or CTRL-C detected. Exiting\n')
def window_size(drows=80, dcols=80):
# set function name (cannot break here --> no access to inputs)
_ = display_func(None, 'window_size', __NAME__)
# only works on unix operating systems
if os.name == 'posix':
        # see if we have the stty command
if shutil.which('stty') is None:
return drows, dcols
# try to open via open and split output back to rows and columns
# noinspection PyPep8,PyBroadException
try:
rows, columns = os.popen('stty size', 'r').read().split()
return int(rows), int(columns)
# if not just pass over this
except Exception:
pass
# if we are on windows we have to get window size differently
elif os.name == 'nt':
# taken from: https://gist.github.com/jtriley/1108174
# noinspection PyPep8,PyBroadException
try:
import struct
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
out = struct.unpack("hhhhHhhhhhh", csbi.raw)
left, top, right, bottom = out[5:9]
sizex = right - left + 1
sizey = bottom - top + 1
return int(sizey), int(sizex)
# if not just pass over this
except Exception:
pass
# if we have reached this point return the default number of rows
# and columns
return drows, dcols
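# Illustrative sketch (editor-added): window_size falls back to the supplied
# defaults when the terminal geometry cannot be determined:
#     rows, cols = window_size(drows=24, dcols=80)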
def display_func(params=None, name=None, program=None, class_name=None,
wlog=None, textentry=None):
# set function name (cannot break here --> no access to inputs)
func_name = str(__NAME__) + '.display_func()'
# deal with no wlog defined
if wlog is None:
wlog = drs_exceptions.wlogbasic
# deal with not text entry defined
if textentry is None:
textentry = constant_functions.DisplayText()
# start the string function
strfunc = ''
# deal with no file name
if name is None:
name = 'Unknown'
# ----------------------------------------------------------------------
# add the program
if program is not None:
strfunc = str(program)
if class_name is not None:
strfunc += '.{0}'.format(class_name)
# add the name
strfunc += '.{0}'.format(name)
# add brackets to show function
if not strfunc.endswith('()'):
strfunc += '()'
# ----------------------------------------------------------------------
# deal with adding a break point
if params is not None:
if 'INPUTS' in params and 'BREAKFUNC' in params['INPUTS']:
# get break function
breakfunc = params['INPUTS']['BREAKFUNC']
# only deal with break function if it is set
if breakfunc not in [None, 'None', '']:
# get function name (without ending)
funcname = strfunc.replace('()', '')
                # if the function name ends with the break function, break here
if funcname.endswith(breakfunc):
# log we are breaking due to break function
wargs = [breakfunc]
msg = textentry('10-005-00004', args=wargs)
wlog(params, 'warning', msg)
break_point(params, allow=True, level=3)
# ----------------------------------------------------------------------
# deal with no params (do not log)
if params is None:
return strfunc
# deal with debug level too low (just return here)
if params['DRS_DEBUG'] < params['DEBUG_MODE_FUNC_PRINT']:
return strfunc
# ----------------------------------------------------------------------
# below here just for debug mode func print
# ----------------------------------------------------------------------
# add the string function to param dict
if 'DEBUG_FUNC_LIST' not in params:
params.set('DEBUG_FUNC_LIST', value=[None], source=func_name)
if 'DEBUG_FUNC_DICT' not in params:
params.set('DEBUG_FUNC_DICT', value=dict(), source=func_name)
# append to list
params['DEBUG_FUNC_LIST'].append(strfunc)
# update debug dictionary
if strfunc in params['DEBUG_FUNC_DICT']:
params['DEBUG_FUNC_DICT'][strfunc] += 1
else:
params['DEBUG_FUNC_DICT'][strfunc] = 1
# get count
count = params['DEBUG_FUNC_DICT'][strfunc]
# find previous entry
previous = params['DEBUG_FUNC_LIST'][-2]
# find out whether we have the same entry
same_entry = previous == strfunc
# add count
strfunc += ' (N={0})'.format(count)
# if we don't have a list then just print
if params['DEBUG_FUNC_LIST'][-2] is None:
# log in func
wlog(params, 'debug', textentry('90-000-00004', args=[strfunc]),
wrap=False)
elif not same_entry:
# get previous set of counts
previous_count = _get_prev_count(params, previous)
# only log if count is greater than 1
if previous_count > 1:
# log how many of previous there were
dargs = [previous_count]
wlog(params, 'debug', textentry('90-000-00005', args=dargs))
# log in func
wlog(params, 'debug', textentry('90-000-00004', args=[strfunc]),
wrap=False)
# return func_name
return strfunc
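# Illustrative sketch (editor-added, hypothetical usage inside a recipe):
#     def my_recipe_step(params):
#         func_name = display_func(params, 'my_recipe_step', __NAME__)
#         # ... use func_name when setting sources or raising errors ...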
# =============================================================================
# Config loading private functions
# =============================================================================
def _get_file_names(params, instrument=None):
# set function name (cannot break here --> no access to inputs)
_ = display_func(params, '_get_file_names', __NAME__)
# deal with no instrument
if instrument is None:
return []
# get user environmental path
user_env = params['DRS_USERENV']
# get user default path (if environmental path unset)
user_dpath = params['DRS_USER_DEFAULT']
# get the package name
drs_package = params['DRS_PACKAGE']
# change user_dpath to a absolute path
user_dpath = get_relative_folder(drs_package, user_dpath)
# deal with no user environment and no default path
if user_env is None and user_dpath is None:
return []
# set empty directory
directory = None
# -------------------------------------------------------------------------
# User environmental path
# -------------------------------------------------------------------------
# check environmental path exists
if user_env in os.environ:
# get value
path = os.environ[user_env]
# check that directory linked exists
if os.path.exists(path):
# set directory
directory = path
# -------------------------------------------------------------------------
# if directory is not empty then we need to get instrument specific files
# -------------------------------------------------------------------------
if directory is not None:
# look for sub-directories (and if not found directory set to None so
# that we check the user default path)
source = 'environmental variables ({0})'.format(user_env)
subdir = _get_subdir(directory, instrument, source=source)
if subdir is None:
directory = None
# -------------------------------------------------------------------------
# User default path
# -------------------------------------------------------------------------
# check default path exists
if directory is None:
# check the directory linked exists
if os.path.exists(user_dpath):
# set directory
directory = user_dpath
# if directory is still empty return empty list
if directory is None:
return []
# -------------------------------------------------------------------------
# if directory is not empty then we need to get instrument specific files
# -------------------------------------------------------------------------
# look for sub-directories (This time if not found we have none and should
# return an empty set of files
source = 'default user config file ({0})'.format(user_dpath)
subdir = _get_subdir(directory, instrument, source=source)
if subdir is None:
return []
# -------------------------------------------------------------------------
# look for user configurations within instrument sub-folder
# -------------------------------------------------------------------------
files = []
for script in USCRIPTS:
# construct path
path = os.path.join(directory, subdir, script)
# check that it exists
if os.path.exists(path):
files.append(path)
# deal with no files found
if len(files) == 0:
wmsg1 = ('User config defined but instrument "{0}" directory '
'has no configurations files')
wmsg2 = '\tValid config files: {0}'.format(','.join(USCRIPTS))
ConfigWarning([wmsg1.format(instrument), wmsg2])
# return files
return files
def _get_subdir(directory, instrument, source):
# set function name (cannot break here --> no access to inputs)
    _ = display_func(None, '_get_subdir', __NAME__)
# get display text
textentry = constant_functions.DisplayText()
# set the sub directory to None initially
subdir = None
# loop around items in the directory
for filename in np.sort(os.listdir(directory)):
# check that the absolute path is a directory
cond1 = os.path.isdir(os.path.join(directory, filename))
# check that item (directory) is named the same as the instrument
cond2 = filename.lower() == instrument.lower()
# if both conditions true set the sub directory as this item
if cond1 and cond2:
subdir = filename
# deal with instrument sub-folder not found
if subdir is None:
# raise a config warning that directory not found
wargs = [source, instrument.lower(), directory]
ConfigWarning(textentry('10-002-00001', args=wargs))
# return the subdir
return subdir
def get_relative_folder(package, folder: Union[str, Path]):
"""
Get the absolute path of folder defined at relative path
folder from package
:param package: string, the python package name
:param folder: string, the relative path of the config folder
:return data: string, the absolute path and filename of the default config
file
"""
global REL_CACHE
# TODO: update to pathlib.Path
if isinstance(folder, Path):
folder = str(folder)
# set function name (cannot break here --> no access to inputs)
func_name = display_func(None, 'get_relative_folder', __NAME__)
# get text entry
textentry = constant_functions.DisplayText()
# ----------------------------------------------------------------------
# check relative folder cache
if package in REL_CACHE and folder in REL_CACHE[package]:
return REL_CACHE[package][folder]
# ----------------------------------------------------------------------
# get the package.__init__ file path
try:
init = pkg_resources.resource_filename(package, '__init__.py')
except ImportError:
eargs = [package, func_name]
raise ConfigError(textentry('00-008-00001', args=eargs), level='error')
# Get the config_folder from relative path
current = os.getcwd()
# get directory name of folder
dirname = os.path.dirname(init)
# change to directory in init
os.chdir(dirname)
# get the absolute path of the folder
data_folder = os.path.abspath(folder)
# change back to working dir
os.chdir(current)
# test that folder exists
if not os.path.exists(data_folder):
# raise exception
eargs = [os.path.basename(data_folder), os.path.dirname(data_folder)]
raise ConfigError(textentry('00-003-00005', args=eargs), level='error')
# ----------------------------------------------------------------------
# update REL_CACHE
if package not in REL_CACHE:
REL_CACHE[package] = dict()
# update entry
    REL_CACHE[package][folder] = data_folder
# ----------------------------------------------------------------------
# return the absolute data_folder path
return data_folder
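# Illustrative sketch (editor-added; the folder name is an assumption): resolve
# a path relative to the package into an absolute, cached path:
#     abspath = get_relative_folder(PACKAGE, './data/core/')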
def _load_from_module(modules, quiet=False):
# set function name (cannot break here --> no access to inputs)
func_name = display_func(None, '_load_from_module', __NAME__)
# get text entry
textentry = constant_functions.DisplayText()
# storage for returned values
keys, values, sources, instances = [], [], [], []
# loop around modules
for module in modules:
# get a list of keys values
mkeys, mvalues = constant_functions.generate_consts(module)
# loop around each value and test type
for it in range(len(mkeys)):
# get iteration values
mvalue = mvalues[it]
# get the parameter name
key = mkeys[it]
# deal with duplicate keys
if key in keys:
# raise exception
eargs = [key, module, ','.join(modules), func_name]
raise ConfigError(textentry('00-003-00006', args=eargs),
level='error')
# valid parameter
cond = mvalue.validate(quiet=quiet)
# if validated append to keys/values/sources
if cond:
keys.append(key)
values.append(mvalue.true_value)
sources.append(mvalue.source)
instances.append(mvalue)
# return keys
return keys, values, sources, instances
def _load_from_file(files, modules):
# set function name (cannot break here --> no access to inputs)
_ = display_func(None, '_load_from_file', __NAME__)
# get text entry
textentry = constant_functions.DisplayText()
# -------------------------------------------------------------------------
# load constants from file
# -------------------------------------------------------------------------
fkeys, fvalues, fsources = [], [], []
for filename in files:
# get keys/values from script
fkey, fvalue = constant_functions.get_constants_from_file(filename)
# add to fkeys and fvalues (loop around fkeys)
for it in range(len(fkey)):
# get this iterations values
fkeyi, fvaluei = fkey[it], fvalue[it]
# if this is not a new constant print warning
if fkeyi in fkeys:
# log warning message
wargs = [fkeyi, filename, ','.join(set(fsources)), filename]
ConfigWarning(textentry('10-002-00002', args=wargs),
level='warning')
# append to list
fkeys.append(fkeyi)
fvalues.append(fvaluei)
fsources.append(filename)
# -------------------------------------------------------------------------
# Now need to test the values are correct
# -------------------------------------------------------------------------
# storage for returned values
keys, values, sources, instances = [], [], [], []
# loop around modules
for module in modules:
# get a list of keys values
mkeys, mvalues = constant_functions.generate_consts(module)
# loop around each value and test type
for it in range(len(mkeys)):
# get iteration values
mvalue = mvalues[it]
# loop around the file values
for jt in range(len(fkeys)):
# if we are not dealing with the same key skip
if fkeys[jt] != mkeys[it]:
continue
# if we are then we need to validate
value = mvalue.validate(fvalues[jt], source=fsources[jt])
# now append to output lists
keys.append(fkeys[jt])
values.append(value)
sources.append(fsources[jt])
instances.append(mvalue)
# return keys values and sources
return keys, values, sources, instances
def _save_config_params(params):
# set function name (cannot break here --> no access to inputs)
func_name = display_func(params, '_save_config_params', __NAME__)
    # get sources from the parameter dictionary
sources = params.sources.values()
# get unique sources
usources = set(sources)
# set up storage
params['DRS_CONFIG'] = []
params.set_source('DRS_CONFIG', func_name)
# loop around and add to param
for source in usources:
params['DRS_CONFIG'].append(source)
# return the parameters
return params
def _check_mod_source(source: str) -> str:
# set function name (cannot break here --> no access to inputs)
_ = display_func(None, '_check_mod_source', __NAME__)
# deal with source is None
if source is None:
return source
# if source doesn't exist also skip
if not os.path.exists(source):
return source
# get package path
package_path = get_relative_folder(PACKAGE, '')
# if package path not in source then skip
if package_path not in source:
return source
# remove package path and replace with PACKAGE
source = source.replace(package_path, PACKAGE.lower())
# replace separators with .
source = source.replace(os.sep, '.')
# remove double dots
while '..' in source:
source = source.replace('..', '.')
# return edited source
return source
def _execute_ipdb():
# set function name (cannot break here --> within break function)
_ = str(__NAME__) + '._execute_ipdb()'
# start ipdb
# noinspection PyBroadException
try:
# import ipython debugger
# noinspection PyUnresolvedReferences
import ipdb
# set the ipython trace
ipdb.set_trace()
except Exception as _:
# import python debugger (standard python module)
import pdb
# set the python trace
pdb.set_trace()
# =============================================================================
# Other private functions
# =============================================================================
# capitalisation function (for case insensitive keys)
def _capitalise_key(key: str) -> str:
"""
Capitalizes "key" (used to make ParamDict case insensitive), only if
key is a string
:param key: string or object, if string then key is capitalized else
nothing is done
:return key: capitalized string (or unchanged object)
"""
# set function name (cannot break here --> no access to inputs)
_ = display_func(None, '_capitalise_key', __NAME__)
# capitalise string keys
if type(key) == str:
key = key.upper()
return key
def _string_repr_list(key: str, values: Union[list, np.ndarray], source: str,
fmt: str) -> List[str]:
"""
    Represent a list (or array) as a string, keeping only the first
    40 characters
:param key: str, the key the list (values) came from
:param values: vector, the list or numpy array to print as a string
:param source: str, the source where the values were defined
:param fmt: str, the format for the printed list
:return:
"""
# set function name (cannot break here --> no access to inputs)
    _ = display_func(None, '_string_repr_list', __NAME__)
# get the list as a string
str_value = list(values).__repr__()
# if the string is longer than 40 characters cut down and add ...
if len(str_value) > 40:
str_value = str_value[:40] + '...'
# return the string as a list entry
return [fmt.format(key, str_value, source)]
def _map_listparameter(value, separator=',', dtype=None):
"""
Map a string list into a python list
:param value: str or list, if list returns if string tries to evaluate
:param separator: str, where to split the str at to make a list
:param dtype: type, if set forces elements of list to this data type
:return:
"""
# set function name (cannot break here --> no access to inputs)
func_name = display_func(None, '_map_listparameter', __NAME__)
# get text entry
textentry = constant_functions.DisplayText()
# return list if already a list
if isinstance(value, (list, np.ndarray)):
return list(value)
    # try evaluating as a list
# noinspection PyBroadException
try:
        # evaluate value
rawvalue = eval(value)
# if it is a list return as a list
if isinstance(rawvalue, list):
return list(rawvalue)
# if it is not pass
except Exception as _:
pass
# deal with an empty value i.e. ''
if value == '':
return []
# try to return dtyped data
try:
# first split by separator
listparameter = value.split(separator)
# return the stripped down values
if dtype is not None and isinstance(dtype, type):
return list(map(lambda x: dtype(x.strip()), listparameter))
else:
return list(map(lambda x: x.strip(), listparameter))
except Exception as e:
eargs = [value, type(e), e, func_name]
BLOG(message=textentry('00-003-00002', args=eargs), level='error')
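# Illustrative sketch (editor-added): _map_listparameter accepts either an
# evaluable python list or a separator-delimited string:
#     _map_listparameter('[1, 2, 3]')            # -> [1, 2, 3]
#     _map_listparameter('1, 2, 3', dtype=int)   # -> [1, 2, 3]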
def _map_dictparameter(value: str, dtype: Union[None, Type] = None) -> dict:
"""
Map a string dictionary into a python dictionary
:param value: str, tries to evaluate string into a dictionary
i.e. "dict(a=1, b=2)" or {'a':1, 'b': 2}
:param dtype: type, if set forces elements of list to this data type
:return:
"""
# set function name (cannot break here --> no access to inputs)
func_name = display_func(None, '_map_dictparameter', __NAME__)
# get text entry
textentry = constant_functions.DisplayText()
# deal with an empty value i.e. ''
if value == '':
return dict()
    # try evaluating as a dict
try:
rawvalue = eval(value)
if isinstance(rawvalue, dict):
returndict = dict()
for key in rawvalue.keys():
if dtype is not None and isinstance(dtype, type):
returndict[key] = dtype(rawvalue[key])
else:
returndict[key] = rawvalue[key]
return returndict
except Exception as e:
eargs = [value, type(e), e, func_name]
BLOG(message=textentry('00-003-00003', args=eargs), level='error')
def _copy_pdb_rc(params, level=0):
# set function name (cannot break here --> no access to inputs)
_ = str(__NAME__) + '_copy_pdb_rc()'
# set global CURRENT_PATH
global CURRENT_PATH
# get package
package = params['DRS_PACKAGE']
# get path
path = params['DRS_PDB_RC_FILE']
filename = params['DRS_PDB_RC_FILENAME']
# get current path
CURRENT_PATH = os.getcwd()
# get absolute path
oldsrc = get_relative_folder(package, path)
tmppath = oldsrc + '_tmp'
# get newsrc
newsrc = os.path.join(CURRENT_PATH, filename)
# read the lines
with open(oldsrc, 'r') as f:
lines = f.readlines()
# deal with levels
if level == 0:
upkey = ''
else:
upkey = 'up\n' * level
# loop around lines and replace
newlines = []
for line in lines:
newlines.append(line.format(up=upkey))
# write the lines
with open(tmppath, 'w') as f:
f.writelines(newlines)
# copy
shutil.copy(tmppath, newsrc)
# remove tmp file
os.remove(tmppath)
def _remove_pdb_rc(params):
# set function name (cannot break here --> no access to inputs)
_ = str(__NAME__) + '_remove_pdb_rc()'
# get file name
filename = params['DRS_PDB_RC_FILENAME']
# get newsrc
newsrc = os.path.join(CURRENT_PATH, filename)
# remove
if os.path.exists(newsrc):
os.remove(newsrc)
def _get_prev_count(params, previous):
# set function name (cannot break here --> no access to inputs)
_ = str(__NAME__) + '._get_prev_count()'
# get the debug list
debug_list = params['DEBUG_FUNC_LIST'][:-1]
# get the number of iterations
n_elements = 0
# loop around until we get to
for row in range(len(debug_list))[::-1]:
if debug_list[row] != previous:
break
else:
n_elements += 1
# return number of element founds
return n_elements
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
# ----------------------------------------------------------------------
# print 'Hello World!'
print("Hello World!")
# =============================================================================
# End of code
# =============================================================================
|
[
"ctypes.create_string_buffer",
"copy.deepcopy",
"sys.exit",
"numpy.nanmin",
"os.remove",
"os.path.exists",
"ctypes.windll.kernel32.GetConsoleScreenBufferInfo",
"os.listdir",
"apero.core.constants.constant_functions.generate_consts",
"os.path.normpath",
"apero.core.constants.constant_functions.DisplayText",
"os.path.isdir",
"apero.core.constants.constant_functions.get_constants_from_file",
"numpy.nanmax",
"os.popen",
"apero.core.constants.constant_functions.import_module",
"ipdb.set_trace",
"shutil.which",
"os.path.dirname",
"struct.unpack",
"numpy.isnan",
"shutil.copy",
"apero.lang.drs_exceptions.DebugExit",
"os.path.join",
"pkg_resources.resource_filename",
"os.getcwd",
"os.chdir",
"ctypes.windll.kernel32.GetStdHandle",
"os.path.basename",
"pdb.set_trace",
"os.path.abspath"
] |
[((44313, 44368), 'apero.core.constants.constant_functions.import_module', 'constant_functions.import_module', (['func_name', 'modules[0]'], {}), '(func_name, modules[0])\n', (44345, 44368), False, 'from apero.core.constants import constant_functions\n'), ((61893, 61925), 'apero.core.constants.constant_functions.DisplayText', 'constant_functions.DisplayText', ([], {}), '()\n', (61923, 61925), False, 'from apero.core.constants import constant_functions\n'), ((63423, 63455), 'apero.core.constants.constant_functions.DisplayText', 'constant_functions.DisplayText', ([], {}), '()\n', (63453, 63455), False, 'from apero.core.constants import constant_functions\n'), ((64071, 64082), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (64080, 64082), False, 'import os\n'), ((64132, 64153), 'os.path.dirname', 'os.path.dirname', (['init'], {}), '(init)\n', (64147, 64153), False, 'import os\n'), ((64192, 64209), 'os.chdir', 'os.chdir', (['dirname'], {}), '(dirname)\n', (64200, 64209), False, 'import os\n'), ((64270, 64293), 'os.path.abspath', 'os.path.abspath', (['folder'], {}), '(folder)\n', (64285, 64293), False, 'import os\n'), ((64331, 64348), 'os.chdir', 'os.chdir', (['current'], {}), '(current)\n', (64339, 64348), False, 'import os\n'), ((65189, 65221), 'apero.core.constants.constant_functions.DisplayText', 'constant_functions.DisplayText', ([], {}), '()\n', (65219, 65221), False, 'from apero.core.constants import constant_functions\n'), ((66562, 66594), 'apero.core.constants.constant_functions.DisplayText', 'constant_functions.DisplayText', ([], {}), '()\n', (66592, 66594), False, 'from apero.core.constants import constant_functions\n'), ((72966, 72998), 'apero.core.constants.constant_functions.DisplayText', 'constant_functions.DisplayText', ([], {}), '()\n', (72996, 72998), False, 'from apero.core.constants import constant_functions\n'), ((74554, 74586), 'apero.core.constants.constant_functions.DisplayText', 'constant_functions.DisplayText', ([], {}), '()\n', (74584, 74586), False, 'from apero.core.constants import constant_functions\n'), ((75621, 75632), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (75630, 75632), False, 'import os\n'), ((75765, 75801), 'os.path.join', 'os.path.join', (['CURRENT_PATH', 'filename'], {}), '(CURRENT_PATH, filename)\n', (75777, 75801), False, 'import os\n'), ((76214, 76242), 'shutil.copy', 'shutil.copy', (['tmppath', 'newsrc'], {}), '(tmppath, newsrc)\n', (76225, 76242), False, 'import shutil\n'), ((76269, 76287), 'os.remove', 'os.remove', (['tmppath'], {}), '(tmppath)\n', (76278, 76287), False, 'import os\n'), ((76524, 76560), 'os.path.join', 'os.path.join', (['CURRENT_PATH', 'filename'], {}), '(CURRENT_PATH, filename)\n', (76536, 76560), False, 'import os\n'), ((76581, 76603), 'os.path.exists', 'os.path.exists', (['newsrc'], {}), '(newsrc)\n', (76595, 76603), False, 'import os\n'), ((10081, 10113), 'apero.core.constants.constant_functions.DisplayText', 'constant_functions.DisplayText', ([], {}), '()\n', (10111, 10113), False, 'from apero.core.constants import constant_functions\n'), ((44674, 44685), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (44682, 44685), False, 'import sys\n'), ((46536, 46563), 'os.path.join', 'os.path.join', (['core_path', '""""""'], {}), "(core_path, '')\n", (46548, 46563), False, 'import os\n'), ((46748, 46780), 'os.path.join', 'os.path.join', (['filepath', 'filename'], {}), '(filepath, filename)\n', (46760, 46780), False, 'import os\n'), ((48084, 48106), 'os.listdir', 'os.listdir', (['const_path'], {}), '(const_path)\n', (48094, 48106), 
False, 'import os\n'), ((48170, 48193), 'os.path.isdir', 'os.path.isdir', (['filename'], {}), '(filename)\n', (48183, 48193), False, 'import os\n'), ((48642, 48669), 'os.path.join', 'os.path.join', (['core_path', '""""""'], {}), "(core_path, '')\n", (48654, 48669), False, 'import os\n'), ((49097, 49127), 'os.path.join', 'os.path.join', (['filepath', 'script'], {}), '(filepath, script)\n', (49109, 49127), False, 'import os\n'), ((54424, 54456), 'apero.core.constants.constant_functions.DisplayText', 'constant_functions.DisplayText', ([], {}), '()\n', (54454, 54456), False, 'from apero.core.constants import constant_functions\n'), ((59227, 59247), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (59241, 59247), False, 'import os\n'), ((60189, 60215), 'os.path.exists', 'os.path.exists', (['user_dpath'], {}), '(user_dpath)\n', (60203, 60215), False, 'import os\n'), ((61208, 61247), 'os.path.join', 'os.path.join', (['directory', 'subdir', 'script'], {}), '(directory, subdir, script)\n', (61220, 61247), False, 'import os\n'), ((61290, 61310), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (61304, 61310), False, 'import os\n'), ((62059, 62080), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (62069, 62080), False, 'import os\n'), ((63813, 63868), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['package', '"""__init__.py"""'], {}), "(package, '__init__.py')\n", (63844, 63868), False, 'import pkg_resources\n'), ((64390, 64417), 'os.path.exists', 'os.path.exists', (['data_folder'], {}), '(data_folder)\n', (64404, 64417), False, 'import os\n'), ((65424, 65466), 'apero.core.constants.constant_functions.generate_consts', 'constant_functions.generate_consts', (['module'], {}), '(module)\n', (65458, 65466), False, 'from apero.core.constants import constant_functions\n'), ((66916, 66968), 'apero.core.constants.constant_functions.get_constants_from_file', 'constant_functions.get_constants_from_file', (['filename'], {}), '(filename)\n', (66958, 66968), False, 'from apero.core.constants import constant_functions\n'), ((68014, 68056), 'apero.core.constants.constant_functions.generate_consts', 'constant_functions.generate_consts', (['module'], {}), '(module)\n', (68048, 68056), False, 'from apero.core.constants import constant_functions\n'), ((69699, 69721), 'os.path.exists', 'os.path.exists', (['source'], {}), '(source)\n', (69713, 69721), False, 'import os\n'), ((70583, 70599), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (70597, 70599), False, 'import ipdb\n'), ((76613, 76630), 'os.remove', 'os.remove', (['newsrc'], {}), '(newsrc)\n', (76622, 76630), False, 'import os\n'), ((12570, 12590), 'copy.deepcopy', 'copy.deepcopy', (['value'], {}), '(value)\n', (12583, 12590), False, 'import copy\n'), ((42643, 42654), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (42651, 42654), False, 'import sys\n'), ((45208, 45250), 'apero.core.constants.constant_functions.generate_consts', 'constant_functions.generate_consts', (['module'], {}), '(module)\n', (45242, 45250), False, 'from apero.core.constants import constant_functions\n'), ((46008, 46030), 'os.listdir', 'os.listdir', (['const_path'], {}), '(const_path)\n', (46018, 46030), False, 'import os\n'), ((46106, 46129), 'os.path.isdir', 'os.path.isdir', (['filename'], {}), '(filename)\n', (46119, 46129), False, 'import os\n'), ((46287, 46308), 'os.listdir', 'os.listdir', (['core_path'], {}), '(core_path)\n', (46297, 46308), False, 'import os\n'), ((46384, 46407), 'os.path.isdir', 
'os.path.isdir', (['filename'], {}), '(filename)\n', (46397, 46407), False, 'import os\n'), ((46828, 46849), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (46842, 46849), False, 'import os\n'), ((48282, 48315), 'os.path.normpath', 'os.path.normpath', (['instrument_path'], {}), '(instrument_path)\n', (48298, 48315), False, 'import os\n'), ((48402, 48432), 'os.path.normpath', 'os.path.normpath', (['default_path'], {}), '(default_path)\n', (48418, 48432), False, 'import os\n'), ((49196, 49217), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (49210, 49217), False, 'import os\n'), ((51927, 51957), 'apero.lang.drs_exceptions.DebugExit', 'drs_exceptions.DebugExit', (['emsg'], {}), '(emsg)\n', (51951, 51957), False, 'from apero.lang import drs_exceptions\n'), ((52598, 52618), 'shutil.which', 'shutil.which', (['"""stty"""'], {}), "('stty')\n", (52610, 52618), False, 'import shutil\n'), ((62167, 62200), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (62179, 62200), False, 'import os\n'), ((64462, 64491), 'os.path.basename', 'os.path.basename', (['data_folder'], {}), '(data_folder)\n', (64478, 64491), False, 'import os\n'), ((64493, 64521), 'os.path.dirname', 'os.path.dirname', (['data_folder'], {}), '(data_folder)\n', (64508, 64521), False, 'import os\n'), ((70743, 70758), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (70756, 70758), False, 'import pdb\n'), ((39290, 39306), 'numpy.nanmin', 'np.nanmin', (['value'], {}), '(value)\n', (39299, 39306), True, 'import numpy as np\n'), ((39308, 39324), 'numpy.nanmax', 'np.nanmax', (['value'], {}), '(value)\n', (39317, 39324), True, 'import numpy as np\n'), ((43211, 43222), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (43219, 43222), False, 'import sys\n'), ((53406, 53439), 'ctypes.windll.kernel32.GetStdHandle', 'windll.kernel32.GetStdHandle', (['(-12)'], {}), '(-12)\n', (53434, 53439), False, 'from ctypes import windll, create_string_buffer\n'), ((53459, 53483), 'ctypes.create_string_buffer', 'create_string_buffer', (['(22)'], {}), '(22)\n', (53479, 53483), False, 'from ctypes import windll, create_string_buffer\n'), ((53502, 53553), 'ctypes.windll.kernel32.GetConsoleScreenBufferInfo', 'windll.kernel32.GetConsoleScreenBufferInfo', (['h', 'csbi'], {}), '(h, csbi)\n', (53544, 53553), False, 'from ctypes import windll, create_string_buffer\n'), ((30575, 30595), 'copy.deepcopy', 'copy.deepcopy', (['value'], {}), '(value)\n', (30588, 30595), False, 'import copy\n'), ((49315, 49336), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (49329, 49336), False, 'import os\n'), ((53596, 53634), 'struct.unpack', 'struct.unpack', (['"""hhhhHhhhhhh"""', 'csbi.raw'], {}), "('hhhhHhhhhhh', csbi.raw)\n", (53609, 53634), False, 'import struct\n'), ((39354, 39369), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (39362, 39369), True, 'import numpy as np\n'), ((52821, 52847), 'os.popen', 'os.popen', (['"""stty size"""', '"""r"""'], {}), "('stty size', 'r')\n", (52829, 52847), False, 'import os\n')]
|
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import animation
import seaborn as sns
import numpy as np
import cmocean
import os
from mpl_toolkits.axes_grid1 import AxesGrid
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scipy
import scipy.ndimage
from scipy.stats import norm
import matplotlib.image as mpimg
class Plotter():
def __init__(self, dic_data, deck, data_modes,
plot_deltas = False):
self.zz = deck.targetplot
plot_contour_linear = deck.doc["Plots"]["Contour Plots"]["Linear"]["Plot_it"]
plot_contour_log = deck.doc["Plots"]["Contour Plots"]["Log"]["Plot_it"]
plot_quiver = deck.doc["Plots"]["Quiver"]["Plot_it"]
plot_streamplots = deck.doc["Plots"]["Streamplots"]["Plot_it"]
gif_heatmaps = deck.doc["Plots"]["Heatmaps"]["Gif_it"]
gif_contourlin = deck.doc["Plots"]["Contour Plots"]["Linear"]["Gif_it"]
gif_contourlog = deck.doc["Plots"]["Contour Plots"]["Log"]["Gif_it"]
for self.index, dic_image in enumerate(dic_data.dataframe):
index = self.index
if plot_contour_linear.lower() == "true":
self.create_contourplot_linear(dic_data.dic_paths[index], dic_image, deck, data_modes)
if plot_contour_log.lower() == "true":
self.create_contourplot_log(dic_data.dic_paths[index], dic_image, deck, data_modes)
if plot_quiver.lower() == "true":
self.create_quiver(dic_data.dic_paths[index], dic_image, deck)
if plot_streamplots.lower() == "true":
self.create_streamplot(dic_data.dic_paths[index], dic_image, deck)
# Do we really need this ?
self.plot_dataset(dic_data.dic_paths[index], dic_image, deck)
if plot_deltas == True:
if index == 0:
pass
else:
self.plot_deltas(dic_data.dic_paths[index], dic_image, deck)
if deck.plot_heatmaps.lower() == "true":
for index2, gdf in enumerate(data_modes.grouped):
if index == index2:
self.build_deltaheatmaps(dic_data.dic_paths[index], gdf, deck, data_modes.scale_min, data_modes.scale_max)
if gif_heatmaps == "true":
self.create_heatmaps_gif(data_modes.grouped, deck, data_modes.scale_min, data_modes.scale_max)
if gif_contourlin.lower() == "true":
self.create_contourplotlin_gif(dic_data.dataframe, deck, data_modes, dic_data.dic_paths)
if gif_contourlog.lower() == "true":
self.create_contourplotlog_gif(dic_data.dataframe, deck, data_modes, dic_data.dic_paths)
def filter_NaN_Matrix(self, U, sigVal):
        # Function to limit the propagation of NaNs in the Gaussian filter that smooths the image
V=U.copy()
V[np.isnan(U)]=0
VV=scipy.ndimage.gaussian_filter(V,sigma=sigVal)
W=0*U.copy()+1
W[np.isnan(U)]=0
WW=scipy.ndimage.gaussian_filter(W,sigma=sigVal)
        np.seterr(divide='ignore', invalid='ignore')  # avoid division-by-zero warnings where the weight mask is 0
Z=VV/WW
return Z
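    # Note (added for clarity, not in the original source): this is a normalized,
    # "NaN-aware" Gaussian smoothing -- the data with NaNs zeroed is blurred, a
    # same-shaped weight mask is blurred, and their ratio restores the correct
    # local average. For example, the lone NaN in [1, NaN, 3] is filled with a
    # value near 2 instead of contaminating its neighbours.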
def create_contourplot_log(self, file_name, df, deck, data_modes):
x = list(sorted(set( df["x"].values )))
y = list(sorted(set( df["y"].values )))
img_name = file_name[0 : len(file_name) -10] + '.tif'
img = plt.imread(img_name)
fig, ax = plt.subplots(dpi=300,)
ax.imshow(img, alpha = 1, cmap = 'gray')
df.loc[df["sigma"] == -1, deck.doc["Plots"]['Target Plot'] ] = np.nan
e1 = np.array(df[deck.doc["Plots"]['Target Plot']].values)
e1 = e1.reshape(len(y), len(x))
levels = np.sort(np.append( np.append( -np.logspace(0.1, abs(data_modes.vmin_0),10) , np.linspace(-0.01,0.01,5) ), np.logspace(0.1,data_modes.vmax_0,15)))
ax.contour(x, y, e1, colors = 'k', linewidths = 0.5, levels = levels)
pcm = ax.pcolormesh(x,y,e1,norm=matplotlib.colors.SymLogNorm(linthresh=0.001, linscale=0.1, vmin=data_modes.vmin_0, vmax=data_modes.vmax_0),
cmap='plasma')
fig.colorbar(pcm, ax=ax, extend = 'both')
plt.title(deck.doc["Plots"]['Target Plot']+", "+str(self.index))
plot_dir = "./plots/"
check_folder = os.path.isdir(plot_dir)
if not check_folder:
os.makedirs(plot_dir)
plt.savefig("./plots/"+self.zz.strip('"')+"-"+file_name[:-4]+"-contourplot-log"+".png")
plt.close()
def create_contourplot_linear(self, file_name, df, deck, data_modes):
x = list(sorted(set( df["x"].values )))
y = list(sorted(set( df["y"].values )))
img_name = file_name[0 : len(file_name) -10] + '.tif'
img = plt.imread(img_name)
fig, ax = plt.subplots(dpi=300,)
ax.imshow(img, alpha = 1, cmap = 'gray')
df.loc[df["sigma"] == -1, deck.doc["Plots"]['Target Plot'] ] = np.nan
e1 = np.array(df[deck.doc["Plots"]['Target Plot']].values)
e1 = e1.reshape(len(y), len(x))
levels = np.linspace(data_modes.vmin_0, data_modes.vmax_0,10)
cs = plt.contourf(x, y, e1, origin = 'lower', extend = 'both', cmap = 'plasma', alpha = 0.5)
plt.contour(x, y, e1, levels = levels, colors = 'k', linewidths = 0.5)
fig.colorbar(cs)
plt.title(deck.doc["Plots"]['Target Plot']+", "+str(self.index))
plot_dir = "./plots/"
check_folder = os.path.isdir(plot_dir)
if not check_folder:
os.makedirs(plot_dir)
plt.savefig("./plots/"+self.zz.strip('"')+"-"+file_name[:-4]+"-contourplot-linear"+".png")
plt.close()
def create_quiver(self, file_name, df, deck):
x = list(sorted(set( df["x"].values )))
y = list(sorted(set( df["y"].values )))
df.loc[df["sigma"] == -1, "gamma" ] = np.nan
self.teta_ = np.array(df["gamma"].values)
teta_1 = np.cos(self.teta_)
self.teta_1 = teta_1.reshape(len(y), len(x))
teta_2 = np.sin(self.teta_)
self.teta_2 = teta_2.reshape(len(y), len(x))
contour_ = np.array(df[self.zz].values)
self.contour_ = contour_.reshape((len(y), len(x)))
img_name = file_name[0 : len(file_name) -10] + '.tif'
img = plt.imread(img_name)
fig, ax = plt.subplots(dpi=300)
ax.imshow(img, cmap = plt.get_cmap('gray'), alpha = 1)
skip1 = ( slice(None, None, 20))
skip2 = ( slice(None, None, 20), slice(None, None,20) )
tf1 = self.filter_NaN_Matrix(np.array(self.teta_1),7)
tf2 = self.filter_NaN_Matrix(np.array(self.teta_2),7)
contourf = self.filter_NaN_Matrix(np.array(self.contour_),7)
plt.quiver(np.array(x[skip1]),np.array(y[skip1]),tf1[skip2], tf2[skip2], contourf[skip2], cmap='plasma', scale = 50)
plt.colorbar()
plt.title(deck.doc["Plots"]['Target Plot']+", "+str(self.index))
plot_dir = "./plots/"
check_folder = os.path.isdir(plot_dir)
if not check_folder:
os.makedirs(plot_dir)
plt.savefig("./plots/"+self.zz.strip('"')+"-"+file_name[:-4]+"-quiver"+".png")
plt.close()
def create_streamplot(self, file_name, df, deck):
x = list(sorted(set( df["x"].values )))
y = list(sorted(set( df["y"].values )))
img_name = file_name[0 : len(file_name) -10] + '.tif'
img = plt.imread(img_name)
fig, ax = plt.subplots(dpi=300)
ax.imshow(img, cmap = plt.get_cmap('gray'), alpha = 1)
tf1 = self.filter_NaN_Matrix(np.array(self.teta_1),7)
tf2 = self.filter_NaN_Matrix(np.array(self.teta_2),7)
contourf = self.filter_NaN_Matrix(np.array(self.contour_),7)
fig = plt.streamplot(np.array(x), np.array(y), tf1, tf2,
color=contourf,
linewidth=1,
cmap='plasma',
density=1.3,
arrowsize=0.5)
plt.title(deck.doc["Plots"]['Target Plot']+", "+str(self.index))
plt.colorbar()
plot_dir = "./plots/"
check_folder = os.path.isdir(plot_dir)
if not check_folder:
os.makedirs(plot_dir)
plt.savefig("./plots/"+self.zz.strip('"')+"-"+file_name[:-4]+"-stream"+".png")
plt.close()
def plot_dataset(self, file_name, df, deck):
df = df.sort_index(axis=1, level='"x"', ascending=False)
x = list(sorted(set( df["x"].values )))
y = list(sorted(set( df["y"].values )))
df.loc[df["sigma"] == -1, deck.doc["Plots"]['Target Plot'] ] = np.nan
zv = 100*(df[deck.doc["Plots"]['Target Plot']].values)
zv = zv.reshape((len(y), len(x)))
fig = plt.contour(x, y, zv, levels=8, linewidths=0.4, colors="black")
cs = plt.contourf(x, y, zv, origin = 'lower', extend = 'both', cmap = 'plasma', alpha = 0.5)
cbar = plt.colorbar(cs)
cbar.ax.set_xlabel('Strain (%)')
plt.title(deck.doc["Plots"]['Target Plot'])
plt.clabel(fig, inline=0.1, fontsize=5)
plt.legend()
plot_dir = "./plots/"
check_folder = os.path.isdir(plot_dir)
if not check_folder:
os.makedirs(plot_dir)
plt.savefig("./plots/"+self.zz.strip('"')+"-"+file_name[:-3]+"_contour.png")
plt.close()
def plot_deltas(self, file_name, df, deck):
df = df.sort_index(axis=1, level='"x"', ascending=False)
x = list(sorted(set( df["x"].values )))
y = list(sorted(set( df["y"].values )))
df.loc[df["sigma"] == -1, deck.plot_inccontour_target ] = np.nan
zv = 100*(df[deck.plot_inccontour_target].values)
fig = plt.contour(x, y, zv, levels=8, linewidths=0.4, colors="black")
cs = plt.contourf(x, y, zv, origin = 'lower', extend = 'both', cmap = 'plasma', alpha = 0.5)
cbar = plt.colorbar(cs)
cbar.ax.set_xlabel('Strain (%)')
plt.title(deck.plot_inccontour_target)
plt.clabel(fig, inline=0.1, fontsize=5)
plt.legend()
plot_dir = "./plots/"
check_folder = os.path.isdir(plot_dir)
if not check_folder:
os.makedirs(plot_dir)
plt.savefig("./plots/"+self.zz.strip('"')+"-"+file_name[:-4]+"_deltas"+".png")
plt.close()
def build_deltaheatmaps(self, file_name, df, deck, vmin, vmax):
'''
        Plots a heatmap for each image with delta variations over the x and y splitting regions.
        df is a pandas DataFrame with its index set, one column and the target values.
'''
df = df.pivot('region_y', 'region_x', deck.target)
#df = df.sort_index(ascending=False)
fig, ax = plt.subplots(figsize=(9,6))
sns.set()
        # a bug in matplotlib 3.1 forces manually setting ylim to avoid cutting off the top and bottom rows
# might remove this later
sns.heatmap(df, linewidths= .5, vmin = float(vmin), vmax = float(vmax), annot = True, annot_kws={"size": 9}, cmap = cmocean.cm.curl, ax = ax)
ax.set_ylim(len(df), 0)
plot_dir = "./plots/"
check_folder = os.path.isdir(plot_dir)
if not check_folder:
os.makedirs(plot_dir)
fig.savefig( "./plots/"+self.zz.strip('"')+"-"+file_name[:-4]+"_heatmap"+".png")
plt.close()
def create_heatmaps_gif(self, dfs, deck, vmin, vmax):
#set base plotting space
fig = plt.figure(figsize=(9,6))
# create iterator
data_frames_iterator = iter(dfs)
# set up formatting of the gif later
writer='matplotlib.animation.PillowWriter'
#'imagemagick'
def update_frame(i):
plt.clf()
heatmap_data = next(data_frames_iterator)
heatmap_data = heatmap_data.pivot('region_y', 'region_x', deck.doc["Plots"]["Incremental Contour"]["Target Plot"])
ax = sns.heatmap(heatmap_data,
linewidths= 0,
vmin = float(vmin),
vmax = float(vmax),
annot = True,
annot_kws={"size": 9},
cmap = "YlGnBu",
)
            # need to manually set y_lim to avoid cropping of the top and bottom cells
ax.set_ylim(heatmap_data.shape[0], 0)
animation.FuncAnimation(fig, update_frame, frames=len(dfs)-1, interval=400).save('./plots/heatmaps.gif', writer = writer)
def create_contourplotlin_gif(self, dfs, deck, data_modes, filenames):
#set base plotting space
fig, ax = plt.subplots(dpi=200, figsize=(12,10))
x = list(sorted(set( dfs[0]["x"].values )))
y = list(sorted(set( dfs[0]["y"].values )))
# create iterator
data_frames_iterator = iter(dfs)
# set up formatting of the gif later
writer='matplotlib.animation.PillowWriter'
def update_frame_log(i):
plt.clf()
img_name = filenames[i][0 : len(filenames[i]) -10] + '.tif'
img = plt.imread(img_name)
plt.imshow(img, alpha = 1, cmap = 'gray')
df = next(data_frames_iterator)
df.loc[df["sigma"] == -1, deck.doc["Plots"]['Target Plot'] ] = np.nan
e1 = np.array(df[deck.doc["Plots"]['Target Plot']].values)
e1 = e1.reshape(len(y), len(x))
levels = np.sort(np.linspace(data_modes.vmin_0, data_modes.vmax_0,20))
cont = plt.pcolormesh(x,y,e1,vmin=data_modes.vmin_0, vmax=data_modes.vmax_0,cmap='plasma')
plt.contour(x, y, e1, levels = levels, colors = 'k', linewidths = 0.5)
plt.colorbar(cont)
return cont
animation.FuncAnimation(fig, update_frame_log, frames=len(dfs)-1, interval=600).save('./plots/contourplotlin.gif', writer = writer)
def create_contourplotlog_gif(self, dfs, deck, data_modes, filenames):
#set base plotting space
fig, ax = plt.subplots(dpi=92, figsize=(12,10))
x = list(sorted(set( dfs[0]["x"].values )))
y = list(sorted(set( dfs[0]["y"].values )))
# create iterator
data_frames_iterator = iter(dfs)
# set up formatting of the gif later
writer='matplotlib.animation.PillowWriter'
def update_frame_log(i):
plt.clf()
img_name = filenames[i][0 : len(filenames[i]) -10] + '.tif'
img = plt.imread(img_name)
plt.imshow(img, alpha = 1, cmap = 'gray')
df = next(data_frames_iterator)
df.loc[df["sigma"] == -1, deck.doc["Plots"]['Target Plot'] ] = np.nan
e1 = np.array(df[deck.doc["Plots"]['Target Plot']].values)
e1 = e1.reshape(len(y), len(x))
levels = np.sort(np.append( np.append( -np.logspace(0.1, abs(data_modes.vmin_0),10) , np.linspace(-0.01,0.01,5) ), np.logspace(0.1,data_modes.vmax_0,15)))
cont = plt.pcolormesh(x,y,e1,norm=matplotlib.colors.SymLogNorm(linthresh=0.001, linscale=0.1, vmin=data_modes.vmin_0, vmax=data_modes.vmax_0), vmin=data_modes.vmin_0, vmax=data_modes.vmax_0,cmap='plasma')
plt.contour(x, y, e1, levels = levels, colors = 'k', linewidths = 0.5)
plt.colorbar(cont)
return cont
animation.FuncAnimation(fig, update_frame_log, frames=len(dfs)-1, interval=600).save('./plots/contourplotlog.gif', writer = writer)
|
[
"matplotlib.pyplot.pcolormesh",
"numpy.array",
"scipy.ndimage.gaussian_filter",
"numpy.sin",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.contourf",
"seaborn.set",
"matplotlib.pyplot.close",
"matplotlib.pyplot.contour",
"numpy.linspace",
"os.path.isdir",
"matplotlib.pyplot.clabel",
"numpy.logspace",
"numpy.isnan",
"numpy.cos",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.get_cmap",
"os.makedirs",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"matplotlib.colors.SymLogNorm",
"numpy.seterr",
"matplotlib.pyplot.subplots"
] |
[((2947, 2993), 'scipy.ndimage.gaussian_filter', 'scipy.ndimage.gaussian_filter', (['V'], {'sigma': 'sigVal'}), '(V, sigma=sigVal)\n', (2976, 2993), False, 'import scipy\n'), ((3053, 3099), 'scipy.ndimage.gaussian_filter', 'scipy.ndimage.gaussian_filter', (['W'], {'sigma': 'sigVal'}), '(W, sigma=sigVal)\n', (3082, 3099), False, 'import scipy\n'), ((3108, 3152), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (3117, 3152), True, 'import numpy as np\n'), ((3470, 3490), 'matplotlib.pyplot.imread', 'plt.imread', (['img_name'], {}), '(img_name)\n', (3480, 3490), True, 'import matplotlib.pyplot as plt\n'), ((3509, 3530), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(300)'}), '(dpi=300)\n', (3521, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3681, 3734), 'numpy.array', 'np.array', (["df[deck.doc['Plots']['Target Plot']].values"], {}), "(df[deck.doc['Plots']['Target Plot']].values)\n", (3689, 3734), True, 'import numpy as np\n'), ((4372, 4395), 'os.path.isdir', 'os.path.isdir', (['plot_dir'], {}), '(plot_dir)\n', (4385, 4395), False, 'import os\n'), ((4565, 4576), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4574, 4576), True, 'import matplotlib.pyplot as plt\n'), ((4858, 4878), 'matplotlib.pyplot.imread', 'plt.imread', (['img_name'], {}), '(img_name)\n', (4868, 4878), True, 'import matplotlib.pyplot as plt\n'), ((4897, 4918), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(300)'}), '(dpi=300)\n', (4909, 4918), True, 'import matplotlib.pyplot as plt\n'), ((5063, 5116), 'numpy.array', 'np.array', (["df[deck.doc['Plots']['Target Plot']].values"], {}), "(df[deck.doc['Plots']['Target Plot']].values)\n", (5071, 5116), True, 'import numpy as np\n'), ((5174, 5227), 'numpy.linspace', 'np.linspace', (['data_modes.vmin_0', 'data_modes.vmax_0', '(10)'], {}), '(data_modes.vmin_0, data_modes.vmax_0, 10)\n', (5185, 5227), True, 'import numpy as np\n'), ((5241, 5320), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'e1'], {'origin': '"""lower"""', 'extend': '"""both"""', 'cmap': '"""plasma"""', 'alpha': '(0.5)'}), "(x, y, e1, origin='lower', extend='both', cmap='plasma', alpha=0.5)\n", (5253, 5320), True, 'import matplotlib.pyplot as plt\n'), ((5338, 5402), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', 'e1'], {'levels': 'levels', 'colors': '"""k"""', 'linewidths': '(0.5)'}), "(x, y, e1, levels=levels, colors='k', linewidths=0.5)\n", (5349, 5402), True, 'import matplotlib.pyplot as plt\n'), ((5562, 5585), 'os.path.isdir', 'os.path.isdir', (['plot_dir'], {}), '(plot_dir)\n', (5575, 5585), False, 'import os\n'), ((5756, 5767), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5765, 5767), True, 'import matplotlib.pyplot as plt\n'), ((6008, 6036), 'numpy.array', 'np.array', (["df['gamma'].values"], {}), "(df['gamma'].values)\n", (6016, 6036), True, 'import numpy as np\n'), ((6063, 6081), 'numpy.cos', 'np.cos', (['self.teta_'], {}), '(self.teta_)\n', (6069, 6081), True, 'import numpy as np\n'), ((6161, 6179), 'numpy.sin', 'np.sin', (['self.teta_'], {}), '(self.teta_)\n', (6167, 6179), True, 'import numpy as np\n'), ((6262, 6290), 'numpy.array', 'np.array', (['df[self.zz].values'], {}), '(df[self.zz].values)\n', (6270, 6290), True, 'import numpy as np\n'), ((6431, 6451), 'matplotlib.pyplot.imread', 'plt.imread', (['img_name'], {}), '(img_name)\n', (6441, 6451), True, 'import matplotlib.pyplot as plt\n'), ((6470, 6491), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {'dpi': '(300)'}), '(dpi=300)\n', (6482, 6491), True, 'import matplotlib.pyplot as plt\n'), ((6989, 7003), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7001, 7003), True, 'import matplotlib.pyplot as plt\n'), ((7131, 7154), 'os.path.isdir', 'os.path.isdir', (['plot_dir'], {}), '(plot_dir)\n', (7144, 7154), False, 'import os\n'), ((7315, 7326), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7324, 7326), True, 'import matplotlib.pyplot as plt\n'), ((7562, 7582), 'matplotlib.pyplot.imread', 'plt.imread', (['img_name'], {}), '(img_name)\n', (7572, 7582), True, 'import matplotlib.pyplot as plt\n'), ((7610, 7631), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(300)'}), '(dpi=300)\n', (7622, 7631), True, 'import matplotlib.pyplot as plt\n'), ((8222, 8236), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (8234, 8236), True, 'import matplotlib.pyplot as plt\n'), ((8291, 8314), 'os.path.isdir', 'os.path.isdir', (['plot_dir'], {}), '(plot_dir)\n', (8304, 8314), False, 'import os\n'), ((8474, 8485), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8483, 8485), True, 'import matplotlib.pyplot as plt\n'), ((8914, 8977), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', 'zv'], {'levels': '(8)', 'linewidths': '(0.4)', 'colors': '"""black"""'}), "(x, y, zv, levels=8, linewidths=0.4, colors='black')\n", (8925, 8977), True, 'import matplotlib.pyplot as plt\n'), ((8991, 9070), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'zv'], {'origin': '"""lower"""', 'extend': '"""both"""', 'cmap': '"""plasma"""', 'alpha': '(0.5)'}), "(x, y, zv, origin='lower', extend='both', cmap='plasma', alpha=0.5)\n", (9003, 9070), True, 'import matplotlib.pyplot as plt\n'), ((9094, 9110), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cs'], {}), '(cs)\n', (9106, 9110), True, 'import matplotlib.pyplot as plt\n'), ((9161, 9204), 'matplotlib.pyplot.title', 'plt.title', (["deck.doc['Plots']['Target Plot']"], {}), "(deck.doc['Plots']['Target Plot'])\n", (9170, 9204), True, 'import matplotlib.pyplot as plt\n'), ((9213, 9252), 'matplotlib.pyplot.clabel', 'plt.clabel', (['fig'], {'inline': '(0.1)', 'fontsize': '(5)'}), '(fig, inline=0.1, fontsize=5)\n', (9223, 9252), True, 'import matplotlib.pyplot as plt\n'), ((9261, 9273), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9271, 9273), True, 'import matplotlib.pyplot as plt\n'), ((9336, 9359), 'os.path.isdir', 'os.path.isdir', (['plot_dir'], {}), '(plot_dir)\n', (9349, 9359), False, 'import os\n'), ((9516, 9527), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9525, 9527), True, 'import matplotlib.pyplot as plt\n'), ((9896, 9959), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', 'zv'], {'levels': '(8)', 'linewidths': '(0.4)', 'colors': '"""black"""'}), "(x, y, zv, levels=8, linewidths=0.4, colors='black')\n", (9907, 9959), True, 'import matplotlib.pyplot as plt\n'), ((9973, 10052), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'zv'], {'origin': '"""lower"""', 'extend': '"""both"""', 'cmap': '"""plasma"""', 'alpha': '(0.5)'}), "(x, y, zv, origin='lower', extend='both', cmap='plasma', alpha=0.5)\n", (9985, 10052), True, 'import matplotlib.pyplot as plt\n'), ((10076, 10092), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cs'], {}), '(cs)\n', (10088, 10092), True, 'import matplotlib.pyplot as plt\n'), ((10143, 10181), 'matplotlib.pyplot.title', 'plt.title', (['deck.plot_inccontour_target'], {}), '(deck.plot_inccontour_target)\n', (10152, 
10181), True, 'import matplotlib.pyplot as plt\n'), ((10190, 10229), 'matplotlib.pyplot.clabel', 'plt.clabel', (['fig'], {'inline': '(0.1)', 'fontsize': '(5)'}), '(fig, inline=0.1, fontsize=5)\n', (10200, 10229), True, 'import matplotlib.pyplot as plt\n'), ((10238, 10250), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10248, 10250), True, 'import matplotlib.pyplot as plt\n'), ((10305, 10328), 'os.path.isdir', 'os.path.isdir', (['plot_dir'], {}), '(plot_dir)\n', (10318, 10328), False, 'import os\n'), ((10487, 10498), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10496, 10498), True, 'import matplotlib.pyplot as plt\n'), ((10903, 10931), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (10915, 10931), True, 'import matplotlib.pyplot as plt\n'), ((10939, 10948), 'seaborn.set', 'sns.set', ([], {}), '()\n', (10946, 10948), True, 'import seaborn as sns\n'), ((11310, 11333), 'os.path.isdir', 'os.path.isdir', (['plot_dir'], {}), '(plot_dir)\n', (11323, 11333), False, 'import os\n'), ((11494, 11505), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11503, 11505), True, 'import matplotlib.pyplot as plt\n'), ((11613, 11639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (11623, 11639), True, 'import matplotlib.pyplot as plt\n'), ((12828, 12867), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(200)', 'figsize': '(12, 10)'}), '(dpi=200, figsize=(12, 10))\n', (12840, 12867), True, 'import matplotlib.pyplot as plt\n'), ((14198, 14236), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(92)', 'figsize': '(12, 10)'}), '(dpi=92, figsize=(12, 10))\n', (14210, 14236), True, 'import matplotlib.pyplot as plt\n'), ((2921, 2932), 'numpy.isnan', 'np.isnan', (['U'], {}), '(U)\n', (2929, 2932), True, 'import numpy as np\n'), ((3027, 3038), 'numpy.isnan', 'np.isnan', (['U'], {}), '(U)\n', (3035, 3038), True, 'import numpy as np\n'), ((4439, 4460), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (4450, 4460), False, 'import os\n'), ((5627, 5648), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (5638, 5648), False, 'import os\n'), ((6699, 6720), 'numpy.array', 'np.array', (['self.teta_1'], {}), '(self.teta_1)\n', (6707, 6720), True, 'import numpy as np\n'), ((6761, 6782), 'numpy.array', 'np.array', (['self.teta_2'], {}), '(self.teta_2)\n', (6769, 6782), True, 'import numpy as np\n'), ((6828, 6851), 'numpy.array', 'np.array', (['self.contour_'], {}), '(self.contour_)\n', (6836, 6851), True, 'import numpy as np\n'), ((6875, 6893), 'numpy.array', 'np.array', (['x[skip1]'], {}), '(x[skip1])\n', (6883, 6893), True, 'import numpy as np\n'), ((6894, 6912), 'numpy.array', 'np.array', (['y[skip1]'], {}), '(y[skip1])\n', (6902, 6912), True, 'import numpy as np\n'), ((7198, 7219), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (7209, 7219), False, 'import os\n'), ((7733, 7754), 'numpy.array', 'np.array', (['self.teta_1'], {}), '(self.teta_1)\n', (7741, 7754), True, 'import numpy as np\n'), ((7795, 7816), 'numpy.array', 'np.array', (['self.teta_2'], {}), '(self.teta_2)\n', (7803, 7816), True, 'import numpy as np\n'), ((7862, 7885), 'numpy.array', 'np.array', (['self.contour_'], {}), '(self.contour_)\n', (7870, 7885), True, 'import numpy as np\n'), ((7927, 7938), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (7935, 7938), True, 'import numpy as np\n'), ((7940, 7951), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7948, 
7951), True, 'import numpy as np\n'), ((8357, 8378), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (8368, 8378), False, 'import os\n'), ((9401, 9422), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (9412, 9422), False, 'import os\n'), ((10370, 10391), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (10381, 10391), False, 'import os\n'), ((11375, 11396), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (11386, 11396), False, 'import os\n'), ((11869, 11878), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (11876, 11878), True, 'import matplotlib.pyplot as plt\n'), ((13182, 13191), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (13189, 13191), True, 'import matplotlib.pyplot as plt\n'), ((13283, 13303), 'matplotlib.pyplot.imread', 'plt.imread', (['img_name'], {}), '(img_name)\n', (13293, 13303), True, 'import matplotlib.pyplot as plt\n'), ((13316, 13353), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'alpha': '(1)', 'cmap': '"""gray"""'}), "(img, alpha=1, cmap='gray')\n", (13326, 13353), True, 'import matplotlib.pyplot as plt\n'), ((13503, 13556), 'numpy.array', 'np.array', (["df[deck.doc['Plots']['Target Plot']].values"], {}), "(df[deck.doc['Plots']['Target Plot']].values)\n", (13511, 13556), True, 'import numpy as np\n'), ((13704, 13795), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['x', 'y', 'e1'], {'vmin': 'data_modes.vmin_0', 'vmax': 'data_modes.vmax_0', 'cmap': '"""plasma"""'}), "(x, y, e1, vmin=data_modes.vmin_0, vmax=data_modes.vmax_0,\n cmap='plasma')\n", (13718, 13795), True, 'import matplotlib.pyplot as plt\n'), ((13800, 13864), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', 'e1'], {'levels': 'levels', 'colors': '"""k"""', 'linewidths': '(0.5)'}), "(x, y, e1, levels=levels, colors='k', linewidths=0.5)\n", (13811, 13864), True, 'import matplotlib.pyplot as plt\n'), ((13884, 13902), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cont'], {}), '(cont)\n', (13896, 13902), True, 'import matplotlib.pyplot as plt\n'), ((14551, 14560), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (14558, 14560), True, 'import matplotlib.pyplot as plt\n'), ((14652, 14672), 'matplotlib.pyplot.imread', 'plt.imread', (['img_name'], {}), '(img_name)\n', (14662, 14672), True, 'import matplotlib.pyplot as plt\n'), ((14685, 14722), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'alpha': '(1)', 'cmap': '"""gray"""'}), "(img, alpha=1, cmap='gray')\n", (14695, 14722), True, 'import matplotlib.pyplot as plt\n'), ((14872, 14925), 'numpy.array', 'np.array', (["df[deck.doc['Plots']['Target Plot']].values"], {}), "(df[deck.doc['Plots']['Target Plot']].values)\n", (14880, 14925), True, 'import numpy as np\n'), ((15367, 15431), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', 'e1'], {'levels': 'levels', 'colors': '"""k"""', 'linewidths': '(0.5)'}), "(x, y, e1, levels=levels, colors='k', linewidths=0.5)\n", (15378, 15431), True, 'import matplotlib.pyplot as plt\n'), ((15451, 15469), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cont'], {}), '(cont)\n', (15463, 15469), True, 'import matplotlib.pyplot as plt\n'), ((3899, 3938), 'numpy.logspace', 'np.logspace', (['(0.1)', 'data_modes.vmax_0', '(15)'], {}), '(0.1, data_modes.vmax_0, 15)\n', (3910, 3938), True, 'import numpy as np\n'), ((4058, 4170), 'matplotlib.colors.SymLogNorm', 'matplotlib.colors.SymLogNorm', ([], {'linthresh': '(0.001)', 'linscale': '(0.1)', 'vmin': 'data_modes.vmin_0', 'vmax': 'data_modes.vmax_0'}), '(linthresh=0.001, 
linscale=0.1, vmin=data_modes\n .vmin_0, vmax=data_modes.vmax_0)\n', (4086, 4170), False, 'import matplotlib\n'), ((6522, 6542), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (6534, 6542), True, 'import matplotlib.pyplot as plt\n'), ((7662, 7682), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (7674, 7682), True, 'import matplotlib.pyplot as plt\n'), ((13631, 13684), 'numpy.linspace', 'np.linspace', (['data_modes.vmin_0', 'data_modes.vmax_0', '(20)'], {}), '(data_modes.vmin_0, data_modes.vmax_0, 20)\n', (13642, 13684), True, 'import numpy as np\n'), ((3870, 3897), 'numpy.linspace', 'np.linspace', (['(-0.01)', '(0.01)', '(5)'], {}), '(-0.01, 0.01, 5)\n', (3881, 3897), True, 'import numpy as np\n'), ((15098, 15137), 'numpy.logspace', 'np.logspace', (['(0.1)', 'data_modes.vmax_0', '(15)'], {}), '(0.1, data_modes.vmax_0, 15)\n', (15109, 15137), True, 'import numpy as np\n'), ((15184, 15296), 'matplotlib.colors.SymLogNorm', 'matplotlib.colors.SymLogNorm', ([], {'linthresh': '(0.001)', 'linscale': '(0.1)', 'vmin': 'data_modes.vmin_0', 'vmax': 'data_modes.vmax_0'}), '(linthresh=0.001, linscale=0.1, vmin=data_modes\n .vmin_0, vmax=data_modes.vmax_0)\n', (15212, 15296), False, 'import matplotlib\n'), ((15069, 15096), 'numpy.linspace', 'np.linspace', (['(-0.01)', '(0.01)', '(5)'], {}), '(-0.01, 0.01, 5)\n', (15080, 15096), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
"""
ROS component that implements a ball detector
"""
# Import of libraries
import sys
import time
import numpy as np
from scipy.ndimage import filters
import imutils
import cv2
import roslib
import rospy
from sensor_msgs.msg import CompressedImage
from sensoring.srv import DetectImage,DetectImageResponse
## Variable for logging purpose
VERBOSE = False
class image_feature:
"""
A class used to detect a green ball
Attributes
-----
@param subscriber: variable that represents a subscriber to the camera topic
@type subscriber: Subscriber
@param resp_center: center of the ball
@type resp_center: int
@param resp_radius: radius of the ball
@type resp_radius: int
Methods
-----
getCenter():
Get the center of the ball
getRadius()
Get the radius of the ball
callback(ros_data)
Callback function of subscribed topic.
Here images get converted and features detected
"""
def __init__(self):
'''
        Constructor. Initialize the node and the attributes, subscribe to the camera topic
'''
rospy.init_node('image_detector', anonymous=True)
## ROS Subsriber object for getting the images
self.subscriber = rospy.Subscriber("/robot/camera1/image_raw/compressed",CompressedImage, self.callback, queue_size=1)
## Center of the ball
self.resp_center = -1
## Radius of the ball
self.resp_radius = -1
def getCenter(self):
'''
Get the center of the ball
@returns: center of the ball
@rtype: int
'''
return self.resp_center
def getRadius(self):
'''
Get the radius of the ball
@returns: radius of the ball
@rtype: int
'''
return self.resp_radius
def callback(self, ros_data):
'''
Callback function for converting the images and
detecting the features
'''
if VERBOSE:
print ('received image of type: "%s"' % ros_data.format)
        #### direct conversion to CV2 ####
        # note: np.fromstring is deprecated for binary buffers; np.frombuffer is the modern equivalent
        np_arr = np.fromstring(ros_data.data, np.uint8)
image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) # OpenCV >= 3.0:
greenLower = (50, 50, 20)
greenUpper = (70, 255, 255)
blurred = cv2.GaussianBlur(image_np, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, greenLower, greenUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
#cv2.imshow('mask', mask)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if radius > 10:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(image_np, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
cv2.circle(image_np, center, 5, (0, 0, 255), -1)
self.resp_center = center[0]
self.resp_radius = radius
else:
self.resp_center = -1
self.resp_radius = -1
cv2.imshow('window', image_np)
cv2.waitKey(2)
class ball_info:
"""
A class used to represent a service for providing the radius
and center of the ball
Attributes
-----
    @param ic: instance of class image_feature
@type ic: image_feature
@param s: service object
@type s: Service
Methods
-----
handle_object(req):
Received a request and reply with the center and radius
of the ball
"""
def __init__(self):
'''
        Constructor. Initialize the node and service, create an instance of the class
image_feature
'''
rospy.init_node('image_detector', anonymous=True)
## Image feature object
self.ic = image_feature()
## ROS service object
self.s = rospy.Service('detect_image', DetectImage, self.handle_object)
def handle_object(self,req):
"""
Received a request and reply with the center and radius
        of the ball (the request is empty)
@returns: radius and center of the ball
@rtype: DetectImageResponse
"""
resp = DetectImageResponse()
resp.object = str(self.ic.getCenter())+" "+str(self.ic.getRadius())
return resp
def main(args):
'''
Main function.Starting the nodes
'''
c = ball_info()
try:
rospy.spin()
except KeyboardInterrupt:
print ("Shutting down ROS Image feature detector module")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
|
[
"rospy.init_node",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.imdecode",
"cv2.erode",
"rospy.Service",
"imutils.grab_contours",
"rospy.spin",
"numpy.fromstring",
"rospy.Subscriber",
"cv2.waitKey",
"cv2.minEnclosingCircle",
"sensoring.srv.DetectImageResponse",
"cv2.circle",
"cv2.cvtColor",
"cv2.moments",
"cv2.GaussianBlur",
"cv2.inRange",
"cv2.dilate"
] |
[((5511, 5534), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5532, 5534), False, 'import cv2\n'), ((1227, 1276), 'rospy.init_node', 'rospy.init_node', (['"""image_detector"""'], {'anonymous': '(True)'}), "('image_detector', anonymous=True)\n", (1242, 1276), False, 'import rospy\n'), ((1361, 1466), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/robot/camera1/image_raw/compressed"""', 'CompressedImage', 'self.callback'], {'queue_size': '(1)'}), "('/robot/camera1/image_raw/compressed', CompressedImage,\n self.callback, queue_size=1)\n", (1377, 1466), False, 'import rospy\n'), ((2277, 2315), 'numpy.fromstring', 'np.fromstring', (['ros_data.data', 'np.uint8'], {}), '(ros_data.data, np.uint8)\n', (2290, 2315), True, 'import numpy as np\n'), ((2335, 2373), 'cv2.imdecode', 'cv2.imdecode', (['np_arr', 'cv2.IMREAD_COLOR'], {}), '(np_arr, cv2.IMREAD_COLOR)\n', (2347, 2373), False, 'import cv2\n'), ((2482, 2521), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image_np', '(11, 11)', '(0)'], {}), '(image_np, (11, 11), 0)\n', (2498, 2521), False, 'import cv2\n'), ((2536, 2576), 'cv2.cvtColor', 'cv2.cvtColor', (['blurred', 'cv2.COLOR_BGR2HSV'], {}), '(blurred, cv2.COLOR_BGR2HSV)\n', (2548, 2576), False, 'import cv2\n'), ((2592, 2632), 'cv2.inRange', 'cv2.inRange', (['hsv', 'greenLower', 'greenUpper'], {}), '(hsv, greenLower, greenUpper)\n', (2603, 2632), False, 'import cv2\n'), ((2648, 2683), 'cv2.erode', 'cv2.erode', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (2657, 2683), False, 'import cv2\n'), ((2699, 2735), 'cv2.dilate', 'cv2.dilate', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (2709, 2735), False, 'import cv2\n'), ((2906, 2933), 'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (2927, 2933), False, 'import imutils\n'), ((3952, 3982), 'cv2.imshow', 'cv2.imshow', (['"""window"""', 'image_np'], {}), "('window', image_np)\n", (3962, 3982), False, 'import cv2\n'), ((3991, 4005), 'cv2.waitKey', 'cv2.waitKey', (['(2)'], {}), '(2)\n', (4002, 4005), False, 'import cv2\n'), ((4649, 4698), 'rospy.init_node', 'rospy.init_node', (['"""image_detector"""'], {'anonymous': '(True)'}), "('image_detector', anonymous=True)\n", (4664, 4698), False, 'import rospy\n'), ((4814, 4876), 'rospy.Service', 'rospy.Service', (['"""detect_image"""', 'DetectImage', 'self.handle_object'], {}), "('detect_image', DetectImage, self.handle_object)\n", (4827, 4876), False, 'import rospy\n'), ((5168, 5189), 'sensoring.srv.DetectImageResponse', 'DetectImageResponse', ([], {}), '()\n', (5187, 5189), False, 'from sensoring.srv import DetectImage, DetectImageResponse\n'), ((5398, 5410), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (5408, 5410), False, 'import rospy\n'), ((3262, 3287), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (3284, 3287), False, 'import cv2\n'), ((3304, 3318), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (3315, 3318), False, 'import cv2\n'), ((3731, 3779), 'cv2.circle', 'cv2.circle', (['image_np', 'center', '(5)', '(0, 0, 255)', '(-1)'], {}), '(image_np, center, 5, (0, 0, 255), -1)\n', (3741, 3779), False, 'import cv2\n')]
|
import numpy as np
import openpnm as op
from porespy.filters import trim_nonpercolating_paths
import collections
def tortuosity(im, axis, return_im=False, **kwargs):
r"""
Calculates tortuosity of given image in specified direction
Parameters
----------
im : ND-image
The binary image to analyze with ``True`` indicating phase of interest
axis : int
The axis along which to apply boundary conditions
return_im : boolean
If ``True`` then the resulting tuple contains a copy of the input
image with the concentration profile.
Returns
-------
results : tuple
A named-tuple containing:
        * ``tortuosity`` calculated using the ``effective_porosity`` as
          :math:`(D_{AB}/D_{eff}) \cdot \varepsilon`.
        * ``effective_porosity`` of the image after applying
          ``trim_nonpercolating_paths``. This removes disconnected
          voxels which cause singular matrices.
* ``original_porosity`` of the image as given
* ``formation_factor`` found as :math:`D_{AB}/D_{eff}`.
* ``image`` containing the concentration values from the simulation.
This is only returned if ``return_im`` is ``True``.
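
    Examples
    --------
    A minimal usage sketch (illustrative addition, not from the original
    source; assumes ``porespy`` is importable and that the generated blob
    image percolates along axis 0)::

        import porespy as ps
        im = ps.generators.blobs(shape=[100, 100], porosity=0.7)
        res = tortuosity(im, axis=0)
        print(res.tortuosity, res.formation_factor)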
"""
if axis > (im.ndim - 1):
raise Exception("Axis argument is too high")
# Obtain original porosity
porosity_orig = im.sum()/im.size
# removing floating pores
im = trim_nonpercolating_paths(im, inlet_axis=axis, outlet_axis=axis)
    # porosity is changed because of trimming floating pores
porosity_true = im.sum()/im.size
if porosity_true < porosity_orig:
        print('Caution: the effective porosity after trimming is', porosity_true,
              'and the trimmed volume fraction is',
              abs(porosity_orig-porosity_true)*100, '%')
# cubic network generation
net = op.network.CubicTemplate(template=im, spacing=1)
# adding phase
water = op.phases.Water(network=net)
water['throat.diffusive_conductance'] = 1 # dummy value
# running Fickian Diffusion
fd = op.algorithms.FickianDiffusion(network=net, phase=water)
# choosing axis of concentration gradient
inlets = net['pore.coords'][:, axis] <= 1
outlets = net['pore.coords'][:, axis] >= im.shape[axis]-1
# boundary conditions on concentration
C_in = 1.0
C_out = 0.0
fd.set_value_BC(pores=inlets, values=C_in)
fd.set_value_BC(pores=outlets, values=C_out)
# Use specified solver if given
if 'solver_family' in kwargs.keys():
fd.settings.update(kwargs)
    else: # Use pyamg otherwise, if present
try:
import pyamg
fd.settings['solver_family'] = 'pyamg'
except ModuleNotFoundError: # Use scipy cg as last resort
fd.settings['solver_family'] = 'scipy'
fd.settings['solver_type'] = 'cg'
op.utils.tic()
fd.run()
op.utils.toc()
# calculating molar flow rate, effective diffusivity and tortuosity
rate_out = fd.rate(pores=outlets)[0]
rate_in = fd.rate(pores=inlets)[0]
if not np.allclose(-rate_out, rate_in):
raise Exception('Something went wrong, inlet and outlet rate do not match')
delta_C = C_in - C_out
L = im.shape[axis]
A = np.prod(im.shape)/L
N_A = A/L*delta_C
Deff = rate_in/N_A
tau = porosity_true/(Deff)
result = collections.namedtuple('tortuosity_result', ['tortuosity',
'effective_porosity',
'original_porosity',
'formation_factor',
'image'])
result.tortuosity = tau
result.formation_factor = 1/Deff
result.original_porosity = porosity_orig
result.effective_porosity = porosity_true
if return_im:
conc = np.zeros([im.size, ], dtype=float)
conc[net['pore.template_indices']] = fd['pore.concentration']
conc = np.reshape(conc, newshape=im.shape)
result.image = conc
else:
result.image = None
return result
|
[
"numpy.prod",
"openpnm.utils.toc",
"openpnm.phases.Water",
"openpnm.utils.tic",
"collections.namedtuple",
"numpy.allclose",
"numpy.reshape",
"porespy.filters.trim_nonpercolating_paths",
"numpy.zeros",
"openpnm.algorithms.FickianDiffusion",
"openpnm.network.CubicTemplate"
] |
[((1416, 1480), 'porespy.filters.trim_nonpercolating_paths', 'trim_nonpercolating_paths', (['im'], {'inlet_axis': 'axis', 'outlet_axis': 'axis'}), '(im, inlet_axis=axis, outlet_axis=axis)\n', (1441, 1480), False, 'from porespy.filters import trim_nonpercolating_paths\n'), ((1819, 1867), 'openpnm.network.CubicTemplate', 'op.network.CubicTemplate', ([], {'template': 'im', 'spacing': '(1)'}), '(template=im, spacing=1)\n', (1843, 1867), True, 'import openpnm as op\n'), ((1899, 1927), 'openpnm.phases.Water', 'op.phases.Water', ([], {'network': 'net'}), '(network=net)\n', (1914, 1927), True, 'import openpnm as op\n'), ((2030, 2086), 'openpnm.algorithms.FickianDiffusion', 'op.algorithms.FickianDiffusion', ([], {'network': 'net', 'phase': 'water'}), '(network=net, phase=water)\n', (2060, 2086), True, 'import openpnm as op\n'), ((2825, 2839), 'openpnm.utils.tic', 'op.utils.tic', ([], {}), '()\n', (2837, 2839), True, 'import openpnm as op\n'), ((2857, 2871), 'openpnm.utils.toc', 'op.utils.toc', ([], {}), '()\n', (2869, 2871), True, 'import openpnm as op\n'), ((3319, 3454), 'collections.namedtuple', 'collections.namedtuple', (['"""tortuosity_result"""', "['tortuosity', 'effective_porosity', 'original_porosity',\n 'formation_factor', 'image']"], {}), "('tortuosity_result', ['tortuosity',\n 'effective_porosity', 'original_porosity', 'formation_factor', 'image'])\n", (3341, 3454), False, 'import collections\n'), ((3035, 3066), 'numpy.allclose', 'np.allclose', (['(-rate_out)', 'rate_in'], {}), '(-rate_out, rate_in)\n', (3046, 3066), True, 'import numpy as np\n'), ((3210, 3227), 'numpy.prod', 'np.prod', (['im.shape'], {}), '(im.shape)\n', (3217, 3227), True, 'import numpy as np\n'), ((3872, 3904), 'numpy.zeros', 'np.zeros', (['[im.size]'], {'dtype': 'float'}), '([im.size], dtype=float)\n', (3880, 3904), True, 'import numpy as np\n'), ((3992, 4027), 'numpy.reshape', 'np.reshape', (['conc'], {'newshape': 'im.shape'}), '(conc, newshape=im.shape)\n', (4002, 4027), True, 'import numpy as np\n')]
|
import unittest
import numpy
from oo_trees.dataset import *
from oo_trees.decision_tree import *
from oo_trees.attribute import *
class TestDecisionTree(unittest.TestCase):
def test_classification(self):
X = numpy.array([[0, 1], [0, 0], [1, 0], [1, 1]])
y = numpy.array(['H', 'H', 'H', 'T'])
dataset = Dataset(X, y)
tree = DecisionTree(dataset)
self.assertEqual(len(tree.branches), 2)
self.assertEqual(len(tree.branches[1].branches), 0)
self.assertEqual(len(tree.branches[0].branches), 2)
self.assertEqual(len(tree.branches[0].branches[1].branches), 0)
self.assertEqual(len(tree.branches[0].branches[0].branches), 0)
self.assertEqual(tree.classify([0, 0]), 'H')
self.assertEqual(tree.classify([0, 1]), 'H')
self.assertEqual(tree.classify([1, 0]), 'H')
self.assertEqual(tree.classify([1, 1]), 'T')
self.assertEqual(tree.classify([2, 0]), 'H') # it can handle unknown values too
def test_min_points(self):
X = numpy.array([[0], [1], [1]])
y = numpy.array(['H', 'T', 'T'])
dataset = Dataset(X, y)
tree = DecisionTree(dataset, min_samples_split=0)
self.assertEqual(len(tree.branches), 2)
tree = DecisionTree(dataset, min_samples_split=5)
self.assertEqual(len(tree.branches), 0)
self.assertEqual(tree.leaf_value(), 'T')
def test_max_depth(self):
X = numpy.array([[0], [1], [1]])
y = numpy.array(['H', 'T', 'T'])
dataset = Dataset(X, y)
tree = DecisionTree(dataset, max_depth=3)
self.assertEqual(len(tree.branches), 2)
numpy.testing.assert_array_equal([2, 2],
[t.depth for t in tree.branches.values()])
tree = DecisionTree(dataset, max_depth=1)
self.assertEqual(len(tree.branches), 0)
self.assertEqual(tree.leaf_value(), 'T')
def test_performance_on(self):
# x1 < 0.25 => 'a'
# x1 >= 0.25, x2 = 0 => 'b'
# x1 < 0.50, x2 = 1 => 'c'
# x1 >= 0.50, x2 = 1 => 'a'
Xtrain = numpy.array([[0.15, 0], [0.232, 1], [0.173, 0], [0.263, 0], [0.671, 0], [0.9, 0], [0.387, 1], [0.482, 1], [0.632, 1], [0.892, 1]])
ytrain = numpy.array([ 'a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'a', 'a'])
training_dataset = Dataset(Xtrain, ytrain, [NumericAttribute(0), CategoricalAttribute(1)])
tree = DecisionTree(training_dataset)
# expecting
# Real
# a b c
#Pred a 2 0 2
#
# b 1 2 0
#
# c 1 0 2
# accuracy: 6/10
# a,a a,a a,c a,c b,a b,b b,b c,a c,c c,c
Xtest = numpy.array([[0.13, 0], [0.73, 1], [0.47, 1], [0.33, 1], [0.7, 1], [0.3, 0], [0.5, 0], [0.1, 1], [0.476, 1], [0.265, 1]])
ytest = numpy.array([ 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'])
test_dataset = Dataset(Xtest, ytest, [NumericAttribute(0), CategoricalAttribute(1)])
performance = tree.performance_on(test_dataset)
self.assertEqual(performance.accuracy, 0.6)
numpy.testing.assert_array_equal(performance.to_array(), [[2,0,2], [1,2,0], [1,0,2]])
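
# Hedged addition, not in the original test file: a standard unittest entry point
# so the module can be run directly from the command line.
if __name__ == '__main__':
    unittest.main()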
|
[
"numpy.array"
] |
[((221, 266), 'numpy.array', 'numpy.array', (['[[0, 1], [0, 0], [1, 0], [1, 1]]'], {}), '([[0, 1], [0, 0], [1, 0], [1, 1]])\n', (232, 266), False, 'import numpy\n'), ((279, 312), 'numpy.array', 'numpy.array', (["['H', 'H', 'H', 'T']"], {}), "(['H', 'H', 'H', 'T'])\n", (290, 312), False, 'import numpy\n'), ((1039, 1067), 'numpy.array', 'numpy.array', (['[[0], [1], [1]]'], {}), '([[0], [1], [1]])\n', (1050, 1067), False, 'import numpy\n'), ((1080, 1108), 'numpy.array', 'numpy.array', (["['H', 'T', 'T']"], {}), "(['H', 'T', 'T'])\n", (1091, 1108), False, 'import numpy\n'), ((1445, 1473), 'numpy.array', 'numpy.array', (['[[0], [1], [1]]'], {}), '([[0], [1], [1]])\n', (1456, 1473), False, 'import numpy\n'), ((1486, 1514), 'numpy.array', 'numpy.array', (["['H', 'T', 'T']"], {}), "(['H', 'T', 'T'])\n", (1497, 1514), False, 'import numpy\n'), ((2089, 2224), 'numpy.array', 'numpy.array', (['[[0.15, 0], [0.232, 1], [0.173, 0], [0.263, 0], [0.671, 0], [0.9, 0], [\n 0.387, 1], [0.482, 1], [0.632, 1], [0.892, 1]]'], {}), '([[0.15, 0], [0.232, 1], [0.173, 0], [0.263, 0], [0.671, 0], [\n 0.9, 0], [0.387, 1], [0.482, 1], [0.632, 1], [0.892, 1]])\n', (2100, 2224), False, 'import numpy\n'), ((2237, 2300), 'numpy.array', 'numpy.array', (["['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'a', 'a']"], {}), "(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'a', 'a'])\n", (2248, 2300), False, 'import numpy\n'), ((2855, 2980), 'numpy.array', 'numpy.array', (['[[0.13, 0], [0.73, 1], [0.47, 1], [0.33, 1], [0.7, 1], [0.3, 0], [0.5, 0],\n [0.1, 1], [0.476, 1], [0.265, 1]]'], {}), '([[0.13, 0], [0.73, 1], [0.47, 1], [0.33, 1], [0.7, 1], [0.3, 0],\n [0.5, 0], [0.1, 1], [0.476, 1], [0.265, 1]])\n', (2866, 2980), False, 'import numpy\n'), ((2993, 3056), 'numpy.array', 'numpy.array', (["['a', 'a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']"], {}), "(['a', 'a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'])\n", (3004, 3056), False, 'import numpy\n')]
|
import numpy as np
k_i = np.array([0.20, 0.22, 0.78, 0.80,
0.30, 0.32, 0.96, 1.00,
1.20, 1.43, 1.80, 1.88,
0.40, 0.50, 3.24, 3.50,
0.38, 0.43, 2.24, 4.90,
0.40, 0.44, 1.22, 4.00,
0.39, 0.44, 0.96, 1.80,
0.39, 0.45, 0.80, 1.60,
0.40, 0.47, 0.60, 1.60], dtype=float)
c_i = np.linspace(0, 160, 9)
t_i = np.array([16, 25, 50, 75], dtype=float)
def phi_ij(c_ii, c_j, t_ii, t_j):
return np.sqrt(1 + (c_ii - c_j)**2 + (t_ii - t_j)**2)
def calculate_aj():
b_ij = np.zeros((36, 36), dtype=float)
i = 0
for c_j_val in c_i:
for t_j_val in t_i:
j = 0
for c_i_val in c_i:
for t_i_val in t_i:
b_ij[i, j] = phi_ij(c_i_val, c_j_val, t_i_val, t_j_val)
j += 1
i += 1
a_ij = np.linalg.solve(b_ij, k_i)
return a_ij
def tk_ct(a_ij, c, t):
i = 0
function_value = 0
for c_j in c_i:
for t_j in t_i:
function_value += a_ij[i] * phi_ij(c, c_j, t, t_j)
i += 1
return function_value
def check():
a_ij = calculate_aj()
k_test = np.zeros(36, dtype=float)
i = 0
for c in c_i:
for t in t_i:
k_test[i] = tk_ct(a_ij, c, t)
i += 1
print(k_test)
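
# Hedged usage sketch, not part of the original module: running the file directly
# solves the 36x36 collocation system for the RBF coefficients and prints the
# surrogate's values at the interpolation nodes, which should reproduce k_i.
if __name__ == '__main__':
    check()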
|
[
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.array",
"numpy.linspace",
"numpy.zeros"
] |
[((26, 255), 'numpy.array', 'np.array', (['[0.2, 0.22, 0.78, 0.8, 0.3, 0.32, 0.96, 1.0, 1.2, 1.43, 1.8, 1.88, 0.4, 0.5,\n 3.24, 3.5, 0.38, 0.43, 2.24, 4.9, 0.4, 0.44, 1.22, 4.0, 0.39, 0.44, \n 0.96, 1.8, 0.39, 0.45, 0.8, 1.6, 0.4, 0.47, 0.6, 1.6]'], {'dtype': 'float'}), '([0.2, 0.22, 0.78, 0.8, 0.3, 0.32, 0.96, 1.0, 1.2, 1.43, 1.8, 1.88,\n 0.4, 0.5, 3.24, 3.5, 0.38, 0.43, 2.24, 4.9, 0.4, 0.44, 1.22, 4.0, 0.39,\n 0.44, 0.96, 1.8, 0.39, 0.45, 0.8, 1.6, 0.4, 0.47, 0.6, 1.6], dtype=float)\n', (34, 255), True, 'import numpy as np\n'), ((401, 423), 'numpy.linspace', 'np.linspace', (['(0)', '(160)', '(9)'], {}), '(0, 160, 9)\n', (412, 423), True, 'import numpy as np\n'), ((430, 469), 'numpy.array', 'np.array', (['[16, 25, 50, 75]'], {'dtype': 'float'}), '([16, 25, 50, 75], dtype=float)\n', (438, 469), True, 'import numpy as np\n'), ((517, 567), 'numpy.sqrt', 'np.sqrt', (['(1 + (c_ii - c_j) ** 2 + (t_ii - t_j) ** 2)'], {}), '(1 + (c_ii - c_j) ** 2 + (t_ii - t_j) ** 2)\n', (524, 567), True, 'import numpy as np\n'), ((597, 628), 'numpy.zeros', 'np.zeros', (['(36, 36)'], {'dtype': 'float'}), '((36, 36), dtype=float)\n', (605, 628), True, 'import numpy as np\n'), ((913, 939), 'numpy.linalg.solve', 'np.linalg.solve', (['b_ij', 'k_i'], {}), '(b_ij, k_i)\n', (928, 939), True, 'import numpy as np\n'), ((1233, 1258), 'numpy.zeros', 'np.zeros', (['(36)'], {'dtype': 'float'}), '(36, dtype=float)\n', (1241, 1258), True, 'import numpy as np\n')]
|
import numpy as np
def fit_MRF_pseudolikelihood(adj_exc,adj_inh,y):
'''
Fit a Markov random field using maximum pseudolikelihood estimation,
also known as logistic regression. The conditional probabilities
follow
    y_i ~ Logistic(B[0] + B[1] A1_{ij} y_j + B[2] A1_{ij} (1-y_j)
          + B[3] A2_{ij} y_j + B[4] A2_{ij} (1-y_j) ),
where A1 = adj_exc and A2 = adj_inh and each term is summed over
j.
Params
======
adj_exc: excitatory adjacency matrix
adj_inh: inhibitory adjacency matrix
y: site variables, 0 or 1
Returns
=======
B: logistic regression coefficients
'''
    from sklearn.linear_model import LogisticRegression
if len(np.unique(y)) < 2:
B=np.array([np.nan,np.nan,np.nan,np.nan,np.nan])
else:
N=y.shape[0]
ytile=np.tile(y,(N,1)).T
X1=np.array(np.sum(np.multiply(adj_exc,ytile),0)).flatten()
X2=np.array(np.sum(np.multiply(adj_exc,1-ytile),0)).flatten()
X3=np.array(np.sum(np.multiply(adj_inh,ytile),0)).flatten()
X4=np.array(np.sum(np.multiply(adj_inh,1-ytile),0)).flatten()
model=LogisticRegression(penalty='l2')
X=np.column_stack((X1,X2,X3,X4))
model.fit(X,y)
B=np.hstack((model.intercept_, model.coef_.flatten()))
return B
def predict_MRF(B, adj_exc, adj_inh, burn_in=4e3, steps=1e4,
skip_multiple=3):
'''
Perform prediction with an MRF (Markov random field). Uses Gibbs sampling
to sample from the distribution P(y) =1/Z exp( -H(y) ).
The Hamiltonian is:
    H = \sum_{ij} y_i (B[0] + B[1] A1_{ji} y_j + B[2] A1_{ji} (1-y_j)
        + B[3] A2_{ji} y_j + B[4] A2_{ji} (1-y_j))
Params
======
B: coefficients of the MRF
adj_exc: excitatory adjacency matrix
adj_inh: inhibitory adjacency matrix
burn_in: number of burn-in steps to take (default: 4000)
steps: total number of Gibbs steps to take (default: 10000)
skip_multiple: skips skip_multiple * num_neuron steps between samples
Returns
=======
ypostmean: posterior mean of state
'''
import numpy.random
def gibbs_proba(y,B,adj_exc,adj_inh):
        term0=B[1]*np.dot(adj_exc.T,y)
        term1=B[2]*np.dot(adj_exc.T,(1-y))
        term2=B[3]*np.dot(adj_inh.T,y)
        term3=B[4]*np.dot(adj_inh.T,(1-y))
        e=B[0]+term0+term1+term2+term3
return np.exp(e)/(np.exp(e)+1.0)
N=adj_exc.shape[0]
steps=int(steps)
# run a Gibbs sampler
y=np.random.rand(N,1)
samples=np.zeros((N,steps))
# zero diagonals (should be 0 already)
np.fill_diagonal(adj_exc,0)
np.fill_diagonal(adj_inh,0)
for ii in range(steps):
yt=y
# Gibbs update
proba=gibbs_proba(y,B,adj_exc,adj_inh)
        yt=np.array(np.random.rand(N,1) < proba,dtype=float)
y=yt
samples[:,ii]=y.flatten()
# compute mle
use_indices=np.arange(burn_in,steps,skip_multiple*N, dtype=int)
final_samples=samples[:,use_indices]
ypostmean=np.mean(final_samples,axis=1)
return ypostmean
def fit_logistic_graph_features():
pass
def get_node_features(adj_exc,adj_inh,normalize_centrality=True):
'''
Get node-based features to train logistic classifier
Params
======
adj_exc: excitatory adjacency matrix
adj_inh: inhibitory adjacency matrix
normalize_centrality: normalize relevant measures? (default: True)
Returns
=======
X: numneuron x numfeatures array to be used with logistic regression
X_labels
'''
import networkx as nx
G_exc=nx.DiGraph(adj_exc)
G_inh=nx.DiGraph(adj_inh)
def dict_to_array(d):
return np.array([d[i] for i in sorted(d)])
def features(G,normalize_centrality):
'''
Returns the features we are interested in within a dict
'''
load_centrality=nx.load_centrality(G,normalized=normalize_centrality)
betweenness_centrality=nx.betweenness_centrality(G,normalized=normalize_centrality)
eigenvector_centrality=nx.eigenvector_centrality_numpy(G,normalized=normalize_centrality)
closeness_centrality=nx.closeness_centrality(G,normalized=normalize_centrality)
in_degree=G.in_degree()
out_degree=G.out_degree()
core_number=nx.core_number(G)
clustering=nx.clustering(G)
d={}
d['in_degree']=in_degree
d['out_degree']=out_degree
d['load_centrality']=load_centrality
        d['betweenness_centrality']=betweenness_centrality
d['eigenvector_centrality']=eigenvector_centrality
d['closeness_centrality']=closeness_centrality
d['core_number']=core_number
return d
# grab the features
    d_exc=features(G_exc,normalize_centrality)
    d_inh=features(G_inh,normalize_centrality)
# setup some structures
num_features=len(d_exc)+len(d_inh)
    num_nodes=G_exc.number_of_nodes()
    X=np.zeros((num_nodes,num_features),dtype=float)
X_labels=[]
# fill in X and Xlabels
feature_index=0
for gclass in ('exc','inh'):
if gclass == 'exc':
d=d_exc
else:
d=d_inh
for feature in sorted(d):
X_labels.append(feature+"_"+gclass)
X[:,feature_index]=dict_to_array(d[feature])
feature_index+=1
return X, X_labels
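
# Hedged usage sketch, not part of the original module: the matrix size, sparsity
# and Gibbs step counts below are illustrative assumptions chosen so the example
# runs quickly; they are not values taken from the source.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    n = 30
    adj_exc = (rng.rand(n, n) < 0.2).astype(float)   # random excitatory adjacency
    adj_inh = (rng.rand(n, n) < 0.2).astype(float)   # random inhibitory adjacency
    y = (rng.rand(n) < 0.5).astype(float)              # binary site variables
    B = fit_MRF_pseudolikelihood(adj_exc, adj_inh, y)
    if not np.any(np.isnan(B)):
        ypost = predict_MRF(B, adj_exc, adj_inh, burn_in=200, steps=2000)
        print("posterior mean occupancy:", ypost.mean())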
|
[
"numpy.random.rand",
"numpy.column_stack",
"numpy.array",
"networkx.closeness_centrality",
"networkx.betweenness_centrality",
"numpy.arange",
"networkx.eigenvector_centrality_numpy",
"numpy.mean",
"numpy.multiply",
"networkx.clustering",
"networkx.DiGraph",
"numpy.exp",
"numpy.dot",
"networkx.load_centrality",
"numpy.tile",
"numpy.fill_diagonal",
"networkx.core_number",
"numpy.unique",
"sklearn.linear_model.LogisticRegression",
"numpy.zeros"
] |
[((2585, 2605), 'numpy.random.rand', 'np.random.rand', (['N', '(1)'], {}), '(N, 1)\n', (2599, 2605), True, 'import numpy as np\n'), ((2617, 2637), 'numpy.zeros', 'np.zeros', (['(N, steps)'], {}), '((N, steps))\n', (2625, 2637), True, 'import numpy as np\n'), ((2684, 2712), 'numpy.fill_diagonal', 'np.fill_diagonal', (['adj_exc', '(0)'], {}), '(adj_exc, 0)\n', (2700, 2712), True, 'import numpy as np\n'), ((2717, 2745), 'numpy.fill_diagonal', 'np.fill_diagonal', (['adj_inh', '(0)'], {}), '(adj_inh, 0)\n', (2733, 2745), True, 'import numpy as np\n'), ((3001, 3056), 'numpy.arange', 'np.arange', (['burn_in', 'steps', '(skip_multiple * N)'], {'dtype': 'int'}), '(burn_in, steps, skip_multiple * N, dtype=int)\n', (3010, 3056), True, 'import numpy as np\n'), ((3108, 3138), 'numpy.mean', 'np.mean', (['final_samples'], {'axis': '(1)'}), '(final_samples, axis=1)\n', (3115, 3138), True, 'import numpy as np\n'), ((3681, 3700), 'networkx.DiGraph', 'nx.DiGraph', (['adj_exc'], {}), '(adj_exc)\n', (3691, 3700), True, 'import networkx as nx\n'), ((3711, 3730), 'networkx.DiGraph', 'nx.DiGraph', (['adj_inh'], {}), '(adj_inh)\n', (3721, 3730), True, 'import networkx as nx\n'), ((4975, 5026), 'numpy.zeros', 'np.zeros', (['(num_nodes, num_features)'], {'dtype': 'np.float'}), '((num_nodes, num_features), dtype=np.float)\n', (4983, 5026), True, 'import numpy as np\n'), ((791, 841), 'numpy.array', 'np.array', (['[np.nan, np.nan, np.nan, np.nan, np.nan]'], {}), '([np.nan, np.nan, np.nan, np.nan, np.nan])\n', (799, 841), True, 'import numpy as np\n'), ((1192, 1224), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""'}), "(penalty='l2')\n", (1210, 1224), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1235, 1268), 'numpy.column_stack', 'np.column_stack', (['(X1, X2, X3, X4)'], {}), '((X1, X2, X3, X4))\n', (1250, 1268), True, 'import numpy as np\n'), ((3968, 4022), 'networkx.load_centrality', 'nx.load_centrality', (['G'], {'normalized': 'normalize_centrality'}), '(G, normalized=normalize_centrality)\n', (3986, 4022), True, 'import networkx as nx\n'), ((4053, 4114), 'networkx.betweenness_centrality', 'nx.betweenness_centrality', (['G'], {'normalized': 'normalize_centrality'}), '(G, normalized=normalize_centrality)\n', (4078, 4114), True, 'import networkx as nx\n'), ((4145, 4212), 'networkx.eigenvector_centrality_numpy', 'nx.eigenvector_centrality_numpy', (['G'], {'normalized': 'normalize_centrality'}), '(G, normalized=normalize_centrality)\n', (4176, 4212), True, 'import networkx as nx\n'), ((4241, 4300), 'networkx.closeness_centrality', 'nx.closeness_centrality', (['G'], {'normalized': 'normalize_centrality'}), '(G, normalized=normalize_centrality)\n', (4264, 4300), True, 'import networkx as nx\n'), ((4386, 4403), 'networkx.core_number', 'nx.core_number', (['G'], {}), '(G)\n', (4400, 4403), True, 'import networkx as nx\n'), ((4423, 4439), 'networkx.clustering', 'nx.clustering', (['G'], {}), '(G)\n', (4436, 4439), True, 'import networkx as nx\n'), ((762, 774), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (771, 774), True, 'import numpy as np\n'), ((883, 901), 'numpy.tile', 'np.tile', (['y', '(N, 1)'], {}), '(y, (N, 1))\n', (890, 901), True, 'import numpy as np\n'), ((2279, 2299), 'numpy.dot', 'np.dot', (['adj_exc.T', 'y'], {}), '(adj_exc.T, y)\n', (2285, 2299), True, 'import numpy as np\n'), ((2318, 2342), 'numpy.dot', 'np.dot', (['adj_exc.T', '(1 - y)'], {}), '(adj_exc.T, 1 - y)\n', (2324, 2342), True, 'import numpy as np\n'), ((2361, 2381), 'numpy.dot', 
'np.dot', (['adj_inh.T', 'y'], {}), '(adj_inh.T, y)\n', (2367, 2381), True, 'import numpy as np\n'), ((2400, 2424), 'numpy.dot', 'np.dot', (['adj_inh.T', '(1 - y)'], {}), '(adj_inh.T, 1 - y)\n', (2406, 2424), True, 'import numpy as np\n'), ((2478, 2487), 'numpy.exp', 'np.exp', (['e'], {}), '(e)\n', (2484, 2487), True, 'import numpy as np\n'), ((2489, 2498), 'numpy.exp', 'np.exp', (['e'], {}), '(e)\n', (2495, 2498), True, 'import numpy as np\n'), ((2876, 2896), 'numpy.random.rand', 'np.random.rand', (['N', '(1)'], {}), '(N, 1)\n', (2890, 2896), True, 'import numpy as np\n'), ((929, 956), 'numpy.multiply', 'np.multiply', (['adj_exc', 'ytile'], {}), '(adj_exc, ytile)\n', (940, 956), True, 'import numpy as np\n'), ((997, 1028), 'numpy.multiply', 'np.multiply', (['adj_exc', '(1 - ytile)'], {}), '(adj_exc, 1 - ytile)\n', (1008, 1028), True, 'import numpy as np\n'), ((1067, 1094), 'numpy.multiply', 'np.multiply', (['adj_inh', 'ytile'], {}), '(adj_inh, ytile)\n', (1078, 1094), True, 'import numpy as np\n'), ((1135, 1166), 'numpy.multiply', 'np.multiply', (['adj_inh', '(1 - ytile)'], {}), '(adj_inh, 1 - ytile)\n', (1146, 1166), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
import treegp
from treegp_test_helper import timer
from treegp_test_helper import get_correlation_length_matrix
from treegp_test_helper import make_1d_grf
from treegp_test_helper import make_2d_grf
@timer
def test_hyperparameter_search_1d():
optimizer = ['log-likelihood', 'two-pcf']
npoints = [100, 2000]
noise = 0.01
sigma = [1., 2., 1., 2.]
l = [0.5, 0.8, 8., 10.]
kernels = ['RBF', 'RBF', 'VonKarman', 'VonKarman']
max_sep = [1.75, 1.75, 1.25, 1.25]
for n, opt in enumerate(optimizer):
for i, ker in enumerate(kernels):
# Generate 1D gaussian random fields.
kernel = "%f**2 * %s(%f)"%((sigma[i], ker, l[i]))
kernel_skl = treegp.eval_kernel(kernel)
x, y, y_err = make_1d_grf(kernel_skl,
noise=noise,
seed=42, npoints=npoints[n])
            # Do gp interpolation without hyperparameter
# fitting (truth is put initially).
gp = treegp.GPInterpolation(kernel=kernel, optimizer=opt,
normalize=True, nbins=15, min_sep=0.1,
max_sep=max_sep[i])
gp.initialize(x, y, y_err=y_err)
gp.solve()
            # test if found hyperparameters are close to the true hyperparameters.
np.testing.assert_allclose(kernel_skl.theta, gp.kernel.theta, atol=7e-1)
if opt == "two-pcf":
xi, xi_weight, distance, coord, mask = gp.return_2pcf()
np.testing.assert_allclose(xi, gp._optimizer._2pcf, atol=1e-10)
if opt == "log-likelihood":
logL = gp.return_log_likelihood()
np.testing.assert_allclose(logL, gp._optimizer._logL, atol=1e-10)
# Predict at same position as the simulated data.
            # Predictions are strictly equal to the input data
            # in the case of no noise. With noise you should expect
            # to have a pull distribution with mean around 0
            # and a std < 1 (you use the same data to train and validate, and
            # the data are well sampled compared to the input correlation
            # length).
y_predict, y_cov = gp.predict(x, return_cov=True)
y_std = np.sqrt(np.diag(y_cov))
pull = y - y_predict
mean_pull = np.mean(pull)
std_pull = np.std(pull)
            # Test that the mean of the pull is close to zero and the std of the pull is below 1.
np.testing.assert_allclose(0., mean_pull, atol=3.*(std_pull)/np.sqrt(npoints[n]))
if std_pull > 1.:
raise ValueError("std_pull is > 1. Current value std_pull = %f"%(std_pull))
# Test that for extrapolation, interpolation is the mean function (0 here)
            # and the diagonal of the covariance matrix is close to the hyperparameter
            # linked to the amplitude of the fluctuations of the gaussian random fields.
new_x = np.linspace(np.max(x)+6.*l[i], np.max(x)+7.*l[i], npoints[n]).reshape((npoints[n],1))
y_predict, y_cov = gp.predict(new_x, return_cov=True)
y_std = np.sqrt(np.diag(y_cov))
np.testing.assert_allclose(np.mean(y)*np.ones_like(y_std), y_predict, atol=1e-5)
sig = np.sqrt(np.exp(gp.kernel.theta[0]))
np.testing.assert_allclose(sig*np.ones_like(y_std), y_std, atol=1e-5)
@timer
def test_hyperparameter_search_2d():
optimizer = ['log-likelihood', 'anisotropic', 'anisotropic']
npoints = [600, 2000, 2000]
noise = 0.01
sigma = 2.
size = [0.5, 0.5, 1.5]
g1 = 0.2
g2 = 0.2
ker = ['AnisotropicRBF', 'AnisotropicRBF', 'AnisotropicVonKarman']
for n, opt in enumerate(optimizer):
# Generate 2D gaussian random fields.
L = get_correlation_length_matrix(size[n], g1, g2)
invL = np.linalg.inv(L)
kernel = "%f**2*%s"%((sigma, ker[n]))
kernel += "(invLam={0!r})".format(invL)
kernel_skl = treegp.eval_kernel(kernel)
x, y, y_err = make_2d_grf(kernel_skl,
noise=noise,
seed=42, npoints=npoints[n])
        # Do gp interpolation without hyperparameter
# fitting (truth is put initially).
gp = treegp.GPInterpolation(kernel=kernel, optimizer=opt,
normalize=True, nbins=21, min_sep=0.,
max_sep=1., p0=[0.3, 0.,0.])
gp.initialize(x, y, y_err=y_err)
gp.solve()
        # test if found hyperparameters are close to the true hyperparameters.
np.testing.assert_allclose(kernel_skl.theta, gp.kernel.theta, atol=5e-1)
# Predict at same position as the simulated data.
        # Predictions are strictly equal to the input data
        # in the case of no noise. With noise you should expect
        # to have a pull distribution with mean around 0
        # and a std < 1 (you use the same data to train and validate, and
        # the data are well sampled compared to the input correlation
        # length).
y_predict, y_cov = gp.predict(x, return_cov=True)
y_std = np.sqrt(np.diag(y_cov))
pull = y - y_predict
pull /= np.sqrt(y_err**2 + y_std**2)
mean_pull = np.mean(pull)
std_pull = np.std(pull)
        # Test that the mean of the pull is close to zero and the std of the pull is below 1.
np.testing.assert_allclose(0., mean_pull, atol=3.*(std_pull)/np.sqrt(npoints[n]))
if std_pull > 1.:
raise ValueError("std_pull is > 1. Current value std_pull = %f"%(std_pull))
# Test that for extrapolation, interpolation is the mean function (0 here)
        # and the diagonal of the covariance matrix is close to the hyperparameter
        # linked to the amplitude of the fluctuations of the gaussian random fields.
np.random.seed(42)
x1 = np.random.uniform(np.max(x)+6.*size[n],
np.max(x)+6.*size[n], npoints[n])
x2 = np.random.uniform(np.max(x)+6.*size[n],
np.max(x)+6.*size[n], npoints[n])
new_x = np.array([x1, x2]).T
y_predict, y_cov = gp.predict(new_x, return_cov=True)
y_std = np.sqrt(np.diag(y_cov))
np.testing.assert_allclose(np.mean(y), y_predict, atol=1e-5)
sig = np.sqrt(np.exp(gp.kernel.theta[0]))
np.testing.assert_allclose(sig*np.ones_like(y_std), y_std, atol=1e-5)
if __name__ == "__main__":
test_hyperparameter_search_1d()
test_hyperparameter_search_2d()
|
[
"numpy.mean",
"treegp.GPInterpolation",
"treegp.eval_kernel",
"numpy.sqrt",
"numpy.ones_like",
"numpy.testing.assert_allclose",
"treegp_test_helper.make_1d_grf",
"numpy.diag",
"numpy.exp",
"treegp_test_helper.get_correlation_length_matrix",
"numpy.array",
"numpy.linalg.inv",
"numpy.max",
"numpy.random.seed",
"numpy.std",
"treegp_test_helper.make_2d_grf"
] |
[((3968, 4014), 'treegp_test_helper.get_correlation_length_matrix', 'get_correlation_length_matrix', (['size[n]', 'g1', 'g2'], {}), '(size[n], g1, g2)\n', (3997, 4014), False, 'from treegp_test_helper import get_correlation_length_matrix\n'), ((4030, 4046), 'numpy.linalg.inv', 'np.linalg.inv', (['L'], {}), '(L)\n', (4043, 4046), True, 'import numpy as np\n'), ((4162, 4188), 'treegp.eval_kernel', 'treegp.eval_kernel', (['kernel'], {}), '(kernel)\n', (4180, 4188), False, 'import treegp\n'), ((4212, 4277), 'treegp_test_helper.make_2d_grf', 'make_2d_grf', (['kernel_skl'], {'noise': 'noise', 'seed': '(42)', 'npoints': 'npoints[n]'}), '(kernel_skl, noise=noise, seed=42, npoints=npoints[n])\n', (4223, 4277), False, 'from treegp_test_helper import make_2d_grf\n'), ((4458, 4587), 'treegp.GPInterpolation', 'treegp.GPInterpolation', ([], {'kernel': 'kernel', 'optimizer': 'opt', 'normalize': '(True)', 'nbins': '(21)', 'min_sep': '(0.0)', 'max_sep': '(1.0)', 'p0': '[0.3, 0.0, 0.0]'}), '(kernel=kernel, optimizer=opt, normalize=True, nbins=\n 21, min_sep=0.0, max_sep=1.0, p0=[0.3, 0.0, 0.0])\n', (4480, 4587), False, 'import treegp\n'), ((4794, 4865), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['kernel_skl.theta', 'gp.kernel.theta'], {'atol': '(0.5)'}), '(kernel_skl.theta, gp.kernel.theta, atol=0.5)\n', (4820, 4865), True, 'import numpy as np\n'), ((5421, 5453), 'numpy.sqrt', 'np.sqrt', (['(y_err ** 2 + y_std ** 2)'], {}), '(y_err ** 2 + y_std ** 2)\n', (5428, 5453), True, 'import numpy as np\n'), ((5470, 5483), 'numpy.mean', 'np.mean', (['pull'], {}), '(pull)\n', (5477, 5483), True, 'import numpy as np\n'), ((5503, 5515), 'numpy.std', 'np.std', (['pull'], {}), '(pull)\n', (5509, 5515), True, 'import numpy as np\n'), ((6066, 6084), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (6080, 6084), True, 'import numpy as np\n'), ((765, 791), 'treegp.eval_kernel', 'treegp.eval_kernel', (['kernel'], {}), '(kernel)\n', (783, 791), False, 'import treegp\n'), ((818, 883), 'treegp_test_helper.make_1d_grf', 'make_1d_grf', (['kernel_skl'], {'noise': 'noise', 'seed': '(42)', 'npoints': 'npoints[n]'}), '(kernel_skl, noise=noise, seed=42, npoints=npoints[n])\n', (829, 883), False, 'from treegp_test_helper import make_1d_grf\n'), ((1084, 1200), 'treegp.GPInterpolation', 'treegp.GPInterpolation', ([], {'kernel': 'kernel', 'optimizer': 'opt', 'normalize': '(True)', 'nbins': '(15)', 'min_sep': '(0.1)', 'max_sep': 'max_sep[i]'}), '(kernel=kernel, optimizer=opt, normalize=True, nbins=\n 15, min_sep=0.1, max_sep=max_sep[i])\n', (1106, 1200), False, 'import treegp\n'), ((1438, 1509), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['kernel_skl.theta', 'gp.kernel.theta'], {'atol': '(0.7)'}), '(kernel_skl.theta, gp.kernel.theta, atol=0.7)\n', (1464, 1509), True, 'import numpy as np\n'), ((2483, 2496), 'numpy.mean', 'np.mean', (['pull'], {}), '(pull)\n', (2490, 2496), True, 'import numpy as np\n'), ((2520, 2532), 'numpy.std', 'np.std', (['pull'], {}), '(pull)\n', (2526, 2532), True, 'import numpy as np\n'), ((5360, 5374), 'numpy.diag', 'np.diag', (['y_cov'], {}), '(y_cov)\n', (5367, 5374), True, 'import numpy as np\n'), ((6337, 6355), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (6345, 6355), True, 'import numpy as np\n'), ((6445, 6459), 'numpy.diag', 'np.diag', (['y_cov'], {}), '(y_cov)\n', (6452, 6459), True, 'import numpy as np\n'), ((6497, 6507), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (6504, 6507), True, 'import numpy as np\n'), ((6553, 6579), 
'numpy.exp', 'np.exp', (['gp.kernel.theta[0]'], {}), '(gp.kernel.theta[0])\n', (6559, 6579), True, 'import numpy as np\n'), ((1645, 1708), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['xi', 'gp._optimizer._2pcf'], {'atol': '(1e-10)'}), '(xi, gp._optimizer._2pcf, atol=1e-10)\n', (1671, 1708), True, 'import numpy as np\n'), ((1815, 1880), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['logL', 'gp._optimizer._logL'], {'atol': '(1e-10)'}), '(logL, gp._optimizer._logL, atol=1e-10)\n', (1841, 1880), True, 'import numpy as np\n'), ((2410, 2424), 'numpy.diag', 'np.diag', (['y_cov'], {}), '(y_cov)\n', (2417, 2424), True, 'import numpy as np\n'), ((3312, 3326), 'numpy.diag', 'np.diag', (['y_cov'], {}), '(y_cov)\n', (3319, 3326), True, 'import numpy as np\n'), ((3460, 3486), 'numpy.exp', 'np.exp', (['gp.kernel.theta[0]'], {}), '(gp.kernel.theta[0])\n', (3466, 3486), True, 'import numpy as np\n'), ((6116, 6125), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6122, 6125), True, 'import numpy as np\n'), ((6169, 6178), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6175, 6178), True, 'import numpy as np\n'), ((6234, 6243), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6240, 6243), True, 'import numpy as np\n'), ((6287, 6296), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6293, 6296), True, 'import numpy as np\n'), ((6620, 6639), 'numpy.ones_like', 'np.ones_like', (['y_std'], {}), '(y_std)\n', (6632, 6639), True, 'import numpy as np\n'), ((3380, 3390), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (3387, 3390), True, 'import numpy as np\n'), ((3391, 3410), 'numpy.ones_like', 'np.ones_like', (['y_std'], {}), '(y_std)\n', (3403, 3410), True, 'import numpy as np\n'), ((3531, 3550), 'numpy.ones_like', 'np.ones_like', (['y_std'], {}), '(y_std)\n', (3543, 3550), True, 'import numpy as np\n'), ((5671, 5690), 'numpy.sqrt', 'np.sqrt', (['npoints[n]'], {}), '(npoints[n])\n', (5678, 5690), True, 'import numpy as np\n'), ((2696, 2715), 'numpy.sqrt', 'np.sqrt', (['npoints[n]'], {}), '(npoints[n])\n', (2703, 2715), True, 'import numpy as np\n'), ((3135, 3144), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3141, 3144), True, 'import numpy as np\n'), ((3154, 3163), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3160, 3163), True, 'import numpy as np\n')]
|