code | apis | extract_api
---|---|---|
#!/usr/bin/env python3
import sys
sys.path.append('../..')
import numpy as np
from neml.cp import crystallography
from neml.math import rotations
import matplotlib.pyplot as plt
if __name__ == "__main__":
    N = 300
    orientations = rotations.random_orientations(N)
    sgroup = crystallography.SymmetryGroup("432")

    angles = []
    for i in range(len(orientations)):
        for j in range(i + 1, len(orientations)):
            o1 = orientations[i]
            o2 = orientations[j]
            m = sgroup.misorientation(o1, o2)
            axis, angle = m.to_axis_angle()
            angles.append(angle)

    angles = np.rad2deg(angles)

    plt.figure()
    plt.hist(angles, bins=30)
    plt.show()

    Np = N * (N - 1)
    orientations1 = rotations.random_orientations(Np)
    orientations2 = rotations.random_orientations(Np)
    mis = sgroup.misorientation_block(orientations1, orientations2)
    angles = [np.rad2deg(m.to_axis_angle()[1]) for m in mis]

    plt.figure()
    plt.hist(angles, bins=30)
    plt.show()
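    # Hedged sanity check (not part of the original example): for cubic (432) symmetry
    # the disorientation angle cannot exceed roughly 62.8 degrees (the Mackenzie
    # cutoff), so both histograms above should terminate near that value.
    #
    #   print(np.max(angles))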
|
[
"matplotlib.pyplot.hist",
"neml.cp.crystallography.SymmetryGroup",
"neml.math.rotations.random_orientations",
"matplotlib.pyplot.figure",
"numpy.rad2deg",
"sys.path.append",
"matplotlib.pyplot.show"
] |
[((35, 59), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (50, 59), False, 'import sys\n'), ((238, 270), 'neml.math.rotations.random_orientations', 'rotations.random_orientations', (['N'], {}), '(N)\n', (267, 270), False, 'from neml.math import rotations\n'), ((283, 319), 'neml.cp.crystallography.SymmetryGroup', 'crystallography.SymmetryGroup', (['"""432"""'], {}), "('432')\n", (312, 319), False, 'from neml.cp import crystallography\n'), ((588, 606), 'numpy.rad2deg', 'np.rad2deg', (['angles'], {}), '(angles)\n', (598, 606), True, 'import numpy as np\n'), ((612, 624), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (622, 624), True, 'import matplotlib.pyplot as plt\n'), ((627, 652), 'matplotlib.pyplot.hist', 'plt.hist', (['angles'], {'bins': '(30)'}), '(angles, bins=30)\n', (635, 652), True, 'import matplotlib.pyplot as plt\n'), ((657, 667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (665, 667), True, 'import matplotlib.pyplot as plt\n'), ((707, 740), 'neml.math.rotations.random_orientations', 'rotations.random_orientations', (['Np'], {}), '(Np)\n', (736, 740), False, 'from neml.math import rotations\n'), ((759, 792), 'neml.math.rotations.random_orientations', 'rotations.random_orientations', (['Np'], {}), '(Np)\n', (788, 792), False, 'from neml.math import rotations\n'), ((921, 933), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (931, 933), True, 'import matplotlib.pyplot as plt\n'), ((936, 961), 'matplotlib.pyplot.hist', 'plt.hist', (['angles'], {'bins': '(30)'}), '(angles, bins=30)\n', (944, 961), True, 'import matplotlib.pyplot as plt\n'), ((966, 976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (974, 976), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import scipy as sp
import ast
import os
from quchem.Unitary_Partitioning.Graph import Clique_cover_Hamiltonian
import quchem.Misc_functions.conversion_scripts as conv_scr
from copy import deepcopy
from quchem.Unitary_Partitioning.Unitary_partitioning_LCU_method import LCU_linalg_Energy
from openfermion import qubit_operator_sparse
import pickle
import datetime
#######
import sys
# working_dir = os.getcwd()
working_dir = os.path.dirname(os.path.abspath(__file__)) # gets directory where running python file is!
Analysis_dir = os.path.join(working_dir, 'Analysis')
full_H_results_dir = os.path.join(Analysis_dir, 'SeqRot_LCU_script_A_results')
print('start time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')))
print('working directory:', working_dir)
###### IMPORT INITIAL RESULTS
## import LCU results
myriad_LCU_results = {}
for filename in os.listdir(full_H_results_dir):
    if (filename.endswith('.pickle') and filename.startswith('LCU_CS_VQE_exp')):
        file_path = os.path.join(full_H_results_dir, filename)
        mol_name = filename[40:-8]
        with open(file_path, 'rb') as infile:
            data = pickle.load(infile)
        myriad_LCU_results[mol_name] = data
### find anti-commuting sets
unitary_paritioning_LCU={}
# optional params!
commutativity_flag = 'AC' ## <- defines relationship between sets!!!
Graph_colouring_strategy='largest_first'
check_reduction_LCU = False
######## take command-line argument to run in parallel
mol_num = int(sys.argv[1])
sorted_mol_names = sorted(list(myriad_LCU_results.keys()))
mol_key = sorted_mol_names[mol_num-1] # UCL supercomputer indexes from 1, hence minus one here!
########
# for mol_key in tqdm(list(myriad_LCU_results.keys())): # removed loop and used myriad array input!
anti_commuting_sets_different_H_LCU_sizes={}
for ind_key in myriad_LCU_results[mol_key]:
    if isinstance(ind_key, str):
        continue

    if ind_key == 0:
        # only non-contextual problem
        anti_commuting_sets_different_H_LCU_sizes[ind_key] = {'AC_sets': {},
                                                              'E': myriad_LCU_results[mol_key][ind_key]['E']}
    else:
        ### LCU
        H_LCU_dict = myriad_LCU_results[mol_key][ind_key]['H']
        H_LCU_pruned = {P_key: coeff.real for P_key, coeff in H_LCU_dict.items() if not np.isclose(coeff.real, 0)}
        H_LCU = conv_scr.Get_Openfermion_Hamiltonian(H_LCU_pruned)
        n_qubits = len(list(H_LCU_dict.keys())[0])

        anti_commuting_sets_LCU = Clique_cover_Hamiltonian(list(H_LCU),
                                                            n_qubits,
                                                            commutativity_flag,
                                                            Graph_colouring_strategy)

        all_zero_Pn_index_dict = {set_key: 0 for set_key in anti_commuting_sets_LCU}
        E_LCU = LCU_linalg_Energy(anti_commuting_sets_LCU,
                                  all_zero_Pn_index_dict,
                                  n_qubits,
                                  atol=1e-8,
                                  rtol=1e-05,
                                  check_reduction=check_reduction_LCU)

        anti_commuting_sets_different_H_LCU_sizes[ind_key] = {'AC_sets': anti_commuting_sets_LCU,
                                                              'E': E_LCU}
unitary_paritioning_LCU[mol_key]= deepcopy(anti_commuting_sets_different_H_LCU_sizes)
del anti_commuting_sets_different_H_LCU_sizes
####### SAVE OUTPUT details
unique_file_time = datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')
# output_dir = os.path.join(working_dir, 'Pickle_out')
output_dir = os.getcwd()
########
####### SAVE OUTPUT
file_name2 = 'Unitary_Partitinging_LCU_CS_VQE_LCU_exp__{}__{}_.pickle'.format(unique_file_time, mol_key)
file_out2=os.path.join(output_dir, file_name2)
with open(file_out2, 'wb') as outfile:
    pickle.dump(unitary_paritioning_LCU, outfile)
print('pickle files dumped, unique time id: {}'.format(unique_file_time))
print('end time: {}'.format(datetime.datetime.now().strftime('%Y%b%d-%H%M%S%f')))
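# Hedged follow-up sketch (not part of the original script): re-loading one of the
# dumped result pickles for inspection. `result_path` is a hypothetical file name
# following the pattern written above.
#
#   result_path = 'Unitary_Partitinging_LCU_CS_VQE_LCU_exp__<time>__<mol>_.pickle'
#   with open(result_path, 'rb') as f:
#       results = pickle.load(f)
#   for mol, sets_by_size in results.items():
#       for ind_key, d in sets_by_size.items():
#           print(mol, ind_key, d['E'])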
|
[
"os.listdir",
"pickle.dump",
"numpy.isclose",
"os.path.join",
"pickle.load",
"os.getcwd",
"quchem.Unitary_Partitioning.Unitary_partitioning_LCU_method.LCU_linalg_Energy",
"quchem.Misc_functions.conversion_scripts.Get_Openfermion_Hamiltonian",
"datetime.datetime.now",
"copy.deepcopy",
"os.path.abspath"
] |
[((554, 591), 'os.path.join', 'os.path.join', (['working_dir', '"""Analysis"""'], {}), "(working_dir, 'Analysis')\n", (566, 591), False, 'import os\n'), ((613, 670), 'os.path.join', 'os.path.join', (['Analysis_dir', '"""SeqRot_LCU_script_A_results"""'], {}), "(Analysis_dir, 'SeqRot_LCU_script_A_results')\n", (625, 670), False, 'import os\n'), ((894, 924), 'os.listdir', 'os.listdir', (['full_H_results_dir'], {}), '(full_H_results_dir)\n', (904, 924), False, 'import os\n'), ((3341, 3392), 'copy.deepcopy', 'deepcopy', (['anti_commuting_sets_different_H_LCU_sizes'], {}), '(anti_commuting_sets_different_H_LCU_sizes)\n', (3349, 3392), False, 'from copy import deepcopy\n'), ((3608, 3619), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3617, 3619), False, 'import os\n'), ((3766, 3802), 'os.path.join', 'os.path.join', (['output_dir', 'file_name2'], {}), '(output_dir, file_name2)\n', (3778, 3802), False, 'import os\n'), ((465, 490), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (480, 490), False, 'import os\n'), ((3847, 3892), 'pickle.dump', 'pickle.dump', (['unitary_paritioning_LCU', 'outfile'], {}), '(unitary_paritioning_LCU, outfile)\n', (3858, 3892), False, 'import pickle\n'), ((1027, 1069), 'os.path.join', 'os.path.join', (['full_H_results_dir', 'filename'], {}), '(full_H_results_dir, filename)\n', (1039, 1069), False, 'import os\n'), ((2421, 2471), 'quchem.Misc_functions.conversion_scripts.Get_Openfermion_Hamiltonian', 'conv_scr.Get_Openfermion_Hamiltonian', (['H_LCU_pruned'], {}), '(H_LCU_pruned)\n', (2457, 2471), True, 'import quchem.Misc_functions.conversion_scripts as conv_scr\n'), ((2925, 3066), 'quchem.Unitary_Partitioning.Unitary_partitioning_LCU_method.LCU_linalg_Energy', 'LCU_linalg_Energy', (['anti_commuting_sets_LCU', 'all_zero_Pn_index_dict', 'n_qubits'], {'atol': '(1e-08)', 'rtol': '(1e-05)', 'check_reduction': 'check_reduction_LCU'}), '(anti_commuting_sets_LCU, all_zero_Pn_index_dict, n_qubits,\n atol=1e-08, rtol=1e-05, check_reduction=check_reduction_LCU)\n', (2942, 3066), False, 'from quchem.Unitary_Partitioning.Unitary_partitioning_LCU_method import LCU_linalg_Energy\n'), ((3488, 3511), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3509, 3511), False, 'import datetime\n'), ((1170, 1189), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1181, 1189), False, 'import pickle\n'), ((703, 726), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (724, 726), False, 'import datetime\n'), ((3997, 4020), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4018, 4020), False, 'import datetime\n'), ((2371, 2396), 'numpy.isclose', 'np.isclose', (['coeff.real', '(0)'], {}), '(coeff.real, 0)\n', (2381, 2396), True, 'import numpy as np\n')]
|
# Copyright 2019 Systems & Technology Research, LLC
# Use of this software is governed by the license.txt file.
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torch.nn.functional as F
from PIL import ImageFilter
def prepare_vggface_image(img):
"""
Convert an RGB byte image to a FloatTensor suitable for processing with the network.
This function assumes the image has already been resized, cropped, jittered, etc.
"""
# Convert to BGR
img_bgr = np.array(img)[...,[2,1,0]]
# Subtract mean pixel value
img_bgr_fp = img_bgr - np.array((93.5940, 104.7624, 129.1863))
# Permute dimensions so output is 3xRxC
img_bgr_fp = np.rollaxis(img_bgr_fp, 2, 0)
return torch.from_numpy(img_bgr_fp).float()
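# Hedged usage sketch (not part of the original module): applying the preprocessing
# to a single PIL image. 'face.jpg' is a hypothetical file name.
#
#   from PIL import Image
#   img = Image.open('face.jpg').convert('RGB').resize((224, 224))
#   tensor = prepare_vggface_image(img)   # 3x224x224 FloatTensor, BGR, mean-subtracted
#   batch = tensor.unsqueeze(0)           # -> 1x3x224x224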
def generate_random_blur(blur_radius, blur_prob):
    def random_blur(img):
        if np.random.random() < blur_prob:
            return img.filter(ImageFilter.GaussianBlur(radius=blur_radius))
        else:
            return img
    return random_blur
""" Function suitable for transform argument of datasets.ImageFolder """
def vggface_preprocess(jitter=False, blur_radius=None, blur_prob=1.0):
    transform_list = [transforms.Resize(256), ]
    if jitter:
        transform_list.append(transforms.RandomCrop((224, 224)))
        transform_list.append(transforms.RandomHorizontalFlip())
        #transform_list.append(transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1))
    else:
        transform_list.append(transforms.CenterCrop((224, 224)))
    if blur_radius is not None and blur_prob > 0:
        transform_list.append(transforms.Lambda(generate_random_blur(blur_radius, blur_prob)))
    # finally, convert PIL RGB image to FloatTensor
    transform_list.append(transforms.Lambda(prepare_vggface_image))
    return transforms.Compose(transform_list)
class VGGFace(nn.Module):
"""
The VGGFace network (VGG_VD_16)
mode can be one of ['encode', 'classify', 'both']
"""
def __init__(self, mode='encode', num_classes=2622):
super(VGGFace, self).__init__()
valid_modes = {'encode','classify','both'}
if mode not in valid_modes:
raise Exception('mode should be one of ' + str(valid_modes))
self.mode = mode
self.fc_outputs = num_classes
# layers with stored weights
self.conv1_1 = nn.Conv2d(3,64,(3, 3),(1, 1),(1, 1))
self.conv1_2 = nn.Conv2d(64,64,(3, 3),(1, 1),(1, 1))
self.conv2_1 = nn.Conv2d(64,128,(3, 3),(1, 1),(1, 1))
self.conv2_2 = nn.Conv2d(128,128,(3, 3),(1, 1),(1, 1))
self.conv3_1 = nn.Conv2d(128,256,(3, 3),(1, 1),(1, 1))
self.conv3_2 = nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1))
self.conv3_3 = nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1))
self.conv4_1 = nn.Conv2d(256,512,(3, 3),(1, 1),(1, 1))
self.conv4_2 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
self.conv4_3 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
self.conv5_1 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
self.conv5_2 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
self.conv5_3 = nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1))
self.fc6 = nn.Linear(25088,4096)
self.fc7 = nn.Linear(4096,4096)
self.fc8 = nn.Linear(4096, self.fc_outputs)
# layers with no weights
self.nonlin = nn.ReLU()
self.maxpool = nn.MaxPool2d((2, 2),(2, 2),(0, 0),ceil_mode=True)
self.dropout = nn.Dropout(0.5)
def forward(self, input):
"""
Run the network.
Input should be Nx3x224x224.
Based on self.mode, return output of fc7, fc8, or both.
"""
assert len(input.size()) == 4
e1_1 = self.nonlin(self.conv1_1(input))
e1_2 = self.maxpool(self.nonlin(self.conv1_2(e1_1)))
e2_1 = self.nonlin(self.conv2_1(e1_2))
e2_2 = self.maxpool(self.nonlin(self.conv2_2(e2_1)))
e3_1 = self.nonlin(self.conv3_1(e2_2))
e3_2 = self.nonlin(self.conv3_2(e3_1))
e3_3 = self.maxpool(self.nonlin(self.conv3_3(e3_2)))
e4_1 = self.nonlin(self.conv4_1(e3_3))
e4_2 = self.nonlin(self.conv4_2(e4_1))
e4_3 = self.maxpool(self.nonlin(self.conv4_3(e4_2)))
e5_1 = self.nonlin(self.conv5_1(e4_3))
e5_2 = self.nonlin(self.conv5_2(e5_1))
e5_3 = self.maxpool(self.nonlin(self.conv5_3(e5_2)))
e5_3_flat = e5_3.view(e5_3.size(0), -1)
e6 = self.nonlin(self.fc6(e5_3_flat))
# use encoding prior to nonlinearity
e7_pre = self.fc7(self.dropout(e6))
e7 = self.nonlin(e7_pre)
# return e7, e8, or both depending on self.mode
if self.mode == 'encode':
return e7
else:
e8 = self.fc8(self.dropout(e7))
if self.mode == 'classify':
return e8
elif self.mode == 'both':
return e7,e8
else:
raise Exception('Invalid mode: ' + mode)
def set_fc_outputs(self, new_fc_outputs):
self.fc_outputs = new_fc_outputs
self.fc8 = nn.Linear(4096, self.fc_outputs)
class VGGFace_Custom(VGGFace):
"""Inherit VGGFace() and override the forward pass to
normalize the output. Don't care about classification
"""
def forward(self, input, nrm=True):
"""
Run the network.
Input should be Nx3x224x224.
Based on self.mode, return output of fc7, fc8, or both.
"""
assert len(input.size()) == 4
e1_1 = self.nonlin(self.conv1_1(input))
e1_2 = self.maxpool(self.nonlin(self.conv1_2(e1_1)))
e2_1 = self.nonlin(self.conv2_1(e1_2))
e2_2 = self.maxpool(self.nonlin(self.conv2_2(e2_1)))
e3_1 = self.nonlin(self.conv3_1(e2_2))
e3_2 = self.nonlin(self.conv3_2(e3_1))
e3_3 = self.maxpool(self.nonlin(self.conv3_3(e3_2)))
e4_1 = self.nonlin(self.conv4_1(e3_3))
e4_2 = self.nonlin(self.conv4_2(e4_1))
e4_3 = self.maxpool(self.nonlin(self.conv4_3(e4_2)))
e5_1 = self.nonlin(self.conv5_1(e4_3))
e5_2 = self.nonlin(self.conv5_2(e5_1))
e5_3 = self.maxpool(self.nonlin(self.conv5_3(e5_2)))
e5_3_flat = e5_3.view(e5_3.size(0), -1)
e6 = self.nonlin(self.fc6(e5_3_flat))
# use encoding prior to nonlinearity
e7_pre = self.fc7(self.dropout(e6))
e7 = self.nonlin(e7_pre)
"""Override code here: Want to normalize the output and
return the encoding. Don't care about classification.
"""
if nrm is False:
return e7
#print torch.div(e7,torch.norm(e7))
#print e7.size()
xnorm = F.normalize(e7, p=2, dim=1)
return xnorm
#return torch.div(e7,torch.norm(e7))
def vgg16(model_filename=None):
"""
Constructs a VGG-16 model
"""
model = VGGFace_Custom()
if model_filename is not None:
model.load_state_dict(torch.load(model_filename))
return model
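# Hedged end-to-end sketch (not part of the original module). 'vggface_weights.pth' and
# 'face.jpg' are hypothetical file names; the checkpoint's state dict must match the
# layer names defined above.
#
#   from PIL import Image
#   model = vgg16('vggface_weights.pth')
#   model.eval()
#   preprocess = vggface_preprocess(jitter=False)
#   batch = preprocess(Image.open('face.jpg').convert('RGB')).unsqueeze(0)
#   with torch.no_grad():
#       emb = model(batch)   # L2-normalized 4096-d encoding (nrm=True)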
|
[
"torchvision.transforms.CenterCrop",
"torch.nn.ReLU",
"torch.nn.Dropout",
"numpy.random.random",
"torch.load",
"numpy.rollaxis",
"torchvision.transforms.Lambda",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.nn.functional.normalize",
"numpy.array",
"torchvision.transforms.RandomCrop",
"torch.nn.MaxPool2d",
"torchvision.transforms.RandomHorizontalFlip",
"PIL.ImageFilter.GaussianBlur",
"torch.nn.Linear",
"torchvision.transforms.Resize",
"torchvision.transforms.Compose"
] |
[((730, 759), 'numpy.rollaxis', 'np.rollaxis', (['img_bgr_fp', '(2)', '(0)'], {}), '(img_bgr_fp, 2, 0)\n', (741, 759), True, 'import numpy as np\n'), ((1862, 1896), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (1880, 1896), True, 'import torchvision.transforms as transforms\n'), ((543, 556), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (551, 556), True, 'import numpy as np\n'), ((629, 667), 'numpy.array', 'np.array', (['(93.594, 104.7624, 129.1863)'], {}), '((93.594, 104.7624, 129.1863))\n', (637, 667), True, 'import numpy as np\n'), ((1233, 1255), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1250, 1255), True, 'import torchvision.transforms as transforms\n'), ((1809, 1849), 'torchvision.transforms.Lambda', 'transforms.Lambda', (['prepare_vggface_image'], {}), '(prepare_vggface_image)\n', (1826, 1849), True, 'import torchvision.transforms as transforms\n'), ((2411, 2451), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(3, 64, (3, 3), (1, 1), (1, 1))\n', (2420, 2451), True, 'import torch.nn as nn\n'), ((2471, 2512), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(64, 64, (3, 3), (1, 1), (1, 1))\n', (2480, 2512), True, 'import torch.nn as nn\n'), ((2533, 2575), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(64, 128, (3, 3), (1, 1), (1, 1))\n', (2542, 2575), True, 'import torch.nn as nn\n'), ((2595, 2638), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(128, 128, (3, 3), (1, 1), (1, 1))\n', (2604, 2638), True, 'import torch.nn as nn\n'), ((2659, 2702), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(128, 256, (3, 3), (1, 1), (1, 1))\n', (2668, 2702), True, 'import torch.nn as nn\n'), ((2722, 2765), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(256, 256, (3, 3), (1, 1), (1, 1))\n', (2731, 2765), True, 'import torch.nn as nn\n'), ((2785, 2828), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(256, 256, (3, 3), (1, 1), (1, 1))\n', (2794, 2828), True, 'import torch.nn as nn\n'), ((2849, 2892), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(256, 512, (3, 3), (1, 1), (1, 1))\n', (2858, 2892), True, 'import torch.nn as nn\n'), ((2912, 2955), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (2921, 2955), True, 'import torch.nn as nn\n'), ((2975, 3018), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (2984, 3018), True, 'import torch.nn as nn\n'), ((3039, 3082), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (3048, 3082), True, 'import torch.nn as nn\n'), ((3102, 3145), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (3111, 3145), True, 'import torch.nn as nn\n'), ((3165, 3208), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(512, 512, (3, 3), (1, 1), (1, 1))\n', (3174, 3208), True, 'import torch.nn as nn\n'), ((3225, 3247), 'torch.nn.Linear', 'nn.Linear', (['(25088)', '(4096)'], {}), '(25088, 4096)\n', (3234, 3247), 
True, 'import torch.nn as nn\n'), ((3266, 3287), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (3275, 3287), True, 'import torch.nn as nn\n'), ((3306, 3338), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'self.fc_outputs'], {}), '(4096, self.fc_outputs)\n', (3315, 3338), True, 'import torch.nn as nn\n'), ((3395, 3404), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3402, 3404), True, 'import torch.nn as nn\n'), ((3428, 3480), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)', '(2, 2)', '(0, 0)'], {'ceil_mode': '(True)'}), '((2, 2), (2, 2), (0, 0), ceil_mode=True)\n', (3440, 3480), True, 'import torch.nn as nn\n'), ((3501, 3516), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (3511, 3516), True, 'import torch.nn as nn\n'), ((5128, 5160), 'torch.nn.Linear', 'nn.Linear', (['(4096)', 'self.fc_outputs'], {}), '(4096, self.fc_outputs)\n', (5137, 5160), True, 'import torch.nn as nn\n'), ((6725, 6752), 'torch.nn.functional.normalize', 'F.normalize', (['e7'], {'p': '(2)', 'dim': '(1)'}), '(e7, p=2, dim=1)\n', (6736, 6752), True, 'import torch.nn.functional as F\n'), ((771, 799), 'torch.from_numpy', 'torch.from_numpy', (['img_bgr_fp'], {}), '(img_bgr_fp)\n', (787, 799), False, 'import torch\n'), ((897, 915), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (913, 915), True, 'import numpy as np\n'), ((1303, 1336), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224, 224)'], {}), '((224, 224))\n', (1324, 1336), True, 'import torchvision.transforms as transforms\n'), ((1367, 1400), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1398, 1400), True, 'import torchvision.transforms as transforms\n'), ((1552, 1585), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224, 224)'], {}), '((224, 224))\n', (1573, 1585), True, 'import torchvision.transforms as transforms\n'), ((7002, 7028), 'torch.load', 'torch.load', (['model_filename'], {}), '(model_filename)\n', (7012, 7028), False, 'import torch\n'), ((959, 1003), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', ([], {'radius': 'blur_radius'}), '(radius=blur_radius)\n', (983, 1003), False, 'from PIL import ImageFilter\n')]
|
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional
import hydra
import numpy as np
import pandas as pd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import (
Callback,
LightningDataModule,
LightningModule,
Trainer,
seed_everything,
)
from pytorch_lightning.loggers import LightningLoggerBase
from sklearn.model_selection import KFold
from src.train import PrepareTmpFile
from src.utils import utils
log = utils.get_logger(__name__)
def test(config: DictConfig, datamodule: Optional[LightningDataModule] = None) -> Optional[float]:
"""Contains training pipeline.
Instantiates all PyTorch Lightning objects from config.
Args:
config (DictConfig): Configuration composed by Hydra.
Returns:
Optional[float]: Metric score for hyperparameter optimization.
"""
# Set seed for random number generators in pytorch, numpy and python.random
if config.get("seed"):
seed_everything(config.seed, workers=True)
# Init lightning datamodule
if datamodule is None:
log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
# Init lightning model
log.info(f"Instantiating model <{config.model._target_}>")
model_cls = utils._locate(config.model._target_)
checkpoint_path: Path = Path(config.work_dir) / config.load_checkpoint
model: LightningModule = model_cls.load_from_checkpoint(checkpoint_path)
# Init lightning callbacks
callbacks: List[Callback] = []
if "callbacks" in config:
for _, cb_conf in config.callbacks.items():
if "_target_" in cb_conf:
log.info(f"Instantiating callback <{cb_conf._target_}>")
callbacks.append(hydra.utils.instantiate(cb_conf))
# Init lightning loggers
logger: List[LightningLoggerBase] = []
if "logger" in config:
for _, lg_conf in config.logger.items():
if "_target_" in lg_conf:
log.info(f"Instantiating logger <{lg_conf._target_}>")
logger.append(hydra.utils.instantiate(lg_conf))
# Init lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, logger=logger, _convert_="partial")
# Send some parameters from config to all lightning loggers
log.info("Logging hyperparameters!")
utils.log_hyperparameters(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
# Evaluate model on test set, using the best model achieved during training
log.info("Starting testing!")
result: List[Dict[str, float]] = trainer.test(model=model, datamodule=datamodule)
# Make sure everything closed properly
log.info("Finalizing!")
utils.finish(
config=config,
model=model,
datamodule=datamodule,
trainer=trainer,
callbacks=callbacks,
logger=logger,
)
return result
def test_cv(config: OmegaConf, df: pd.DataFrame):
    # Filter run
    log.debug("Filtering")
    log.debug(f"Length: {len(df)}")
    for name, d in [("model", config.model), ("dataset", config.datamodule), ("trainer", config.trainer)]:
        for k, v in d.items():
            if len(df) == 1:
                break
            df = df[df[f"{name}_{k}"] == v]
            log.debug(f"{name}_{k}={v}")
            log.debug(f"Length: {len(df)}")
    index = df.index
    assert len(index) == 1
    run_name = index[0]
    log.info(f"Run name: {run_name}")
    checkpoint_paths = df.filter(regex="^best_checkpoint")
    result_dict = defaultdict(list)

    # Load csv
    df = pd.read_csv(config.datamodule.csv_path)
    kf = KFold(n_splits=config["folds"], shuffle=True, random_state=config.seed)
    datamodule_params = dict(config.datamodule)
    datamodule_cls = utils._locate(datamodule_params.pop("_target_"))
    datamodule_params.pop("csv_path")  # remove csv_path from params
    for i, (checkpoint_path, (train_idx, test_idx)) in enumerate(
        zip(checkpoint_paths.values[0], kf.split(df)), start=1
    ):
        log.info(f"Start {i}th fold out of {kf.n_splits} folds")
        train_df = df.iloc[train_idx]
        test_df = df.iloc[test_idx]
        valid_df, test_df = np.array_split(test_df, 2)
        log.info(checkpoint_path)
        config.load_checkpoint = checkpoint_path

        # Init lightning datamodule
        log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
        with PrepareTmpFile(train_df, valid_df, test_df) as (ft, fv, fe):
            datamodule: LightningDataModule = datamodule_cls(ft.name, fv.name, fe.name, **datamodule_params)
            result: List[Dict[str, float]] = test(config, datamodule)
        print(result)
        assert len(result) == 1
        result = result[0]
        for k, v in result.items():
            result_dict[k].append(v)
    utils.log_cv_result(run_name, config, result_dict)
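# Hedged driver sketch (not part of the original module): test_cv expects `df` to be a
# runs table indexed by run name, with flattened "model_*", "datamodule_*" and
# "trainer_*" hyperparameter columns plus "best_checkpoint*" path columns, as assumed
# by the filtering above. A minimal entry point might look like (the config layout and
# runs.csv are hypothetical):
#
#   @hydra.main(config_path="configs", config_name="test")
#   def main(config: DictConfig):
#       runs_df = pd.read_csv("runs.csv", index_col=0)
#       test_cv(config, runs_df)
#
#   if __name__ == "__main__":
#       main()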
|
[
"src.utils.utils.get_logger",
"pandas.read_csv",
"src.utils.utils.log_cv_result",
"pathlib.Path",
"hydra.utils.instantiate",
"pytorch_lightning.seed_everything",
"src.utils.utils.log_hyperparameters",
"numpy.array_split",
"src.train.PrepareTmpFile",
"collections.defaultdict",
"src.utils.utils._locate",
"sklearn.model_selection.KFold",
"src.utils.utils.finish"
] |
[((499, 525), 'src.utils.utils.get_logger', 'utils.get_logger', (['__name__'], {}), '(__name__)\n', (515, 525), False, 'from src.utils import utils\n'), ((1376, 1412), 'src.utils.utils._locate', 'utils._locate', (['config.model._target_'], {}), '(config.model._target_)\n', (1389, 1412), False, 'from src.utils import utils\n'), ((2334, 2434), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['config.trainer'], {'callbacks': 'callbacks', 'logger': 'logger', '_convert_': '"""partial"""'}), "(config.trainer, callbacks=callbacks, logger=logger,\n _convert_='partial')\n", (2357, 2434), False, 'import hydra\n'), ((2541, 2674), 'src.utils.utils.log_hyperparameters', 'utils.log_hyperparameters', ([], {'config': 'config', 'model': 'model', 'datamodule': 'datamodule', 'trainer': 'trainer', 'callbacks': 'callbacks', 'logger': 'logger'}), '(config=config, model=model, datamodule=datamodule,\n trainer=trainer, callbacks=callbacks, logger=logger)\n', (2566, 2674), False, 'from src.utils import utils\n'), ((3003, 3124), 'src.utils.utils.finish', 'utils.finish', ([], {'config': 'config', 'model': 'model', 'datamodule': 'datamodule', 'trainer': 'trainer', 'callbacks': 'callbacks', 'logger': 'logger'}), '(config=config, model=model, datamodule=datamodule, trainer=\n trainer, callbacks=callbacks, logger=logger)\n', (3015, 3124), False, 'from src.utils import utils\n'), ((3831, 3848), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3842, 3848), False, 'from collections import defaultdict\n'), ((3874, 3913), 'pandas.read_csv', 'pd.read_csv', (['config.datamodule.csv_path'], {}), '(config.datamodule.csv_path)\n', (3885, 3913), True, 'import pandas as pd\n'), ((3923, 3994), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': "config['folds']", 'shuffle': '(True)', 'random_state': 'config.seed'}), "(n_splits=config['folds'], shuffle=True, random_state=config.seed)\n", (3928, 3994), False, 'from sklearn.model_selection import KFold\n'), ((5121, 5171), 'src.utils.utils.log_cv_result', 'utils.log_cv_result', (['run_name', 'config', 'result_dict'], {}), '(run_name, config, result_dict)\n', (5140, 5171), False, 'from src.utils import utils\n'), ((1004, 1046), 'pytorch_lightning.seed_everything', 'seed_everything', (['config.seed'], {'workers': '(True)'}), '(config.seed, workers=True)\n', (1019, 1046), False, 'from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer, seed_everything\n'), ((1226, 1268), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['config.datamodule'], {}), '(config.datamodule)\n', (1249, 1268), False, 'import hydra\n'), ((1441, 1462), 'pathlib.Path', 'Path', (['config.work_dir'], {}), '(config.work_dir)\n', (1445, 1462), False, 'from pathlib import Path\n'), ((4485, 4511), 'numpy.array_split', 'np.array_split', (['test_df', '(2)'], {}), '(test_df, 2)\n', (4499, 4511), True, 'import numpy as np\n'), ((4723, 4766), 'src.train.PrepareTmpFile', 'PrepareTmpFile', (['train_df', 'valid_df', 'test_df'], {}), '(train_df, valid_df, test_df)\n', (4737, 4766), False, 'from src.train import PrepareTmpFile\n'), ((1858, 1890), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['cb_conf'], {}), '(cb_conf)\n', (1881, 1890), False, 'import hydra\n'), ((2180, 2212), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['lg_conf'], {}), '(lg_conf)\n', (2203, 2212), False, 'import hydra\n')]
|
from __future__ import print_function
import numpy as np
import os
class BFGS_Hessian(object):
"""
Class to evaluate the update to inverse Hessian matrix in the L-BFGS scheme.
(see wikipedia article if nothing else).
H is B^-1 form that article.
B_k+1 = B + yy^t / (y^ts) - B s s^t B / (s^t Bk s)) (all k on the RHS)
H_k+1 = (1 - sy^t / (y^t s) ) H (1 - ys^t / (y^ts))) + ss^t / (y^t s).
Determinant of B:
ln det Bk+1 = ln det Bk + ln( s^ty / s^t B s).
For quasi Newton, s_k = x_k1 - x_k = - alpha_k Hk grad_k with alpha_k newton step-length.
--> s^t B s at k is alpha_k^2 g_k H g_k
s^t y is - alpha_k (g_k+1 - g_k) H g_k
This leads to ln|B_k + 1| = ln |B_k| + ln(1 - 1/alpha_k g_k+1 H g_k / (gk H gk))
"""
    def __init__(self, lib_dir, apply_H0k, paths2ys, paths2ss, L=100000, apply_B0k=None, verbose=True):
        """
        :param apply_H0k: user supplied function(x,k), applying a zeroth order estimate of the inverse Hessian to x at
        iter k.
        :param paths2ys: dictionary of paths to the y vectors. y_k = grad_k+1 - grad_k
        :param paths2ss: dictionary of paths to the s vectors. s_k = x_k+1 - x_k
        :return:
        H is inverse Hessian, not Hessian.
        """
        self.lib_dir = lib_dir
        self.paths2ys = paths2ys
        self.paths2ss = paths2ss
        self.L = L
        self.applyH0k = apply_H0k
        self.applyB0k = apply_B0k
        self.verbose = verbose

    def y(self, n):
        return np.load(self.paths2ys[n], mmap_mode='r')

    def s(self, n):
        return np.load(self.paths2ss[n], mmap_mode='r')

    def add_ys(self, path2y, path2s, k):
        assert os.path.exists(path2y), path2y
        assert os.path.exists(path2s), path2s
        self.paths2ys[k] = path2y
        self.paths2ss[k] = path2s
        if self.verbose:
            print('Linked y vector ', path2y, ' to Hessian')
            print('Linked s vector ', path2s, ' to Hessian')

    def _save_alpha(self, alpha, i):
        fname = os.path.join(self.lib_dir, 'temp_alpha_%s.npy' % i)
        np.save(fname, alpha)
        return

    def _load_alpha(self, i):
        """
        Loads, and removes, alpha from disk.
        :param i:
        :return:
        """
        fname = os.path.join(self.lib_dir, 'temp_alpha_%s.npy' % i)
        assert os.path.exists(fname)
        ret = np.load(fname)
        os.remove(fname)
        return ret

    def applyH(self, x, k, _depth=0):
        """
        Recursive calculation of H_k x, for any x.
        This uses the product form update H_new = (1 - rho s y^t) H (1 - rho y s^t) + rho ss^t
        :param x: vector to apply the inverse Hessian to
        :param k: iter level. Output is H_k x.
        :param _depth: internal, for internal bookkeeping.
        :return:
        """
        if k <= 0 or _depth >= self.L or self.L == 0:
            return self.applyH0k(x, k)
        s = self.s(k - 1)
        y = self.y(k - 1)
        rho = 1. / np.sum(s * y)
        Hv = self.applyH(x - rho * y * np.sum(x * s), k - 1, _depth=_depth + 1)
        return Hv - s * (rho * np.sum(y * Hv)) + rho * s * np.sum(s * x)

    def get_gk(self, k, alpha_k0):
        """
        Reconstruct gradient at x_k, given the first newton step length at step max(0,k-L)
        ! this is very badly behaved numerically.
        """
        assert self.applyB0k is not None
        ret = -self.applyB0k(self.s(max(0, k - self.L)), max(0, k - self.L)) / alpha_k0
        for j in range(max(0, k - self.L), k):
            ret += self.y(j)
        return ret

    def get_sBs(self, k, alpha_k, alpha_k0):
        """
        Reconstruct s^t B s at x_k, given the first newton step length at step max(0,k-L) and current step alpha_k.
        """
        return -alpha_k * np.sum(self.s(k) * self.get_gk(k, alpha_k0))

    def get_lndet_update(self, k, alpha_k, alpha_k0):
        """
        Return update to B log determinant, lndet B_k+1 = lndet B_k + output.
        """
        return np.log(np.sum(self.y(k) * self.s(k)) / self.get_sBs(k, alpha_k, alpha_k0))

    def get_mHkgk(self, gk, k, output_fname=None):
        """
        Obtains - H_k g_k with L-BFGS two-loop recursion.
        :param gk: grad f(x_k)
        :param k: iterate index
        :return: - H_k g_k according to L-BFGS.
        If output_fname is set then output is saved in file and nothing is returned.
        Should be fine with k == 0
        """
        q = gk.copy()
        rho = lambda i: 1. / np.sum(self.s(i) * self.y(i))
        for i in range(k - 1, np.max([-1, k - self.L - 1]), -1):
            alpha_i = rho(i) * np.sum(self.s(i) * q)
            q -= alpha_i * self.y(i)
            self._save_alpha(alpha_i, i)
        r = self.applyH0k(q, k)
        for i in range(np.max([0, k - self.L]), k):
            beta = rho(i) * np.sum(self.y(i) * r)
            r += self.s(i) * (self._load_alpha(i) - beta)
        if output_fname is None:
            return -r
        np.save(output_fname, -r)
        return

    def sample_Gaussian(self, k, x_0, rng_state=None):
        """
        sample from a MV zero-mean Gaussian with covariance matrix H, at iteration level k,
        given input x_0 random vector with covariance H_0.
        Since H is the inverse Hessian, then H is roughly the covariance matrix of the parameters in a line search.
        :param k:
        :param x_0:
        :return:
        """
        ret = x_0.copy()
        rho = lambda i: 1. / np.sum(self.s(i) * self.y(i))
        if rng_state is not None:
            np.random.set_state(rng_state)
        eps = np.random.standard_normal((len(range(np.max([0, k - self.L]), k)), 1))
        for idx, i in enumerate(range(np.max([0, k - self.L]), k)):
            ret = ret - self.s(i) * np.sum(self.y(i) * ret) * rho(i) + np.sqrt(rho(i)) * self.s(i) * eps[idx]
        return ret
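# Hedged toy example (not part of the original module): build the inverse Hessian from a
# single hand-made (s, y) pair stored as .npy files, with the identity as H0.
#
#   import tempfile
#   tmp = tempfile.mkdtemp()
#   np.save(os.path.join(tmp, 's0.npy'), np.array([1.0, 0.0]))
#   np.save(os.path.join(tmp, 'y0.npy'), np.array([2.0, 0.0]))
#   H = BFGS_Hessian(tmp, apply_H0k=lambda x, k: x,
#                    paths2ys={0: os.path.join(tmp, 'y0.npy')},
#                    paths2ss={0: os.path.join(tmp, 's0.npy')}, verbose=False)
#   print(H.applyH(np.array([1.0, 1.0]), k=1))   # one BFGS update applied to x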
|
[
"os.path.exists",
"numpy.random.set_state",
"os.path.join",
"numpy.max",
"numpy.sum",
"numpy.load",
"numpy.save",
"os.remove"
] |
[((1529, 1569), 'numpy.load', 'np.load', (['self.paths2ys[n]'], {'mmap_mode': '"""r"""'}), "(self.paths2ys[n], mmap_mode='r')\n", (1536, 1569), True, 'import numpy as np\n'), ((1606, 1646), 'numpy.load', 'np.load', (['self.paths2ss[n]'], {'mmap_mode': '"""r"""'}), "(self.paths2ss[n], mmap_mode='r')\n", (1613, 1646), True, 'import numpy as np\n'), ((1704, 1726), 'os.path.exists', 'os.path.exists', (['path2y'], {}), '(path2y)\n', (1718, 1726), False, 'import os\n'), ((1750, 1772), 'os.path.exists', 'os.path.exists', (['path2s'], {}), '(path2s)\n', (1764, 1772), False, 'import os\n'), ((2050, 2101), 'os.path.join', 'os.path.join', (['self.lib_dir', "('temp_alpha_%s.npy' % i)"], {}), "(self.lib_dir, 'temp_alpha_%s.npy' % i)\n", (2062, 2101), False, 'import os\n'), ((2110, 2131), 'numpy.save', 'np.save', (['fname', 'alpha'], {}), '(fname, alpha)\n', (2117, 2131), True, 'import numpy as np\n'), ((2297, 2348), 'os.path.join', 'os.path.join', (['self.lib_dir', "('temp_alpha_%s.npy' % i)"], {}), "(self.lib_dir, 'temp_alpha_%s.npy' % i)\n", (2309, 2348), False, 'import os\n'), ((2364, 2385), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (2378, 2385), False, 'import os\n'), ((2400, 2414), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2407, 2414), True, 'import numpy as np\n'), ((2423, 2439), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (2432, 2439), False, 'import os\n'), ((4976, 5001), 'numpy.save', 'np.save', (['output_fname', '(-r)'], {}), '(output_fname, -r)\n', (4983, 5001), True, 'import numpy as np\n'), ((3001, 3014), 'numpy.sum', 'np.sum', (['(s * y)'], {}), '(s * y)\n', (3007, 3014), True, 'import numpy as np\n'), ((4566, 4594), 'numpy.max', 'np.max', (['[-1, k - self.L - 1]'], {}), '([-1, k - self.L - 1])\n', (4572, 4594), True, 'import numpy as np\n'), ((4788, 4811), 'numpy.max', 'np.max', (['[0, k - self.L]'], {}), '([0, k - self.L])\n', (4794, 4811), True, 'import numpy as np\n'), ((5537, 5567), 'numpy.random.set_state', 'np.random.set_state', (['rng_state'], {}), '(rng_state)\n', (5556, 5567), True, 'import numpy as np\n'), ((3154, 3167), 'numpy.sum', 'np.sum', (['(s * x)'], {}), '(s * x)\n', (3160, 3167), True, 'import numpy as np\n'), ((5692, 5715), 'numpy.max', 'np.max', (['[0, k - self.L]'], {}), '([0, k - self.L])\n', (5698, 5715), True, 'import numpy as np\n'), ((3054, 3067), 'numpy.sum', 'np.sum', (['(x * s)'], {}), '(x * s)\n', (3060, 3067), True, 'import numpy as np\n'), ((3126, 3140), 'numpy.sum', 'np.sum', (['(y * Hv)'], {}), '(y * Hv)\n', (3132, 3140), True, 'import numpy as np\n'), ((5619, 5642), 'numpy.max', 'np.max', (['[0, k - self.L]'], {}), '([0, k - self.L])\n', (5625, 5642), True, 'import numpy as np\n')]
|
# Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>
#
# This file is part of breast_cancer_classifier.
#
# breast_cancer_classifier is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# breast_cancer_classifier is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with breast_cancer_classifier. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
import cv2
import numpy as np
from src.constants import VIEWS
def shift_window_inside_image(start, end, image_axis_size, input_axis_size):
"""
If the window goes outside the bound of the image, then shifts it to fit inside the image.
"""
if start < 0:
start = 0
end = start + input_axis_size
elif end > image_axis_size:
end = image_axis_size
start = end - input_axis_size
return start, end
def zero_pad_and_align_window(image_axis_size, input_axis_size, max_crop_and_size_noise, bidirectional):
"""
Adds Zero padding to the image if cropped image is smaller than required window size.
"""
pad_width = input_axis_size - image_axis_size + max_crop_and_size_noise * (2 if bidirectional else 1)
assert (pad_width >= 0)
if bidirectional:
pad_front = int(pad_width / 2)
start = max_crop_and_size_noise
else:
start, pad_front = 0, 0
pad_back = pad_width - pad_front
end = start + input_axis_size
return start, end, pad_front, pad_back
def simple_resize(image_to_resize, size):
"""
Resizes image to the required size
"""
image_resized = cv2.resize(image_to_resize, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)
if len(image_to_resize.shape) == 3 and len(image_resized.shape) == 2 and image_to_resize.shape[2] == 1:
image_resized = np.expand_dims(image_resized, 2)
return image_resized
def crop_image(image, input_size, borders):
"""
Crops image to the required size using window location
"""
cropped_image = image[borders[0]: borders[1], borders[2]: borders[3]]
if ((borders[1] - borders[0]) != input_size[0]) or ((borders[3] - borders[2]) != input_size[1]):
cropped_image = simple_resize(cropped_image, input_size)
return cropped_image
def window_location_at_center_point(input_size, center_y, center_x):
"""
Calculates window location (top, bottom, left, right)
given center point and size of augmentation window
"""
half_height = input_size[0] // 2
half_width = input_size[1] // 2
top = center_y - half_height
bottom = center_y + input_size[0] - half_height
left = center_x - half_width
right = center_x + input_size[1] - half_width
return top, bottom, left, right
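# Hedged worked example (not part of the original module): for input_size=(224, 224)
# and a center at (y=100, x=200), window_location_at_center_point returns
# (top, bottom, left, right) = (-12, 212, 88, 312); the negative top is exactly the
# case that shift_window_inside_image later corrects.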
def sample_crop_best_center(image, input_size, random_number_generator, max_crop_noise, max_crop_size_noise,
                            best_center, view):
    """
    Crops using the best center point and ideal window size.
    Pads small images to have enough room for crop noise and size noise.
    Applies crop noise in location of the window borders.
    """
    max_crop_noise = np.array(max_crop_noise)
    crop_noise_multiplier = np.zeros(2, dtype=np.float32)

    if max_crop_noise.any():
        # there is no point in sampling crop_noise_multiplier if it's going to be multiplied by (0, 0)
        crop_noise_multiplier = random_number_generator.uniform(low=-1.0, high=1.0, size=2)

    center_y, center_x = best_center

    # get the window around the center point. The window might be outside of the image.
    top, bottom, left, right = window_location_at_center_point(input_size, center_y, center_x)

    pad_y_top, pad_y_bottom, pad_x_right = 0, 0, 0

    if VIEWS.is_cc(view):
        if image.shape[0] < input_size[0] + (max_crop_noise[0] + max_crop_size_noise) * 2:
            # Image is smaller than window size + noise margin in y direction.
            # CC view: pad at both top and bottom
            top, bottom, pad_y_top, pad_y_bottom = zero_pad_and_align_window(image.shape[0], input_size[0],
                                                                             max_crop_noise[0] + max_crop_size_noise,
                                                                             True)
    elif VIEWS.is_mlo(view):
        if image.shape[0] < input_size[0] + max_crop_noise[0] + max_crop_size_noise:
            # Image is smaller than window size + noise margin in y direction.
            # MLO view: only pad at the bottom
            top, bottom, _, pad_y_bottom = zero_pad_and_align_window(image.shape[0], input_size[0],
                                                                     max_crop_noise[0] + max_crop_size_noise, False)
    else:
        raise KeyError("Unknown view", view)

    if image.shape[1] < input_size[1] + max_crop_noise[1] + max_crop_size_noise:
        # Image is smaller than window size + noise margin in x direction.
        left, right, _, pad_x_right = zero_pad_and_align_window(image.shape[1], input_size[1],
                                                                max_crop_noise[1] + max_crop_size_noise, False)

    # Pad image if necessary by allocating new memory and copying contents over
    if pad_y_top > 0 or pad_y_bottom > 0 or pad_x_right > 0:
        new_zero_array = np.zeros((
            image.shape[0] + pad_y_top + pad_y_bottom,
            image.shape[1] + pad_x_right, image.shape[2]), dtype=image.dtype)
        new_zero_array[pad_y_top: image.shape[0] + pad_y_top, 0: image.shape[1]] = image
        image = new_zero_array

    # if window is drawn outside of image, shift it to be inside the image.
    top, bottom = shift_window_inside_image(top, bottom, image.shape[0], input_size[0])
    left, right = shift_window_inside_image(left, right, image.shape[1], input_size[1])

    if top == 0:
        # there is nowhere to shift upwards, we only apply noise downwards
        crop_noise_multiplier[0] = np.abs(crop_noise_multiplier[0])
    elif bottom == image.shape[0]:
        # there is nowhere to shift down, we only apply noise upwards
        crop_noise_multiplier[0] = -np.abs(crop_noise_multiplier[0])
    # else: we do nothing to the noise multiplier

    if left == 0:
        # there is nowhere to shift left, we only apply noise to move right
        crop_noise_multiplier[1] = np.abs(crop_noise_multiplier[1])
    elif right == image.shape[1]:
        # there is nowhere to shift right, we only apply noise to move left
        crop_noise_multiplier[1] = -np.abs(crop_noise_multiplier[1])
    # else: we do nothing to the noise multiplier

    borders = np.array((top, bottom, left, right), dtype=np.int32)

    # Calculate maximum amount of how much the window can move for cropping noise
    top_margin = top
    bottom_margin = image.shape[0] - bottom
    left_margin = left
    right_margin = image.shape[1] - right

    if crop_noise_multiplier[0] >= 0:
        vertical_margin = bottom_margin
    else:
        vertical_margin = top_margin

    if crop_noise_multiplier[1] >= 0:
        horizontal_margin = right_margin
    else:
        horizontal_margin = left_margin

    if vertical_margin < max_crop_noise[0]:
        max_crop_noise[0] = vertical_margin

    if horizontal_margin < max_crop_noise[1]:
        max_crop_noise[1] = horizontal_margin

    crop_noise = np.round(max_crop_noise * crop_noise_multiplier)
    crop_noise = np.array((crop_noise[0], crop_noise[0], crop_noise[1], crop_noise[1]), dtype=np.int32)
    borders = borders + crop_noise

    # this is to make sure that the cropping window isn't outside of the image
    assert (borders[0] >= 0) and (borders[1] <= image.shape[0]) and (borders[2] >= 0) and (borders[3] <= image.shape[
        1]), "Centre of the crop area is sampled such that the borders are outside of the image. Borders: " + str(
        borders) + ', image shape: ' + str(image.shape)

    # return the padded image and cropping window information
    return image, borders
def sample_crop(image, input_size, borders, random_number_generator, max_crop_size_noise):
"""
Applies size noise of the window borders.
"""
size_noise_multiplier = random_number_generator.uniform(low=-1.0, high=1.0, size=4)
top_margin = borders[0]
bottom_margin = image.shape[0] - borders[1]
left_margin = borders[2]
right_margin = image.shape[1] - borders[3]
max_crop_size_noise = min(max_crop_size_noise, top_margin, bottom_margin, left_margin, right_margin)
if input_size[0] >= input_size[1]:
max_crop_size_vertical_noise = max_crop_size_noise
max_crop_size_horizontal_noise = np.round(max_crop_size_noise * (input_size[1] / input_size[0]))
elif input_size[0] < input_size[1]:
max_crop_size_vertical_noise = np.round(max_crop_size_noise * (input_size[0] / input_size[1]))
max_crop_size_horizontal_noise = max_crop_size_noise
else:
raise RuntimeError()
max_crop_size_noise = np.array((max_crop_size_vertical_noise, max_crop_size_vertical_noise,
max_crop_size_horizontal_noise, max_crop_size_horizontal_noise),
dtype=np.int32)
size_noise = np.round(max_crop_size_noise * size_noise_multiplier)
size_noise = np.array(size_noise, dtype=np.int32)
borders = borders + size_noise
# this is to make sure that the cropping window isn't outside of the image
assert (borders[0] >= 0) and (borders[1] <= image.shape[0]) and (borders[2] >= 0) and (borders[3] <= image.shape[
1]), "Center of the crop area is sampled such that the borders are outside of the image. Borders: " + str(
borders) + ', image shape: ' + str(image.shape)
# Sanity check. make sure that the top is above the bottom
assert borders[1] > borders[0], "Bottom above the top. Top: " + str(borders[0]) + ', bottom: ' + str(borders[1])
# Sanity check. make sure that the left is left to the right
assert borders[3] > borders[2], "Left on the right. Left: " + str(borders[2]) + ', right: ' + str(borders[3])
return borders
def random_augmentation_best_center(image, input_size, random_number_generator, max_crop_noise=(0, 0),
                                    max_crop_size_noise=0, auxiliary_image=None,
                                    best_center=None, view=""):
    """
    Crops augmentation window from a given image
    by applying noise in location and size of the window.
    """
    joint_image = np.expand_dims(image, 2)

    if auxiliary_image is not None:
        joint_image = np.concatenate([joint_image, auxiliary_image], axis=2)

    joint_image, borders = sample_crop_best_center(joint_image, input_size, random_number_generator, max_crop_noise,
                                                   max_crop_size_noise, best_center, view)
    borders = sample_crop(joint_image, input_size, borders, random_number_generator, max_crop_size_noise)
    sampled_joint_image = crop_image(joint_image, input_size, borders)

    if auxiliary_image is None:
        return sampled_joint_image[:, :, 0], None
    else:
        return sampled_joint_image[:, :, 0], sampled_joint_image[:, :, 1:]
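# Hedged usage sketch (not part of the original module): crop one augmentation window
# from a synthetic array. The view string "L-CC" is an assumption about the constants
# defined in src.constants.VIEWS.
#
#   rng = np.random.RandomState(0)
#   img = np.random.rand(2600, 1600).astype(np.float32)
#   crop, _ = random_augmentation_best_center(
#       img, input_size=(2116, 1339), random_number_generator=rng,
#       max_crop_noise=(100, 100), max_crop_size_noise=100,
#       best_center=(1300, 800), view="L-CC")
#   # crop.shape == (2116, 1339)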
|
[
"numpy.abs",
"src.constants.VIEWS.is_cc",
"numpy.array",
"numpy.zeros",
"src.constants.VIEWS.is_mlo",
"numpy.expand_dims",
"numpy.concatenate",
"cv2.resize",
"numpy.round"
] |
[((2308, 2386), 'cv2.resize', 'cv2.resize', (['image_to_resize', '(size[1], size[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image_to_resize, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)\n', (2318, 2386), False, 'import cv2\n'), ((3833, 3857), 'numpy.array', 'np.array', (['max_crop_noise'], {}), '(max_crop_noise)\n', (3841, 3857), True, 'import numpy as np\n'), ((3886, 3915), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float32'}), '(2, dtype=np.float32)\n', (3894, 3915), True, 'import numpy as np\n'), ((4423, 4440), 'src.constants.VIEWS.is_cc', 'VIEWS.is_cc', (['view'], {}), '(view)\n', (4434, 4440), False, 'from src.constants import VIEWS\n'), ((7323, 7375), 'numpy.array', 'np.array', (['(top, bottom, left, right)'], {'dtype': 'np.int32'}), '((top, bottom, left, right), dtype=np.int32)\n', (7331, 7375), True, 'import numpy as np\n'), ((8045, 8093), 'numpy.round', 'np.round', (['(max_crop_noise * crop_noise_multiplier)'], {}), '(max_crop_noise * crop_noise_multiplier)\n', (8053, 8093), True, 'import numpy as np\n'), ((8111, 8201), 'numpy.array', 'np.array', (['(crop_noise[0], crop_noise[0], crop_noise[1], crop_noise[1])'], {'dtype': 'np.int32'}), '((crop_noise[0], crop_noise[0], crop_noise[1], crop_noise[1]),\n dtype=np.int32)\n', (8119, 8201), True, 'import numpy as np\n'), ((9667, 9826), 'numpy.array', 'np.array', (['(max_crop_size_vertical_noise, max_crop_size_vertical_noise,\n max_crop_size_horizontal_noise, max_crop_size_horizontal_noise)'], {'dtype': 'np.int32'}), '((max_crop_size_vertical_noise, max_crop_size_vertical_noise,\n max_crop_size_horizontal_noise, max_crop_size_horizontal_noise), dtype=\n np.int32)\n', (9675, 9826), True, 'import numpy as np\n'), ((9906, 9959), 'numpy.round', 'np.round', (['(max_crop_size_noise * size_noise_multiplier)'], {}), '(max_crop_size_noise * size_noise_multiplier)\n', (9914, 9959), True, 'import numpy as np\n'), ((9977, 10013), 'numpy.array', 'np.array', (['size_noise'], {'dtype': 'np.int32'}), '(size_noise, dtype=np.int32)\n', (9985, 10013), True, 'import numpy as np\n'), ((11191, 11215), 'numpy.expand_dims', 'np.expand_dims', (['image', '(2)'], {}), '(image, 2)\n', (11205, 11215), True, 'import numpy as np\n'), ((2519, 2551), 'numpy.expand_dims', 'np.expand_dims', (['image_resized', '(2)'], {}), '(image_resized, 2)\n', (2533, 2551), True, 'import numpy as np\n'), ((4980, 4998), 'src.constants.VIEWS.is_mlo', 'VIEWS.is_mlo', (['view'], {}), '(view)\n', (4992, 4998), False, 'from src.constants import VIEWS\n'), ((6014, 6136), 'numpy.zeros', 'np.zeros', (['(image.shape[0] + pad_y_top + pad_y_bottom, image.shape[1] + pad_x_right,\n image.shape[2])'], {'dtype': 'image.dtype'}), '((image.shape[0] + pad_y_top + pad_y_bottom, image.shape[1] +\n pad_x_right, image.shape[2]), dtype=image.dtype)\n', (6022, 6136), True, 'import numpy as np\n'), ((6659, 6691), 'numpy.abs', 'np.abs', (['crop_noise_multiplier[0]'], {}), '(crop_noise_multiplier[0])\n', (6665, 6691), True, 'import numpy as np\n'), ((7046, 7078), 'numpy.abs', 'np.abs', (['crop_noise_multiplier[1]'], {}), '(crop_noise_multiplier[1])\n', (7052, 7078), True, 'import numpy as np\n'), ((9333, 9396), 'numpy.round', 'np.round', (['(max_crop_size_noise * (input_size[1] / input_size[0]))'], {}), '(max_crop_size_noise * (input_size[1] / input_size[0]))\n', (9341, 9396), True, 'import numpy as np\n'), ((11274, 11328), 'numpy.concatenate', 'np.concatenate', (['[joint_image, auxiliary_image]'], {'axis': '(2)'}), '([joint_image, auxiliary_image], axis=2)\n', (11288, 11328), True, 'import numpy 
as np\n'), ((9476, 9539), 'numpy.round', 'np.round', (['(max_crop_size_noise * (input_size[0] / input_size[1]))'], {}), '(max_crop_size_noise * (input_size[0] / input_size[1]))\n', (9484, 9539), True, 'import numpy as np\n'), ((6833, 6865), 'numpy.abs', 'np.abs', (['crop_noise_multiplier[0]'], {}), '(crop_noise_multiplier[0])\n', (6839, 6865), True, 'import numpy as np\n'), ((7225, 7257), 'numpy.abs', 'np.abs', (['crop_noise_multiplier[1]'], {}), '(crop_noise_multiplier[1])\n', (7231, 7257), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.nddata import CCDData
from astropy.nddata import Cutout2D
from astropy.stats import sigma_clipped_stats
from astropy.wcs.utils import proj_plane_pixel_scales
from .plot import plot_image
from .instrument_info import get_zp
from .utils import get_wcs_rotation
from astropy.visualization import simple_norm, make_lupton_rgb
from .math import Maskellipse,polynomialfit,cross_match
from photutils.segmentation import deblend_sources
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
from photutils import detect_threshold
from photutils import detect_sources
from photutils import source_properties
from astropy.table import Table, Column, join, join_skycoord
from astropy.wcs import WCS
from astropy.nddata import NDData
from photutils.psf import extract_stars
import matplotlib.colors as colors
from photutils import EPSFBuilder
__all__ = ['image', 'image_atlas']
class image(object):
    '''
    A single image object.

    Functions
    ---------
    * Read from fits file use CCDData.
    * get_size : Get the image size.
    * plot : Plot the image.
    * sigma_clipped_stats : Calculate the basic statistics of the image.
    * set_data : Load from numpy array.
    * set_mask : Set image mask.
    * set_pixel_scales : Set the pixel scales along two axes.
    * set_zero_point : Set magnitude zero point.
    '''
    def __init__(self, filename=None, hdu=0, unit=None, zero_point=None,
                 pixel_scales=None, wcs_rotation=None, mask=None, verbose=True):
        '''
        Parameters
        ----------
        filename (optional) : string
            FITS file name of the image.
        hdu : int (default: 0)
            The number of extension to load from the FITS file.
        unit (optional) : string
            Unit of the image flux for CCDData.
        zero_point (optional) : float
            Magnitude zero point.
        pixel_scales (optional) : tuple
            Pixel scales along the first and second directions, units: arcsec.
        wcs_rotation (optional) : float
            WCS rotation, east of north, units: radian.
        mask (optional) : 2D bool array
            The image mask.
        verbose : bool (default: True)
            Print out auxiliary data.
        '''
        if filename is None:
            self.data = None
        else:
            self.data = CCDData.read(filename, hdu=hdu, unit=unit, mask=mask)
            if self.data.wcs and (pixel_scales is None):
                pixel_scales = proj_plane_pixel_scales(self.data.wcs) * u.degree.to('arcsec')

        self.zero_point = zero_point

        if pixel_scales is None:
            self.pixel_scales = None
        else:
            self.pixel_scales = (pixel_scales[0] * u.arcsec, pixel_scales[1] * u.arcsec)

        if self.data.wcs and (wcs_rotation is None):
            self.wcs_rotation = get_wcs_rotation(self.data.wcs)
        elif wcs_rotation is not None:
            self.wcs_rotation = wcs_rotation * u.radian
        else:
            self.wcs_rotation = None

        self.sources_catalog = None
        self.sigma_image = None
        self.sources_skycord = None
        self.ss_data = None
        self.PSF = None

    def get_size(self, units='pixel'):
        '''
        Get the size of the image.

        Parameters
        ----------
        units : string
            Units of the size (pixel or angular units).

        Returns
        -------
        x, y : float
            Size along X and Y axes.
        '''
        nrow, ncol = self.data.shape
        if units == 'pixel':
            x = ncol
            y = nrow
        else:
            x = ncol * self.pixel_scales[0].to(units).value
            y = nrow * self.pixel_scales[1].to(units).value
        return (x, y)
    def get_data_info(self):
        '''
        Data information to generate model image.

        Returns
        -------
        d : dict
            shape : (ny, nx)
                Image array shape.
            pixel_scale : (pixelscale_x, pixelscale_y), default units: arcsec
                Pixel scales.
            wcs_rotation : angle, default units: radian
                WCS rotation, east of north.
        '''
        d = dict(shape=self.data.shape,
                 pixel_scale=self.pixel_scales,
                 wcs_rotation=self.wcs_rotation)
        return d

    def sigma_clipped_stats(self, **kwargs):
        '''
        Run astropy.stats.sigma_clipped_stats to get the basic statistics of
        the image.

        Parameters
        ----------
        All of the parameters go to astropy.stats.sigma_clipped_stats().

        Returns
        -------
        mean, median, stddev : float
            The mean, median, and standard deviation of the sigma-clipped data.
        '''
        return sigma_clipped_stats(self.data.data, mask=self.data.mask, **kwargs)
    def plot(self, stretch='asinh', units='arcsec', vmin=None, vmax=None,
             a=None, ax=None, plain=False, **kwargs):
        '''
        Plot an image.

        Parameters
        ----------
        stretch : string (default: 'asinh')
            Choice of stretch: asinh, linear, sqrt, log.
        units : string (default: 'arcsec')
            Units of pixel scale.
        vmin (optional) : float
            Minimal value of imshow.
        vmax (optional) : float
            Maximal value of imshow.
        a (optional) : float
            Scale factor of some stretch function.
        ax (optional) : matplotlib.Axis
            Axis to plot the image.
        plain : bool (default: False)
            If False, tune the image.
        **kwargs : Additional parameters goes into plt.imshow()

        Returns
        -------
        ax : matplotlib.Axis
            Axis to plot the image.
        '''
        assert self.data is not None, 'Set data first!'
        ax = plot_image(self.data, self.pixel_scales, stretch=stretch,
                        units=units, vmin=vmin, vmax=vmax, a=a, ax=ax,
                        plain=plain, **kwargs)
        if plain is False:
            ax.set_xlabel(r'$\Delta X$ ({0})'.format(units), fontsize=24)
            ax.set_ylabel(r'$\Delta Y$ ({0})'.format(units), fontsize=24)
        return ax
def plot_direction(self, ax, xy=(0, 0), len_E=None, len_N=None, color='k', fontsize=20,
linewidth=2, frac_len=0.1, units='arcsec', backextend=0.05):
'''
Plot the direction arrow. Only applied to plots using WCS.
Parameters
----------
ax : Axis
Axis to plot the direction.
xy : (x, y)
Coordinate of the origin of the arrows.
        len_E, len_N (optional) : float
            Lengths of the east and north arrows, units: pixel.
units: string (default: arcsec)
Units of xy.
'''
xlim = ax.get_xlim()
len_total = np.abs(xlim[1] - xlim[0])
pixelscale = self.pixel_scales[0].to('degree').value
if len_E is None:
len_E = len_total * frac_len / pixelscale
if len_N is None:
len_N = len_total * frac_len / pixelscale
wcs = self.data.wcs
header = wcs.to_header()
d_ra = len_E * pixelscale
d_dec = len_N * pixelscale
ra = [header['CRVAL1'], header['CRVAL1']+d_ra, header['CRVAL1']]
dec = [header['CRVAL2'], header['CRVAL2'], header['CRVAL2']+d_dec]
ra_pix, dec_pix = wcs.all_world2pix(ra, dec, 1)
d_arrow1 = [ra_pix[1]-ra_pix[0], dec_pix[1]-dec_pix[0]]
d_arrow2 = [ra_pix[2]-ra_pix[0], dec_pix[2]-dec_pix[0]]
l_arrow1 = np.sqrt(d_arrow1[0]**2 + d_arrow1[1]**2)
l_arrow2 = np.sqrt(d_arrow2[0]**2 + d_arrow2[1]**2)
d_arrow1 = np.array(d_arrow1) / l_arrow1 * len_E * pixelscale
d_arrow2 = np.array(d_arrow2) / l_arrow2 * len_N * pixelscale
def sign_2_align(sign):
'''
Determine the alignment of the text.
'''
if sign[0] < 0:
ha = 'right'
else:
ha = 'left'
if sign[1] < 0:
va = 'top'
else:
va = 'bottom'
return ha, va
ha1, va1 = sign_2_align(np.sign(d_arrow1))
ha2, va2 = sign_2_align(np.sign(d_arrow2))
xy_e = (xy[0] - d_arrow1[0] * backextend, xy[1] - d_arrow1[1] * backextend)
ax.annotate('E', xy=xy_e, xycoords='data', fontsize=fontsize,
xytext=(d_arrow1[0]+xy[0], d_arrow1[1]+xy[1]), color=color,
arrowprops=dict(color=color, arrowstyle="<-", lw=linewidth),
ha=ha1, va=va1)
xy_n = (xy[0] - d_arrow2[0] * backextend, xy[1] - d_arrow2[1] * backextend)
ax.annotate('N', xy=xy_n, xycoords='data', fontsize=fontsize,
xytext=(d_arrow2[0]+xy[0], d_arrow2[1]+xy[1]), color=color,
arrowprops=dict(color=color, arrowstyle="<-", lw=linewidth),
ha=ha2, va=va2)
def set_data(self, data, unit):
'''
Parameters
----------
data : 2D array
Image data.
unit : string
Unit for CCDData.
'''
self.data = CCDData(data, unit=unit)
def source_detection_individual(self, psfFWHM, nsigma=3.0, sc_key=''):
'''
Parameters
----------
psfFWHM : float
FWHM of the imaging point spread function
nsigma : float
source detection threshold
'''
data = np.array(self.data.copy())
psfFWHMpix = psfFWHM / self.pixel_scales[0].value
thresholder = detect_threshold(data, nsigma=nsigma)
sigma = psfFWHMpix * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sigma, x_size=5, y_size=5)
kernel.normalize()
segm = detect_sources(data, thresholder, npixels=5, filter_kernel=kernel)
props = source_properties(data, segm)
tab = Table(props.to_table())
self.sources_catalog = tab
srcPstradec = self.data.wcs.all_pix2world(tab['xcentroid'], tab['ycentroid'],1)
sc = SkyCoord(srcPstradec[0], srcPstradec[1], unit='deg')
sctab = Table([sc,np.arange(len(sc))],names=['sc','sloop_{0}'.format(sc_key)])
self.sources_skycord = sctab
def make_mask(self,sources=None,magnification=3.):
'''
        Make a source mask for the extension.
        Parameters
        ----------
        sources (optional) : Table
            Table of sources to be masked (e.g. from photutils source detection).
            If None, the image's own source catalog is used.
        magnification : float
            Expansion factor applied to the source ellipses when generating the mask.
'''
mask=np.zeros_like(self.data, dtype=bool)
mask[np.isnan(self.data)] = True
mask[np.isinf(self.data)] = True
if sources is None:
sources = self.sources_catalog
for loop in range(len(sources)):
position = (sources['xcentroid'][loop],sources['ycentroid'][loop])
a = sources['semimajor_axis_sigma'][loop]
b = sources['semiminor_axis_sigma'][loop]
theta = sources['orientation'][loop]*180./np.pi
mask=Maskellipse(mask,position,magnification*a,(1-b/a),theta)
self.data.mask = mask
if self.ss_data is not None:
self.ss_data.mask = mask
def set_mask(self, mask):
'''
Set mask for the extension.
Parameters
----------
mask : 2D array
The mask.
'''
assert self.data.shape == mask.shape, 'Mask shape incorrect!'
self.data.mask = mask
        if self.ss_data is not None:
self.ss_data.mask = mask
def set_pixel_scales(self, pixel_scales):
'''
Parameters
----------
pixel_scales (optional) : tuple
Pixel scales along the first and second directions, units: arcsec.
'''
self.pixel_scales = (pixel_scales[0]*u.arcsec, pixel_scales[1]*u.arcsec)
def set_zero_point(self, zp):
'''
Set magnitude zero point.
'''
self.zero_point = zp
def sky_subtraction(self, order=3 , filepath = None):
'''
Do polynomial-fitting sky subtraction
Parameters
----------
order (optional) : int
order of the polynomial
'''
data = np.array(self.data.copy())
maskplus = self.data.mask.copy()
backR=polynomialfit(data,maskplus.astype(bool),order=order)
background=backR['bkg']
self.ss_data = CCDData(data-background, unit=self.data.unit)
self.ss_data.mask = maskplus
if filepath is not None:
hdu_temp = fits.PrimaryHDU(data-background)
hdu_temp.writeto(filepath, overwrite=True)
def read_ss_image(self,filepath):
'''
read sky subtracted image from "filepath"
'''
hdu = fits.open(filepath)
self.ss_data = CCDData(hdu[0].data, unit=self.data.unit)
self.ss_data.mask = self.data.mask.copy()
def cal_sigma_image(self,filepath=None):
'''
        Construct the sigma map following the same procedure as Galfit (quadrature sum of the sigma at each pixel from the source and the sky background).
Note
----------
'GAIN' keyword must be available in the image header and ADU x GAIN = electron
Parameters
----------
        filepath (optional) : string
            Path to save the sigma map; if None, the map is not written to disk.
'''
GAIN = self.data.header['CELL.GAIN']
if self.ss_data is None:
            raise ValueError("Please do sky subtraction first!")
data = np.array(self.ss_data.copy())
mask = self.ss_data.mask.copy()
bkgrms = np.nanstd(data[~mask.astype(bool)])
data[~mask.astype(bool)] = 0.
sigmap = np.sqrt(data/GAIN+bkgrms**2)
self.sigma_image = sigmap
if filepath is not None:
hdu_temp = fits.PrimaryHDU(sigmap)
hdu_temp.writeto(filepath, overwrite=True)
def read_sigmap(self, filepath):
'''
read sigma image from "filepath"
'''
hdu = fits.open(filepath)
self.sigma_image = hdu[0].data
def read_PSF(self, filepath):
'''
read PSF image from "filepath"
'''
hdu = fits.open(filepath)
self.PSF = hdu[0].data
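# Typical per-image workflow with the class above (a sketch, not part of the
# original file): detect sources, mask them, fit and subtract the sky, then build
# the sigma map. cal_sigma_image() requires sky_subtraction() to have been run
# first, and it reads the gain from the 'CELL.GAIN' header keyword. The psfFWHM
# value below is illustrative only.
#   img.source_detection_individual(psfFWHM=1.5, nsigma=3.0)
#   img.make_mask(magnification=3.0)
#   img.sky_subtraction(order=3)
#   img.cal_sigma_image()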
class image_atlas(object):
'''
Many images.
'''
def __init__(self, image_list=None, zp_list=None, band_list=None, psfFWHM_list=None):
'''
Parameters
----------
image_list (optional) : List
List of `image`.
zp_list (optional) : List
List of magnitude zeropoint.
band_list (optional) : List
List of band name. Check `instrument_info` for band names.
'''
if image_list is None:
self.image_list = []
else:
self.image_list = image_list
if band_list is None:
self.band_list = []
else:
self.band_list = band_list
if (zp_list is None) and (band_list is not None):
zp_list = []
for b in band_list:
zp_list.append(get_zp(b))
for loop, img in enumerate(self.image_list):
img.set_zero_point(zp_list[loop])
if psfFWHM_list is None:
self.psfFWHM_list = []
else:
self.psfFWHM_list = psfFWHM_list
self.__length = len(image_list)
self.common_catalog = None
def __getitem__(self, key):
'''
Get the image data using the filter name or number index.
'''
if type(key) is str:
idx = self.band_list.index(key)
elif type(key) is int:
idx = key
return self.image_list[idx]
def __len__(self):
'''
Get the length of the data list.
'''
return self.__length
def source_detection(self,nsigma=3.0):
'''
Do multi-band source detection
Parameters
----------
        nsigma : float, or an array with the same size as the image_atlas
            Source detection threshold.
'''
if type(nsigma) == float:
nsigma = nsigma * np.ones(self.__length,dtype=float)
for loop in range(self.__length):
self.image_list[loop].source_detection_individual(self.psfFWHM_list[loop],nsigma=nsigma[loop],sc_key=loop+1)
def make_common_catalog(self,CM_separation=2.5,magnification=3.0,applylist=None):
'''
        Build a common source catalog by cross-matching the single-band catalogs.
        Parameters
        ----------
        CM_separation : float
            Angular separation used for the sky-coordinate cross-matching, unit: deg.
        magnification : float, or an array with the same size as the image_atlas
            Magnification factor used to generate the mask for each image.
applylist : [list of index]
None for all images
'''
if type(magnification) == float:
magnification = magnification * np.ones(self.__length,dtype=float)
if applylist is None:
applylist = np.arange(self.__length)
cats = []
for loop in applylist:
cats.append(self.image_list[loop].sources_skycord)
        comc = cross_match(cats, angular_sep=CM_separation)
lencc = len(comc)
master_a = np.zeros(lencc, dtype = float)
master_b = np.zeros(lencc, dtype = float)
for loop in range(len(comc)):
a = []
b = []
for loop2 in applylist:
a.append(self.image_list[loop2].sources_catalog['semimajor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]]
*magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
b.append(self.image_list[loop2].sources_catalog['semiminor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]]
*magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
master_a[loop] = np.max(np.array(a))
master_b[loop] = np.max(np.array(b))
comc.add_column(Column(master_a, name = 'master_a'))
comc.add_column(Column(master_b, name = 'master_b'))
self.common_catalog = comc
def sky_subtraction(self,order=3,filepaths=None):
'''
        Do multi-band sky subtraction
Parameters
----------
order (optional) : int
order of the polynomial
filepaths : filepath to store the sky subtracted images
'''
if type(order) == int:
order = order * np.ones(self.__length,dtype=int)
for loop in range(self.__length):
if filepaths is None:
self.image_list[loop].sky_subtraction(order[loop])
else:
self.image_list[loop].sky_subtraction(order[loop],filepath=filepaths[loop])
def master_mask(self, magnification=3.0, applylist=None):
'''
Do multi-band source masking
Parameters
----------
        magnification : float, or an array with the same size as the image_atlas
            Magnification factor used to generate the mask for each image.
applylist : [list of index]
None for all images
'''
if type(magnification) == float:
magnification = magnification * np.ones(self.__length,dtype=float)
if applylist is None:
applylist = np.arange(self.__length)
comc = self.common_catalog.copy()
commonsourcelist = []
for loop2 in applylist:
newsc = self.image_list[loop2].sources_catalog.copy()
for loop in range(len(comc)):
self.image_list[loop2].sources_catalog['semimajor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]] = comc['master_a'][loop]/(magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
self.image_list[loop2].sources_catalog['semiminor_axis_sigma'][comc['sloop_{0}'.format(loop2+1)][loop]] = comc['master_b'][loop]/(magnification[loop2]*self.image_list[loop2].pixel_scales[0].value)
indexes = np.delete(np.arange(len(self.image_list[loop2].sources_catalog)), comc['sloop_{0}'.format(loop2+1)])
newsc.remove_rows(indexes)
commonsourcelist.append(newsc)
for loop2 in range(self.__length):
self.image_list[loop2].make_mask(sources=commonsourcelist[loop2],magnification=magnification[loop2])
def generate_PSFs(self, equivalent_radius=2., size = 20.,oversampling=1, plot=None, filepaths=None):
'''
        Generate effective point spread functions (ePSFs) for each image.
Parameters
----------
        equivalent_radius : float, unit: arcsec
            Radius criterion used to identify stars.
        size : float, unit: pixel
            Size of the cutout box used to extract stars.
        oversampling : int
            Oversampling factor of the ePSF.
        plot : None or list
            None to skip plotting; a list of band indices like [1, 2, 3] to plot an RGB image.
        filepaths : list
            File paths to store the ePSFs.
'''
stars = self.common_catalog.copy()
remolist = []
for loop in range(len(stars)):
for loop2 in range(self.__length):
a = (self.image_list[loop2].sources_catalog['equivalent_radius'][stars['sloop_{0}'.format(loop2+1)][loop]])*self.image_list[loop2].pixel_scales[0].value
if (a > equivalent_radius):
remolist.append(loop)
break
stars.remove_rows(remolist)
star_images = []
PSFs = []
for loop2 in range(self.__length):
newsc = self.image_list[loop2].sources_catalog.copy()
indexes = np.delete(np.arange(len(self.image_list[loop2].sources_catalog)), stars['sloop_{0}'.format(loop2+1)])
newsc.remove_rows(indexes)
stars_tbl = Table()
stars_tbl['x']=np.array(newsc['maxval_xpos'])
stars_tbl['y']=np.array(newsc['maxval_ypos'])
nddata = NDData(data=np.array(self.image_list[loop2].ss_data))
Tstar = extract_stars(nddata, stars_tbl, size=size)
epsf_builder = EPSFBuilder(oversampling=oversampling, maxiters=15,progress_bar=False)
epsf, fitted_stars = epsf_builder(Tstar)
self.image_list[loop2].PSF = epsf.data
if filepaths is not None:
hdu = fits.PrimaryHDU(epsf.data.astype('float32'))
After = fits.HDUList([hdu])
After.writeto(filepaths[loop2],overwrite= True)
if plot is not None:
star_images.append(Tstar)
PSFs.append(epsf.data)
if plot is not None:
tlens = len(stars)
if (((tlens//5)+1)*5-tlens) < (((tlens//4)+1)*4-tlens):
ncols = 5
nrows = (tlens//5)+1
else:
ncols = 4
nrows = (tlens//4)+1
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(3*ncols, 3*nrows),squeeze=True)
ax = ax.ravel()
for i in range(tlens):
if len(plot) > 2:
star_b = star_images[plot[0]][i].data*100./np.sum(star_images[plot[0]][i].data)
star_g = star_images[plot[1]][i].data*100./np.sum(star_images[plot[1]][i].data)
star_r = star_images[plot[2]][i].data*100./np.sum(star_images[plot[2]][i].data)
norm = simple_norm(star_b, 'log', percent=99.)
image = make_lupton_rgb(star_r, star_g, star_b, Q=10)
else:
image = star_images[plot[0]][i].data
norm = simple_norm(image, 'log', percent=99.)
ax[i].imshow(image,norm=norm ,origin='lower')
plt.show()
fig=plt.figure(figsize=(10,10))
if len(plot) > 2:
star_b = PSFs[plot[0]]*100./np.sum(PSFs[plot[0]])
star_g = PSFs[plot[1]]*100./np.sum(PSFs[plot[1]])
star_r = PSFs[plot[2]]*100./np.sum(PSFs[plot[2]])
norm = simple_norm(star_b, 'log', percent=99.)
image = make_lupton_rgb(star_r, star_g, star_b, Q=10)
else:
image = PSFs[plot[0]]
norm = simple_norm(image, 'log', percent=99.)
plt.imshow(image,norm=norm ,origin='lower')
plt.show()
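if __name__ == "__main__":
    # Minimal multi-band sketch (not part of the original module): the file names,
    # band labels, zero points and PSF FWHMs below are illustrative assumptions.
    # The order follows the methods defined above: per-band detection, cross-matched
    # common catalog, master masks, sky subtraction, then ePSF construction.
    imgs = [image("band1.fits", unit="adu"), image("band2.fits", unit="adu")]
    atlas = image_atlas(image_list=imgs, zp_list=[25.0, 24.5],
                        band_list=["b1", "b2"], psfFWHM_list=[1.2, 1.5])
    atlas.source_detection(nsigma=3.0)
    atlas.make_common_catalog(CM_separation=2.5, magnification=3.0)
    atlas.master_mask(magnification=3.0)
    atlas.sky_subtraction(order=3)
    atlas.generate_PSFs(equivalent_radius=2.0, size=20.0)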
|
[
"numpy.sqrt",
"astropy.table.Table",
"numpy.array",
"photutils.source_properties",
"photutils.psf.extract_stars",
"astropy.io.fits.open",
"numpy.arange",
"matplotlib.pyplot.imshow",
"photutils.EPSFBuilder",
"astropy.units.degree.to",
"astropy.visualization.make_lupton_rgb",
"astropy.nddata.CCDData",
"numpy.isinf",
"numpy.abs",
"numpy.ones",
"astropy.io.fits.PrimaryHDU",
"photutils.detect_threshold",
"astropy.table.Column",
"numpy.isnan",
"numpy.sign",
"astropy.stats.sigma_clipped_stats",
"matplotlib.pyplot.show",
"astropy.wcs.utils.proj_plane_pixel_scales",
"photutils.detect_sources",
"astropy.io.fits.HDUList",
"astropy.visualization.simple_norm",
"astropy.coordinates.SkyCoord",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"astropy.convolution.Gaussian2DKernel",
"astropy.nddata.CCDData.read",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] |
[((5528, 5594), 'astropy.stats.sigma_clipped_stats', 'sigma_clipped_stats', (['self.data.data'], {'mask': 'self.data.mask'}), '(self.data.data, mask=self.data.mask, **kwargs)\n', (5547, 5594), False, 'from astropy.stats import sigma_clipped_stats\n'), ((7580, 7605), 'numpy.abs', 'np.abs', (['(xlim[1] - xlim[0])'], {}), '(xlim[1] - xlim[0])\n', (7586, 7605), True, 'import numpy as np\n'), ((8309, 8353), 'numpy.sqrt', 'np.sqrt', (['(d_arrow1[0] ** 2 + d_arrow1[1] ** 2)'], {}), '(d_arrow1[0] ** 2 + d_arrow1[1] ** 2)\n', (8316, 8353), True, 'import numpy as np\n'), ((8369, 8413), 'numpy.sqrt', 'np.sqrt', (['(d_arrow2[0] ** 2 + d_arrow2[1] ** 2)'], {}), '(d_arrow2[0] ** 2 + d_arrow2[1] ** 2)\n', (8376, 8413), True, 'import numpy as np\n'), ((9920, 9944), 'astropy.nddata.CCDData', 'CCDData', (['data'], {'unit': 'unit'}), '(data, unit=unit)\n', (9927, 9944), False, 'from astropy.nddata import CCDData\n'), ((10346, 10383), 'photutils.detect_threshold', 'detect_threshold', (['data'], {'nsigma': 'nsigma'}), '(data, nsigma=nsigma)\n', (10362, 10383), False, 'from photutils import detect_threshold\n'), ((10453, 10496), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', (['sigma'], {'x_size': '(5)', 'y_size': '(5)'}), '(sigma, x_size=5, y_size=5)\n', (10469, 10496), False, 'from astropy.convolution import Gaussian2DKernel\n'), ((10539, 10605), 'photutils.detect_sources', 'detect_sources', (['data', 'thresholder'], {'npixels': '(5)', 'filter_kernel': 'kernel'}), '(data, thresholder, npixels=5, filter_kernel=kernel)\n', (10553, 10605), False, 'from photutils import detect_sources\n'), ((10622, 10651), 'photutils.source_properties', 'source_properties', (['data', 'segm'], {}), '(data, segm)\n', (10639, 10651), False, 'from photutils import source_properties\n'), ((10826, 10878), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['srcPstradec[0]', 'srcPstradec[1]'], {'unit': '"""deg"""'}), "(srcPstradec[0], srcPstradec[1], unit='deg')\n", (10834, 10878), False, 'from astropy.coordinates import SkyCoord\n'), ((11380, 11416), 'numpy.zeros_like', 'np.zeros_like', (['self.data'], {'dtype': 'bool'}), '(self.data, dtype=bool)\n', (11393, 11416), True, 'import numpy as np\n'), ((13258, 13305), 'astropy.nddata.CCDData', 'CCDData', (['(data - background)'], {'unit': 'self.data.unit'}), '(data - background, unit=self.data.unit)\n', (13265, 13305), False, 'from astropy.nddata import CCDData\n'), ((13612, 13631), 'astropy.io.fits.open', 'fits.open', (['filepath'], {}), '(filepath)\n', (13621, 13631), False, 'from astropy.io import fits\n'), ((13655, 13696), 'astropy.nddata.CCDData', 'CCDData', (['hdu[0].data'], {'unit': 'self.data.unit'}), '(hdu[0].data, unit=self.data.unit)\n', (13662, 13696), False, 'from astropy.nddata import CCDData\n'), ((14500, 14534), 'numpy.sqrt', 'np.sqrt', (['(data / GAIN + bkgrms ** 2)'], {}), '(data / GAIN + bkgrms ** 2)\n', (14507, 14534), True, 'import numpy as np\n'), ((14815, 14834), 'astropy.io.fits.open', 'fits.open', (['filepath'], {}), '(filepath)\n', (14824, 14834), False, 'from astropy.io import fits\n'), ((14986, 15005), 'astropy.io.fits.open', 'fits.open', (['filepath'], {}), '(filepath)\n', (14995, 15005), False, 'from astropy.io import fits\n'), ((18034, 18062), 'numpy.zeros', 'np.zeros', (['lencc'], {'dtype': 'float'}), '(lencc, dtype=float)\n', (18042, 18062), True, 'import numpy as np\n'), ((18084, 18112), 'numpy.zeros', 'np.zeros', (['lencc'], {'dtype': 'float'}), '(lencc, dtype=float)\n', (18092, 18112), True, 'import numpy as np\n'), ((2529, 2582), 
'astropy.nddata.CCDData.read', 'CCDData.read', (['filename'], {'hdu': 'hdu', 'unit': 'unit', 'mask': 'mask'}), '(filename, hdu=hdu, unit=unit, mask=mask)\n', (2541, 2582), False, 'from astropy.nddata import CCDData\n'), ((8928, 8945), 'numpy.sign', 'np.sign', (['d_arrow1'], {}), '(d_arrow1)\n', (8935, 8945), True, 'import numpy as np\n'), ((8979, 8996), 'numpy.sign', 'np.sign', (['d_arrow2'], {}), '(d_arrow2)\n', (8986, 8996), True, 'import numpy as np\n'), ((11430, 11449), 'numpy.isnan', 'np.isnan', (['self.data'], {}), '(self.data)\n', (11438, 11449), True, 'import numpy as np\n'), ((11471, 11490), 'numpy.isinf', 'np.isinf', (['self.data'], {}), '(self.data)\n', (11479, 11490), True, 'import numpy as np\n'), ((13397, 13431), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['(data - background)'], {}), '(data - background)\n', (13412, 13431), False, 'from astropy.io import fits\n'), ((14619, 14642), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['sigmap'], {}), '(sigmap)\n', (14634, 14642), False, 'from astropy.io import fits\n'), ((17801, 17825), 'numpy.arange', 'np.arange', (['self.__length'], {}), '(self.__length)\n', (17810, 17825), True, 'import numpy as np\n'), ((18775, 18808), 'astropy.table.Column', 'Column', (['master_a'], {'name': '"""master_a"""'}), "(master_a, name='master_a')\n", (18781, 18808), False, 'from astropy.table import Table, Column, join, join_skycoord\n'), ((18836, 18869), 'astropy.table.Column', 'Column', (['master_b'], {'name': '"""master_b"""'}), "(master_b, name='master_b')\n", (18842, 18869), False, 'from astropy.table import Table, Column, join, join_skycoord\n'), ((20083, 20107), 'numpy.arange', 'np.arange', (['self.__length'], {}), '(self.__length)\n', (20092, 20107), True, 'import numpy as np\n'), ((22561, 22568), 'astropy.table.Table', 'Table', ([], {}), '()\n', (22566, 22568), False, 'from astropy.table import Table, Column, join, join_skycoord\n'), ((22596, 22626), 'numpy.array', 'np.array', (["newsc['maxval_xpos']"], {}), "(newsc['maxval_xpos'])\n", (22604, 22626), True, 'import numpy as np\n'), ((22654, 22684), 'numpy.array', 'np.array', (["newsc['maxval_ypos']"], {}), "(newsc['maxval_ypos'])\n", (22662, 22684), True, 'import numpy as np\n'), ((22780, 22823), 'photutils.psf.extract_stars', 'extract_stars', (['nddata', 'stars_tbl'], {'size': 'size'}), '(nddata, stars_tbl, size=size)\n', (22793, 22823), False, 'from photutils.psf import extract_stars\n'), ((22851, 22922), 'photutils.EPSFBuilder', 'EPSFBuilder', ([], {'oversampling': 'oversampling', 'maxiters': '(15)', 'progress_bar': '(False)'}), '(oversampling=oversampling, maxiters=15, progress_bar=False)\n', (22862, 22922), False, 'from photutils import EPSFBuilder\n'), ((23647, 23735), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': '(3 * ncols, 3 * nrows)', 'squeeze': '(True)'}), '(nrows=nrows, ncols=ncols, figsize=(3 * ncols, 3 * nrows),\n squeeze=True)\n', (23659, 23735), True, 'import matplotlib.pyplot as plt\n'), ((24484, 24494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24492, 24494), True, 'import matplotlib.pyplot as plt\n'), ((24511, 24539), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (24521, 24539), True, 'import matplotlib.pyplot as plt\n'), ((25030, 25074), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'norm': 'norm', 'origin': '"""lower"""'}), "(image, norm=norm, origin='lower')\n", (25040, 25074), True, 'import matplotlib.pyplot as plt\n'), ((25086, 
25096), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25094, 25096), True, 'import matplotlib.pyplot as plt\n'), ((16920, 16955), 'numpy.ones', 'np.ones', (['self.__length'], {'dtype': 'float'}), '(self.__length, dtype=float)\n', (16927, 16955), True, 'import numpy as np\n'), ((17712, 17747), 'numpy.ones', 'np.ones', (['self.__length'], {'dtype': 'float'}), '(self.__length, dtype=float)\n', (17719, 17747), True, 'import numpy as np\n'), ((18689, 18700), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (18697, 18700), True, 'import numpy as np\n'), ((18738, 18749), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (18746, 18749), True, 'import numpy as np\n'), ((19252, 19285), 'numpy.ones', 'np.ones', (['self.__length'], {'dtype': 'int'}), '(self.__length, dtype=int)\n', (19259, 19285), True, 'import numpy as np\n'), ((19994, 20029), 'numpy.ones', 'np.ones', (['self.__length'], {'dtype': 'float'}), '(self.__length, dtype=float)\n', (20001, 20029), True, 'import numpy as np\n'), ((23155, 23174), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu]'], {}), '([hdu])\n', (23167, 23174), False, 'from astropy.io import fits\n'), ((24790, 24830), 'astropy.visualization.simple_norm', 'simple_norm', (['star_b', '"""log"""'], {'percent': '(99.0)'}), "(star_b, 'log', percent=99.0)\n", (24801, 24830), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24854, 24899), 'astropy.visualization.make_lupton_rgb', 'make_lupton_rgb', (['star_r', 'star_g', 'star_b'], {'Q': '(10)'}), '(star_r, star_g, star_b, Q=10)\n', (24869, 24899), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24979, 25018), 'astropy.visualization.simple_norm', 'simple_norm', (['image', '"""log"""'], {'percent': '(99.0)'}), "(image, 'log', percent=99.0)\n", (24990, 25018), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((2671, 2709), 'astropy.wcs.utils.proj_plane_pixel_scales', 'proj_plane_pixel_scales', (['self.data.wcs'], {}), '(self.data.wcs)\n', (2694, 2709), False, 'from astropy.wcs.utils import proj_plane_pixel_scales\n'), ((2712, 2733), 'astropy.units.degree.to', 'u.degree.to', (['"""arcsec"""'], {}), "('arcsec')\n", (2723, 2733), True, 'from astropy import units as u\n'), ((8429, 8447), 'numpy.array', 'np.array', (['d_arrow1'], {}), '(d_arrow1)\n', (8437, 8447), True, 'import numpy as np\n'), ((8499, 8517), 'numpy.array', 'np.array', (['d_arrow2'], {}), '(d_arrow2)\n', (8507, 8517), True, 'import numpy as np\n'), ((22718, 22758), 'numpy.array', 'np.array', (['self.image_list[loop2].ss_data'], {}), '(self.image_list[loop2].ss_data)\n', (22726, 22758), True, 'import numpy as np\n'), ((24151, 24191), 'astropy.visualization.simple_norm', 'simple_norm', (['star_b', '"""log"""'], {'percent': '(99.0)'}), "(star_b, 'log', percent=99.0)\n", (24162, 24191), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24219, 24264), 'astropy.visualization.make_lupton_rgb', 'make_lupton_rgb', (['star_r', 'star_g', 'star_b'], {'Q': '(10)'}), '(star_r, star_g, star_b, Q=10)\n', (24234, 24264), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24371, 24410), 'astropy.visualization.simple_norm', 'simple_norm', (['image', '"""log"""'], {'percent': '(99.0)'}), "(image, 'log', percent=99.0)\n", (24382, 24410), False, 'from astropy.visualization import simple_norm, make_lupton_rgb\n'), ((24613, 24634), 'numpy.sum', 'np.sum', (['PSFs[plot[0]]'], {}), '(PSFs[plot[0]])\n', (24619, 24634), True, 'import numpy as np\n'), 
((24679, 24700), 'numpy.sum', 'np.sum', (['PSFs[plot[1]]'], {}), '(PSFs[plot[1]])\n', (24685, 24700), True, 'import numpy as np\n'), ((24745, 24766), 'numpy.sum', 'np.sum', (['PSFs[plot[2]]'], {}), '(PSFs[plot[2]])\n', (24751, 24766), True, 'import numpy as np\n'), ((23887, 23923), 'numpy.sum', 'np.sum', (['star_images[plot[0]][i].data'], {}), '(star_images[plot[0]][i].data)\n', (23893, 23923), True, 'import numpy as np\n'), ((23987, 24023), 'numpy.sum', 'np.sum', (['star_images[plot[1]][i].data'], {}), '(star_images[plot[1]][i].data)\n', (23993, 24023), True, 'import numpy as np\n'), ((24087, 24123), 'numpy.sum', 'np.sum', (['star_images[plot[2]][i].data'], {}), '(star_images[plot[2]][i].data)\n', (24093, 24123), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
class TapeTracker(object):
min_thresh = np.array( [80,0,0] )
max_thresh = np.array( [90, 255, 255] )
    def __init__(self):
self.img = np.zeros((500,500))
def pipeline(self, img):
self.img = cv2.resize(img, (300,300), cv2.INTER_NEAREST)
self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2HLS)
self.mask = cv2.inRange(self.img, self.min_thresh, self.max_thresh)
kernel = np.ones((5,5), np.uint8)
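        # NOTE: this kernel is only needed if the dilation step below is re-enabled.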
#self.mask = cv2.dilate(self.mask,kernel, iterations=2)
self.cnt, self.hier = cv2.findContours(self.mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
self.ret = np.copy(self.img)
self.cnt_f = []
        self.cnt = sorted(self.cnt, key=cv2.contourArea, reverse=True)[:2] # keep the two largest contours
for cnt in self.cnt:
x,y,w,h = cv2.boundingRect(cnt)
if w < 0.6*h and cv2.contourArea(cnt) > 10:
cv2.rectangle(self.ret, (x,y), (x+w, y+h), (0,255,0), 2)
self.cnt_f.append(cnt)
M_1 = cv2.moments(self.cnt_f[0])
cx_1 = int(M_1['m10']/M_1['m00'])
cy_1 = int(M_1['m01']/M_1['m00'])
M_2 = cv2.moments(self.cnt_f[1])
cx_2 = int(M_2['m10']/M_2['m00'])
cy_2 = int(M_2['m01']/M_2['m00'])
midpoint = ((cx_1+cx_2)//2, (cy_1+cy_2)//2)
        self.error = midpoint[0] - self.img.shape[1] // 2 # horizontal offset of the tape midpoint from the image centre
print(self.error)
#cy = int(M['m01']/M['m00'])
#print(cx - self.img.shape[0]//2)
#print(cx)
self.ret = cv2.drawContours(self.ret, self.cnt_f, -1, (150, 150, 255), 2)
self.ret = cv2.circle(self.ret, (cx_1, cy_1), 2, (150, 155, 255))
self.ret = cv2.circle(self.ret, (cx_2, cy_2), 2, (150, 155, 255))
self.ret = cv2.circle(self.ret, midpoint, 2, (150, 255, 255))
if __name__ == "__main__":
ct = TapeTracker()
img = cv2.imread('img/1.jpg')
ct.pipeline(img)
cv2.imshow('output', cv2.resize(cv2.cvtColor(ct.img, cv2.COLOR_HLS2BGR), (500, 500), cv2.INTER_NEAREST))
cv2.imshow('mask', cv2.resize(ct.mask, (500,500), cv2.INTER_NEAREST))
cv2.imshow('contour', cv2.resize(cv2.cvtColor(ct.ret, cv2.COLOR_HLS2BGR), (500, 500), cv2.INTER_NEAREST))
k = cv2.waitKey(0) & 0xFF
if k == 27:
cv2.destroyAllWindows()
|
[
"cv2.rectangle",
"numpy.copy",
"cv2.drawContours",
"numpy.ones",
"cv2.inRange",
"cv2.contourArea",
"numpy.array",
"numpy.zeros",
"cv2.circle",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.moments",
"cv2.findContours",
"cv2.resize",
"cv2.imread",
"cv2.boundingRect"
] |
[((74, 94), 'numpy.array', 'np.array', (['[80, 0, 0]'], {}), '([80, 0, 0])\n', (82, 94), True, 'import numpy as np\n'), ((110, 134), 'numpy.array', 'np.array', (['[90, 255, 255]'], {}), '([90, 255, 255])\n', (118, 134), True, 'import numpy as np\n'), ((1765, 1788), 'cv2.imread', 'cv2.imread', (['"""img/1.jpg"""'], {}), "('img/1.jpg')\n", (1775, 1788), False, 'import cv2\n'), ((173, 193), 'numpy.zeros', 'np.zeros', (['(500, 500)'], {}), '((500, 500))\n', (181, 193), True, 'import numpy as np\n'), ((235, 281), 'cv2.resize', 'cv2.resize', (['img', '(300, 300)', 'cv2.INTER_NEAREST'], {}), '(img, (300, 300), cv2.INTER_NEAREST)\n', (245, 281), False, 'import cv2\n'), ((296, 337), 'cv2.cvtColor', 'cv2.cvtColor', (['self.img', 'cv2.COLOR_BGR2HLS'], {}), '(self.img, cv2.COLOR_BGR2HLS)\n', (308, 337), False, 'import cv2\n'), ((359, 414), 'cv2.inRange', 'cv2.inRange', (['self.img', 'self.min_thresh', 'self.max_thresh'], {}), '(self.img, self.min_thresh, self.max_thresh)\n', (370, 414), False, 'import cv2\n'), ((433, 458), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (440, 458), True, 'import numpy as np\n'), ((555, 620), 'cv2.findContours', 'cv2.findContours', (['self.mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(self.mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (571, 620), False, 'import cv2\n'), ((641, 658), 'numpy.copy', 'np.copy', (['self.img'], {}), '(self.img)\n', (648, 658), True, 'import numpy as np\n'), ((997, 1023), 'cv2.moments', 'cv2.moments', (['self.cnt_f[0]'], {}), '(self.cnt_f[0])\n', (1008, 1023), False, 'import cv2\n'), ((1115, 1141), 'cv2.moments', 'cv2.moments', (['self.cnt_f[1]'], {}), '(self.cnt_f[1])\n', (1126, 1141), False, 'import cv2\n'), ((1439, 1501), 'cv2.drawContours', 'cv2.drawContours', (['self.ret', 'self.cnt_f', '(-1)', '(150, 150, 255)', '(2)'], {}), '(self.ret, self.cnt_f, -1, (150, 150, 255), 2)\n', (1455, 1501), False, 'import cv2\n'), ((1517, 1571), 'cv2.circle', 'cv2.circle', (['self.ret', '(cx_1, cy_1)', '(2)', '(150, 155, 255)'], {}), '(self.ret, (cx_1, cy_1), 2, (150, 155, 255))\n', (1527, 1571), False, 'import cv2\n'), ((1587, 1641), 'cv2.circle', 'cv2.circle', (['self.ret', '(cx_2, cy_2)', '(2)', '(150, 155, 255)'], {}), '(self.ret, (cx_2, cy_2), 2, (150, 155, 255))\n', (1597, 1641), False, 'import cv2\n'), ((1657, 1707), 'cv2.circle', 'cv2.circle', (['self.ret', 'midpoint', '(2)', '(150, 255, 255)'], {}), '(self.ret, midpoint, 2, (150, 255, 255))\n', (1667, 1707), False, 'import cv2\n'), ((1936, 1986), 'cv2.resize', 'cv2.resize', (['ct.mask', '(500, 500)', 'cv2.INTER_NEAREST'], {}), '(ct.mask, (500, 500), cv2.INTER_NEAREST)\n', (1946, 1986), False, 'import cv2\n'), ((2102, 2116), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2113, 2116), False, 'import cv2\n'), ((2142, 2165), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2163, 2165), False, 'import cv2\n'), ((818, 839), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (834, 839), False, 'import cv2\n'), ((1842, 1881), 'cv2.cvtColor', 'cv2.cvtColor', (['ct.img', 'cv2.COLOR_HLS2BGR'], {}), '(ct.img, cv2.COLOR_HLS2BGR)\n', (1854, 1881), False, 'import cv2\n'), ((2023, 2062), 'cv2.cvtColor', 'cv2.cvtColor', (['ct.ret', 'cv2.COLOR_HLS2BGR'], {}), '(ct.ret, cv2.COLOR_HLS2BGR)\n', (2035, 2062), False, 'import cv2\n'), ((898, 961), 'cv2.rectangle', 'cv2.rectangle', (['self.ret', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(self.ret, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (911, 961), False, 'import 
cv2\n'), ((863, 883), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (878, 883), False, 'import cv2\n')]
|
import sys
# in this case local import
sys.path.append("../")
import mag2dpoly as mag
import numpy as np
# induced magnetization
Jind = mag.MagnetizVector(mod=4.9,Ideg=90.0,Ddeg=45.0)
# remanent magnetization
Jrem = mag.MagnetizVector(mod=3.1,Ideg=45.0,Ddeg=0.0)
# angle with the North axis
northxax = 90.0
# number of observation
Nobs = 101
xzobs = np.transpose(np.vstack(( np.linspace(0.0,100.0,Nobs), -1.0*np.ones(Nobs))))
# vertices of the poligonal bodies
vertices = np.array([ [35.0, 50.0],
[65.0, 50.0],
[80.0, 35.0],
[65.0, 20.0],
[35.0, 20.0],
[20.0, 35.0] ])
# indices of vertices for the body
nbod = 1
bodyindices = np.empty(shape=(nbod,), dtype=object)  # np.object is removed in recent NumPy versions
inds = range(6)
bodyindices[0] = np.array(inds)
# construct the poligonal body object
pbody = mag.MagPolyBodies2D(bodyindices,vertices)
# type of forward algorithm
forwardtype = "talwani"
# compute total field
# make Jind and Jrem arrays of objects (as many as there are bodies)
Jindv = np.array([Jind]) # we have one single body in this case
Jremv = np.array([Jrem]) # we have one single body in this case
tmag = mag.tmagpolybodies2Dgen(xzobs,Jindv,Jremv,northxax,pbody,forwardtype)
## plot
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(211)
plt.title("Magnetic anomaly")
plt.plot(xzobs[:,0],tmag,"o-")
plt.subplot(212)
plt.title("Polygonal body")
x = np.append(pbody.bo[0].ver1[:,0],pbody.bo[0].ver1[0,0])
y = np.append(pbody.bo[0].ver1[:,1],pbody.bo[0].ver1[0,1])
plt.plot(x,y,"o-")
plt.show()
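## Hypothetical extension (not part of the original example): with two bodies,
## bodyindices holds one index array per body and Jindv/Jremv carry one
## magnetization vector per body, e.g.:
# nbod = 2
# bodyindices = np.empty(shape=(nbod,), dtype=object)
# bodyindices[0] = np.array(range(6))      # vertices of the first polygon
# bodyindices[1] = np.array(range(6, 10))  # vertices of a second polygon (assumed appended to `vertices`)
# Jindv = np.array([Jind, Jind])
# Jremv = np.array([Jrem, Jrem])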
|
[
"numpy.ones",
"mag2dpoly.tmagpolybodies2Dgen",
"mag2dpoly.MagnetizVector",
"mag2dpoly.MagPolyBodies2D",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.append",
"numpy.empty",
"numpy.linspace",
"matplotlib.pyplot.title",
"sys.path.append",
"matplotlib.pyplot.show"
] |
[((41, 63), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (56, 63), False, 'import sys\n'), ((141, 190), 'mag2dpoly.MagnetizVector', 'mag.MagnetizVector', ([], {'mod': '(4.9)', 'Ideg': '(90.0)', 'Ddeg': '(45.0)'}), '(mod=4.9, Ideg=90.0, Ddeg=45.0)\n', (159, 190), True, 'import mag2dpoly as mag\n'), ((221, 269), 'mag2dpoly.MagnetizVector', 'mag.MagnetizVector', ([], {'mod': '(3.1)', 'Ideg': '(45.0)', 'Ddeg': '(0.0)'}), '(mod=3.1, Ideg=45.0, Ddeg=0.0)\n', (239, 269), True, 'import mag2dpoly as mag\n'), ((480, 579), 'numpy.array', 'np.array', (['[[35.0, 50.0], [65.0, 50.0], [80.0, 35.0], [65.0, 20.0], [35.0, 20.0], [\n 20.0, 35.0]]'], {}), '([[35.0, 50.0], [65.0, 50.0], [80.0, 35.0], [65.0, 20.0], [35.0, \n 20.0], [20.0, 35.0]])\n', (488, 579), True, 'import numpy as np\n'), ((746, 786), 'numpy.empty', 'np.empty', ([], {'shape': '(nbod,)', 'dtype': 'np.object'}), '(shape=(nbod,), dtype=np.object)\n', (754, 786), True, 'import numpy as np\n'), ((820, 834), 'numpy.array', 'np.array', (['inds'], {}), '(inds)\n', (828, 834), True, 'import numpy as np\n'), ((882, 924), 'mag2dpoly.MagPolyBodies2D', 'mag.MagPolyBodies2D', (['bodyindices', 'vertices'], {}), '(bodyindices, vertices)\n', (901, 924), True, 'import mag2dpoly as mag\n'), ((1078, 1094), 'numpy.array', 'np.array', (['[Jind]'], {}), '([Jind])\n', (1086, 1094), True, 'import numpy as np\n'), ((1142, 1158), 'numpy.array', 'np.array', (['[Jrem]'], {}), '([Jrem])\n', (1150, 1158), True, 'import numpy as np\n'), ((1205, 1279), 'mag2dpoly.tmagpolybodies2Dgen', 'mag.tmagpolybodies2Dgen', (['xzobs', 'Jindv', 'Jremv', 'northxax', 'pbody', 'forwardtype'], {}), '(xzobs, Jindv, Jremv, northxax, pbody, forwardtype)\n', (1228, 1279), True, 'import mag2dpoly as mag\n'), ((1318, 1330), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1328, 1330), True, 'import matplotlib.pyplot as plt\n'), ((1331, 1347), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (1342, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1377), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetic anomaly"""'], {}), "('Magnetic anomaly')\n", (1357, 1377), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1411), 'matplotlib.pyplot.plot', 'plt.plot', (['xzobs[:, 0]', 'tmag', '"""o-"""'], {}), "(xzobs[:, 0], tmag, 'o-')\n", (1386, 1411), True, 'import matplotlib.pyplot as plt\n'), ((1409, 1425), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (1420, 1425), True, 'import matplotlib.pyplot as plt\n'), ((1426, 1453), 'matplotlib.pyplot.title', 'plt.title', (['"""Polygonal body"""'], {}), "('Polygonal body')\n", (1435, 1453), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1515), 'numpy.append', 'np.append', (['pbody.bo[0].ver1[:, 0]', 'pbody.bo[0].ver1[0, 0]'], {}), '(pbody.bo[0].ver1[:, 0], pbody.bo[0].ver1[0, 0])\n', (1467, 1515), True, 'import numpy as np\n'), ((1517, 1574), 'numpy.append', 'np.append', (['pbody.bo[0].ver1[:, 1]', 'pbody.bo[0].ver1[0, 1]'], {}), '(pbody.bo[0].ver1[:, 1], pbody.bo[0].ver1[0, 1])\n', (1526, 1574), True, 'import numpy as np\n'), ((1572, 1592), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o-"""'], {}), "(x, y, 'o-')\n", (1580, 1592), True, 'import matplotlib.pyplot as plt\n'), ((1591, 1601), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1599, 1601), True, 'import matplotlib.pyplot as plt\n'), ((382, 411), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.0)', 'Nobs'], {}), '(0.0, 100.0, Nobs)\n', (393, 411), True, 'import numpy as np\n'), 
((416, 429), 'numpy.ones', 'np.ones', (['Nobs'], {}), '(Nobs)\n', (423, 429), True, 'import numpy as np\n')]
|
from funlib.show.neuroglancer import add_layer, ScalePyramid
import argparse
import daisy
import glob
import neuroglancer
import numpy as np
import os
import webbrowser
from swc_parser import _parse_swc
from pathlib import Path
import itertools
import random
import logging
ngid = itertools.count(start=1)
parser = argparse.ArgumentParser()
parser.add_argument(
"--file", "-f", type=str, action="append", help="The path to the container to show"
)
parser.add_argument(
"--datasets",
"-d",
type=str,
nargs="+",
action="append",
help="The datasets in the container to show",
)
parser.add_argument(
"--synapses",
"-s",
type=str,
action="append",
help="A numpy npz containing synapse annotations as stored by "
"synful.gunpowder.ExtractSynapses",
)
parser.add_argument(
"--time",
"-t",
type=int,
action="store",
dest="minutes",
default=0,
help="How long you want neuroglancer to stay available",
)
parser.add_argument(
"--output",
"-o",
type=str,
action="store",
dest="log",
default="",
help="Where to output url to",
)
args = parser.parse_args()
print("passed in arguments: {}".format(args))
minutes = args.minutes
print("showing neuroglancer for {} minutes".format(minutes))
if args.log != "":
logging.basicConfig(level=logging.INFO, filename=args.log)
else:
logging.basicConfig(level=logging.INFO)
neuroglancer.set_server_bind_address("0.0.0.0")
viewer = neuroglancer.Viewer()
swc_path = Path(
"/nrs/funke/mouselight-v2/2017-07-02",
"consensus-neurons-with-machine-centerpoints-labelled-as-swcs/G-002.swc",
)
swc_path = Path(
"/groups/mousebrainmicro/mousebrainmicro/cluster/2018-07-02/carver/augmented-with-skeleton-nodes-as-swcs/G-002.swc"
)
n5_path = Path(
"/nrs/funke/mouselight-v2/2018-07-02",
"consensus-neurons-with-machine-centerpoints-labelled-as-swcs-carved.n5/",
)
transform = Path("/nrs/mouselight/SAMPLES/2018-07-02/transform.txt")
def load_transform(transform_path: Path):
text = transform_path.open("r").read()
lines = text.split("\n")
constants = {}
for line in lines:
if len(line) > 0:
variable, value = line.split(":")
constants[variable] = float(value)
spacing = (
np.array([constants["sx"], constants["sy"], constants["sz"]])
/ 2 ** (constants["nl"] - 1)
/ 1000
)
origin = spacing * (
(np.array([constants["ox"], constants["oy"], constants["oz"]]) // spacing)
/ 1000
)
return origin, spacing
def swc_to_voxel_coords(swc_coord, origin, spacing):
return np.round((swc_coord - origin) / spacing).astype(int)
# swc
neuron_graph = _parse_swc(swc_path)
origin, spacing = load_transform(transform)
voxel_size = spacing
voxel_size_rounded = np.array((10, 3, 3)[::-1])
nodes = []
edges = []
print(len(neuron_graph.nodes))
for node_a, node_b in neuron_graph.edges:
a = swc_to_voxel_coords(neuron_graph.nodes[node_a]["location"], origin, spacing)
b = swc_to_voxel_coords(neuron_graph.nodes[node_b]["location"], origin, spacing)
pos_u = a
pos_v = b
nodes.append(
neuroglancer.EllipsoidAnnotation(
center=pos_u, radii=(3, 3, 3) / voxel_size, id=next(ngid)
)
)
edges.append(
neuroglancer.LineAnnotation(point_a=pos_u, point_b=pos_v, id=next(ngid))
)
if len(nodes) > 10000:
break
nodes.append(
neuroglancer.EllipsoidAnnotation(
center=pos_v, radii=(1, 1, 1) / voxel_size, id=next(ngid)
)
)
a = daisy.open_ds(str(n5_path.absolute()), "volume")
with viewer.txn() as s:
add_layer(s, a, "volume", shader="rgb", c=[0, 0, 0])
with viewer.txn() as s:
s.layers["edges"] = neuroglancer.AnnotationLayer(
filter_by_segmentation=False, annotation_color="#add8e6", annotations=edges
)
s.layers["nodes"] = neuroglancer.AnnotationLayer(
filter_by_segmentation=False, annotation_color="#ff00ff", annotations=nodes
)
url = str(viewer)
logging.info(url)
import time
time.sleep(60 * minutes)
try:
    if minutes < 1:
        input("Press ENTER to exit:")
except Exception:
    pass
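# Example invocation (a sketch; the script file name is hypothetical):
#   python show_melc_swc.py --time 30 --output viewer_url.log
# keeps the neuroglancer viewer alive for 30 minutes and logs its URL to viewer_url.log.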
|
[
"logging.basicConfig",
"funlib.show.neuroglancer.add_layer",
"argparse.ArgumentParser",
"pathlib.Path",
"neuroglancer.Viewer",
"numpy.round",
"time.sleep",
"neuroglancer.set_server_bind_address",
"numpy.array",
"itertools.count",
"neuroglancer.AnnotationLayer",
"logging.info",
"swc_parser._parse_swc"
] |
[((282, 306), 'itertools.count', 'itertools.count', ([], {'start': '(1)'}), '(start=1)\n', (297, 306), False, 'import itertools\n'), ((317, 342), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (340, 342), False, 'import argparse\n'), ((1419, 1466), 'neuroglancer.set_server_bind_address', 'neuroglancer.set_server_bind_address', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (1455, 1466), False, 'import neuroglancer\n'), ((1476, 1497), 'neuroglancer.Viewer', 'neuroglancer.Viewer', ([], {}), '()\n', (1495, 1497), False, 'import neuroglancer\n'), ((1510, 1631), 'pathlib.Path', 'Path', (['"""/nrs/funke/mouselight-v2/2017-07-02"""', '"""consensus-neurons-with-machine-centerpoints-labelled-as-swcs/G-002.swc"""'], {}), "('/nrs/funke/mouselight-v2/2017-07-02',\n 'consensus-neurons-with-machine-centerpoints-labelled-as-swcs/G-002.swc')\n", (1514, 1631), False, 'from pathlib import Path\n'), ((1650, 1781), 'pathlib.Path', 'Path', (['"""/groups/mousebrainmicro/mousebrainmicro/cluster/2018-07-02/carver/augmented-with-skeleton-nodes-as-swcs/G-002.swc"""'], {}), "(\n '/groups/mousebrainmicro/mousebrainmicro/cluster/2018-07-02/carver/augmented-with-skeleton-nodes-as-swcs/G-002.swc'\n )\n", (1654, 1781), False, 'from pathlib import Path\n'), ((1788, 1910), 'pathlib.Path', 'Path', (['"""/nrs/funke/mouselight-v2/2018-07-02"""', '"""consensus-neurons-with-machine-centerpoints-labelled-as-swcs-carved.n5/"""'], {}), "('/nrs/funke/mouselight-v2/2018-07-02',\n 'consensus-neurons-with-machine-centerpoints-labelled-as-swcs-carved.n5/')\n", (1792, 1910), False, 'from pathlib import Path\n'), ((1930, 1986), 'pathlib.Path', 'Path', (['"""/nrs/mouselight/SAMPLES/2018-07-02/transform.txt"""'], {}), "('/nrs/mouselight/SAMPLES/2018-07-02/transform.txt')\n", (1934, 1986), False, 'from pathlib import Path\n'), ((2706, 2726), 'swc_parser._parse_swc', '_parse_swc', (['swc_path'], {}), '(swc_path)\n', (2716, 2726), False, 'from swc_parser import _parse_swc\n'), ((2814, 2840), 'numpy.array', 'np.array', (['(10, 3, 3)[::-1]'], {}), '((10, 3, 3)[::-1])\n', (2822, 2840), True, 'import numpy as np\n'), ((4025, 4042), 'logging.info', 'logging.info', (['url'], {}), '(url)\n', (4037, 4042), False, 'import logging\n'), ((4057, 4081), 'time.sleep', 'time.sleep', (['(60 * minutes)'], {}), '(60 * minutes)\n', (4067, 4081), False, 'import time\n'), ((1309, 1367), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': 'args.log'}), '(level=logging.INFO, filename=args.log)\n', (1328, 1367), False, 'import logging\n'), ((1378, 1417), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1397, 1417), False, 'import logging\n'), ((3640, 3692), 'funlib.show.neuroglancer.add_layer', 'add_layer', (['s', 'a', '"""volume"""'], {'shader': '"""rgb"""', 'c': '[0, 0, 0]'}), "(s, a, 'volume', shader='rgb', c=[0, 0, 0])\n", (3649, 3692), False, 'from funlib.show.neuroglancer import add_layer, ScalePyramid\n'), ((3742, 3852), 'neuroglancer.AnnotationLayer', 'neuroglancer.AnnotationLayer', ([], {'filter_by_segmentation': '(False)', 'annotation_color': '"""#add8e6"""', 'annotations': 'edges'}), "(filter_by_segmentation=False, annotation_color\n ='#add8e6', annotations=edges)\n", (3770, 3852), False, 'import neuroglancer\n'), ((3886, 3996), 'neuroglancer.AnnotationLayer', 'neuroglancer.AnnotationLayer', ([], {'filter_by_segmentation': '(False)', 'annotation_color': '"""#ff00ff"""', 'annotations': 'nodes'}), "(filter_by_segmentation=False, 
annotation_color\n ='#ff00ff', annotations=nodes)\n", (3914, 3996), False, 'import neuroglancer\n'), ((2288, 2349), 'numpy.array', 'np.array', (["[constants['sx'], constants['sy'], constants['sz']]"], {}), "([constants['sx'], constants['sy'], constants['sz']])\n", (2296, 2349), True, 'import numpy as np\n'), ((2630, 2670), 'numpy.round', 'np.round', (['((swc_coord - origin) / spacing)'], {}), '((swc_coord - origin) / spacing)\n', (2638, 2670), True, 'import numpy as np\n'), ((2442, 2503), 'numpy.array', 'np.array', (["[constants['ox'], constants['oy'], constants['oz']]"], {}), "([constants['ox'], constants['oy'], constants['oz']])\n", (2450, 2503), True, 'import numpy as np\n')]
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import glob, random
import sklearn
from sklearn.decomposition import PCA
from xgboost.sklearn import XGBRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor,BaggingRegressor, RandomForestRegressor,VotingRegressor
from sklearn.linear_model import LinearRegression
from lightgbm import LGBMRegressor
import catboost
from catboost import CatBoostRegressor
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
#import warnings
#warnings.filterwarnings('ignore')
folder = os.path.dirname(os.path.abspath(__file__))
train_new = pd.read_csv(folder+'/Train.csv')
bands_of_interest = ['S2_B5', 'S2_B4', 'S2_B3', 'S2_B2', 'CLIM_pr', 'CLIM_soil']
band_names = [l.strip() for l in open(folder + '/band_names.txt', 'r').readlines()]
def process_train(fid, folder= folder+'/imtrain'):
fn = f'{folder}/{fid}.npy'
arr = np.load(fn)
values = {}
for month in range(12):
bns = [str(month) + '_' + b for b in bands_of_interest] # Bands of interest for this month
idxs = np.where(np.isin(band_names, bns)) # Index of these bands
vs = arr[idxs, 20, 20] # Sample the im at the center point
for bn, v in zip(bns, vs[0]):
values[bn] = v
return values
def process_test(fid, folder= folder+'/imtest'):
fn = f'{folder}/{fid}.npy'
arr = np.load(fn)
values = {}
for month in range(12):
bns = [str(month) + '_' + b for b in bands_of_interest] # Bands of interest for this month
idxs = np.where(np.isin(band_names, bns)) # Index of these bands
vs = arr[idxs, 20, 20] # Sample the im at the center point
for bn, v in zip(bns, vs[0]):
values[bn] = v
return values
# Make a new DF with the sampled values from each field
train_sampled = pd.DataFrame([process_train(fid) for fid in train_new['Field_ID'].values])
#MODEL
X = train_sampled.copy()
y = train_new['Yield'].values
print(X.head())
print(y)
X_train, X_test, y_train, y_test = train_test_split(X, y)
model=BaggingRegressor(CatBoostRegressor(silent=True),n_estimators=55)
model.fit(X_train, y_train)
print('Score:', mean_squared_error(y_test, model.predict(X_test), squared=False))
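# Optional sanity check (not in the original script): estimate RMSE with
# cross-validation instead of a single split; sklearn returns negated scores.
# from sklearn.model_selection import cross_val_score
# cv_rmse = -cross_val_score(model, X, y, scoring='neg_root_mean_squared_error', cv=5)
# print('CV RMSE:', cv_rmse.mean())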
#SUBMITTING
ss = pd.read_csv(folder+'/SampleSubmission.csv')
test_sampled = pd.DataFrame([process_test(fid) for fid in ss['Field_ID'].values])
preds = model.predict(test_sampled)
ss['Yield'] = preds
ss.to_csv(folder+'/Sub.csv', index=False)
|
[
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.isin",
"catboost.CatBoostRegressor",
"os.path.abspath",
"numpy.load"
] |
[((691, 705), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (703, 705), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((828, 862), 'pandas.read_csv', 'pd.read_csv', (["(folder + '/Train.csv')"], {}), "(folder + '/Train.csv')\n", (839, 862), True, 'import pandas as pd\n'), ((2214, 2236), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {}), '(X, y)\n', (2230, 2236), False, 'from sklearn.model_selection import train_test_split\n'), ((2443, 2488), 'pandas.read_csv', 'pd.read_csv', (["(folder + '/SampleSubmission.csv')"], {}), "(folder + '/SampleSubmission.csv')\n", (2454, 2488), True, 'import pandas as pd\n'), ((788, 813), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (803, 813), False, 'import os\n'), ((1123, 1134), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1130, 1134), True, 'import numpy as np\n'), ((1573, 1584), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1580, 1584), True, 'import numpy as np\n'), ((2261, 2291), 'catboost.CatBoostRegressor', 'CatBoostRegressor', ([], {'silent': '(True)'}), '(silent=True)\n', (2278, 2291), False, 'from catboost import CatBoostRegressor\n'), ((1295, 1319), 'numpy.isin', 'np.isin', (['band_names', 'bns'], {}), '(band_names, bns)\n', (1302, 1319), True, 'import numpy as np\n'), ((1745, 1769), 'numpy.isin', 'np.isin', (['band_names', 'bns'], {}), '(band_names, bns)\n', (1752, 1769), True, 'import numpy as np\n')]
|
from MELC.utils.myDatasets import generate_workingRaw_from_raw, MELCStructureDataset
import numpy as np
import tifffile as tiff
from MELC.utils.registration_daria import register
import matplotlib.pyplot as plt
import cv2
from MELC.utils.Files import create_folder
from skimage import img_as_float, img_as_uint
from MELC.utils.f_transformations import filterLowFrequencies, visualize_frequencies
import glob
from os.path import join
from config import *
import argparse
SEPARATOR = '/'
def parse_args():
"""Parse input arguments"""
    parser = argparse.ArgumentParser(description='Run MELC image preprocessing (registration, background correction, normalization)')
    parser.add_argument(
        '--path', dest='path', required=True,
        help='Path to the MELC run folder to process')
return parser.parse_args()
class MELCImageProcessing:
def __init__(self, path: str, melc_structure_generated: bool = True):
self._path = path
self._path_registered_fluor = ''
self._path_registered_bleach = ''
self._path_registered_phase = ''
self._path_registered_vis_fluor = ''
self._path_registered_vis_bleach = ''
self._path_registered_vis_phase = ''
self._path_bg_corr = ''
self._path_bg_corr_f = ''
self._path_bg_corr_v_f = ''
self._path_normalized_f = ''
self._path_normalized_v_f = ''
'''
Extract MELC data and calibration data
'''
w_raw = self._path + SEPARATOR + 'w_raw'
if not melc_structure_generated:
generate_workingRaw_from_raw(self._path, w_raw)
melc_dataset = MELCStructureDataset(w_raw)
'''
Sort by creation date
'''
self._melc_fluor = melc_dataset.fluor_pd.sort_values('order_index', ascending=True)
self._melc_phase = melc_dataset.phase_pd.sort_values('order_index', ascending=True)
self._melc_bleach = melc_dataset.bleach_pd.sort_values('order_index', ascending=True)
self._melc_phasebleach = melc_dataset.phasebleach_pd.sort_values('order_index', ascending=True)
self.create_folders()
self._corrected_bf_im = self.generate_bg_correction_img()
self.process_images()
def create_folders(self):
'''
Create folders for registered images
'''
path_processed = join(self._path, 'processed')
path_registered = join(path_processed, 'registered')
self._path_registered_fluor = join(path_registered, 'fluor')
self._path_registered_bleach = join(path_registered, 'bleach')
self._path_registered_phase = join(path_registered, 'phase')
self._path_registered_vis_fluor = join(path_registered, 'vis_fluor')
self._path_registered_vis_bleach = join(path_registered, 'vis_bleach')
self._path_registered_vis_phase = join(path_registered, 'vis_phase')
create_folder(path_processed)
create_folder(path_registered)
create_folder(self._path_registered_fluor)
create_folder(self._path_registered_bleach)
create_folder(self._path_registered_phase)
create_folder(self._path_registered_vis_fluor)
create_folder(self._path_registered_vis_bleach)
create_folder(self._path_registered_vis_phase)
'''
Create folders for background corrected images
'''
self._path_bg_corr = self._path + SEPARATOR + 'processed' + SEPARATOR + 'background_corr' + SEPARATOR
self._path_bg_corr_f = self._path_bg_corr + 'fluor' + SEPARATOR
self._path_bg_corr_v_f = self._path_bg_corr + 'vis_fluor' + SEPARATOR
self._path_bg_corr_p = self._path_bg_corr + 'phase' + SEPARATOR
self._path_bg_corr_v_p = self._path_bg_corr + 'vis_phase' + SEPARATOR
create_folder(self._path_bg_corr)
create_folder(self._path_bg_corr_f)
create_folder(self._path_bg_corr_v_f)
create_folder(self._path_bg_corr_p)
create_folder(self._path_bg_corr_v_p)
'''
Create folders for normalized images
'''
path_normalized = self._path + SEPARATOR + 'processed' + SEPARATOR + 'normalized'
self._path_normalized_f = path_normalized + SEPARATOR + 'fluor' + SEPARATOR
self._path_normalized_v_f = path_normalized + SEPARATOR + 'vis_fluor' + SEPARATOR
self._path_normalized_p = path_normalized + SEPARATOR + 'phase' + SEPARATOR
self._path_normalized_v_p = path_normalized + SEPARATOR + 'vis_phase' + SEPARATOR
create_folder(path_normalized)
create_folder(self._path_normalized_f)
create_folder(self._path_normalized_v_f)
create_folder(self._path_normalized_p)
create_folder(self._path_normalized_v_p)
def generate_bg_correction_img(self):
'''
Create correction image for fluorescence and bleaching images
'''
brightfield_im = []
darkframe_im = []
filter_names = ['XF116-2', 'XF111-2']
calibration_path = self._path + SEPARATOR +'w_raw' + SEPARATOR + 'calibration' + SEPARATOR
brightfield_im.append(np.int16(tiff.imread(glob.glob(calibration_path + '*_cal_b001_5000_XF116-2_000.tif'))))
brightfield_im.append(np.int16(tiff.imread(glob.glob(calibration_path + '*_cal_b001_5000_XF111-2_000.tif'))))
darkframe_im.append(np.int16(tiff.imread(glob.glob(calibration_path + '*_cal_d001_5000_XF116-2_000.tif'))))
darkframe_im.append(np.int16(tiff.imread(glob.glob(calibration_path + '*_cal_d001_5000_XF111-2_000.tif'))))
corrected_brightfield_im = [(brightfield_im[i] - darkframe_im[i]) for i in range(len(filter_names))]
corrected_brightfield_im[0][corrected_brightfield_im[0] <= 0] = 0
corrected_brightfield_im[1][corrected_brightfield_im[1] <= 0] = 0
return corrected_brightfield_im
def process_images(self):
'''
Registration, background correction and normalization of images
'''
'''
Registration
'''
ref_image = tiff.imread(glob.glob(self._path + SEPARATOR + 'w_raw' + SEPARATOR + 'phase' + SEPARATOR + '*_Propidium iodide_200_XF116*.tif'))
for i in range(0, (len(self._melc_fluor)-1)):
pb_idx = np.where(self._melc_phasebleach['order_index'] == self._melc_bleach.iloc[i]['order_index'])[0][0]
phasebleach_image = tiff.imread(self._melc_phasebleach.iloc[pb_idx]['path'])
bleach_image = tiff.imread(self._melc_bleach.iloc[i]['path'])
registered_bleach_image = register(ref_image, phasebleach_image, bleach_image)
filename_bleach = SEPARATOR + str(int(self._melc_bleach.iloc[i]['order_index'])) + '_' + '_'.join(
self._melc_bleach.iloc[i]['fid'].split('_')[:-1]) + '.tif'
tiff.imsave(self._path_registered_bleach + filename_bleach, registered_bleach_image)
save_vis_img(registered_bleach_image, self._path_registered_vis_bleach, filename_bleach)
p_idx = np.where(self._melc_phase['order_index'] == self._melc_fluor.iloc[i+1]['order_index'])[0][0]
phase_image = tiff.imread(self._melc_phase.iloc[p_idx]['path'])
fluorescence_image = tiff.imread(self._melc_fluor.iloc[i+1]['path'])
registered_phase_image = register(ref_image, phase_image, phase_image)
registered_fluor_image = register(ref_image, phase_image, fluorescence_image)
filename_fluor = SEPARATOR + str(int(self._melc_fluor.iloc[i+1]['order_index'])) + '_' + '_'.join(
self._melc_fluor.iloc[i+1]['fid'].split('_')[:-1]) + '.tif'
tiff.imsave(self._path_registered_fluor + filename_fluor, registered_fluor_image)
            tiff.imsave(self._path_registered_phase + filename_fluor, registered_phase_image)
save_vis_img(registered_fluor_image, self._path_registered_vis_fluor, filename_fluor)
save_vis_img(registered_phase_image, self._path_registered_vis_phase, filename_fluor)
'''
Background Correction
'''
bleach = np.int16(registered_bleach_image)
fluor = np.int16(registered_fluor_image)
phase = np.int16(registered_phase_image)
if self._melc_fluor.iloc[i+1]['filter'] == 'XF111-2':
fluor -= self._corrected_bf_im[1]
phase -= self._corrected_bf_im[1]
else:
fluor -= self._corrected_bf_im[0]
phase -= self._corrected_bf_im[0]
if self._melc_bleach.iloc[i]['filter'] == 'XF111-2':
bleach -= self._corrected_bf_im[1]
else:
bleach -= self._corrected_bf_im[0]
phase[phase < 0] = 0
# Substraction of bleaching image
fluor_wo_bg = fluor - bleach
fluor_wo_bg[fluor_wo_bg < 0] = 0
tiff.imsave(self._path_bg_corr_f + filename_fluor, fluor_wo_bg)
save_vis_img(fluor_wo_bg, self._path_bg_corr_v_f, filename_fluor)
tiff.imsave(self._path_bg_corr_p + filename_fluor, phase)
save_vis_img(phase, self._path_bg_corr_v_p, filename_fluor)
'''
Normalization
'''
fluor_wo_bg_normalized = melc_normalization(fluor_wo_bg)
phase_bc_normalized = melc_normalization(phase)
tiff.imsave(self._path_normalized_f + filename_fluor, fluor_wo_bg_normalized)
save_vis_img(fluor_wo_bg_normalized, self._path_normalized_v_f, filename_fluor)
tiff.imsave(self._path_normalized_p + filename_fluor, phase_bc_normalized)
save_vis_img(phase_bc_normalized, self._path_normalized_v_p, filename_fluor)
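# Contrast-stretch an image to [0, 1] between its 0.135 and 99.865 percentiles
# (background subtraction and normalization) and save it as a 16-bit visualization.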
def save_vis_img(img: np.ndarray, path: str, filename: str):
img_float = img_as_float(img.astype(int))
img_float = img_float - np.percentile(img_float[20:-20, 20:-20], 0.135) # subtract background
if not np.percentile(img_float[20:-20, 20:-20], 100 - 0.135) == 0.0:
img_float /= np.percentile(img_float[20:-20, 20:-20], 100 - 0.135) # normalize to 99.865% of max value
img_float[img_float < 0] = 0
img_float[img_float > 1] = 1 # cut-off high intensities
tiff.imsave(path + filename, img_as_uint(img_float))
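# Clip the three brightest pixel values and crop a 15-pixel border before further processing.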
def melc_normalization(img: np.ndarray):
sorted_img = np.sort(np.ravel(img))[::-1]
img[img > sorted_img[3]] = sorted_img[3] # cut off high intensities
return img[15:-15, 15:-15]
'''
For visualization and inspection of images
***Using normalization
registered_u8 = cv2.convertScaleAbs(registered_image, alpha=(255.0/65535.0))
kernel = np.ones((2, 2), np.float32)/4
mean_filtered_img = cv2.filter2D(registered_float, -1, kernel)
normalized_img = cv2.normalize(mean_filtered_img, None, 0, 255, cv2.NORM_MINMAX)
***Using FFT - cut 0.00001 percent of highest frequencies
images = []
images.append(registered_float)
visualize_frequencies(images)
pixels = registered_float.size
high_intensity_pixels = 3
percentage_non_artificial = 100-high_intensity_pixels/pixels
filtered_img = filterLowFrequencies(registered_float, percentage_non_artificial)
images.append(filtered_img)
visualize_frequencies(images)
***Plot histogram
hist = cv2.calcHist([registered_image], [0], None, [65535], [0, 65535])
plt.plot(hist)
plt.xticks(np.arange(0, 65535, step=2000))
plt.grid(True)
plt.yscale('log') # plt.xlim([0, 65535])
plt.show()
'''
if __name__ == '__main__':
args = parse_args()
MELCImageProcessing(args.path, melc_structure_generated=False)
# raw_1 = r'G:\FORSCHUNG\LAB4\VISIOMICS\MELC\2019\3rdFinalPanel_18-6056\201912201349_1'
# melc_processed_data = MELCImageProcessing(raw_1, melc_structure_generated=False)
x = 0
|
[
"MELC.utils.registration_daria.register",
"MELC.utils.myDatasets.MELCStructureDataset",
"tifffile.imread",
"argparse.ArgumentParser",
"skimage.img_as_uint",
"numpy.where",
"numpy.int16",
"os.path.join",
"MELC.utils.Files.create_folder",
"tifffile.imsave",
"numpy.percentile",
"MELC.utils.myDatasets.generate_workingRaw_from_raw",
"numpy.ravel",
"glob.glob"
] |
[((550, 615), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run Training of Mask R-CNN"""'}), "(description='Run Training of Mask R-CNN')\n", (573, 615), False, 'import argparse\n'), ((1600, 1627), 'MELC.utils.myDatasets.MELCStructureDataset', 'MELCStructureDataset', (['w_raw'], {}), '(w_raw)\n', (1620, 1627), False, 'from MELC.utils.myDatasets import generate_workingRaw_from_raw, MELCStructureDataset\n'), ((2317, 2346), 'os.path.join', 'join', (['self._path', '"""processed"""'], {}), "(self._path, 'processed')\n", (2321, 2346), False, 'from os.path import join\n'), ((2373, 2407), 'os.path.join', 'join', (['path_processed', '"""registered"""'], {}), "(path_processed, 'registered')\n", (2377, 2407), False, 'from os.path import join\n'), ((2446, 2476), 'os.path.join', 'join', (['path_registered', '"""fluor"""'], {}), "(path_registered, 'fluor')\n", (2450, 2476), False, 'from os.path import join\n'), ((2516, 2547), 'os.path.join', 'join', (['path_registered', '"""bleach"""'], {}), "(path_registered, 'bleach')\n", (2520, 2547), False, 'from os.path import join\n'), ((2586, 2616), 'os.path.join', 'join', (['path_registered', '"""phase"""'], {}), "(path_registered, 'phase')\n", (2590, 2616), False, 'from os.path import join\n'), ((2659, 2693), 'os.path.join', 'join', (['path_registered', '"""vis_fluor"""'], {}), "(path_registered, 'vis_fluor')\n", (2663, 2693), False, 'from os.path import join\n'), ((2737, 2772), 'os.path.join', 'join', (['path_registered', '"""vis_bleach"""'], {}), "(path_registered, 'vis_bleach')\n", (2741, 2772), False, 'from os.path import join\n'), ((2815, 2849), 'os.path.join', 'join', (['path_registered', '"""vis_phase"""'], {}), "(path_registered, 'vis_phase')\n", (2819, 2849), False, 'from os.path import join\n'), ((2859, 2888), 'MELC.utils.Files.create_folder', 'create_folder', (['path_processed'], {}), '(path_processed)\n', (2872, 2888), False, 'from MELC.utils.Files import create_folder\n'), ((2897, 2927), 'MELC.utils.Files.create_folder', 'create_folder', (['path_registered'], {}), '(path_registered)\n', (2910, 2927), False, 'from MELC.utils.Files import create_folder\n'), ((2936, 2978), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_fluor'], {}), '(self._path_registered_fluor)\n', (2949, 2978), False, 'from MELC.utils.Files import create_folder\n'), ((2987, 3030), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_bleach'], {}), '(self._path_registered_bleach)\n', (3000, 3030), False, 'from MELC.utils.Files import create_folder\n'), ((3039, 3081), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_phase'], {}), '(self._path_registered_phase)\n', (3052, 3081), False, 'from MELC.utils.Files import create_folder\n'), ((3090, 3136), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_vis_fluor'], {}), '(self._path_registered_vis_fluor)\n', (3103, 3136), False, 'from MELC.utils.Files import create_folder\n'), ((3145, 3192), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_vis_bleach'], {}), '(self._path_registered_vis_bleach)\n', (3158, 3192), False, 'from MELC.utils.Files import create_folder\n'), ((3201, 3247), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_registered_vis_phase'], {}), '(self._path_registered_vis_phase)\n', (3214, 3247), False, 'from MELC.utils.Files import create_folder\n'), ((3748, 3781), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_bg_corr'], {}), 
'(self._path_bg_corr)\n', (3761, 3781), False, 'from MELC.utils.Files import create_folder\n'), ((3790, 3825), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_bg_corr_f'], {}), '(self._path_bg_corr_f)\n', (3803, 3825), False, 'from MELC.utils.Files import create_folder\n'), ((3834, 3871), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_bg_corr_v_f'], {}), '(self._path_bg_corr_v_f)\n', (3847, 3871), False, 'from MELC.utils.Files import create_folder\n'), ((3880, 3915), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_bg_corr_p'], {}), '(self._path_bg_corr_p)\n', (3893, 3915), False, 'from MELC.utils.Files import create_folder\n'), ((3924, 3961), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_bg_corr_v_p'], {}), '(self._path_bg_corr_v_p)\n', (3937, 3961), False, 'from MELC.utils.Files import create_folder\n'), ((4479, 4509), 'MELC.utils.Files.create_folder', 'create_folder', (['path_normalized'], {}), '(path_normalized)\n', (4492, 4509), False, 'from MELC.utils.Files import create_folder\n'), ((4518, 4556), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_normalized_f'], {}), '(self._path_normalized_f)\n', (4531, 4556), False, 'from MELC.utils.Files import create_folder\n'), ((4565, 4605), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_normalized_v_f'], {}), '(self._path_normalized_v_f)\n', (4578, 4605), False, 'from MELC.utils.Files import create_folder\n'), ((4614, 4652), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_normalized_p'], {}), '(self._path_normalized_p)\n', (4627, 4652), False, 'from MELC.utils.Files import create_folder\n'), ((4661, 4701), 'MELC.utils.Files.create_folder', 'create_folder', (['self._path_normalized_v_p'], {}), '(self._path_normalized_v_p)\n', (4674, 4701), False, 'from MELC.utils.Files import create_folder\n'), ((9815, 9862), 'numpy.percentile', 'np.percentile', (['img_float[20:-20, 20:-20]', '(0.135)'], {}), '(img_float[20:-20, 20:-20], 0.135)\n', (9828, 9862), True, 'import numpy as np\n'), ((9987, 10040), 'numpy.percentile', 'np.percentile', (['img_float[20:-20, 20:-20]', '(100 - 0.135)'], {}), '(img_float[20:-20, 20:-20], 100 - 0.135)\n', (10000, 10040), True, 'import numpy as np\n'), ((10215, 10237), 'skimage.img_as_uint', 'img_as_uint', (['img_float'], {}), '(img_float)\n', (10226, 10237), False, 'from skimage import img_as_float, img_as_uint\n'), ((1528, 1575), 'MELC.utils.myDatasets.generate_workingRaw_from_raw', 'generate_workingRaw_from_raw', (['self._path', 'w_raw'], {}), '(self._path, w_raw)\n', (1556, 1575), False, 'from MELC.utils.myDatasets import generate_workingRaw_from_raw, MELCStructureDataset\n'), ((6013, 6132), 'glob.glob', 'glob.glob', (["(self._path + SEPARATOR + 'w_raw' + SEPARATOR + 'phase' + SEPARATOR +\n '*_Propidium iodide_200_XF116*.tif')"], {}), "(self._path + SEPARATOR + 'w_raw' + SEPARATOR + 'phase' +\n SEPARATOR + '*_Propidium iodide_200_XF116*.tif')\n", (6022, 6132), False, 'import glob\n'), ((6336, 6392), 'tifffile.imread', 'tiff.imread', (["self._melc_phasebleach.iloc[pb_idx]['path']"], {}), "(self._melc_phasebleach.iloc[pb_idx]['path'])\n", (6347, 6392), True, 'import tifffile as tiff\n'), ((6420, 6466), 'tifffile.imread', 'tiff.imread', (["self._melc_bleach.iloc[i]['path']"], {}), "(self._melc_bleach.iloc[i]['path'])\n", (6431, 6466), True, 'import tifffile as tiff\n'), ((6505, 6557), 'MELC.utils.registration_daria.register', 'register', (['ref_image', 'phasebleach_image', 'bleach_image'], {}), 
'(ref_image, phasebleach_image, bleach_image)\n', (6513, 6557), False, 'from MELC.utils.registration_daria import register\n'), ((6756, 6844), 'tifffile.imsave', 'tiff.imsave', (['(self._path_registered_bleach + filename_bleach)', 'registered_bleach_image'], {}), '(self._path_registered_bleach + filename_bleach,\n registered_bleach_image)\n', (6767, 6844), True, 'import tifffile as tiff\n'), ((7083, 7132), 'tifffile.imread', 'tiff.imread', (["self._melc_phase.iloc[p_idx]['path']"], {}), "(self._melc_phase.iloc[p_idx]['path'])\n", (7094, 7132), True, 'import tifffile as tiff\n'), ((7166, 7215), 'tifffile.imread', 'tiff.imread', (["self._melc_fluor.iloc[i + 1]['path']"], {}), "(self._melc_fluor.iloc[i + 1]['path'])\n", (7177, 7215), True, 'import tifffile as tiff\n'), ((7251, 7296), 'MELC.utils.registration_daria.register', 'register', (['ref_image', 'phase_image', 'phase_image'], {}), '(ref_image, phase_image, phase_image)\n', (7259, 7296), False, 'from MELC.utils.registration_daria import register\n'), ((7334, 7386), 'MELC.utils.registration_daria.register', 'register', (['ref_image', 'phase_image', 'fluorescence_image'], {}), '(ref_image, phase_image, fluorescence_image)\n', (7342, 7386), False, 'from MELC.utils.registration_daria import register\n'), ((7586, 7671), 'tifffile.imsave', 'tiff.imsave', (['(self._path_registered_fluor + filename_fluor)', 'registered_fluor_image'], {}), '(self._path_registered_fluor + filename_fluor,\n registered_fluor_image)\n', (7597, 7671), True, 'import tifffile as tiff\n'), ((7680, 7765), 'tifffile.imsave', 'tiff.imsave', (['(self._path_registered_phase + filename_fluor)', 'registered_fluor_image'], {}), '(self._path_registered_phase + filename_fluor,\n registered_fluor_image)\n', (7691, 7765), True, 'import tifffile as tiff\n'), ((8047, 8080), 'numpy.int16', 'np.int16', (['registered_bleach_image'], {}), '(registered_bleach_image)\n', (8055, 8080), True, 'import numpy as np\n'), ((8101, 8133), 'numpy.int16', 'np.int16', (['registered_fluor_image'], {}), '(registered_fluor_image)\n', (8109, 8133), True, 'import numpy as np\n'), ((8154, 8186), 'numpy.int16', 'np.int16', (['registered_phase_image'], {}), '(registered_phase_image)\n', (8162, 8186), True, 'import numpy as np\n'), ((8838, 8901), 'tifffile.imsave', 'tiff.imsave', (['(self._path_bg_corr_f + filename_fluor)', 'fluor_wo_bg'], {}), '(self._path_bg_corr_f + filename_fluor, fluor_wo_bg)\n', (8849, 8901), True, 'import tifffile as tiff\n'), ((8993, 9050), 'tifffile.imsave', 'tiff.imsave', (['(self._path_bg_corr_p + filename_fluor)', 'phase'], {}), '(self._path_bg_corr_p + filename_fluor, phase)\n', (9004, 9050), True, 'import tifffile as tiff\n'), ((9324, 9401), 'tifffile.imsave', 'tiff.imsave', (['(self._path_normalized_f + filename_fluor)', 'fluor_wo_bg_normalized'], {}), '(self._path_normalized_f + filename_fluor, fluor_wo_bg_normalized)\n', (9335, 9401), True, 'import tifffile as tiff\n'), ((9506, 9580), 'tifffile.imsave', 'tiff.imsave', (['(self._path_normalized_p + filename_fluor)', 'phase_bc_normalized'], {}), '(self._path_normalized_p + filename_fluor, phase_bc_normalized)\n', (9517, 9580), True, 'import tifffile as tiff\n'), ((9900, 9953), 'numpy.percentile', 'np.percentile', (['img_float[20:-20, 20:-20]', '(100 - 0.135)'], {}), '(img_float[20:-20, 20:-20], 100 - 0.135)\n', (9913, 9953), True, 'import numpy as np\n'), ((10312, 10325), 'numpy.ravel', 'np.ravel', (['img'], {}), '(img)\n', (10320, 10325), True, 'import numpy as np\n'), ((5091, 5154), 'glob.glob', 'glob.glob', 
(["(calibration_path + '*_cal_b001_5000_XF116-2_000.tif')"], {}), "(calibration_path + '*_cal_b001_5000_XF116-2_000.tif')\n", (5100, 5154), False, 'import glob\n'), ((5209, 5272), 'glob.glob', 'glob.glob', (["(calibration_path + '*_cal_b001_5000_XF111-2_000.tif')"], {}), "(calibration_path + '*_cal_b001_5000_XF111-2_000.tif')\n", (5218, 5272), False, 'import glob\n'), ((5325, 5388), 'glob.glob', 'glob.glob', (["(calibration_path + '*_cal_d001_5000_XF116-2_000.tif')"], {}), "(calibration_path + '*_cal_d001_5000_XF116-2_000.tif')\n", (5334, 5388), False, 'import glob\n'), ((5441, 5504), 'glob.glob', 'glob.glob', (["(calibration_path + '*_cal_d001_5000_XF111-2_000.tif')"], {}), "(calibration_path + '*_cal_d001_5000_XF111-2_000.tif')\n", (5450, 5504), False, 'import glob\n'), ((6206, 6302), 'numpy.where', 'np.where', (["(self._melc_phasebleach['order_index'] == self._melc_bleach.iloc[i][\n 'order_index'])"], {}), "(self._melc_phasebleach['order_index'] == self._melc_bleach.iloc[i]\n ['order_index'])\n", (6214, 6302), True, 'import numpy as np\n'), ((6964, 7057), 'numpy.where', 'np.where', (["(self._melc_phase['order_index'] == self._melc_fluor.iloc[i + 1]['order_index']\n )"], {}), "(self._melc_phase['order_index'] == self._melc_fluor.iloc[i + 1][\n 'order_index'])\n", (6972, 7057), True, 'import numpy as np\n')]
|
import numpy as np
from utils import C_bohr
__all__ = ['Grid']
class Grid:
def __init__(self, npoints, rgrid, solver='sinc', alpha=0.0, rbar=0.0):
self.ngrid = npoints
self.rmin = rgrid[0] / C_bohr
self.rmax = rgrid[1] / C_bohr
rbar = rbar / C_bohr
self.solver = solver.lower()
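        # defaults for a uniform grid: unit Jacobian (Gy) and no mapping correction (Fy)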
self.Gy = np.ones(self.ngrid)
self.Fy = np.zeros(self.ngrid)
if self.solver == 'sinc':
self.rgrid, self.rstep = self.generate_sinc_uniform_grid()
else:
self.rgrid, self.rstep = self.generate_fourier_uniform_grid()
if alpha > 0.0:
# mapping is allowed with sinc method only
self.solver = 'sinc'
self.rmin = self.get_grid_bounding_values(self.rmin, rbar, alpha)
self.rmax = self.get_grid_bounding_values(self.rmax, rbar, alpha)
self.rgrid, ygrid = self.generate_nonuniform_grid(alpha, rbar)
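            # Gy is the Jacobian dR/dy of the analytic mapping R(y) = rbar*((1+y)/(1-y))**(1/alpha);
            # Fy = (1 - 1/alpha**2) / (1 - y**2)**2 is the accompanying mapping correction factor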
gy_power1 = np.power(1.0+ygrid, (1.0/alpha)-1.0)
gy_power2 = np.power(1.0-ygrid, (1.0/alpha)+1.0)
self.Gy = (2.0*rbar/alpha) * gy_power1 / gy_power2
fy_power = (np.power((1.0 - np.power(ygrid, 2)), 2))
self.Fy = (1.0 - (1.0/(alpha**2))) / fy_power
def get_grid_points(self):
return self.rgrid * C_bohr
def get_grid_bounding_values(self, rlimit, rbar, alpha):
return ((rlimit/rbar)**alpha - 1.0) / ((rlimit/rbar)**alpha + 1.0)
def generate_fourier_uniform_grid(self):
return np.linspace(
self.rmin, self.rmax, num=self.ngrid, endpoint=False, retstep=True
)
def generate_sinc_uniform_grid(self):
return np.linspace(
self.rmin, self.rmax, num=self.ngrid, endpoint=True, retstep=True
)
def calculate_sinc_basis_functions(self, r):
# numpy sinc function is defined as sin(pi*x)/(pi*x) where pi is
# used for normalization. Thus I do not need to multiply by pi
# for j in range(0, self.nch*self.ngrid):
for j in range(0, self.ngrid):
arg = (r - self.rgrid[j]) / self.rstep
# return one column from a matrix
return np.sinc(arg)
def generate_nonuniform_grid(self, alpha, rbar):
ystep = (self.rmax - self.rmin) / (self.ngrid - 1) # / ngrid - 1 ??
# ygrid = np.ogrid[self.rmin+ystep:self.rmax+ystep:ystep]
# ygrid = np.ogrid[self.rmin:self.rmax:ystep]
# ygrid = np.linspace(self.rmin, self.rmax, num=self.ngrid)
# ygrid = np.arange(self.rmin, self.rmax, step=ystep)
# ygrid = np.linspace(
# self.rmin, self.rmax, num=self.ngrid, endpoint=True
# )
ygrid = np.empty(self.ngrid)
for j in range(1, self.ngrid+1):
ygrid[j-1] = self.rmin + ystep*(j-1.0)
Ry = rbar * np.power((1.0+ygrid) / (1.0-ygrid), 1.0/alpha)
print(ygrid)
print(len(ygrid))
return Ry, ygrid
|
[
"numpy.ones",
"numpy.power",
"numpy.sinc",
"numpy.zeros",
"numpy.linspace",
"numpy.empty"
] |
[((346, 365), 'numpy.ones', 'np.ones', (['self.ngrid'], {}), '(self.ngrid)\n', (353, 365), True, 'import numpy as np\n'), ((384, 404), 'numpy.zeros', 'np.zeros', (['self.ngrid'], {}), '(self.ngrid)\n', (392, 404), True, 'import numpy as np\n'), ((1523, 1602), 'numpy.linspace', 'np.linspace', (['self.rmin', 'self.rmax'], {'num': 'self.ngrid', 'endpoint': '(False)', 'retstep': '(True)'}), '(self.rmin, self.rmax, num=self.ngrid, endpoint=False, retstep=True)\n', (1534, 1602), True, 'import numpy as np\n'), ((1684, 1762), 'numpy.linspace', 'np.linspace', (['self.rmin', 'self.rmax'], {'num': 'self.ngrid', 'endpoint': '(True)', 'retstep': '(True)'}), '(self.rmin, self.rmax, num=self.ngrid, endpoint=True, retstep=True)\n', (1695, 1762), True, 'import numpy as np\n'), ((2181, 2193), 'numpy.sinc', 'np.sinc', (['arg'], {}), '(arg)\n', (2188, 2193), True, 'import numpy as np\n'), ((2698, 2718), 'numpy.empty', 'np.empty', (['self.ngrid'], {}), '(self.ngrid)\n', (2706, 2718), True, 'import numpy as np\n'), ((970, 1010), 'numpy.power', 'np.power', (['(1.0 + ygrid)', '(1.0 / alpha - 1.0)'], {}), '(1.0 + ygrid, 1.0 / alpha - 1.0)\n', (978, 1010), True, 'import numpy as np\n'), ((1031, 1071), 'numpy.power', 'np.power', (['(1.0 - ygrid)', '(1.0 / alpha + 1.0)'], {}), '(1.0 - ygrid, 1.0 / alpha + 1.0)\n', (1039, 1071), True, 'import numpy as np\n'), ((2833, 2885), 'numpy.power', 'np.power', (['((1.0 + ygrid) / (1.0 - ygrid))', '(1.0 / alpha)'], {}), '((1.0 + ygrid) / (1.0 - ygrid), 1.0 / alpha)\n', (2841, 2885), True, 'import numpy as np\n'), ((1172, 1190), 'numpy.power', 'np.power', (['ygrid', '(2)'], {}), '(ygrid, 2)\n', (1180, 1190), True, 'import numpy as np\n')]
|
import os
import glob
from tqdm import tqdm
import argparse
from PIL import Image
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.utils.data as data
from torchvision import transforms, datasets
from networks.dan import DAN
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('--aff_path', type=str, default='datasets/AfectNet/', help='AffectNet dataset path.')
parser.add_argument('--batch_size', type=int, default=256, help='Batch size.')
parser.add_argument('--lr', type=float, default=0.0001, help='Initial learning rate for adam.')
parser.add_argument('--workers', default=8, type=int, help='Number of data loading workers.')
parser.add_argument('--epochs', type=int, default=40, help='Total training epochs.')
parser.add_argument('--num_head', type=int, default=4, help='Number of attention head.')
parser.add_argument('--num_class', type=int, default=8, help='Number of class.')
return parser.parse_args()
class AffectNet(data.Dataset):
def __init__(self, aff_path, phase, use_cache = True, transform = None):
self.phase = phase
self.transform = transform
self.aff_path = aff_path
if use_cache:
cache_path = os.path.join(aff_path,'affectnet.csv')
if os.path.exists(cache_path):
df = pd.read_csv(cache_path)
else:
df = self.get_df()
df.to_csv(cache_path)
else:
df = self.get_df()
self.data = df[df['phase'] == phase]
self.file_paths = self.data.loc[:, 'img_path'].values
self.label = self.data.loc[:, 'label'].values
_, self.sample_counts = np.unique(self.label, return_counts=True)
# print(f' distribution of {phase} samples: {self.sample_counts}')
def get_df(self):
train_path = os.path.join(self.aff_path,'train_set/')
val_path = os.path.join(self.aff_path,'val_set/')
data = []
for anno in glob.glob(train_path + 'annotations/*_exp.npy'):
idx = os.path.basename(anno).split('_')[0]
img_path = os.path.join(train_path,f'images/{idx}.jpg')
label = int(np.load(anno))
data.append(['train',img_path,label])
for anno in glob.glob(val_path + 'annotations/*_exp.npy'):
idx = os.path.basename(anno).split('_')[0]
img_path = os.path.join(val_path,f'images/{idx}.jpg')
label = int(np.load(anno))
data.append(['val',img_path,label])
return pd.DataFrame(data = data,columns = ['phase','img_path','label'])
def __len__(self):
return len(self.file_paths)
def __getitem__(self, idx):
path = self.file_paths[idx]
image = Image.open(path).convert('RGB')
label = self.label[idx]
if self.transform is not None:
image = self.transform(image)
return image, label
class AffinityLoss(nn.Module):
def __init__(self, device, num_class=8, feat_dim=512):
super(AffinityLoss, self).__init__()
self.num_class = num_class
self.feat_dim = feat_dim
self.gap = nn.AdaptiveAvgPool2d(1)
self.device = device
self.centers = nn.Parameter(torch.randn(self.num_class, self.feat_dim).to(device))
def forward(self, x, labels):
x = self.gap(x).view(x.size(0), -1)
batch_size = x.size(0)
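        # squared Euclidean distance between each feature and every class center: ||x||^2 + ||c||^2 - 2*x.c (cross term added below via addmm_)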
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_class) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_class, batch_size).t()
distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
classes = torch.arange(self.num_class).long().to(self.device)
labels = labels.unsqueeze(1).expand(batch_size, self.num_class)
mask = labels.eq(classes.expand(batch_size, self.num_class))
dist = distmat * mask.float()
dist = dist / self.centers.var(dim=0).sum()
loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size
return loss
class PartitionLoss(nn.Module):
def __init__(self, ):
super(PartitionLoss, self).__init__()
def forward(self, x):
num_head = x.size(1)
if num_head > 1:
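            # the loss shrinks as the variance across attention heads grows, pushing the heads to diverge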
var = x.var(dim=1).mean()
loss = torch.log(1+num_head/var)
else:
loss = 0
return loss
class ImbalancedDatasetSampler(data.sampler.Sampler):
def __init__(self, dataset, indices: list = None, num_samples: int = None):
self.indices = list(range(len(dataset))) if indices is None else indices
self.num_samples = len(self.indices) if num_samples is None else num_samples
df = pd.DataFrame()
df["label"] = self._get_labels(dataset)
df.index = self.indices
df = df.sort_index()
label_to_count = df["label"].value_counts()
weights = 1.0 / label_to_count[df["label"]]
self.weights = torch.DoubleTensor(weights.to_list())
# self.weights = self.weights.clamp(min=1e-5)
def _get_labels(self, dataset):
if isinstance(dataset, datasets.ImageFolder):
return [x[1] for x in dataset.imgs]
elif isinstance(dataset, torch.utils.data.Subset):
return [dataset.dataset.imgs[i][1] for i in dataset.indices]
else:
raise NotImplementedError
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
def run_training():
args = parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.enabled = True
model = DAN(num_class=args.num_class, num_head=args.num_head)
model.to(device)
data_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([
transforms.RandomAffine(20, scale=(0.8, 1), translate=(0.2, 0.2)),
], p=0.7),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
transforms.RandomErasing(),
])
# train_dataset = AffectNet(args.aff_path, phase = 'train', transform = data_transforms) # loading dynamically
train_dataset = datasets.ImageFolder(f'{args.aff_path}/train', transform = data_transforms) # loading statically
if args.num_class == 7: # ignore the 8-th class
idx = [i for i in range(len(train_dataset)) if train_dataset.imgs[i][1] != 7]
train_dataset = data.Subset(train_dataset, idx)
print('Whole train set size:', train_dataset.__len__())
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size = args.batch_size,
num_workers = args.workers,
sampler=ImbalancedDatasetSampler(train_dataset),
shuffle = False,
pin_memory = True)
data_transforms_val = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
# val_dataset = AffectNet(args.aff_path, phase = 'val', transform = data_transforms_val) # loading dynamically
val_dataset = datasets.ImageFolder(f'{args.aff_path}/val', transform = data_transforms_val) # loading statically
if args.num_class == 7: # ignore the 8-th class
idx = [i for i in range(len(val_dataset)) if val_dataset.imgs[i][1] != 7]
val_dataset = data.Subset(val_dataset, idx)
print('Validation set size:', val_dataset.__len__())
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size = args.batch_size,
num_workers = args.workers,
shuffle = False,
pin_memory = True)
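    # total objective: cross-entropy + affinity (class-center) loss + partition (head-diversity) loss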
criterion_cls = torch.nn.CrossEntropyLoss().to(device)
criterion_af = AffinityLoss(device, num_class=args.num_class)
criterion_pt = PartitionLoss()
params = list(model.parameters()) + list(criterion_af.parameters())
optimizer = torch.optim.Adam(params,args.lr,weight_decay = 0)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma = 0.6)
best_acc = 0
for epoch in tqdm(range(1, args.epochs + 1)):
running_loss = 0.0
correct_sum = 0
iter_cnt = 0
model.train()
for (imgs, targets) in train_loader:
iter_cnt += 1
optimizer.zero_grad()
imgs = imgs.to(device)
targets = targets.to(device)
out,feat,heads = model(imgs)
loss = criterion_cls(out,targets) + criterion_af(feat,targets) + criterion_pt(heads)
loss.backward()
optimizer.step()
running_loss += loss
_, predicts = torch.max(out, 1)
correct_num = torch.eq(predicts, targets).sum()
correct_sum += correct_num
acc = correct_sum.float() / float(train_dataset.__len__())
running_loss = running_loss/iter_cnt
tqdm.write('[Epoch %d] Training accuracy: %.4f. Loss: %.3f. LR %.6f' % (epoch, acc, running_loss,optimizer.param_groups[0]['lr']))
with torch.no_grad():
running_loss = 0.0
iter_cnt = 0
bingo_cnt = 0
sample_cnt = 0
model.eval()
for imgs, targets in val_loader:
imgs = imgs.to(device)
targets = targets.to(device)
out,feat,heads = model(imgs)
loss = criterion_cls(out,targets) + criterion_af(feat,targets) + criterion_pt(heads)
running_loss += loss
iter_cnt+=1
_, predicts = torch.max(out, 1)
correct_num = torch.eq(predicts,targets)
bingo_cnt += correct_num.sum().cpu()
sample_cnt += out.size(0)
running_loss = running_loss/iter_cnt
scheduler.step()
acc = bingo_cnt.float()/float(sample_cnt)
acc = np.around(acc.numpy(),4)
best_acc = max(acc,best_acc)
tqdm.write("[Epoch %d] Validation accuracy:%.4f. Loss:%.3f" % (epoch, acc, running_loss))
tqdm.write("best_acc:" + str(best_acc))
if args.num_class == 7 and acc > 0.65:
torch.save({'iter': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),},
os.path.join('checkpoints', "affecnet7_epoch"+str(epoch)+"_acc"+str(acc)+".pth"))
tqdm.write('Model saved.')
elif args.num_class == 8 and acc > 0.62:
torch.save({'iter': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),},
os.path.join('checkpoints', "affecnet8_epoch"+str(epoch)+"_acc"+str(acc)+".pth"))
tqdm.write('Model saved.')
if __name__ == "__main__":
run_training()
|
[
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.max",
"torch.pow",
"torch.eq",
"torch.cuda.is_available",
"torch.arange",
"os.path.exists",
"argparse.ArgumentParser",
"tqdm.tqdm.write",
"torchvision.datasets.ImageFolder",
"torch.nn.AdaptiveAvgPool2d",
"pandas.DataFrame",
"torchvision.transforms.ToTensor",
"torch.randn",
"glob.glob",
"torch.utils.data.append",
"torchvision.transforms.RandomAffine",
"torch.optim.lr_scheduler.ExponentialLR",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomErasing",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"networks.dan.DAN",
"torch.optim.Adam",
"PIL.Image.open",
"torch.log",
"numpy.unique",
"torch.multinomial",
"os.path.join",
"torch.utils.data.Subset",
"os.path.basename",
"torch.utils.data.DataLoader",
"torch.no_grad",
"numpy.load"
] |
[((298, 323), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (321, 323), False, 'import argparse\n'), ((5792, 5817), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5815, 5817), False, 'import torch\n'), ((5972, 6025), 'networks.dan.DAN', 'DAN', ([], {'num_class': 'args.num_class', 'num_head': 'args.num_head'}), '(num_class=args.num_class, num_head=args.num_head)\n', (5975, 6025), False, 'from networks.dan import DAN\n'), ((6661, 6734), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['f"""{args.aff_path}/train"""'], {'transform': 'data_transforms'}), "(f'{args.aff_path}/train', transform=data_transforms)\n", (6681, 6734), False, 'from torchvision import transforms, datasets\n'), ((7907, 7982), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['f"""{args.aff_path}/val"""'], {'transform': 'data_transforms_val'}), "(f'{args.aff_path}/val', transform=data_transforms_val)\n", (7927, 7982), False, 'from torchvision import transforms, datasets\n'), ((8278, 8408), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.workers', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(val_dataset, batch_size=args.batch_size,\n num_workers=args.workers, shuffle=False, pin_memory=True)\n', (8305, 8408), False, 'import torch\n'), ((8854, 8903), 'torch.optim.Adam', 'torch.optim.Adam', (['params', 'args.lr'], {'weight_decay': '(0)'}), '(params, args.lr, weight_decay=0)\n', (8870, 8903), False, 'import torch\n'), ((8920, 8980), 'torch.optim.lr_scheduler.ExponentialLR', 'torch.optim.lr_scheduler.ExponentialLR', (['optimizer'], {'gamma': '(0.6)'}), '(optimizer, gamma=0.6)\n', (8958, 8980), False, 'import torch\n'), ((1733, 1774), 'numpy.unique', 'np.unique', (['self.label'], {'return_counts': '(True)'}), '(self.label, return_counts=True)\n', (1742, 1774), True, 'import numpy as np\n'), ((1894, 1935), 'os.path.join', 'os.path.join', (['self.aff_path', '"""train_set/"""'], {}), "(self.aff_path, 'train_set/')\n", (1906, 1935), False, 'import os\n'), ((1954, 1993), 'os.path.join', 'os.path.join', (['self.aff_path', '"""val_set/"""'], {}), "(self.aff_path, 'val_set/')\n", (1966, 1993), False, 'import os\n'), ((2040, 2087), 'glob.glob', 'glob.glob', (["(train_path + 'annotations/*_exp.npy')"], {}), "(train_path + 'annotations/*_exp.npy')\n", (2049, 2087), False, 'import glob\n'), ((2330, 2375), 'glob.glob', 'glob.glob', (["(val_path + 'annotations/*_exp.npy')"], {}), "(val_path + 'annotations/*_exp.npy')\n", (2339, 2375), False, 'import glob\n'), ((2609, 2672), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': "['phase', 'img_path', 'label']"}), "(data=data, columns=['phase', 'img_path', 'label'])\n", (2621, 2672), True, 'import pandas as pd\n'), ((3225, 3248), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (3245, 3248), True, 'import torch.nn as nn\n'), ((4800, 4814), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4812, 4814), True, 'import pandas as pd\n'), ((6924, 6955), 'torch.utils.data.Subset', 'data.Subset', (['train_dataset', 'idx'], {}), '(train_dataset, idx)\n', (6935, 6955), True, 'import torch.utils.data as data\n'), ((8168, 8197), 'torch.utils.data.Subset', 'data.Subset', (['val_dataset', 'idx'], {}), '(val_dataset, idx)\n', (8179, 8197), True, 'import torch.utils.data as data\n'), ((9853, 9989), 'tqdm.tqdm.write', 'tqdm.write', (["('[Epoch %d] Training accuracy: %.4f. Loss: %.3f. 
LR %.6f' % (epoch, acc,\n running_loss, optimizer.param_groups[0]['lr']))"], {}), "('[Epoch %d] Training accuracy: %.4f. Loss: %.3f. LR %.6f' % (\n epoch, acc, running_loss, optimizer.param_groups[0]['lr']))\n", (9863, 9989), False, 'from tqdm import tqdm\n'), ((1274, 1313), 'os.path.join', 'os.path.join', (['aff_path', '"""affectnet.csv"""'], {}), "(aff_path, 'affectnet.csv')\n", (1286, 1313), False, 'import os\n'), ((1328, 1354), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (1342, 1354), False, 'import os\n'), ((2167, 2212), 'os.path.join', 'os.path.join', (['train_path', 'f"""images/{idx}.jpg"""'], {}), "(train_path, f'images/{idx}.jpg')\n", (2179, 2212), False, 'import os\n'), ((2263, 2302), 'torch.utils.data.append', 'data.append', (["['train', img_path, label]"], {}), "(['train', img_path, label])\n", (2274, 2302), True, 'import torch.utils.data as data\n'), ((2455, 2498), 'os.path.join', 'os.path.join', (['val_path', 'f"""images/{idx}.jpg"""'], {}), "(val_path, f'images/{idx}.jpg')\n", (2467, 2498), False, 'import os\n'), ((2549, 2586), 'torch.utils.data.append', 'data.append', (["['val', img_path, label]"], {}), "(['val', img_path, label])\n", (2560, 2586), True, 'import torch.utils.data as data\n'), ((4390, 4419), 'torch.log', 'torch.log', (['(1 + num_head / var)'], {}), '(1 + num_head / var)\n', (4399, 4419), False, 'import torch\n'), ((5746, 5771), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5769, 5771), False, 'import torch\n'), ((6108, 6137), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (6125, 6137), False, 'from torchvision import transforms, datasets\n'), ((6147, 6180), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (6178, 6180), False, 'from torchvision import transforms, datasets\n'), ((6330, 6351), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6349, 6351), False, 'from torchvision import transforms, datasets\n'), ((6361, 6436), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (6381, 6436), False, 'from torchvision import transforms, datasets\n'), ((6479, 6505), 'torchvision.transforms.RandomErasing', 'transforms.RandomErasing', ([], {}), '()\n', (6503, 6505), False, 'from torchvision import transforms, datasets\n'), ((7514, 7543), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (7531, 7543), False, 'from torchvision import transforms, datasets\n'), ((7553, 7574), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7572, 7574), False, 'from torchvision import transforms, datasets\n'), ((7584, 7659), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (7604, 7659), False, 'from torchvision import transforms, datasets\n'), ((8625, 8652), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (8650, 8652), False, 'import torch\n'), ((9615, 9632), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (9624, 9632), False, 'import torch\n'), ((10006, 10021), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10019, 10021), False, 'import torch\n'), ((10958, 11051), 'tqdm.tqdm.write', 'tqdm.write', 
(["('[Epoch %d] Validation accuracy:%.4f. Loss:%.3f' % (epoch, acc, running_loss))"], {}), "('[Epoch %d] Validation accuracy:%.4f. Loss:%.3f' % (epoch, acc,\n running_loss))\n", (10968, 11051), False, 'from tqdm import tqdm\n'), ((1377, 1400), 'pandas.read_csv', 'pd.read_csv', (['cache_path'], {}), '(cache_path)\n', (1388, 1400), True, 'import pandas as pd\n'), ((2236, 2249), 'numpy.load', 'np.load', (['anno'], {}), '(anno)\n', (2243, 2249), True, 'import numpy as np\n'), ((2522, 2535), 'numpy.load', 'np.load', (['anno'], {}), '(anno)\n', (2529, 2535), True, 'import numpy as np\n'), ((2819, 2835), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (2829, 2835), False, 'from PIL import Image\n'), ((5536, 5603), 'torch.multinomial', 'torch.multinomial', (['self.weights', 'self.num_samples'], {'replacement': '(True)'}), '(self.weights, self.num_samples, replacement=True)\n', (5553, 5603), False, 'import torch\n'), ((10538, 10555), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (10547, 10555), False, 'import torch\n'), ((10587, 10614), 'torch.eq', 'torch.eq', (['predicts', 'targets'], {}), '(predicts, targets)\n', (10595, 10614), False, 'import torch\n'), ((11469, 11495), 'tqdm.tqdm.write', 'tqdm.write', (['"""Model saved."""'], {}), "('Model saved.')\n", (11479, 11495), False, 'from tqdm import tqdm\n'), ((3315, 3357), 'torch.randn', 'torch.randn', (['self.num_class', 'self.feat_dim'], {}), '(self.num_class, self.feat_dim)\n', (3326, 3357), False, 'import torch\n'), ((6231, 6296), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', (['(20)'], {'scale': '(0.8, 1)', 'translate': '(0.2, 0.2)'}), '(20, scale=(0.8, 1), translate=(0.2, 0.2))\n', (6254, 6296), False, 'from torchvision import transforms, datasets\n'), ((9659, 9686), 'torch.eq', 'torch.eq', (['predicts', 'targets'], {}), '(predicts, targets)\n', (9667, 9686), False, 'import torch\n'), ((11867, 11893), 'tqdm.tqdm.write', 'tqdm.write', (['"""Model saved."""'], {}), "('Model saved.')\n", (11877, 11893), False, 'from tqdm import tqdm\n'), ((2107, 2129), 'os.path.basename', 'os.path.basename', (['anno'], {}), '(anno)\n', (2123, 2129), False, 'import os\n'), ((2395, 2417), 'os.path.basename', 'os.path.basename', (['anno'], {}), '(anno)\n', (2411, 2417), False, 'import os\n'), ((3769, 3797), 'torch.arange', 'torch.arange', (['self.num_class'], {}), '(self.num_class)\n', (3781, 3797), False, 'import torch\n'), ((3499, 3514), 'torch.pow', 'torch.pow', (['x', '(2)'], {}), '(x, 2)\n', (3508, 3514), False, 'import torch\n'), ((3597, 3623), 'torch.pow', 'torch.pow', (['self.centers', '(2)'], {}), '(self.centers, 2)\n', (3606, 3623), False, 'import torch\n')]
|
# Licensed under an MIT open source license - see LICENSE
'''
Test functions for VCA
'''
from unittest import TestCase
import numpy as np
import numpy.testing as npt
from ..statistics import VCA, VCA_Distance
from ._testing_data import \
dataset1, dataset2, computed_data, computed_distances
class testVCA(TestCase):
def setUp(self):
self.dataset1 = dataset1
self.dataset2 = dataset2
def test_VCA_method(self):
self.tester = VCA(dataset1["cube"][0],
dataset1["cube"][1],
slice_sizes=[1.0])
self.tester.run()
assert np.allclose(self.tester.ps1D, computed_data['vca_val'])
def test_VCA_distance(self):
self.tester_dist = \
VCA_Distance(dataset1["cube"],
dataset2["cube"]).distance_metric()
npt.assert_almost_equal(self.tester_dist.distance,
computed_distances['vca_distance'])
|
[
"numpy.testing.assert_almost_equal",
"numpy.allclose"
] |
[((627, 682), 'numpy.allclose', 'np.allclose', (['self.tester.ps1D', "computed_data['vca_val']"], {}), "(self.tester.ps1D, computed_data['vca_val'])\n", (638, 682), True, 'import numpy as np\n'), ((858, 949), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['self.tester_dist.distance', "computed_distances['vca_distance']"], {}), "(self.tester_dist.distance, computed_distances[\n 'vca_distance'])\n", (881, 949), True, 'import numpy.testing as npt\n')]
|
# -*- coding:utf-8 -*-
import numpy as np
def sin_sin(x,y):
return 1000*abs(np.sin(x/2000*np.pi) + np.sin(y/2000.0*np.pi))+100
|
[
"numpy.sin"
] |
[((80, 104), 'numpy.sin', 'np.sin', (['(x / 2000 * np.pi)'], {}), '(x / 2000 * np.pi)\n', (86, 104), True, 'import numpy as np\n'), ((103, 129), 'numpy.sin', 'np.sin', (['(y / 2000.0 * np.pi)'], {}), '(y / 2000.0 * np.pi)\n', (109, 129), True, 'import numpy as np\n')]
|
from __future__ import division
import numpy as np
from path import Path
from imageio import imread
from skimage.transform import resize as imresize
from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans
from datetime import datetime
class KittiRawLoader(object):
def __init__(self,
dataset_dir,
static_frames_file=None,
img_height=128,
img_width=416,
min_disp=0.2,
get_depth=False,
get_pose=False,
depth_size_ratio=1):
dir_path = Path(__file__).realpath().dirname()
test_scene_file = dir_path/'test_scenes.txt'
self.from_speed = static_frames_file is None
if static_frames_file is not None:
self.collect_static_frames(static_frames_file)
with open(test_scene_file, 'r') as f:
test_scenes = f.readlines()
self.test_scenes = [t[:-1] for t in test_scenes]
self.dataset_dir = dataset_dir
self.img_height = img_height
self.img_width = img_width
self.cam_ids = ['02', '03']
self.date_list = ['2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']
self.min_disp = min_disp
self.get_depth = get_depth
self.get_pose = get_pose
self.depth_size_ratio = depth_size_ratio
self.collect_train_folders()
def collect_static_frames(self, static_frames_file):
with open(static_frames_file, 'r') as f:
frames = f.readlines()
self.static_frames = {}
for fr in frames:
if fr == '\n':
continue
date, drive, frame_id = fr.split(' ')
curr_fid = '%.10d' % (np.int(frame_id[:-1]))
if drive not in self.static_frames.keys():
self.static_frames[drive] = []
self.static_frames[drive].append(curr_fid)
def collect_train_folders(self):
self.scenes = []
for date in self.date_list:
drive_set = (self.dataset_dir/date).dirs()
for dr in drive_set:
if dr.name[:-5] not in self.test_scenes:
self.scenes.append(dr)
def collect_scenes(self, drive):
train_scenes = []
for c in self.cam_ids:
oxts = sorted((drive/'oxts'/'data').files('*.txt'))
with open(drive/'oxts'/'timestamps.txt', 'r') as f:
times = [datetime.strptime(time_string[:-4], "%Y-%m-%d %H:%M:%S.%f") for time_string in f.readlines()]
scene_data = {'cid': c,
'dir': drive,
'speed': [],
'time': [t.timestamp() for t in times],
'frame_id': [],
'pose': [],
'rel_path': drive.name + '_' + c}
scale = None
origin = None
imu2velo = read_calib_file(drive.parent/'calib_imu_to_velo.txt')
velo2cam = read_calib_file(drive.parent/'calib_velo_to_cam.txt')
cam2cam = read_calib_file(drive.parent/'calib_cam_to_cam.txt')
velo2cam_mat = transform_from_rot_trans(velo2cam['R'], velo2cam['T'])
imu2velo_mat = transform_from_rot_trans(imu2velo['R'], imu2velo['T'])
cam_2rect_mat = transform_from_rot_trans(cam2cam['R_rect_00'], np.zeros(3))
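            # chain the calibrations to map IMU coordinates into the rectified camera frame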
imu2cam = cam_2rect_mat @ velo2cam_mat @ imu2velo_mat
for n, f in enumerate(oxts):
metadata = np.genfromtxt(f)
speed = metadata[8:11]
scene_data['speed'].append(speed)
scene_data['frame_id'].append('{:010d}'.format(n))
lat = metadata[0]
if scale is None:
scale = np.cos(lat * np.pi / 180.)
pose_matrix = pose_from_oxts_packet(metadata[:6], scale)
if origin is None:
origin = pose_matrix
odo_pose = imu2cam @ np.linalg.inv(origin) @ pose_matrix @ np.linalg.inv(imu2cam)
scene_data['pose'].append(odo_pose[:3])
sample = self.load_image(scene_data, 0)
if sample is None:
return []
scene_data['P_rect'] = self.get_P_rect(scene_data, sample[1], sample[2])
scene_data['intrinsics'] = scene_data['P_rect'][:, :3]
train_scenes.append(scene_data)
return train_scenes
def get_scene_imgs(self, scene_data):
def construct_sample(scene_data, i, frame_id):
sample = {"img": self.load_image(scene_data, i)[0], "id": frame_id}
if self.get_depth:
sample['depth'] = self.get_depth_map(scene_data, i)
if self.get_pose:
sample['pose'] = scene_data['pose'][i]
return sample
if self.from_speed:
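            # no static-frames file: subsample by accumulated motion estimated from the oxts speed and yield a frame once it exceeds min_disp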
cum_displacement = np.zeros(3)
for i, (speed1, speed2, t1, t2) in enumerate(zip(scene_data['speed'][1:],
scene_data['speed'][:-1],
scene_data['time'][1:],
scene_data['time'][:-1])):
print(speed1, speed2, t1, t2)
cum_displacement += 0.5*(speed1 + speed2) / (t2-t1)
disp_mag = np.linalg.norm(cum_displacement)
if disp_mag > self.min_disp:
frame_id = scene_data['frame_id'][i]
yield construct_sample(scene_data, i, frame_id)
cum_displacement *= 0
else: # from static frame file
drive = str(scene_data['dir'].name)
for (i, frame_id) in enumerate(scene_data['frame_id']):
if (drive not in self.static_frames.keys()) or (frame_id not in self.static_frames[drive]):
yield construct_sample(scene_data, i, frame_id)
def get_P_rect(self, scene_data, zoom_x, zoom_y):
calib_file = scene_data['dir'].parent/'calib_cam_to_cam.txt'
filedata = read_calib_file(calib_file)
P_rect = np.reshape(filedata['P_rect_' + scene_data['cid']], (3, 4))
P_rect[0] *= zoom_x
P_rect[1] *= zoom_y
return P_rect
def load_image(self, scene_data, tgt_idx):
img_file = scene_data['dir']/'image_{}'.format(scene_data['cid'])/'data'/scene_data['frame_id'][tgt_idx]+'.png'
if not img_file.isfile():
return None
img = imread(img_file)
zoom_y = self.img_height/img.shape[0]
zoom_x = self.img_width/img.shape[1]
img = imresize(img, (self.img_height, self.img_width))
# workaround for skimage (float [0 .. 1]) and imageio (uint8 [0 .. 255]) interoperability
img = (img * 255).astype(np.uint8)
return img, zoom_x, zoom_y
def get_depth_map(self, scene_data, tgt_idx):
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
calib_dir = scene_data['dir'].parent
cam2cam = read_calib_file(calib_dir/'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir/'calib_velo_to_cam.txt')
velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
velo2cam = np.dot(R_cam2rect, velo2cam)
velo_file_name = scene_data['dir']/'velodyne_points'/'data'/'{}.bin'.format(scene_data['frame_id'][tgt_idx])
return generate_depth_map(velo_file_name, scene_data['P_rect'], velo2cam,
self.img_width, self.img_height, self.depth_size_ratio)
|
[
"numpy.eye",
"kitti_util.read_calib_file",
"numpy.reshape",
"numpy.genfromtxt",
"datetime.datetime.strptime",
"path.Path",
"numpy.array",
"numpy.dot",
"kitti_util.generate_depth_map",
"kitti_util.transform_from_rot_trans",
"numpy.zeros",
"numpy.cos",
"numpy.linalg.norm",
"imageio.imread",
"numpy.linalg.inv",
"kitti_util.pose_from_oxts_packet",
"skimage.transform.resize",
"numpy.int"
] |
[((6194, 6221), 'kitti_util.read_calib_file', 'read_calib_file', (['calib_file'], {}), '(calib_file)\n', (6209, 6221), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((6239, 6298), 'numpy.reshape', 'np.reshape', (["filedata['P_rect_' + scene_data['cid']]", '(3, 4)'], {}), "(filedata['P_rect_' + scene_data['cid']], (3, 4))\n", (6249, 6298), True, 'import numpy as np\n'), ((6617, 6633), 'imageio.imread', 'imread', (['img_file'], {}), '(img_file)\n', (6623, 6633), False, 'from imageio import imread\n'), ((6739, 6787), 'skimage.transform.resize', 'imresize', (['img', '(self.img_height, self.img_width)'], {}), '(img, (self.img_height, self.img_width))\n', (6747, 6787), True, 'from skimage.transform import resize as imresize\n'), ((7097, 7106), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (7103, 7106), True, 'import numpy as np\n'), ((7171, 7222), 'kitti_util.read_calib_file', 'read_calib_file', (["(calib_dir / 'calib_cam_to_cam.txt')"], {}), "(calib_dir / 'calib_cam_to_cam.txt')\n", (7186, 7222), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((7240, 7292), 'kitti_util.read_calib_file', 'read_calib_file', (["(calib_dir / 'calib_velo_to_cam.txt')"], {}), "(calib_dir / 'calib_velo_to_cam.txt')\n", (7255, 7292), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((7534, 7562), 'numpy.dot', 'np.dot', (['R_cam2rect', 'velo2cam'], {}), '(R_cam2rect, velo2cam)\n', (7540, 7562), True, 'import numpy as np\n'), ((7697, 7824), 'kitti_util.generate_depth_map', 'generate_depth_map', (['velo_file_name', "scene_data['P_rect']", 'velo2cam', 'self.img_width', 'self.img_height', 'self.depth_size_ratio'], {}), "(velo_file_name, scene_data['P_rect'], velo2cam, self.\n img_width, self.img_height, self.depth_size_ratio)\n", (7715, 7824), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((2993, 3048), 'kitti_util.read_calib_file', 'read_calib_file', (["(drive.parent / 'calib_imu_to_velo.txt')"], {}), "(drive.parent / 'calib_imu_to_velo.txt')\n", (3008, 3048), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((3070, 3125), 'kitti_util.read_calib_file', 'read_calib_file', (["(drive.parent / 'calib_velo_to_cam.txt')"], {}), "(drive.parent / 'calib_velo_to_cam.txt')\n", (3085, 3125), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((3146, 3200), 'kitti_util.read_calib_file', 'read_calib_file', (["(drive.parent / 'calib_cam_to_cam.txt')"], {}), "(drive.parent / 'calib_cam_to_cam.txt')\n", (3161, 3200), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((3227, 3281), 'kitti_util.transform_from_rot_trans', 'transform_from_rot_trans', (["velo2cam['R']", "velo2cam['T']"], {}), "(velo2cam['R'], velo2cam['T'])\n", (3251, 3281), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((3309, 3363), 'kitti_util.transform_from_rot_trans', 'transform_from_rot_trans', (["imu2velo['R']", "imu2velo['T']"], {}), "(imu2velo['R'], imu2velo['T'])\n", (3333, 3363), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, 
transform_from_rot_trans\n'), ((4974, 4985), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4982, 4985), True, 'import numpy as np\n'), ((1789, 1810), 'numpy.int', 'np.int', (['frame_id[:-1]'], {}), '(frame_id[:-1])\n', (1795, 1810), True, 'import numpy as np\n'), ((3439, 3450), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3447, 3450), True, 'import numpy as np\n'), ((3588, 3604), 'numpy.genfromtxt', 'np.genfromtxt', (['f'], {}), '(f)\n', (3601, 3604), True, 'import numpy as np\n'), ((3916, 3958), 'kitti_util.pose_from_oxts_packet', 'pose_from_oxts_packet', (['metadata[:6]', 'scale'], {}), '(metadata[:6], scale)\n', (3937, 3958), False, 'from kitti_util import pose_from_oxts_packet, generate_depth_map, read_calib_file, transform_from_rot_trans\n'), ((5473, 5505), 'numpy.linalg.norm', 'np.linalg.norm', (['cum_displacement'], {}), '(cum_displacement)\n', (5487, 5505), True, 'import numpy as np\n'), ((7423, 7447), 'numpy.array', 'np.array', (['[0, 0, 0, 1.0]'], {}), '([0, 0, 0, 1.0])\n', (7431, 7447), True, 'import numpy as np\n'), ((2504, 2563), 'datetime.datetime.strptime', 'datetime.strptime', (['time_string[:-4]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(time_string[:-4], '%Y-%m-%d %H:%M:%S.%f')\n", (2521, 2563), False, 'from datetime import datetime\n'), ((3858, 3885), 'numpy.cos', 'np.cos', (['(lat * np.pi / 180.0)'], {}), '(lat * np.pi / 180.0)\n', (3864, 3885), True, 'import numpy as np\n'), ((4111, 4133), 'numpy.linalg.inv', 'np.linalg.inv', (['imu2cam'], {}), '(imu2cam)\n', (4124, 4133), True, 'import numpy as np\n'), ((634, 648), 'path.Path', 'Path', (['__file__'], {}), '(__file__)\n', (638, 648), False, 'from path import Path\n'), ((4073, 4094), 'numpy.linalg.inv', 'np.linalg.inv', (['origin'], {}), '(origin)\n', (4086, 4094), True, 'import numpy as np\n')]
|
import pickle
import numpy as np
import feature_extraction as fe
""" source : https://www.census.gov/quickfacts/fact/table/alleghenycountypennsylvania/PST045216 """
CURR_YEAR = 2015
# gender
FEMALE_PERCENT = 0.517 # 4327
# MALE = 0.483 # 3134
# age
# BELOW_18 = 0.189 # 0
OVER_65_PERCENT = 0.18 # 4353
# OTHER = 0.631 # 3108
OTHER = 0.82
# race
WHITE = 0.805 # 3184
BLACK = 0.134 # 2294
ASIAN = 0.037 # 1244
# OTHER = 0.024 # 739
def draw_general_sample(num_samples, modified_patient_data, feature='gender', percent=[FEMALE_PERCENT]):
# check if num_samples is reasonable
if num_samples > len(modified_patient_data):
print('data points collected fewer than required!')
return None
# check if the feature categories and given number of percentages is correct
if not ((feature.lower() == 'gender' and len(percent) == 1) \
or (feature.lower() == 'age' and len(percent) == 1) \
or (feature.lower() == 'race' and len(percent) == 3)):
print('unmatched percentage!')
return None
# add age
_add_age(modified_patient_data)
# draw samples
if feature.lower() == 'gender':
FEMALE_PERCENT = percent[0]
# group patient data
female_need = int(num_samples * FEMALE_PERCENT)
male_need = int(num_samples * (1 - FEMALE_PERCENT))
female_group, male_group = _split_gender(modified_patient_data)
# get id
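        # np.random.choice samples with replacement by default, so the drawn ids may contain duplicates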
fp_id = np.random.choice(list(female_group.keys()), female_need)
        mp_id = np.random.choice(list(male_group.keys()), male_need)
# get sample
        sample_chosen = {k : v for k, v in modified_patient_data.items() if k in fp_id or k in mp_id}
elif feature.lower() == 'age':
OVER_65_PERCENT = percent[0]
# group patient data
elder_need = int(num_samples * OVER_65_PERCENT)
adult_need = int(num_samples * (1 - OVER_65_PERCENT))
adult, elder = _split_age(modified_patient_data)
# get id
        ap_id = np.random.choice(list(adult.keys()), adult_need)
        ep_id = np.random.choice(list(elder.keys()), elder_need)
# get sample
        sample_chosen = {k : v for k, v in modified_patient_data.items() if k in ap_id or k in ep_id}
elif feature.lower() == 'race':
WHITE = percent[0]
BLACK = percent[1]
ASIAN = percent[2]
OTHER = 1 - WHITE - BLACK - ASIAN
# group patient data
white_need = int(num_samples * WHITE)
black_need = int(num_samples * BLACK)
asian_need = int(num_samples * ASIAN)
other_need = int(num_samples * OTHER)
white, black, asian, other = _split_race(modified_patient_data)
# get id
w_id = np.random.choice(list(white.keys()), white_need)
b_id = np.random.choice(list(black.keys()), black_need)
a_id = np.random.choice(list(asian.keys()), asian_need)
o_id = np.random.choice(list(other.keys()), other_need)
# get sample
        sample_chosen = {k : v for k, v in modified_patient_data.items() if k in w_id or k in b_id or k in a_id or k in o_id}
return sample_chosen
def _add_age(modified_patient_data):
for pid in modified_patient_data:
data = modified_patient_data[pid]
birth_year = int(data['dob'].split('-')[0])
data['age'] = int(CURR_YEAR - birth_year)
def _split_gender(modified_patient_data):
female_group = {}
male_group = {}
for pid in modified_patient_data:
data = modified_patient_data[pid]
if data['gender'].lower() == 'female':
female_group[pid] = data
elif data['gender'].lower() == 'male':
male_group[pid] = data
elif np.random.randint(2): # Unknown case
female_group[pid] = data
else:
male_group[pid] = data
return female_group, male_group
def _split_age(single_group):
adult = {}
elder = {}
for pid in single_group:
data = single_group[pid]
if data['age'] > 65:
elder[pid] = data
else:
adult[pid] = data
return adult, elder
def _split_race(single_group):
white = {}
black = {}
asian = {}
other = {}
for pid in single_group:
data = single_group[pid]
if data['race'].lower() == 'white':
white[pid] = data
elif data['race'].lower() == 'black':
black[pid] = data
elif data['race'].lower() == 'asian':
asian[pid] = data
else:
other[pid] = data
return white, black, asian, other
if __name__ == "__main__":
    # draw_general_sample requires the patient-data dict as its second argument;
    # the pickle path below is illustrative and should point at the real data file.
    with open('modified_patient_data.pkl', 'rb') as f:
        modified_patient_data = pickle.load(f)
    draw_general_sample(2000, modified_patient_data)
|
[
"numpy.random.randint"
] |
[((3692, 3712), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (3709, 3712), True, 'import numpy as np\n')]
|
# -*- coding: UTF-8 -*-
import socket
import pyaudio
import numpy as np
import time
import logging
address = ('127.0.0.1', 8301)
RATE = 8000
RECORD_SECONDS = 10 # recording duration in seconds
FORMAT = pyaudio.paInt16
CHANNELS = 1
CHUNK=256
DEBUG=1
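# With RATE=8000 and CHUNK=256, one chunk covers 256/8000 = 32 ms of audio, so the
# send loop below runs int(8000 / 256 * 10) = 312 iterations for RECORD_SECONDS=10.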
def start_client ():
#socket init
tcpClient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpClient.connect(address)
logging.info(" connect to %s:%s OK" % ( address[0],address[1]))
#pyaudio init
p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) # open the audio input stream for recording
logging.info("Please speak.")
    # limit the recording duration and start sending chunks
cnt=0
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
samples = stream.read(CHUNK)
        #buff=np.float32(np.frombuffer(samples, dtype=np.int16)) # convert 16-bit bytes to int
tcpClient.send(samples)
msg=tcpClient.recv(1024).decode("utf-8")
if msg != " ":
logging.debug("result: %s " % msg)
cnt=cnt+1
logging.debug ("audio length: %d, recv count : %d " % (len(samples),cnt))
#end for
    # send the end-of-stream marker: a length-1 array of zeros (other markers are not supported yet)
eos=np.zeros(1)
tcpClient.send(bytes(eos))
msg=tcpClient.recv(1024).decode("utf-8")
logging.info("final result: %s " % msg )
#close socket and recording
stream.stop_stream()
stream.close()
p.terminate()
tcpClient.close()
if __name__ == '__main__':
logfile="log.asr_server"
if DEBUG:
logging.basicConfig( filename = "", level=logging.DEBUG)
else:
logging.basicConfig( filename = "", level=logging.INFO)
time_start = time.time()
start_client()
logging.info ( "** total time : %8.2fs" % ( time.time() - time_start ))
|
[
"logging.basicConfig",
"logging.debug",
"socket.socket",
"numpy.zeros",
"time.time",
"pyaudio.PyAudio",
"logging.info"
] |
[((286, 335), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (299, 335), False, 'import socket\n'), ((373, 436), 'logging.info', 'logging.info', (["(' connect to %s:%s OK' % (address[0], address[1]))"], {}), "(' connect to %s:%s OK' % (address[0], address[1]))\n", (385, 436), False, 'import logging\n'), ((464, 481), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (479, 481), False, 'import pyaudio\n'), ((600, 629), 'logging.info', 'logging.info', (['"""Please speak."""'], {}), "('Please speak.')\n", (612, 629), False, 'import logging\n'), ((1148, 1159), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1156, 1159), True, 'import numpy as np\n'), ((1240, 1280), 'logging.info', 'logging.info', (["('final result: %s ' % msg)"], {}), "('final result: %s ' % msg)\n", (1252, 1280), False, 'import logging\n'), ((1629, 1640), 'time.time', 'time.time', ([], {}), '()\n', (1638, 1640), False, 'import time\n'), ((1481, 1534), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '""""""', 'level': 'logging.DEBUG'}), "(filename='', level=logging.DEBUG)\n", (1500, 1534), False, 'import logging\n'), ((1556, 1608), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '""""""', 'level': 'logging.INFO'}), "(filename='', level=logging.INFO)\n", (1575, 1608), False, 'import logging\n'), ((954, 988), 'logging.debug', 'logging.debug', (["('result: %s ' % msg)"], {}), "('result: %s ' % msg)\n", (967, 988), False, 'import logging\n'), ((1708, 1719), 'time.time', 'time.time', ([], {}), '()\n', (1717, 1719), False, 'import time\n')]
|
# add LDDMM shooting code into path
import sys
sys.path.append('../vectormomentum/Code/Python');
sys.path.append('../library')
from subprocess import call
import argparse
import os.path
#Add deep learning related libraries
from collections import Counter
import torch
import prediction_network
import util
import numpy as np
from skimage import exposure
#Add LDDMM registration related libraries
# pyca modules
import PyCA.Core as ca
import PyCA.Common as common
#import PyCA.Display as display
# vector momentum modules
# others
import logging
import copy
import math
import registration_methods
#parse command line input
parser = argparse.ArgumentParser(description='Deformation prediction given set of moving and target images.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--moving-image', nargs='+', required=True, metavar=('m1', 'm2, m3...'),
help='List of moving images, seperated by space.')
requiredNamed.add_argument('--target-image', nargs='+', required=True, metavar=('t1', 't2, t3...'),
help='List of target images, seperated by space.')
requiredNamed.add_argument('--output-prefix', nargs='+', required=True, metavar=('o1', 'o2, o3...'),
help='List of registration output prefixes for every moving/target image pair, seperated by space. Preferred to be a directory (e.g. /some_path/output_dir/)')
parser.add_argument('--samples', type=int, default=50, metavar='N',
                    help='number of times to sample the network (default: 50)')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for prediction network (default: 64)')
parser.add_argument('--n-GPU', type=int, default=1, metavar='N',
help='number of GPUs used for prediction (default: 1). For maximum efficiency please set the batch size divisible by the number of GPUs.')
parser.add_argument('--use-CPU-for-shooting', action='store_true', default=False,
help='Use CPU for geodesic shooting. Slow, but saves GPU memory.')
parser.add_argument('--shoot-steps', type=int, default=0, metavar='N',
help='time steps for geodesic shooting. Ignore this option to use the default step size used by the registration model.')
parser.add_argument('--affine-align', action='store_true', default=False,
help='Perform affine registration to align moving and target images to ICBM152 atlas space. Require niftireg.')
parser.add_argument('--histeq', action='store_true', default=False,
help='Perform histogram equalization to the moving and target images.')
parser.add_argument('--atlas', default="../data/atlas/icbm152.nii",
help="Atlas to use for (affine) pre-registration")
parser.add_argument('--prediction-parameter', default='../../network_configs/OASIS_predict_probabilistic.pth.tar',
help="network parameters for the prediction network")
args = parser.parse_args()
# check validity of input arguments from command line
def check_args(args):
# number of input images/output prefix consistency check
n_moving_images = len(args.moving_image)
n_target_images = len(args.target_image)
n_output_prefix = len(args.output_prefix)
if (n_moving_images != n_target_images):
print('The number of moving images is not consistent with the number of target images!')
sys.exit(1)
elif (n_moving_images != n_output_prefix ):
print('The number of output prefix is not consistent with the number of input images!')
sys.exit(1)
# number of GPU check (positive integers)
if (args.n_GPU <= 0):
print('Number of GPUs must be positive!')
sys.exit(1)
# geodesic shooting step check (positive integers)
if (args.shoot_steps < 0):
print('Shooting steps (--shoot-steps) is negative. Using model default step.')
# geodesic shooting step check (positive integers)
if (args.samples < 1):
print('Number of samples (--samples) is smaller than 1. Using model default step.')
#enddef
def create_net(args, network_config):
net_single = prediction_network.net(network_config['network_feature']).cuda();
net_single.load_state_dict(network_config['state_dict'])
if (args.n_GPU > 1) :
device_ids=range(0, args.n_GPU)
net = torch.nn.DataParallel(net_single, device_ids=device_ids).cuda()
else:
net = net_single
net.train()
return net;
#enddef
def preprocess_image(image_pyca, histeq):
image_np = common.AsNPCopy(image_pyca)
nan_mask = np.isnan(image_np)
image_np[nan_mask] = 0
image_np /= np.amax(image_np)
# perform histogram equalization if needed
if histeq:
image_np[image_np != 0] = exposure.equalize_hist(image_np[image_np != 0])
return image_np
#perform deformation prediction
def predict_image(args):
if (args.use_CPU_for_shooting):
mType = ca.MEM_HOST
else:
mType = ca.MEM_DEVICE
# load the prediction network
predict_network_config = torch.load(args.prediction_parameter)
prediction_net = create_net(args, predict_network_config);
batch_size = args.batch_size
patch_size = predict_network_config['patch_size']
input_batch = torch.zeros(batch_size, 2, patch_size, patch_size, patch_size).cuda()
# start prediction
for i in range(0, len(args.moving_image)):
common.Mkdir_p(os.path.dirname(args.output_prefix[i]))
if (args.affine_align):
# Perform affine registration to both moving and target image to the ICBM152 atlas space.
# Registration is done using Niftireg.
call(["reg_aladin",
"-noSym", "-speeeeed", "-ref", args.atlas ,
"-flo", args.moving_image[i],
"-res", args.output_prefix[i]+"moving_affine.nii",
"-aff", args.output_prefix[i]+'moving_affine_transform.txt'])
call(["reg_aladin",
"-noSym", "-speeeeed" ,"-ref", args.atlas ,
"-flo", args.target_image[i],
"-res", args.output_prefix[i]+"target_affine.nii",
"-aff", args.output_prefix[i]+'target_affine_transform.txt'])
moving_image = common.LoadITKImage(args.output_prefix[i]+"moving_affine.nii", mType)
target_image = common.LoadITKImage(args.output_prefix[i]+"target_affine.nii", mType)
else:
moving_image = common.LoadITKImage(args.moving_image[i], mType)
target_image = common.LoadITKImage(args.target_image[i], mType)
#preprocessing of the image
moving_image_np = preprocess_image(moving_image, args.histeq);
target_image_np = preprocess_image(target_image, args.histeq);
grid = moving_image.grid()
moving_image_processed = common.ImFromNPArr(moving_image_np, mType)
target_image_processed = common.ImFromNPArr(target_image_np, mType)
moving_image.setGrid(grid)
target_image.setGrid(grid)
predict_transform_space = False
if 'matlab_t7' in predict_network_config:
predict_transform_space = True
# run actual prediction
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 = prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi = common.AsNPCopy(registration_result['phiinv'])
phi_square = np.power(phi,2)
for sample_iter in range(1, args.samples):
print(sample_iter)
prediction_result = util.predict_momentum(moving_image_np, target_image_np, input_batch, batch_size, patch_size, prediction_net, predict_transform_space);
m0 += prediction_result['image_space']
m0_reg = common.FieldFromNPArr(prediction_result['image_space'], mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi += common.AsNPCopy(registration_result['phiinv'])
phi_square += np.power(common.AsNPCopy(registration_result['phiinv']),2)
m0_mean = np.divide(m0, args.samples);
m0_reg = common.FieldFromNPArr(m0_mean, mType);
registration_result = registration_methods.geodesic_shooting(moving_image_processed, target_image_processed, m0_reg, args.shoot_steps, mType, predict_network_config)
phi_mean = registration_result['phiinv']
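        # Monte-Carlo variance of the deformation field over the drawn samples: E[phi^2] - (E[phi])^2.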
phi_var = np.divide(phi_square, args.samples) - np.power(np.divide(phi, args.samples), 2)
#save result
common.SaveITKImage(registration_result['I1'], args.output_prefix[i]+"I1.mhd")
common.SaveITKField(phi_mean, args.output_prefix[i]+"phiinv_mean.mhd")
common.SaveITKField(common.FieldFromNPArr(phi_var, mType), args.output_prefix[i]+"phiinv_var.mhd")
#enddef
if __name__ == '__main__':
check_args(args);
predict_image(args)
|
[
"PyCA.Common.ImFromNPArr",
"PyCA.Common.SaveITKField",
"sys.exit",
"PyCA.Common.FieldFromNPArr",
"sys.path.append",
"numpy.divide",
"argparse.ArgumentParser",
"PyCA.Common.LoadITKImage",
"util.predict_momentum",
"subprocess.call",
"registration_methods.geodesic_shooting",
"skimage.exposure.equalize_hist",
"numpy.isnan",
"prediction_network.net",
"PyCA.Common.AsNPCopy",
"PyCA.Common.SaveITKImage",
"numpy.power",
"torch.load",
"torch.nn.DataParallel",
"numpy.amax",
"torch.zeros"
] |
[((47, 95), 'sys.path.append', 'sys.path.append', (['"""../vectormomentum/Code/Python"""'], {}), "('../vectormomentum/Code/Python')\n", (62, 95), False, 'import sys\n'), ((97, 126), 'sys.path.append', 'sys.path.append', (['"""../library"""'], {}), "('../library')\n", (112, 126), False, 'import sys\n'), ((638, 743), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Deformation prediction given set of moving and target images."""'}), "(description=\n 'Deformation prediction given set of moving and target images.')\n", (661, 743), False, 'import argparse\n'), ((4658, 4685), 'PyCA.Common.AsNPCopy', 'common.AsNPCopy', (['image_pyca'], {}), '(image_pyca)\n', (4673, 4685), True, 'import PyCA.Common as common\n'), ((4701, 4719), 'numpy.isnan', 'np.isnan', (['image_np'], {}), '(image_np)\n', (4709, 4719), True, 'import numpy as np\n'), ((4763, 4780), 'numpy.amax', 'np.amax', (['image_np'], {}), '(image_np)\n', (4770, 4780), True, 'import numpy as np\n'), ((5173, 5210), 'torch.load', 'torch.load', (['args.prediction_parameter'], {}), '(args.prediction_parameter)\n', (5183, 5210), False, 'import torch\n'), ((3507, 3518), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3515, 3518), False, 'import sys\n'), ((3814, 3825), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3822, 3825), False, 'import sys\n'), ((4878, 4925), 'skimage.exposure.equalize_hist', 'exposure.equalize_hist', (['image_np[image_np != 0]'], {}), '(image_np[image_np != 0])\n', (4900, 4925), False, 'from skimage import exposure\n'), ((6970, 7012), 'PyCA.Common.ImFromNPArr', 'common.ImFromNPArr', (['moving_image_np', 'mType'], {}), '(moving_image_np, mType)\n', (6988, 7012), True, 'import PyCA.Common as common\n'), ((7046, 7088), 'PyCA.Common.ImFromNPArr', 'common.ImFromNPArr', (['target_image_np', 'mType'], {}), '(target_image_np, mType)\n', (7064, 7088), True, 'import PyCA.Common as common\n'), ((7353, 7490), 'util.predict_momentum', 'util.predict_momentum', (['moving_image_np', 'target_image_np', 'input_batch', 'batch_size', 'patch_size', 'prediction_net', 'predict_transform_space'], {}), '(moving_image_np, target_image_np, input_batch,\n batch_size, patch_size, prediction_net, predict_transform_space)\n', (7374, 7490), False, 'import util\n'), ((7560, 7622), 'PyCA.Common.FieldFromNPArr', 'common.FieldFromNPArr', (["prediction_result['image_space']", 'mType'], {}), "(prediction_result['image_space'], mType)\n", (7581, 7622), True, 'import PyCA.Common as common\n'), ((7654, 7805), 'registration_methods.geodesic_shooting', 'registration_methods.geodesic_shooting', (['moving_image_processed', 'target_image_processed', 'm0_reg', 'args.shoot_steps', 'mType', 'predict_network_config'], {}), '(moving_image_processed,\n target_image_processed, m0_reg, args.shoot_steps, mType,\n predict_network_config)\n', (7692, 7805), False, 'import registration_methods\n'), ((7812, 7858), 'PyCA.Common.AsNPCopy', 'common.AsNPCopy', (["registration_result['phiinv']"], {}), "(registration_result['phiinv'])\n", (7827, 7858), True, 'import PyCA.Common as common\n'), ((7880, 7896), 'numpy.power', 'np.power', (['phi', '(2)'], {}), '(phi, 2)\n', (7888, 7896), True, 'import numpy as np\n'), ((8630, 8657), 'numpy.divide', 'np.divide', (['m0', 'args.samples'], {}), '(m0, args.samples)\n', (8639, 8657), True, 'import numpy as np\n'), ((8676, 8713), 'PyCA.Common.FieldFromNPArr', 'common.FieldFromNPArr', (['m0_mean', 'mType'], {}), '(m0_mean, mType)\n', (8697, 8713), True, 'import PyCA.Common as common\n'), ((8745, 8896), 
'registration_methods.geodesic_shooting', 'registration_methods.geodesic_shooting', (['moving_image_processed', 'target_image_processed', 'm0_reg', 'args.shoot_steps', 'mType', 'predict_network_config'], {}), '(moving_image_processed,\n target_image_processed, m0_reg, args.shoot_steps, mType,\n predict_network_config)\n', (8783, 8896), False, 'import registration_methods\n'), ((9066, 9151), 'PyCA.Common.SaveITKImage', 'common.SaveITKImage', (["registration_result['I1']", "(args.output_prefix[i] + 'I1.mhd')"], {}), "(registration_result['I1'], args.output_prefix[i] + 'I1.mhd'\n )\n", (9085, 9151), True, 'import PyCA.Common as common\n'), ((9153, 9225), 'PyCA.Common.SaveITKField', 'common.SaveITKField', (['phi_mean', "(args.output_prefix[i] + 'phiinv_mean.mhd')"], {}), "(phi_mean, args.output_prefix[i] + 'phiinv_mean.mhd')\n", (9172, 9225), True, 'import PyCA.Common as common\n'), ((3671, 3682), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3679, 3682), False, 'import sys\n'), ((4251, 4308), 'prediction_network.net', 'prediction_network.net', (["network_config['network_feature']"], {}), "(network_config['network_feature'])\n", (4273, 4308), False, 'import prediction_network\n'), ((5380, 5442), 'torch.zeros', 'torch.zeros', (['batch_size', '(2)', 'patch_size', 'patch_size', 'patch_size'], {}), '(batch_size, 2, patch_size, patch_size, patch_size)\n', (5391, 5442), False, 'import torch\n'), ((5785, 6003), 'subprocess.call', 'call', (["['reg_aladin', '-noSym', '-speeeeed', '-ref', args.atlas, '-flo', args.\n moving_image[i], '-res', args.output_prefix[i] + 'moving_affine.nii',\n '-aff', args.output_prefix[i] + 'moving_affine_transform.txt']"], {}), "(['reg_aladin', '-noSym', '-speeeeed', '-ref', args.atlas, '-flo', args\n .moving_image[i], '-res', args.output_prefix[i] + 'moving_affine.nii',\n '-aff', args.output_prefix[i] + 'moving_affine_transform.txt'])\n", (5789, 6003), False, 'from subprocess import call\n'), ((6077, 6295), 'subprocess.call', 'call', (["['reg_aladin', '-noSym', '-speeeeed', '-ref', args.atlas, '-flo', args.\n target_image[i], '-res', args.output_prefix[i] + 'target_affine.nii',\n '-aff', args.output_prefix[i] + 'target_affine_transform.txt']"], {}), "(['reg_aladin', '-noSym', '-speeeeed', '-ref', args.atlas, '-flo', args\n .target_image[i], '-res', args.output_prefix[i] + 'target_affine.nii',\n '-aff', args.output_prefix[i] + 'target_affine_transform.txt'])\n", (6081, 6295), False, 'from subprocess import call\n'), ((6384, 6455), 'PyCA.Common.LoadITKImage', 'common.LoadITKImage', (["(args.output_prefix[i] + 'moving_affine.nii')", 'mType'], {}), "(args.output_prefix[i] + 'moving_affine.nii', mType)\n", (6403, 6455), True, 'import PyCA.Common as common\n'), ((6481, 6552), 'PyCA.Common.LoadITKImage', 'common.LoadITKImage', (["(args.output_prefix[i] + 'target_affine.nii')", 'mType'], {}), "(args.output_prefix[i] + 'target_affine.nii', mType)\n", (6500, 6552), True, 'import PyCA.Common as common\n'), ((6594, 6642), 'PyCA.Common.LoadITKImage', 'common.LoadITKImage', (['args.moving_image[i]', 'mType'], {}), '(args.moving_image[i], mType)\n', (6613, 6642), True, 'import PyCA.Common as common\n'), ((6670, 6718), 'PyCA.Common.LoadITKImage', 'common.LoadITKImage', (['args.target_image[i]', 'mType'], {}), '(args.target_image[i], mType)\n', (6689, 6718), True, 'import PyCA.Common as common\n'), ((8011, 8148), 'util.predict_momentum', 'util.predict_momentum', (['moving_image_np', 'target_image_np', 'input_batch', 'batch_size', 'patch_size', 'prediction_net', 
'predict_transform_space'], {}), '(moving_image_np, target_image_np, input_batch,\n batch_size, patch_size, prediction_net, predict_transform_space)\n', (8032, 8148), False, 'import util\n'), ((8218, 8280), 'PyCA.Common.FieldFromNPArr', 'common.FieldFromNPArr', (["prediction_result['image_space']", 'mType'], {}), "(prediction_result['image_space'], mType)\n", (8239, 8280), True, 'import PyCA.Common as common\n'), ((8316, 8467), 'registration_methods.geodesic_shooting', 'registration_methods.geodesic_shooting', (['moving_image_processed', 'target_image_processed', 'm0_reg', 'args.shoot_steps', 'mType', 'predict_network_config'], {}), '(moving_image_processed,\n target_image_processed, m0_reg, args.shoot_steps, mType,\n predict_network_config)\n', (8354, 8467), False, 'import registration_methods\n'), ((8479, 8525), 'PyCA.Common.AsNPCopy', 'common.AsNPCopy', (["registration_result['phiinv']"], {}), "(registration_result['phiinv'])\n", (8494, 8525), True, 'import PyCA.Common as common\n'), ((8956, 8991), 'numpy.divide', 'np.divide', (['phi_square', 'args.samples'], {}), '(phi_square, args.samples)\n', (8965, 8991), True, 'import numpy as np\n'), ((9252, 9289), 'PyCA.Common.FieldFromNPArr', 'common.FieldFromNPArr', (['phi_var', 'mType'], {}), '(phi_var, mType)\n', (9273, 9289), True, 'import PyCA.Common as common\n'), ((4459, 4515), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net_single'], {'device_ids': 'device_ids'}), '(net_single, device_ids=device_ids)\n', (4480, 4515), False, 'import torch\n'), ((8561, 8607), 'PyCA.Common.AsNPCopy', 'common.AsNPCopy', (["registration_result['phiinv']"], {}), "(registration_result['phiinv'])\n", (8576, 8607), True, 'import PyCA.Common as common\n'), ((9003, 9031), 'numpy.divide', 'np.divide', (['phi', 'args.samples'], {}), '(phi, args.samples)\n', (9012, 9031), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import math
from typing import Iterable, Optional, Tuple, Union
import numpy as np
from analysis import linearity
from utils import utils
from unit_test import unit_test
from processor import ProcessorBase
from generation import signal_generation
def generate_impulse(n_samp, amplitude=1.0) -> np.ndarray:
x = np.zeros(n_samp, dtype=np.float64)
x[0] = amplitude
return x
def generate_step(n_samp, amplitude=1.0) -> np.ndarray:
return np.ones(n_samp) * amplitude
def generate_ramp(n_samp, slope=1.0) -> np.ndarray:
y = (np.arange(n_samp) + 1).astype(np.float64) * slope
assert utils.approx_equal(y[0], slope)
assert utils.approx_equal(y[1], 2*slope)
return y
def get_impulse_response(system, n_samp, amplitude=1.0, reset=True, negative=False) -> np.ndarray:
# Assuming system is LTI & causal, and that system.reset() works as it should,
# we can ignore negative half of impulse/step response, as zero-input will have zero-output
x = generate_impulse(n_samp, amplitude)
if negative:
x = -x
if reset:
system.reset()
return system.process_vector(x)
def get_step_response(system, n_samp, amplitude=1.0, reset=True, negative=False) -> np.ndarray:
x = generate_step(n_samp, amplitude)
if negative:
x = -x
if reset:
system.reset()
return system.process_vector(x)
def get_ramp_response(system, n_samp, slope=1.0, reset=True, negative=False) -> np.ndarray:
x = generate_ramp(n_samp, slope)
if negative:
x = -x
if reset:
system.reset()
return system.process_vector(x)
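# Minimal usage sketch (illustrative; assumes `my_system` is a ProcessorBase-like object
# exposing reset() and process_vector(), as the helpers above require):
#   h = get_impulse_response(my_system, n_samp=1024)
#   s = get_step_response(my_system, n_samp=1024)
#   r = get_ramp_response(my_system, n_samp=1024, slope=0.5)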
|
[
"utils.utils.approx_equal",
"numpy.zeros",
"numpy.ones",
"numpy.arange"
] |
[((354, 388), 'numpy.zeros', 'np.zeros', (['n_samp'], {'dtype': 'np.float64'}), '(n_samp, dtype=np.float64)\n', (362, 388), True, 'import numpy as np\n'), ((640, 671), 'utils.utils.approx_equal', 'utils.approx_equal', (['y[0]', 'slope'], {}), '(y[0], slope)\n', (658, 671), False, 'from utils import utils\n'), ((681, 716), 'utils.utils.approx_equal', 'utils.approx_equal', (['y[1]', '(2 * slope)'], {}), '(y[1], 2 * slope)\n', (699, 716), False, 'from utils import utils\n'), ((489, 504), 'numpy.ones', 'np.ones', (['n_samp'], {}), '(n_samp)\n', (496, 504), True, 'import numpy as np\n'), ((581, 598), 'numpy.arange', 'np.arange', (['n_samp'], {}), '(n_samp)\n', (590, 598), True, 'import numpy as np\n')]
|
"""
Copyright 2021 Objectiv B.V.
"""
import datetime
import warnings
from abc import ABC
from enum import Enum
from typing import Union, cast, List, Tuple, Optional, Any
import numpy
import pandas
from sqlalchemy.engine import Dialect
from bach import DataFrame
from bach.series import Series, SeriesString, SeriesBoolean, SeriesFloat64, SeriesInt64
from bach.expression import Expression, join_expressions
from bach.series.series import WrappedPartition, ToPandasInfo
from bach.series.utils.datetime_formats import parse_c_standard_code_to_postgres_code, \
parse_c_code_to_bigquery_code
from bach.types import DtypeOrAlias, StructuredDtype
from sql_models.constants import DBDialect
from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException
class DatePart(str, Enum):
DAY = 'days'
HOUR = 'hours'
MINUTE = 'minutes'
SECOND = 'seconds'
MILLISECOND = 'milliseconds'
MICROSECOND = 'microseconds'
# conversions for date parts to seconds
# when adjusting intervals, 30-day time periods are represented as months
# BigQuery seems to follow Postgres threshold
# https://www.postgresql.org/docs/current/functions-datetime.html#:~:text=justify_days%20(%20interval%20)%20%E2%86%92%20interval,mon%205%20days
# For example 395 days is equal to 1 year, 1 month and 5 days.
_TOTAL_SECONDS_PER_DATE_PART = {
DatePart.DAY: 24 * 60 * 60,
DatePart.HOUR: 60 * 60,
DatePart.MINUTE: 60,
DatePart.SECOND: 1,
DatePart.MILLISECOND: 1e-3,
DatePart.MICROSECOND: 1e-6,
}
class DateTimeOperation:
def __init__(self, series: 'SeriesAbstractDateTime'):
self._series = series
def sql_format(self, format_str: str) -> SeriesString:
"""
Allow formatting of this Series (to a string type).
:param format_str: The format to apply to the date/time column.
            Currently, this uses Postgres' date format string syntax:
https://www.postgresql.org/docs/14/functions-formatting.html
.. warning::
This method is deprecated, we recommend using :meth:`SeriesAbstractDateTime.dt.strftime` instead.
.. code-block:: python
df['year'] = df.some_date_series.dt.sql_format('YYYY') # return year
df['date'] = df.some_date_series.dt.sql_format('YYYYMMDD') # return date
:returns: a SeriesString containing the formatted date.
"""
warnings.warn(
'Call to deprecated method, we recommend to use SeriesAbstractDateTime.dt.strftime instead',
category=DeprecationWarning,
)
expression = Expression.construct('to_char({}, {})',
self._series, Expression.string_value(format_str))
str_series = self._series.copy_override_type(SeriesString).copy_override(expression=expression)
return str_series
def strftime(self, format_str: str) -> SeriesString:
"""
Allow formatting of this Series (to a string type).
:param format_str: The format to apply to the date/time column.
This uses 1989 C standard format codes:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
.. code-block:: python
            df['year'] = df.some_date_series.dt.strftime('%Y') # return year
            df['date'] = df.some_date_series.dt.strftime('%Y%m%d') # return date
:returns: a SeriesString containing the formatted date.
"""
engine = self._series.engine
if is_postgres(engine):
parsed_format_str = parse_c_standard_code_to_postgres_code(format_str)
expression = Expression.construct(
'to_char({}, {})', self._series, Expression.string_value(parsed_format_str),
)
elif is_bigquery(engine):
# BQ uses C Standard Codes
# https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
parsed_format_str = parse_c_code_to_bigquery_code(format_str)
expression = Expression.construct(
'format_date({}, {})',
Expression.string_value(parsed_format_str),
self._series,
)
else:
raise DatabaseNotSupportedException(engine)
str_series = self._series.copy_override_type(SeriesString).copy_override(expression=expression)
return str_series
class TimedeltaOperation(DateTimeOperation):
def _get_conversion_df(self) -> 'DataFrame':
"""
generates a dataframe containing the amounts of seconds a supported date part has.
"""
from bach import DataFrame
conversion_df = pandas.DataFrame(
data=[
{
self._format_converted_series_name(dp): ts
for dp, ts in _TOTAL_SECONDS_PER_DATE_PART.items()
},
]
)
convert_df = DataFrame.from_pandas(df=conversion_df, engine=self._series.engine, convert_objects=True)
return convert_df.reset_index(drop=True)
@staticmethod
def _format_converted_series_name(date_part: DatePart) -> str:
return f'_SECONDS_IN_{date_part.name}'
@property
def components(self) -> DataFrame:
"""
:returns: a DataFrame containing all date parts from the timedelta.
"""
df = self.total_seconds.to_frame()
df = df.merge(self._get_conversion_df(), how='cross')
# justifies total seconds into the units of each date component
# after adjustment, it converts it back into seconds
for date_part in DatePart:
converted_series_name = self._format_converted_series_name(DatePart(date_part))
df[f'ts_{date_part}'] = df['total_seconds'] // df[converted_series_name]
df[f'ts_{date_part}'] *= df[converted_series_name]
# materialize to avoid complex subquery
df = df.materialize(node_name='justified_date_components')
components_series_names = []
prev_ts = ''
# extract actual date component from justified seconds
# by getting the difference between current and previous components
# this helps on normalizing negative time deltas and have only negative values
# in days.
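        # e.g. a total of 90061 seconds resolves to 1 day, 1 hour, 1 minute, 1 second.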
for date_part in DatePart:
converted_series_name = self._format_converted_series_name(DatePart(date_part))
component_name = f'{date_part}'
current_ts = f'ts_{date_part}'
if not prev_ts:
df[component_name] = df[current_ts] / df[converted_series_name]
else:
df[component_name] = (df[current_ts] - df[prev_ts]) / df[converted_series_name]
df[component_name] = cast(SeriesFloat64, df[component_name]).round(decimals=0)
components_series_names.append(component_name)
prev_ts = current_ts
return df[components_series_names].astype('int64')
@property
def days(self) -> SeriesInt64:
"""
converts total seconds into days and returns only the integral part of the result
"""
day_series = self.total_seconds // _TOTAL_SECONDS_PER_DATE_PART[DatePart.DAY]
day_series = day_series.astype('int64')
return (
day_series
.copy_override_type(SeriesInt64)
.copy_override(name='days')
)
@property
def seconds(self) -> SeriesInt64:
"""
removes days from total seconds (self.total_seconds % _SECONDS_IN_DAY)
and returns only the integral part of the result
"""
seconds_series = (self.total_seconds % _TOTAL_SECONDS_PER_DATE_PART[DatePart.DAY]) // 1
seconds_series = seconds_series.astype('int64')
return (
seconds_series
.copy_override_type(SeriesInt64)
.copy_override(name='seconds')
)
@property
def microseconds(self) -> SeriesInt64:
"""
considers only the fractional part of the total seconds and converts it into microseconds
"""
microseconds_series = (
(self.total_seconds % 1) / _TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]
)
microseconds_series = microseconds_series.astype('int64')
return (
microseconds_series
.copy_override_type(SeriesInt64)
.copy_override(name='microseconds')
)
@property
def total_seconds(self) -> SeriesFloat64:
"""
returns the total amount of seconds in the interval
"""
if not is_bigquery(self._series.engine):
# extract(epoch from source) returns the total number of seconds in the interval
expression = Expression.construct(f'extract(epoch from {{}})', self._series)
else:
# bq cannot extract epoch from interval
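            # Workaround: add the interval to the Unix epoch, take UNIX_MICROS of the
            # resulting timestamp, and scale by 1e-6 to obtain total seconds.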
expression = Expression.construct(
(
f"UNIX_MICROS(CAST('1970-01-01' AS TIMESTAMP) + {{}}) "
f"* {_TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]}"
),
self._series,
)
return (
self._series
.copy_override_type(SeriesFloat64)
.copy_override(name='total_seconds', expression=expression)
)
class SeriesAbstractDateTime(Series, ABC):
"""
A Series that represents the generic date/time type and its specific operations. Selected arithmetic
operations are accepted using the usual operators.
**Date/Time Operations**
On any of the subtypes, you can access date operations through the `dt` accessor.
"""
@property
def dt(self) -> DateTimeOperation:
"""
Get access to date operations.
.. autoclass:: bach.series.series_datetime.DateTimeOperation
:members:
"""
return DateTimeOperation(self)
def _comparator_operation(self, other, comparator,
other_dtypes=('timestamp', 'date', 'time', 'string')) -> 'SeriesBoolean':
return super()._comparator_operation(other, comparator, other_dtypes)
@classmethod
def _cast_to_date_if_dtype_date(cls, series: 'Series') -> 'Series':
# PG returns timestamp in all cases were we expect date
# Make sure we cast properly, and round similar to python datetime: add 12 hours and cast to date
if series.dtype == 'date':
td_12_hours = datetime.timedelta(seconds=3600 * 12)
series_12_hours = SeriesTimedelta.from_value(base=series, value=td_12_hours, name='tmp')
expr_12_hours = series_12_hours.expression
return series.copy_override(
expression=Expression.construct("cast({} + {} as date)", series, expr_12_hours)
)
else:
return series
def dt_strip_timezone(value: Optional[datetime.datetime]) -> Optional[datetime.datetime]:
if value is None:
return None
return value.replace(tzinfo=None)
class SeriesTimestamp(SeriesAbstractDateTime):
"""
A Series that represents the timestamp/datetime type and its specific operations.
Timestamps are assumed to be in UTC, or without a timezone, both cases are treated the same.
These timestamps have a microsecond precision at best, in contrast to numpy's datetime64 which supports
up to attoseconds precision.
**Database support and types**
* Postgres: utilizes the 'timestamp without time zone' database type.
* BigQuery: utilizes the 'TIMESTAMP' database type.
"""
dtype = 'timestamp'
dtype_aliases = ('datetime64', 'datetime64[ns]', numpy.datetime64)
supported_db_dtype = {
DBDialect.POSTGRES: 'timestamp without time zone',
DBDialect.BIGQUERY: 'TIMESTAMP',
}
supported_value_types = (datetime.datetime, numpy.datetime64, datetime.date, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[datetime.datetime, numpy.datetime64, datetime.date, str, None],
dtype: StructuredDtype
) -> Expression:
if value is None:
return Expression.raw('NULL')
# if value is not a datetime or date, then convert it to datetime first
dt_value: Union[datetime.datetime, datetime.date, None] = None
if isinstance(value, str):
formats = ['%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d']
for format in formats:
try:
dt_value = datetime.datetime.strptime(value, format)
break
except ValueError:
continue
if dt_value is None:
raise ValueError(f'Not a valid timestamp string literal: {value}.'
f'Supported formats: {formats}')
elif isinstance(value, numpy.datetime64):
if numpy.isnat(value):
return Expression.raw('NULL')
# Weird trick: count number of microseconds in datetime, but only works on timedelta, so convert
# to a timedelta first, by subtracting 0 (epoch = 1970-01-01 00:00:00)
# Rounding can be unpredictable because of limited precision, so always truncate excess precision
microseconds = int((value - numpy.datetime64('1970', 'us')) // numpy.timedelta64(1, 'us'))
dt_value = datetime.datetime.utcfromtimestamp(microseconds / 1_000_000)
elif isinstance(value, (datetime.datetime, datetime.date)):
dt_value = value
if dt_value is None:
raise ValueError(f'Not a valid timestamp literal: {value}')
str_value = dt_value.strftime('%Y-%m-%d %H:%M:%S.%f')
return Expression.string_value(str_value)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'timestamp':
return expression
else:
if source_dtype not in ['string', 'date']:
raise ValueError(f'cannot convert {source_dtype} to timestamp')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
def to_pandas_info(self) -> Optional['ToPandasInfo']:
if is_postgres(self.engine):
return ToPandasInfo('datetime64[ns]', None)
if is_bigquery(self.engine):
return ToPandasInfo('datetime64[ns, UTC]', dt_strip_timezone)
return None
def __add__(self, other) -> 'Series':
return self._arithmetic_operation(other, 'add', '({}) + ({})', other_dtypes=tuple(['timedelta']))
def __sub__(self, other) -> 'Series':
type_mapping = {
'timedelta': 'timestamp',
'timestamp': 'timedelta'
}
return self._arithmetic_operation(other, 'sub', '({}) - ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
class SeriesDate(SeriesAbstractDateTime):
"""
A Series that represents the date type and its specific operations
**Database support and types**
* Postgres: utilizes the 'date' database type.
* BigQuery: utilizes the 'DATE' database type.
"""
dtype = 'date'
dtype_aliases: Tuple[DtypeOrAlias, ...] = tuple()
supported_db_dtype = {
DBDialect.POSTGRES: 'date',
DBDialect.BIGQUERY: 'DATE'
}
supported_value_types = (datetime.datetime, datetime.date, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as date)', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[str, datetime.date],
dtype: StructuredDtype
) -> Expression:
if isinstance(value, datetime.date):
value = str(value)
# TODO: check here already that the string has the correct format
return Expression.string_value(value)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'date':
return expression
else:
if source_dtype not in ['string', 'timestamp']:
raise ValueError(f'cannot convert {source_dtype} to date')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
def __add__(self, other) -> 'Series':
type_mapping = {
'timedelta': 'date' # PG returns timestamp, needs explicit cast to date
}
return self._cast_to_date_if_dtype_date(
self._arithmetic_operation(other, 'add', '({}) + ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
)
def __sub__(self, other) -> 'Series':
type_mapping = {
'date': 'timedelta',
'timedelta': 'date', # PG returns timestamp, needs explicit cast to date
}
if other.dtype == 'date':
# PG does unexpected things when doing date - date. Work around that.
fmt_str = 'cast(cast({} as timestamp) - ({}) as interval)'
else:
fmt_str = '({}) - ({})'
return self._cast_to_date_if_dtype_date(
self._arithmetic_operation(other, 'sub', fmt_str,
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
)
class SeriesTime(SeriesAbstractDateTime):
"""
A Series that represents the date time and its specific operations
**Database support and types**
* Postgres: utilizes the 'time without time zone' database type.
* BigQuery: utilizes the 'TIME' database type.
"""
dtype = 'time'
dtype_aliases: Tuple[DtypeOrAlias, ...] = tuple()
supported_db_dtype = {
DBDialect.POSTGRES: 'time without time zone',
DBDialect.BIGQUERY: 'TIME',
}
supported_value_types = (datetime.time, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[str, datetime.time],
dtype: StructuredDtype
) -> Expression:
value = str(value)
# TODO: check here already that the string has the correct format
return Expression.string_value(value)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'time':
return expression
else:
if source_dtype not in ['string', 'timestamp']:
raise ValueError(f'cannot convert {source_dtype} to time')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
# python supports no arithmetic on Time
class SeriesTimedelta(SeriesAbstractDateTime):
"""
A Series that represents the timedelta type and its specific operations
**Database support and types**
* Postgres: utilizes the 'interval' database type.
    * BigQuery: utilizes the 'INTERVAL' database type.
"""
dtype = 'timedelta'
dtype_aliases = ('interval',)
supported_db_dtype = {
DBDialect.POSTGRES: 'interval',
DBDialect.BIGQUERY: 'INTERVAL',
}
supported_value_types = (datetime.timedelta, numpy.timedelta64, str)
@classmethod
def supported_literal_to_expression(cls, dialect: Dialect, literal: Expression) -> Expression:
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', literal)
@classmethod
def supported_value_to_literal(
cls,
dialect: Dialect,
value: Union[str, numpy.timedelta64, datetime.timedelta],
dtype: StructuredDtype
) -> Expression:
# pandas.Timedelta checks already that the string has the correct format
# round it up to microseconds precision in order to avoid problems with BigQuery
# pandas by default uses nanoseconds precision
value_td = pandas.Timedelta(value).round(freq='us')
if value_td is pandas.NaT:
return Expression.construct('NULL')
# interval values in iso format are allowed in SQL (both BQ and PG)
# https://www.postgresql.org/docs/8.4/datatype-datetime.html#:~:text=interval%20values%20can%20also%20be%20written%20as%20iso%208601%20time%20intervals%2C
return Expression.string_value(value_td.isoformat())
def to_pandas_info(self) -> Optional[ToPandasInfo]:
if is_bigquery(self.engine):
return ToPandasInfo(dtype='object', function=self._parse_interval_bigquery)
return None
def _parse_interval_bigquery(self, value: Optional[Any]) -> Optional[pandas.Timedelta]:
if value is None:
return None
# BigQuery returns a MonthDayNano object
# we need to normalize months to days (1 month == 30 day period)
return pandas.Timedelta(
days=value.days + value.months * 30,
nanoseconds=value.nanoseconds,
)
@classmethod
def dtype_to_expression(cls, dialect: Dialect, source_dtype: str, expression: Expression) -> Expression:
if source_dtype == 'timedelta':
return expression
else:
if not source_dtype == 'string':
raise ValueError(f'cannot convert {source_dtype} to timedelta')
return Expression.construct(f'cast({{}} as {cls.get_db_dtype(dialect)})', expression)
def _comparator_operation(self, other, comparator,
other_dtypes=('timedelta', 'string')) -> SeriesBoolean:
return super()._comparator_operation(other, comparator, other_dtypes)
def __add__(self, other) -> 'Series':
type_mapping = {
'date': 'date', # PG makes this a timestamp
'timedelta': 'timedelta',
'timestamp': 'timestamp'
}
return self._cast_to_date_if_dtype_date(
self._arithmetic_operation(other, 'add', '({}) + ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping))
def __sub__(self, other) -> 'Series':
type_mapping = {
'timedelta': 'timedelta',
}
return self._arithmetic_operation(other, 'sub', '({}) - ({})',
other_dtypes=tuple(type_mapping.keys()),
dtype=type_mapping)
def __mul__(self, other) -> 'Series':
return self._arithmetic_operation(other, 'mul', '({}) * ({})', other_dtypes=('int64', 'float64'))
def __truediv__(self, other) -> 'Series':
return self._arithmetic_operation(other, 'div', '({}) / ({})', other_dtypes=('int64', 'float64'))
@property
def dt(self) -> TimedeltaOperation:
"""
Get access to date operations.
.. autoclass:: bach.series.series_datetime.TimedeltaOperation
:members:
"""
return TimedeltaOperation(self)
def sum(self, partition: WrappedPartition = None,
skipna: bool = True, min_count: int = None) -> 'SeriesTimedelta':
"""
:meta private:
"""
result = self._derived_agg_func(
partition=partition,
expression='sum',
skipna=skipna,
min_count=min_count
)
return result.copy_override_type(SeriesTimedelta)
def mean(self, partition: WrappedPartition = None, skipna: bool = True) -> 'SeriesTimedelta':
"""
:meta private:
"""
result = self._derived_agg_func(
partition=partition,
expression='avg',
skipna=skipna
)
result = result.copy_override_type(SeriesTimedelta)
if is_bigquery(self.engine):
result = result._remove_nano_precision_bigquery()
return result
def _remove_nano_precision_bigquery(self) -> 'SeriesTimedelta':
"""
Helper function that removes nano-precision from intervals.
"""
series = self.copy()
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#interval_type
_BQ_INTERVAL_FORMAT = '%d-%d %d %d:%d:%d.%06.0f'
_BQ_SUPPORTED_INTERVAL_PARTS = [
'YEAR', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'SECOND'
]
# aggregating intervals by average might generate a result with
# nano-precision, which is not supported by BigQuery TimeStamps
# therefore we need to make sure we always generate values up to
# microseconds precision
# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp_type
all_extracted_parts_expr = [
Expression.construct(f'EXTRACT({date_part} FROM {{}})', series)
for date_part in _BQ_SUPPORTED_INTERVAL_PARTS
]
# convert nanoseconds to microseconds
all_extracted_parts_expr.append(
Expression.construct(f'EXTRACT(NANOSECOND FROM {{}}) / 1000', series)
)
format_arguments_expr = join_expressions(all_extracted_parts_expr)
# All parts will create a string with following format
# '%d-%d %d %d:%d:%d.%06.0f'
# where the first 6 digits are date parts from YEAR to SECOND
# Format specifier %06.0f will format fractional part of seconds with maximum width of 6 digits
# for example:
# nanoseconds = 1142857, converting them into microseconds is 1142.857
# when applying string formatting, the value will be rounded into 1143 (.0 precision)
# and will be left padded by 2 leading zeros: 001143 (0 flag and 6 minimum width)
# for more information:
# https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#format_string
format_expr = Expression.construct(
f'format({{}}, {{}})',
Expression.string_value(_BQ_INTERVAL_FORMAT),
format_arguments_expr,
)
return series.copy_override(
expression=self.dtype_to_expression(
self.engine, source_dtype='string', expression=format_expr,
)
)
def quantile(
self, partition: WrappedPartition = None, q: Union[float, List[float]] = 0.5,
) -> 'SeriesTimedelta':
"""
        When q is a float or len(q) == 1, the resultant series index will remain unchanged.
In case multiple quantiles are calculated, the resultant series index will have all calculated
quantiles as index values.
"""
from bach.quantile import calculate_quantiles
if not is_bigquery(self.engine):
return (
calculate_quantiles(series=self.copy(), partition=partition, q=q)
.copy_override_type(SeriesTimedelta)
)
# calculate quantiles based on total microseconds
# using total seconds might lose precision,
# since TIMESTAMP_SECONDS accepts only integers, therefore
# microseconds will be lost due to rounding
total_microseconds_series = (
self.dt.total_seconds / _TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]
)
total_microseconds_series = total_microseconds_series.copy_override_type(SeriesFloat64)
result = calculate_quantiles(series=total_microseconds_series, partition=partition, q=q)
# result must be a timedelta
result = result.copy_override(
expression=Expression.construct(
f"TIMESTAMP_MICROS({{}}) - CAST('1970-01-01' AS TIMESTAMP)",
result.astype('int64'),
),
name=self.name,
)
return result.copy_override_type(SeriesTimedelta)
|
[
"datetime.datetime.utcfromtimestamp",
"bach.series.utils.datetime_formats.parse_c_code_to_bigquery_code",
"datetime.timedelta",
"bach.series.series.ToPandasInfo",
"bach.DataFrame.from_pandas",
"bach.expression.Expression.construct",
"numpy.datetime64",
"warnings.warn",
"bach.series.utils.datetime_formats.parse_c_standard_code_to_postgres_code",
"bach.expression.Expression.raw",
"bach.expression.join_expressions",
"numpy.timedelta64",
"typing.cast",
"bach.expression.Expression.string_value",
"datetime.datetime.strptime",
"sql_models.util.is_postgres",
"pandas.Timedelta",
"bach.quantile.calculate_quantiles",
"sql_models.util.is_bigquery",
"numpy.isnat",
"sql_models.util.DatabaseNotSupportedException"
] |
[((2410, 2555), 'warnings.warn', 'warnings.warn', (['"""Call to deprecated method, we recommend to use SeriesAbstractDateTime.dt.strftime instead"""'], {'category': 'DeprecationWarning'}), "(\n 'Call to deprecated method, we recommend to use SeriesAbstractDateTime.dt.strftime instead'\n , category=DeprecationWarning)\n", (2423, 2555), False, 'import warnings\n'), ((3540, 3559), 'sql_models.util.is_postgres', 'is_postgres', (['engine'], {}), '(engine)\n', (3551, 3559), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((4977, 5070), 'bach.DataFrame.from_pandas', 'DataFrame.from_pandas', ([], {'df': 'conversion_df', 'engine': 'self._series.engine', 'convert_objects': '(True)'}), '(df=conversion_df, engine=self._series.engine,\n convert_objects=True)\n', (4998, 5070), False, 'from bach import DataFrame\n'), ((14053, 14087), 'bach.expression.Expression.string_value', 'Expression.string_value', (['str_value'], {}), '(str_value)\n', (14076, 14087), False, 'from bach.expression import Expression, join_expressions\n'), ((14602, 14626), 'sql_models.util.is_postgres', 'is_postgres', (['self.engine'], {}), '(self.engine)\n', (14613, 14626), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((14695, 14719), 'sql_models.util.is_bigquery', 'is_bigquery', (['self.engine'], {}), '(self.engine)\n', (14706, 14719), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((15980, 16032), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""cast({{}} as date)"""', 'literal'], {}), "(f'cast({{}} as date)', literal)\n", (16000, 16032), False, 'from bach.expression import Expression, join_expressions\n'), ((16385, 16415), 'bach.expression.Expression.string_value', 'Expression.string_value', (['value'], {}), '(value)\n', (16408, 16415), False, 'from bach.expression import Expression, join_expressions\n'), ((19024, 19054), 'bach.expression.Expression.string_value', 'Expression.string_value', (['value'], {}), '(value)\n', (19047, 19054), False, 'from bach.expression import Expression, join_expressions\n'), ((21210, 21234), 'sql_models.util.is_bigquery', 'is_bigquery', (['self.engine'], {}), '(self.engine)\n', (21221, 21234), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((21625, 21714), 'pandas.Timedelta', 'pandas.Timedelta', ([], {'days': '(value.days + value.months * 30)', 'nanoseconds': 'value.nanoseconds'}), '(days=value.days + value.months * 30, nanoseconds=value.\n nanoseconds)\n', (21641, 21714), False, 'import pandas\n'), ((24521, 24545), 'sql_models.util.is_bigquery', 'is_bigquery', (['self.engine'], {}), '(self.engine)\n', (24532, 24545), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((25831, 25873), 'bach.expression.join_expressions', 'join_expressions', (['all_extracted_parts_expr'], {}), '(all_extracted_parts_expr)\n', (25847, 25873), False, 'from bach.expression import Expression, join_expressions\n'), ((28058, 28137), 'bach.quantile.calculate_quantiles', 'calculate_quantiles', ([], {'series': 'total_microseconds_series', 'partition': 'partition', 'q': 'q'}), '(series=total_microseconds_series, partition=partition, q=q)\n', (28077, 28137), False, 'from bach.quantile import calculate_quantiles\n'), ((2699, 2734), 'bach.expression.Expression.string_value', 'Expression.string_value', (['format_str'], {}), '(format_str)\n', (2722, 2734), False, 'from 
bach.expression import Expression, join_expressions\n'), ((3593, 3643), 'bach.series.utils.datetime_formats.parse_c_standard_code_to_postgres_code', 'parse_c_standard_code_to_postgres_code', (['format_str'], {}), '(format_str)\n', (3631, 3643), False, 'from bach.series.utils.datetime_formats import parse_c_standard_code_to_postgres_code, parse_c_code_to_bigquery_code\n'), ((3811, 3830), 'sql_models.util.is_bigquery', 'is_bigquery', (['engine'], {}), '(engine)\n', (3822, 3830), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((8655, 8687), 'sql_models.util.is_bigquery', 'is_bigquery', (['self._series.engine'], {}), '(self._series.engine)\n', (8666, 8687), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((8807, 8870), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""extract(epoch from {{}})"""', 'self._series'], {}), "(f'extract(epoch from {{}})', self._series)\n", (8827, 8870), False, 'from bach.expression import Expression, join_expressions\n'), ((8962, 9117), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""UNIX_MICROS(CAST(\'1970-01-01\' AS TIMESTAMP) + {{}}) * {_TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]}"""', 'self._series'], {}), '(\n f"UNIX_MICROS(CAST(\'1970-01-01\' AS TIMESTAMP) + {{}}) * {_TOTAL_SECONDS_PER_DATE_PART[DatePart.MICROSECOND]}"\n , self._series)\n', (8982, 9117), False, 'from bach.expression import Expression, join_expressions\n'), ((10536, 10573), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600 * 12)'}), '(seconds=3600 * 12)\n', (10554, 10573), False, 'import datetime\n'), ((12448, 12470), 'bach.expression.Expression.raw', 'Expression.raw', (['"""NULL"""'], {}), "('NULL')\n", (12462, 12470), False, 'from bach.expression import Expression, join_expressions\n'), ((14647, 14683), 'bach.series.series.ToPandasInfo', 'ToPandasInfo', (['"""datetime64[ns]"""', 'None'], {}), "('datetime64[ns]', None)\n", (14659, 14683), False, 'from bach.series.series import WrappedPartition, ToPandasInfo\n'), ((14740, 14794), 'bach.series.series.ToPandasInfo', 'ToPandasInfo', (['"""datetime64[ns, UTC]"""', 'dt_strip_timezone'], {}), "('datetime64[ns, UTC]', dt_strip_timezone)\n", (14752, 14794), False, 'from bach.series.series import WrappedPartition, ToPandasInfo\n'), ((20812, 20840), 'bach.expression.Expression.construct', 'Expression.construct', (['"""NULL"""'], {}), "('NULL')\n", (20832, 20840), False, 'from bach.expression import Expression, join_expressions\n'), ((21255, 21323), 'bach.series.series.ToPandasInfo', 'ToPandasInfo', ([], {'dtype': '"""object"""', 'function': 'self._parse_interval_bigquery'}), "(dtype='object', function=self._parse_interval_bigquery)\n", (21267, 21323), False, 'from bach.series.series import WrappedPartition, ToPandasInfo\n'), ((25488, 25551), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""EXTRACT({date_part} FROM {{}})"""', 'series'], {}), "(f'EXTRACT({date_part} FROM {{}})', series)\n", (25508, 25551), False, 'from bach.expression import Expression, join_expressions\n'), ((25719, 25788), 'bach.expression.Expression.construct', 'Expression.construct', (['f"""EXTRACT(NANOSECOND FROM {{}}) / 1000"""', 'series'], {}), "(f'EXTRACT(NANOSECOND FROM {{}}) / 1000', series)\n", (25739, 25788), False, 'from bach.expression import Expression, join_expressions\n'), ((26661, 26705), 'bach.expression.Expression.string_value', 'Expression.string_value', (['_BQ_INTERVAL_FORMAT'], 
{}), '(_BQ_INTERVAL_FORMAT)\n', (26684, 26705), False, 'from bach.expression import Expression, join_expressions\n'), ((27384, 27408), 'sql_models.util.is_bigquery', 'is_bigquery', (['self.engine'], {}), '(self.engine)\n', (27395, 27408), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((3740, 3782), 'bach.expression.Expression.string_value', 'Expression.string_value', (['parsed_format_str'], {}), '(parsed_format_str)\n', (3763, 3782), False, 'from bach.expression import Expression, join_expressions\n'), ((4021, 4062), 'bach.series.utils.datetime_formats.parse_c_code_to_bigquery_code', 'parse_c_code_to_bigquery_code', (['format_str'], {}), '(format_str)\n', (4050, 4062), False, 'from bach.series.utils.datetime_formats import parse_c_standard_code_to_postgres_code, parse_c_code_to_bigquery_code\n'), ((4285, 4322), 'sql_models.util.DatabaseNotSupportedException', 'DatabaseNotSupportedException', (['engine'], {}), '(engine)\n', (4314, 4322), False, 'from sql_models.util import is_postgres, is_bigquery, DatabaseNotSupportedException\n'), ((13221, 13239), 'numpy.isnat', 'numpy.isnat', (['value'], {}), '(value)\n', (13232, 13239), False, 'import numpy\n'), ((13715, 13773), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(microseconds / 1000000)'], {}), '(microseconds / 1000000)\n', (13749, 13773), False, 'import datetime\n'), ((20716, 20739), 'pandas.Timedelta', 'pandas.Timedelta', (['value'], {}), '(value)\n', (20732, 20739), False, 'import pandas\n'), ((4165, 4207), 'bach.expression.Expression.string_value', 'Expression.string_value', (['parsed_format_str'], {}), '(parsed_format_str)\n', (4188, 4207), False, 'from bach.expression import Expression, join_expressions\n'), ((6810, 6849), 'typing.cast', 'cast', (['SeriesFloat64', 'df[component_name]'], {}), '(SeriesFloat64, df[component_name])\n', (6814, 6849), False, 'from typing import Union, cast, List, Tuple, Optional, Any\n'), ((10799, 10867), 'bach.expression.Expression.construct', 'Expression.construct', (['"""cast({} + {} as date)"""', 'series', 'expr_12_hours'], {}), "('cast({} + {} as date)', series, expr_12_hours)\n", (10819, 10867), False, 'from bach.expression import Expression, join_expressions\n'), ((12842, 12883), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['value', 'format'], {}), '(value, format)\n', (12868, 12883), False, 'import datetime\n'), ((13264, 13286), 'bach.expression.Expression.raw', 'Expression.raw', (['"""NULL"""'], {}), "('NULL')\n", (13278, 13286), False, 'from bach.expression import Expression, join_expressions\n'), ((13664, 13690), 'numpy.timedelta64', 'numpy.timedelta64', (['(1)', '"""us"""'], {}), "(1, 'us')\n", (13681, 13690), False, 'import numpy\n'), ((13629, 13659), 'numpy.datetime64', 'numpy.datetime64', (['"""1970"""', '"""us"""'], {}), "('1970', 'us')\n", (13645, 13659), False, 'import numpy\n')]
|
import os
import logging
from timeit import default_timer as timer
import numpy as np
from automon import AutomonNode
from automon.zmq_socket_utils import init_client_socket
from function_def import func_inner_product
logging.getLogger('automon').setLevel(logging.INFO)
def time_to_wait_for_next_sample_milliseconds(start_time, num_received_samples):
return (num_received_samples - (timer() - start_time)) * 1000
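# Worked example (added for clarity, not part of the original script): the loop
# below targets one sample per second, so with 3 samples already read and 2.4 s
# elapsed since start, (3 - 2.4) * 1000 = 600 ms remain before sample 4 is due;
# a non-positive result means the next sample should be read immediately.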
NODE_IDX = int(os.getenv('NODE_IDX', '0')) # Change the node index for different nodes
node = AutomonNode(idx=NODE_IDX, func_to_monitor=func_inner_product, d=40)
# Open a client socket and connect to the server socket. Wait for 'start' message from the server.
client_socket = init_client_socket(NODE_IDX, host=os.getenv('HOST', '127.0.0.1'), port=6400)
# Wait for a message from the coordinator (local data requests or local constraint updates) and send the reply to the coordinator.
# Read new data samples every 1 second and update the node local vector. Report violations to the coordinator.
start = timer()
num_data_samples = 0
while True:
if time_to_wait_for_next_sample_milliseconds(start, num_data_samples) <= 0:
# Time to read the next data sample
data = np.random.normal(loc=1, scale=0.1, size=(40,))
message_violation = node.update_data(data)
if message_violation:
client_socket.send(message_violation)
num_data_samples += 1
event = client_socket.poll(timeout=time_to_wait_for_next_sample_milliseconds(start, num_data_samples))
if event != 0:
# Received a message from the coordinator before the timeout has reached
message = client_socket.recv()
reply = node.parse_message(message)
if reply:
client_socket.send(reply)
|
[
"logging.getLogger",
"numpy.random.normal",
"os.getenv",
"timeit.default_timer",
"automon.AutomonNode"
] |
[((516, 583), 'automon.AutomonNode', 'AutomonNode', ([], {'idx': 'NODE_IDX', 'func_to_monitor': 'func_inner_product', 'd': '(40)'}), '(idx=NODE_IDX, func_to_monitor=func_inner_product, d=40)\n', (527, 583), False, 'from automon import AutomonNode\n'), ((1027, 1034), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1032, 1034), True, 'from timeit import default_timer as timer\n'), ((436, 462), 'os.getenv', 'os.getenv', (['"""NODE_IDX"""', '"""0"""'], {}), "('NODE_IDX', '0')\n", (445, 462), False, 'import os\n'), ((218, 246), 'logging.getLogger', 'logging.getLogger', (['"""automon"""'], {}), "('automon')\n", (235, 246), False, 'import logging\n'), ((733, 763), 'os.getenv', 'os.getenv', (['"""HOST"""', '"""127.0.0.1"""'], {}), "('HOST', '127.0.0.1')\n", (742, 763), False, 'import os\n'), ((1207, 1253), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(1)', 'scale': '(0.1)', 'size': '(40,)'}), '(loc=1, scale=0.1, size=(40,))\n', (1223, 1253), True, 'import numpy as np\n'), ((389, 396), 'timeit.default_timer', 'timer', ([], {}), '()\n', (394, 396), True, 'from timeit import default_timer as timer\n')]
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: shapes3d.py
# --- Creation Date: 16-01-2021
# --- Last Modified: Tue 13 Apr 2021 16:55:42 AEST
# --- Author: <NAME>
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Dataset for 3D Shapes
"""
import numpy as np
from torch.utils.data import Dataset
import os
import shutil
import h5py
import zipfile
from PIL import Image
import torch
import random
from datasets.transforms import PairTransform
class shapes3d(Dataset):
"""
Args:
root (str): Root directory of dataset containing 3dshapes.h5
        transform (``Transform``, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g., ``transforms.RandomCrop``
"""
def __init__(self, root, transform=None, fixed_shape=None):
super(shapes3d, self).__init__()
self.file = root
self.transform = transform
self.fixed_shape = fixed_shape
self.dataset_zip = self.load_data()
self.data = self.dataset_zip['images'][:] # array shape [480000,64,64,3], uint8 in range(256)
# self.latents_sizes = np.array([3, 6, 40, 32, 32])
self.latents_sizes = np.array([10, 10, 10, 8, 4, 15])
self.latents_bases = np.concatenate((self.latents_sizes[::-1].cumprod()[::-1][1:], np.array([1, ])))
# self.latents_classes = np.load(os.path.join(self.file, "latents_classes.npy"))
self.latents_classes = self.dataset_zip['labels'][:] # array shape [480000,6], float64
# if fixed_shape is not None:
# self._reduce_data(fixed_shape)
def generative_factors(self, index):
return self.latents_classes[index]
def latent_to_index(self, latents):
return np.dot(latents, self.latents_bases).astype(int)
def index_to_latent(self, index):
return self.latents_classes[index]
def get_img_by_latent(self, latent_code):
"""
Returns the image defined by the latent code
Args:
latent_code (:obj:`list` of :obj:`int`): Latent code of length 6 defining each generative factor
Returns:
Image defined by given code
"""
idx = self.latent_to_index(latent_code)
return self.__getitem__(idx)
def sample_latent(self):
f = []
for factor in self.latents_sizes:
f.append(np.random.randint(0, factor))
return np.array(f)
def load_data(self):
root = os.path.join(self.file, "3dshapes.h5")
dataset_zip = h5py.File(root, 'r')
# data = np.load(root)
return dataset_zip
def __getitem__(self, index):
data = self.data[index]
data = Image.fromarray(data)
labels = self.latents_classes[index]
if self.transform is not None:
data = self.transform(data)
return data, labels[1:]
def __len__(self):
return self.data.shape[0]
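# Usage sketch (added; not part of the original file): the latent bases turn a
# factor code into a flat dataset index. With latents_sizes = [10, 10, 10, 8, 4, 15],
# latents_bases = [48000, 4800, 480, 60, 15, 1], so the code [2, 3, 0, 1, 0, 5]
# maps to index 2*48000 + 3*4800 + 1*60 + 5 = 110465, i.e.
#   ds = shapes3d(root)                                  # assumes 3dshapes.h5 under `root`
#   img, labels = ds.get_img_by_latent([2, 3, 0, 1, 0, 5])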
class PairShapes3D(shapes3d):
def __init__(self, root, download=False, transform=None, offset=2, max_varied=1, wrapping=False, noise_name=None, output_targets=True, fixed_shape=None):
""" dSprites dataset with symmetry sampling included if output_targets is True.
Args:
root (str): Root directory of dataset containing '3dshapes.h5' or to download it to
            transform (``Transform``, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g., ``transforms.RandomCrop``
offset (int, list[int]): Offset of generative factor indices when sampling symmetries
max_varied (int): Max number of symmetries acting per observation
wrapping (bool): Wrap at boundaries or invert action
noise_name (str): Name of noise to add, default None
            output_targets (bool): If True, output the image pair corresponding to the symmetry action. If False, behave as the standard 3D Shapes dataset.
"""
super().__init__(root, transform)
self.factor = [0, 1, 2, 3, 5]
self.offset = offset
self.max_varied = max_varied
self.wrapping = wrapping
self.noise_transform = PairTransform(noise_name) if noise_name is not None else None
self.output_targets = output_targets
def get_next_img_by_offset(self, label1, img1, factor):
max_offsets = [10, 10, 10, 8, 1, 15]
new_latents = np.array(list(label1))
offset = torch.zeros(label1.shape).to(img1.device)
for f in factor:
cur_offset = self.offset if self.offset < max_offsets[f] else max_offsets[f]
if torch.rand(1) < 0.5:
cur_offset = cur_offset * -1
if self.wrapping:
new_latents[f] = (label1[f] + cur_offset) % (self.latents_sizes[f])
else:
new_latents[f] = (label1[f] + cur_offset).clip(min=0, max=self.latents_sizes[f]-1)
offset[f] = cur_offset
idx = self.latent_to_index(new_latents)
return idx, offset
def get_next_img_by_rand(self, latent1):
idx = torch.randint(len(self), (1,)).int()
offset = self.index_to_latent(idx)[1:] - latent1
return idx, offset
def __getitem__(self, index):
factor = self.factor
img1, label1 = super().__getitem__(index)
if not self.output_targets:
return img1, label1
if not isinstance(factor, list):
factor = [factor]
else:
factor = random.choices(factor, k=self.max_varied)
# TODO: Always set offset to 1 for val set? So we can eval metrics. Images wouldn't show multi steps though...
if self.offset != -1:
idx, offset = self.get_next_img_by_offset(label1, img1, factor)
else:
idx, offset = self.get_next_img_by_rand(label1)
img2, label2 = super().__getitem__(idx)
if self.noise_transform is not None:
img1, img2 = self.noise_transform(img1, img2)
return (img1, offset), img2
|
[
"PIL.Image.fromarray",
"os.path.join",
"h5py.File",
"numpy.array",
"random.choices",
"numpy.dot",
"numpy.random.randint",
"datasets.transforms.PairTransform",
"torch.zeros",
"torch.rand"
] |
[((1353, 1385), 'numpy.array', 'np.array', (['[10, 10, 10, 8, 4, 15]'], {}), '([10, 10, 10, 8, 4, 15])\n', (1361, 1385), True, 'import numpy as np\n'), ((2578, 2589), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (2586, 2589), True, 'import numpy as np\n'), ((2631, 2669), 'os.path.join', 'os.path.join', (['self.file', '"""3dshapes.h5"""'], {}), "(self.file, '3dshapes.h5')\n", (2643, 2669), False, 'import os\n'), ((2692, 2712), 'h5py.File', 'h5py.File', (['root', '"""r"""'], {}), "(root, 'r')\n", (2701, 2712), False, 'import h5py\n'), ((2853, 2874), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (2868, 2874), False, 'from PIL import Image\n'), ((4295, 4320), 'datasets.transforms.PairTransform', 'PairTransform', (['noise_name'], {}), '(noise_name)\n', (4308, 4320), False, 'from datasets.transforms import PairTransform\n'), ((5623, 5664), 'random.choices', 'random.choices', (['factor'], {'k': 'self.max_varied'}), '(factor, k=self.max_varied)\n', (5637, 5664), False, 'import random\n'), ((1477, 1490), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1485, 1490), True, 'import numpy as np\n'), ((1905, 1940), 'numpy.dot', 'np.dot', (['latents', 'self.latents_bases'], {}), '(latents, self.latents_bases)\n', (1911, 1940), True, 'import numpy as np\n'), ((2533, 2561), 'numpy.random.randint', 'np.random.randint', (['(0)', 'factor'], {}), '(0, factor)\n', (2550, 2561), True, 'import numpy as np\n'), ((4571, 4596), 'torch.zeros', 'torch.zeros', (['label1.shape'], {}), '(label1.shape)\n', (4582, 4596), False, 'import torch\n'), ((4743, 4756), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (4753, 4756), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# Name: <NAME>
# NUSP: 9778985
# Course Code: SCC0251
# Semester: 2019/1
# Assignment: 2 - Image enhancement and filtering
# -
import numpy as np
import imageio
# ## Defining functions
# +
# method 1 - limiarization
def limiarization(img, t0):
t = 0.5 * (np.nanmean(np.where(img > t0, img, np.NaN)) + np.nanmean(np.where(img <= t0, img, np.NaN))) # calculating threshold
while(abs(t-t0) > 0.5):
t0 = t
m1 = np.nanmean(np.where(img > t, img, np.NaN)) # mean of group1
m2 = np.nanmean(np.where(img <= t, img, np.NaN)) # mean of group2
t = 0.5 * (m1 + m2)
return np.where(img > t, 1, 0)
# method 2 - 1d filtering
def filter1d(img, w):
imgFlat = img.flatten() # flattening img
imgFinal = np.zeros(imgFlat.shape, dtype=np.double) # creating new array and applying filter
for i in range(imgFlat.shape[0]):
imgFinal[i] = np.sum([imgFlat[(i+j) % imgFlat.shape[0]] * w[j] for j in range(len(w))])
return imgFinal.reshape(img.shape)
# method 3 - 2d filtering
def filter2d(img, w, t0):
imgPad = np.pad(img, w.shape[0]//2, 'symmetric') # padding input image to apply filter
imgFinal = np.zeros(img.shape, dtype=np.double) # creating new array and applying filter
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
imgFinal[i][j] = np.sum([[imgPad[i+x][j+y] * w[x][y] for x in range(w.shape[0])] for y in range(w.shape[1])])
return limiarization(imgFinal, t0) # return limiarization of filtered image
# method 4 - 2d median filter
def medianFilter2d(img, n):
imgPad = np.pad(img, n//2, 'constant', constant_values = 0) # padding input image to apply filter
imgFinal = np.zeros(img.shape, dtype=np.double) # creating new array and applying filter
for i in range(0, img.shape[0]):
for j in range(0, img.shape[1]):
imgFinal[i][j] = np.median(imgPad[i:i+n, j:j+n])
return imgFinal
# Normalize value of an numpy array between 0 and a given max value
def normalize (arr, maxvalue):
return (arr-arr.min()) * (maxvalue / (arr.max()-arr.min()))
# root mean squared error (RMSE) function
def rmse (img_g, img_r):
return np.sqrt((1/(img_g.shape[0]*img_g.shape[1])) * np.sum(np.power(img_g.astype(np.double) - img_r.astype(np.double), 2)))
# -
# ## Main function
if __name__ == '__main__':
# get user input
filename = str(input()).strip()
sourceImg = imageio.imread(filename)
method = int(input())
# executing processing based on value of "method" variable
if method == 1:
t0 = np.double(input())
outputImg = normalize(limiarization(sourceImg, t0), 255).astype(np.uint8)
elif method == 2:
n = int(input())
w = np.array(input().split(), dtype=np.double)
if w.shape[0] != n:
raise ValueError("unexpected number of values for filter.")
outputImg = normalize(filter1d(sourceImg, w), 255).astype(np.uint8)
elif method == 3:
n = int(input())
w = np.array([input().split() for i in range(n)], dtype=np.double)
if w.shape != (n, n):
raise ValueError("unexpected number of values for filter.")
t0 = np.double(input())
outputImg = normalize(filter2d(sourceImg, w, t0), 255).astype(np.uint8)
elif method == 4:
n = int(input())
outputImg = normalize(medianFilter2d(sourceImg, n), 255).astype(np.uint8)
else:
raise ValueError("method value not in supported range (minimum = 1, maximum = 4).")
# printing output
print('%.4f' % rmse(sourceImg, outputImg))
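    # Usage sketch (added; not part of the assignment script): the filters can
    # also be called directly, bypassing the stdin protocol above, e.g.
    #   img = imageio.imread('input.png')                 # hypothetical file
    #   out = normalize(medianFilter2d(img, 3), 255).astype(np.uint8)
    #   print('%.4f' % rmse(img, out))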
|
[
"numpy.median",
"numpy.where",
"numpy.zeros",
"imageio.imread",
"numpy.pad"
] |
[((632, 655), 'numpy.where', 'np.where', (['(img > t)', '(1)', '(0)'], {}), '(img > t, 1, 0)\n', (640, 655), True, 'import numpy as np\n'), ((765, 805), 'numpy.zeros', 'np.zeros', (['imgFlat.shape'], {'dtype': 'np.double'}), '(imgFlat.shape, dtype=np.double)\n', (773, 805), True, 'import numpy as np\n'), ((1087, 1128), 'numpy.pad', 'np.pad', (['img', '(w.shape[0] // 2)', '"""symmetric"""'], {}), "(img, w.shape[0] // 2, 'symmetric')\n", (1093, 1128), True, 'import numpy as np\n'), ((1180, 1216), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': 'np.double'}), '(img.shape, dtype=np.double)\n', (1188, 1216), True, 'import numpy as np\n'), ((1610, 1660), 'numpy.pad', 'np.pad', (['img', '(n // 2)', '"""constant"""'], {'constant_values': '(0)'}), "(img, n // 2, 'constant', constant_values=0)\n", (1616, 1660), True, 'import numpy as np\n'), ((1714, 1750), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': 'np.double'}), '(img.shape, dtype=np.double)\n', (1722, 1750), True, 'import numpy as np\n'), ((2439, 2463), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', (2453, 2463), False, 'import imageio\n'), ((470, 500), 'numpy.where', 'np.where', (['(img > t)', 'img', 'np.NaN'], {}), '(img > t, img, np.NaN)\n', (478, 500), True, 'import numpy as np\n'), ((543, 574), 'numpy.where', 'np.where', (['(img <= t)', 'img', 'np.NaN'], {}), '(img <= t, img, np.NaN)\n', (551, 574), True, 'import numpy as np\n'), ((1899, 1934), 'numpy.median', 'np.median', (['imgPad[i:i + n, j:j + n]'], {}), '(imgPad[i:i + n, j:j + n])\n', (1908, 1934), True, 'import numpy as np\n'), ((298, 329), 'numpy.where', 'np.where', (['(img > t0)', 'img', 'np.NaN'], {}), '(img > t0, img, np.NaN)\n', (306, 329), True, 'import numpy as np\n'), ((344, 376), 'numpy.where', 'np.where', (['(img <= t0)', 'img', 'np.NaN'], {}), '(img <= t0, img, np.NaN)\n', (352, 376), True, 'import numpy as np\n')]
|
from functools import reduce
import numpy as np
import json
import tensorflow as tf
from scipy.optimize import linear_sum_assignment
import os
import time
def deleteDuplicate_v1(input_dict_lst):
f = lambda x,y:x if y in x else x + [y]
return reduce(f, [[], ] + input_dict_lst)
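# Example (added for clarity): deleteDuplicate_v1 keeps the first occurrence of
# each dict while preserving order, e.g.
#   deleteDuplicate_v1([{'a': 1}, {'b': 2}, {'a': 1}])  ->  [{'a': 1}, {'b': 2}]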
def get_context_pair(resp, l):
label_weights = l['label_weights']
valid_resp = {}
for key in resp:
valid_resp[key] = []
for index, value in enumerate(resp[key]):
if label_weights[index] == 1:
valid_resp[key].append(value)
answer = l['answer_tokens']
position_tokens = l['tokens']
label_position = [lpos-1 for index, lpos in enumerate(l['label_positions']) if label_weights[index]==1]
score_label = []
for index in range(len(valid_resp['pred_label'])):
label = valid_resp['pred_label'][index]
score = valid_resp['max_prob'][index]
position = label_position[index]
position_token = position_tokens[str(position)][1]
if label == 1:
score = 1 - score
score_label.append({"score":score, "label":label,
"position_token":position_token,
"answer":answer})
return score_label
def format_socre_matrix(result_lst, score_merge='mean'):
answer_dict = {}
candidate_dict = {}
answer_index = 0
pos_index = 0
for item in result_lst:
if item['answer'] not in answer_dict:
answer_dict[item['answer']] = answer_index
answer_index += 1
if item['position_token'] not in candidate_dict:
candidate_dict[item['position_token']] = pos_index
pos_index += 1
score_matrix = -np.ones((len(answer_dict), len(candidate_dict)))
for item in result_lst:
answer_pos = answer_dict[item['answer']]
candidate_pos = candidate_dict[item['position_token']]
score_matrix_score = score_matrix[answer_pos, candidate_pos]
if score_matrix_score == -1:
score_matrix[answer_pos, candidate_pos] = item['score']
else:
if score_merge == 'mean':
score_matrix[answer_pos, candidate_pos] += item['score']
score_matrix[answer_pos, candidate_pos] /= 2
elif score_merge == 'max':
if item['score'] > score_matrix[answer_pos, candidate_pos]:
score_matrix[answer_pos, candidate_pos] = item['score']
return score_matrix, answer_dict, candidate_dict
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.INFO)
flags.DEFINE_string("buckets", "", "oss buckets")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"model_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"score_merge", "max",
"Input TF example files (can be a glob or comma separated).")
input_file = os.path.join(FLAGS.buckets, FLAGS.input_file)
output_file = os.path.join(FLAGS.buckets, FLAGS.output_file)
model_file = os.path.join(FLAGS.buckets, FLAGS.model_file)
from tensorflow.contrib import predictor
# model_dict = {
# "model":'/data/xuht/albert.xht/nlpcc2019/open_data/model/1566283032'
# }
model_dict = {
"model":model_file
}
chid_model = predictor.from_saved_model(model_dict['model'])
fwobj = tf.gfile.Open(output_file, "w")
cnt = 0
valid_keys = ['input_ids', 'label_weights',
'label_positions', 'label_ids',
'segment_ids']
with tf.gfile.Open(input_file, "r") as f:
for index, line in enumerate(f):
content = json.loads(line.strip())
total_resp = []
start = time.time()
for t in content:
tmp = {}
for l in t:
for key in valid_keys:
if key in tmp:
tmp[key].append(l[key])
else:
tmp[key] = [l[key]]
# tmp = {
# "input_ids":np.array([l['input_ids']]),
# 'label_weights':np.array([l['label_weights']]),
# 'label_positions':np.array([l['label_positions']]),
# 'label_ids':np.array([l['label_ids']]),
# 'segment_ids':np.array([l['segment_ids']]),
# }
resp = chid_model(tmp)
resp_lst = []
batch_size = int(resp['pred_label'].shape[0]/5)
for key in resp:
resp[key] = np.reshape(resp[key], [-1, 5]).tolist()
for i_index in range(batch_size):
tmp = {
"pred_label":resp['pred_label'][i_index],
"max_prob":resp['max_prob'][i_index],
}
resp_lst.append(tmp)
for i_index in range(len(t)):
resp_ = resp_lst[i_index]
l_ = t[i_index]
result = get_context_pair(resp_, l_)
total_resp.extend(result)
total_resp = deleteDuplicate_v1(total_resp)
resp = format_socre_matrix(total_resp, score_merge=FLAGS.score_merge)
row_ind, col_ind = linear_sum_assignment(resp[0])
mapping_dict = dict(zip(col_ind, row_ind))
dura = time.time()-start
candidte_dict = resp[-1]
candidate_inverse_dict = {}
for key in candidte_dict:
candidate_inverse_dict[candidte_dict[key]] = key
candidate_name_dict = {}
for col in mapping_dict:
col_name = candidate_inverse_dict[col]
candidate_name_dict[col_name] = int(mapping_dict[col])
cnt += len(candidate_name_dict)
if np.mod(index, 100) == 0:
print(candidate_name_dict, index, dura)
fwobj.write(json.dumps(candidate_name_dict, ensure_ascii=False)+"\n")
fwobj.close()
print('==total cnt==', cnt)
|
[
"tensorflow.gfile.Open",
"numpy.reshape",
"scipy.optimize.linear_sum_assignment",
"tensorflow.contrib.predictor.from_saved_model",
"functools.reduce",
"json.dumps",
"os.path.join",
"tensorflow.logging.set_verbosity",
"numpy.mod",
"time.time"
] |
[((2296, 2337), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (2320, 2337), True, 'import tensorflow as tf\n'), ((2830, 2875), 'os.path.join', 'os.path.join', (['FLAGS.buckets', 'FLAGS.input_file'], {}), '(FLAGS.buckets, FLAGS.input_file)\n', (2842, 2875), False, 'import os\n'), ((2890, 2936), 'os.path.join', 'os.path.join', (['FLAGS.buckets', 'FLAGS.output_file'], {}), '(FLAGS.buckets, FLAGS.output_file)\n', (2902, 2936), False, 'import os\n'), ((2950, 2995), 'os.path.join', 'os.path.join', (['FLAGS.buckets', 'FLAGS.model_file'], {}), '(FLAGS.buckets, FLAGS.model_file)\n', (2962, 2995), False, 'import os\n'), ((3186, 3233), 'tensorflow.contrib.predictor.from_saved_model', 'predictor.from_saved_model', (["model_dict['model']"], {}), "(model_dict['model'])\n", (3212, 3233), False, 'from tensorflow.contrib import predictor\n'), ((3243, 3274), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['output_file', '"""w"""'], {}), "(output_file, 'w')\n", (3256, 3274), True, 'import tensorflow as tf\n'), ((245, 277), 'functools.reduce', 'reduce', (['f', '([[]] + input_dict_lst)'], {}), '(f, [[]] + input_dict_lst)\n', (251, 277), False, 'from functools import reduce\n'), ((3387, 3417), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['input_file', '"""r"""'], {}), "(input_file, 'r')\n", (3400, 3417), True, 'import tensorflow as tf\n'), ((3523, 3534), 'time.time', 'time.time', ([], {}), '()\n', (3532, 3534), False, 'import time\n'), ((4611, 4641), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['resp[0]'], {}), '(resp[0])\n', (4632, 4641), False, 'from scipy.optimize import linear_sum_assignment\n'), ((4696, 4707), 'time.time', 'time.time', ([], {}), '()\n', (4705, 4707), False, 'import time\n'), ((5047, 5065), 'numpy.mod', 'np.mod', (['index', '(100)'], {}), '(index, 100)\n', (5053, 5065), True, 'import numpy as np\n'), ((5129, 5180), 'json.dumps', 'json.dumps', (['candidate_name_dict'], {'ensure_ascii': '(False)'}), '(candidate_name_dict, ensure_ascii=False)\n', (5139, 5180), False, 'import json\n'), ((4107, 4137), 'numpy.reshape', 'np.reshape', (['resp[key]', '[-1, 5]'], {}), '(resp[key], [-1, 5])\n', (4117, 4137), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
def residuals(fit, obs):
"""Calculate residuals for fit compared to observed data
:fit: list of discrete fit data points
:obs: list of observed data points
:returns: fit minus observed data points
"""
return fit-obs
def fit_stats(obs, fit):
"""
https://stackoverflow.com/questions/19189362/getting-the-r-squared-
value-using-curve-fit
"""
resid = fit - obs
ss_res = np.sum(resid**2)
ss_tot = np.sum((obs - np.mean(obs))**2)
r_squared = 1 - (ss_res / ss_tot)
return r_squared, ss_tot, ss_res, resid
def sum_squares_total(calc, obs):
"""
https://stackoverflow.com/questions/19189362/getting-the-r-squared-
value-using-curve-fit
"""
return np.sum((obs - np.mean(obs))**2)
def sum_squares_residuals(calc, obs):
"""
https://stackoverflow.com/questions/19189362/getting-the-r-squared-
value-using-curve-fit
"""
resids = residuals(calc, obs)
return np.sum(resids**2)
def rms_error(calc, obs):
"""Calculate root mean squared deviation
:calc: calculated data from fit
:obs: experimentally observed data
:returns: rmsd
"""
resids = residuals(calc, obs)
mean_sqrd = np.mean(resids**2)
return np.sqrt(mean_sqrd)
def r_squared(calc, obs):
"""
https://stackoverflow.com/questions/19189362/getting-the-r-squared-
value-using-curve-fit
"""
ss_res = sum_squares_residuals(calc, obs)
ss_tot = sum_squares_total(calc, obs)
return 1 - (ss_res / ss_tot)
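# Minimal usage sketch (added; not part of the original module). The numbers are
# synthetic and only illustrate how the helpers are meant to be called.
if __name__ == '__main__':
    obs = np.array([1.0, 2.0, 3.0, 4.0])   # observed data
    fit = np.array([1.1, 1.9, 3.2, 3.8])   # values from some fit
    print("residuals:", residuals(fit, obs))
    print("RMSE: %.4f" % rms_error(fit, obs))
    print("R^2:  %.4f" % r_squared(fit, obs))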
|
[
"numpy.sum",
"numpy.mean",
"numpy.sqrt"
] |
[((486, 504), 'numpy.sum', 'np.sum', (['(resid ** 2)'], {}), '(resid ** 2)\n', (492, 504), True, 'import numpy as np\n'), ((1022, 1041), 'numpy.sum', 'np.sum', (['(resids ** 2)'], {}), '(resids ** 2)\n', (1028, 1041), True, 'import numpy as np\n'), ((1266, 1286), 'numpy.mean', 'np.mean', (['(resids ** 2)'], {}), '(resids ** 2)\n', (1273, 1286), True, 'import numpy as np\n'), ((1296, 1314), 'numpy.sqrt', 'np.sqrt', (['mean_sqrd'], {}), '(mean_sqrd)\n', (1303, 1314), True, 'import numpy as np\n'), ((530, 542), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (537, 542), True, 'import numpy as np\n'), ((805, 817), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (812, 817), True, 'import numpy as np\n')]
|
"""Helper functions and classes for users.
They should not be used in skorch directly.
"""
from collections.abc import Sequence
from collections import namedtuple
from functools import partial
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
import torch
from skorch.cli import parse_args
from skorch.utils import _make_split
from skorch.utils import is_torch_data_type
from skorch.utils import to_tensor
class SliceDict(dict):
"""Wrapper for Python dict that makes it sliceable across values.
Use this if your input data is a dictionary and you have problems
with sklearn not being able to slice it. Wrap your dict with
SliceDict and it should usually work.
Note:
* SliceDict cannot be indexed by integers, if you want one row,
say row 3, use `[3:4]`.
* SliceDict accepts numpy arrays and torch tensors as values.
Examples
--------
>>> X = {'key0': val0, 'key1': val1}
>>> search = GridSearchCV(net, params, ...)
>>> search.fit(X, y) # raises error
>>> Xs = SliceDict(key0=val0, key1=val1) # or Xs = SliceDict(**X)
>>> search.fit(Xs, y) # works
"""
def __init__(self, **kwargs):
lengths = [value.shape[0] for value in kwargs.values()]
lengths_set = set(lengths)
if lengths_set and (len(lengths_set) != 1):
raise ValueError(
"Initialized with items of different lengths: {}"
"".format(', '.join(map(str, sorted(lengths_set)))))
if not lengths:
self._len = 0
else:
self._len = lengths[0]
super(SliceDict, self).__init__(**kwargs)
def __len__(self):
return self._len
def __getitem__(self, sl):
if isinstance(sl, int):
# Indexing with integers is not well-defined because that
            # reduces the dimension of arrays by one, messing up
# lengths and shapes.
raise ValueError("SliceDict cannot be indexed by integers.")
if isinstance(sl, str):
return super(SliceDict, self).__getitem__(sl)
return SliceDict(**{k: v[sl] for k, v in self.items()})
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("Key must be str, not {}.".format(type(key)))
length = value.shape[0]
if not self.keys():
self._len = length
if self._len != length:
raise ValueError(
"Cannot set array with shape[0] != {}"
"".format(self._len))
super(SliceDict, self).__setitem__(key, value)
def update(self, kwargs):
for key, value in kwargs.items():
self.__setitem__(key, value)
def __repr__(self):
out = super(SliceDict, self).__repr__()
return "SliceDict(**{})".format(out)
@property
def shape(self):
return (self._len,)
def copy(self):
return type(self)(**self)
def fromkeys(self, *args, **kwargs):
"""fromkeys method makes no sense with SliceDict and is thus not
supported."""
raise TypeError("SliceDict does not support fromkeys.")
def __eq__(self, other):
if self.keys() != other.keys():
return False
for key, val in self.items():
val_other = other[key]
# torch tensors
if is_torch_data_type(val):
if not is_torch_data_type(val_other):
return False
if not (val == val_other).all():
return False
continue
# numpy arrays
if isinstance(val, np.ndarray):
if not isinstance(val_other, np.ndarray):
return False
if not (val == val_other).all():
return False
continue
# rest
if val != val_other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
# This class must be an instance of Sequence and have an ndim
# attribute because sklearn will test this.
class SliceDataset(Sequence):
# pylint: disable=anomalous-backslash-in-string
"""Helper class that wraps a torch dataset to make it work with
sklearn.
Sometimes, sklearn will touch the input data, e.g. when splitting
the data for a grid search. This will fail when the input data is
a torch dataset. To prevent this, use this wrapper class for your
dataset.
Note: This class will only return the X value by default (i.e. the
first value returned by indexing the original dataset). Sklearn,
and hence skorch, always require 2 values, X and y. Therefore, you
still need to provide the y data separately.
Note: This class behaves similarly to a PyTorch
:class:`~torch.utils.data.Subset` when it is indexed by a slice or
numpy array: It will return another ``SliceDataset`` that
references the subset instead of the actual values. Only when it
is indexed by an int does it return the actual values. The reason
for this is to avoid loading all data into memory when sklearn,
for instance, creates a train/validation split on the
dataset. Data will only be loaded in batches during the fit loop.
Examples
--------
>>> X = MyCustomDataset()
>>> search = GridSearchCV(net, params, ...)
>>> search.fit(X, y) # raises error
>>> ds = SliceDataset(X)
>>> search.fit(ds, y) # works
Parameters
----------
dataset : torch.utils.data.Dataset
A valid torch dataset.
idx : int (default=0)
Indicates which element of the dataset should be
returned. Typically, the dataset returns both X and y
values. SliceDataset can only return 1 value. If you want to
get X, choose idx=0 (default), if you want y, choose idx=1.
indices : list, np.ndarray, or None (default=None)
If you only want to return a subset of the dataset, indicate
which subset that is by passing this argument. Typically, this
can be left to be None, which returns all the data. See also
:class:`~torch.utils.data.Subset`.
"""
def __init__(self, dataset, idx=0, indices=None):
self.dataset = dataset
self.idx = idx
self.indices = indices
self.indices_ = (self.indices if self.indices is not None
else np.arange(len(self.dataset)))
self.ndim = 1
def __len__(self):
return len(self.indices_)
@property
def shape(self):
return (len(self),)
def transform(self, data):
"""Additional transformations on ``data``.
        Note: If you use this in conjunction with PyTorch
:class:`~torch.utils.data.DataLoader`, the latter will call
the dataset for each row separately, which means that the
        incoming ``data`` is a single row.
"""
return data
def _select_item(self, Xn):
# Raise a custom error message when accessing out of
# bounds. However, this will only trigger as soon as this is
# indexed by an integer.
try:
return Xn[self.idx]
except IndexError:
name = self.__class__.__name__
msg = ("{} is trying to access element {} but there are only "
"{} elements.".format(name, self.idx, len(Xn)))
raise IndexError(msg)
def __getitem__(self, i):
if isinstance(i, (int, np.integer)):
Xn = self.dataset[self.indices_[i]]
Xi = self._select_item(Xn)
return self.transform(Xi)
if isinstance(i, slice):
return SliceDataset(self.dataset, idx=self.idx, indices=self.indices_[i])
if isinstance(i, np.ndarray):
if i.ndim != 1:
raise IndexError("SliceDataset only supports slicing with 1 "
"dimensional arrays, got {} dimensions instead."
"".format(i.ndim))
            if i.dtype == bool:
i = np.flatnonzero(i)
return SliceDataset(self.dataset, idx=self.idx, indices=self.indices_[i])
def predefined_split(dataset):
"""Uses ``dataset`` for validiation in :class:`.NeuralNet`.
Examples
--------
>>> valid_ds = skorch.Dataset(X, y)
>>> net = NeuralNet(..., train_split=predefined_split(valid_ds))
Parameters
----------
dataset: torch Dataset
        Validation dataset
"""
return partial(_make_split, valid_ds=dataset)
class DataFrameTransformer(BaseEstimator, TransformerMixin):
"""Transform a DataFrame into a dict useful for working with skorch.
Transforms cardinal data to floats and categorical data to vectors
of ints so that they can be embedded.
Although skorch can deal with pandas DataFrames, the default
behavior is often not very useful. Use this transformer to
transform the DataFrame into a dict with all float columns
concatenated using the key "X" and all categorical values encoded
as integers, using their respective column names as keys.
Your module must have a matching signature for this to work. It
must accept an argument ``X`` for all cardinal
values. Additionally, for all categorical values, it must accept
an argument with the same name as the corresponding column (see
example below). If you need help with the required signature, use
the ``describe_signature`` method of this class and pass it your
data.
You can choose whether you want to treat int columns the same as
float columns (default) or as categorical values.
To one-hot encode categorical features, initialize their
corresponding embedding layers using the identity matrix.
Examples
--------
>>> df = pd.DataFrame({
... 'col_floats': np.linspace(0, 1, 12),
... 'col_ints': [11, 11, 10] * 4,
... 'col_cats': ['a', 'b', 'a'] * 4,
... })
>>> # cast to category dtype to later learn embeddings
>>> df['col_cats'] = df['col_cats'].astype('category')
>>> y = np.asarray([0, 1, 0] * 4)
>>> class MyModule(nn.Module):
... def __init__(self):
... super().__init__()
... self.reset_params()
>>> def reset_params(self):
... self.embedding = nn.Embedding(2, 10)
... self.linear = nn.Linear(2, 10)
... self.out = nn.Linear(20, 2)
... self.nonlin = nn.Softmax(dim=-1)
>>> def forward(self, X, col_cats):
... # "X" contains the values from col_floats and col_ints
... # "col_cats" contains the values from "col_cats"
... X_lin = self.linear(X)
... X_cat = self.embedding(col_cats)
... X_concat = torch.cat((X_lin, X_cat), dim=1)
... return self.nonlin(self.out(X_concat))
>>> net = NeuralNetClassifier(MyModule)
>>> pipe = Pipeline([
... ('transform', DataFrameTransformer()),
... ('net', net),
... ])
>>> pipe.fit(df, y)
Parameters
----------
treat_int_as_categorical : bool (default=False)
Whether to treat integers as categorical values or as cardinal
values, i.e. the same as floats.
float_dtype : numpy dtype or None (default=np.float32)
The dtype to cast the cardinal values to. If None, don't change
them.
int_dtype : numpy dtype or None (default=np.int64)
The dtype to cast the categorical values to. If None, don't
change them. If you do this, it can happen that the categorical
values will have different dtypes, reflecting the number of
unique categories.
Notes
-----
The value of X will always be 2-dimensional, even if it only
contains 1 column.
"""
import pandas as pd
def __init__(
self,
treat_int_as_categorical=False,
float_dtype=np.float32,
int_dtype=np.int64,
):
self.treat_int_as_categorical = treat_int_as_categorical
self.float_dtype = float_dtype
self.int_dtype = int_dtype
def _check_dtypes(self, df):
"""Perform a check on the DataFrame to detect wrong dtypes or keys.
Makes sure that there are no conflicts in key names.
If dtypes are found that cannot be dealt with, raises a
TypeError with a message indicating which ones caused trouble.
Raises
------
ValueError
If there already is a column named 'X'.
TypeError
If a wrong dtype is found.
"""
if 'X' in df:
raise ValueError(
"DataFrame contains a column named 'X', which clashes "
"with the name chosen for cardinal features; consider "
"renaming that column.")
wrong_dtypes = []
for col, dtype in zip(df, df.dtypes):
if isinstance(dtype, self.pd.api.types.CategoricalDtype):
continue
if np.issubdtype(dtype, np.integer):
continue
if np.issubdtype(dtype, np.floating):
continue
wrong_dtypes.append((col, dtype))
if not wrong_dtypes:
return
wrong_dtypes = sorted(wrong_dtypes, key=lambda tup: tup[0])
msg_dtypes = ", ".join(
"{} ({})".format(col, dtype) for col, dtype in wrong_dtypes)
msg = ("The following columns have dtypes that cannot be "
"interpreted as numerical dtypes: {}".format(msg_dtypes))
raise TypeError(msg)
# pylint: disable=unused-argument
def fit(self, df, y=None, **fit_params):
self._check_dtypes(df)
return self
def transform(self, df):
"""Transform DataFrame to become a dict that works well with skorch.
Parameters
----------
df : pd.DataFrame
Incoming DataFrame.
Returns
-------
X_dict: dict
Dictionary with all floats concatenated using the key "X"
and all categorical values encoded as integers, using their
respective column names as keys.
"""
self._check_dtypes(df)
X_dict = {}
Xf = [] # floats
for col, dtype in zip(df, df.dtypes):
X_col = df[col]
if isinstance(dtype, self.pd.api.types.CategoricalDtype):
x = X_col.cat.codes.values
if self.int_dtype is not None:
x = x.astype(self.int_dtype)
X_dict[col] = x
continue
if (
np.issubdtype(dtype, np.integer)
and self.treat_int_as_categorical
):
x = X_col.astype('category').cat.codes.values
if self.int_dtype is not None:
x = x.astype(self.int_dtype)
X_dict[col] = x
continue
Xf.append(X_col.values)
if not Xf:
return X_dict
X = np.stack(Xf, axis=1)
if self.float_dtype is not None:
X = X.astype(self.float_dtype)
X_dict['X'] = X
return X_dict
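    # Example of the resulting dict (added comment; based on the class docstring
    # data above, with the default settings): transform(df) returns roughly
    #   {'col_cats': int64 array of category codes, shape (12,),
    #    'X': float32 array stacking col_floats and col_ints, shape (12, 2)}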
def describe_signature(self, df):
"""Describe the signature required for the given data.
Pass the DataFrame to receive a description of the signature
required for the module's forward method. The description
consists of three parts:
1. The names of the arguments that the forward method
needs.
2. The dtypes of the torch tensors passed to forward.
3. The number of input units that are required for the
corresponding argument. For the float parameter, this is just
the number of dimensions of the tensor. For categorical
parameters, it is the number of unique elements.
Returns
-------
signature : dict
Returns a dict with each key corresponding to one key
required for the forward method. The values are dictionaries
of two elements. The key "dtype" describes the torch dtype
of the resulting tensor, the key "input_units" describes the
required number of input units.
"""
X_dict = self.fit_transform(df)
signature = {}
X = X_dict.get('X')
if X is not None:
signature['X'] = dict(
dtype=to_tensor(X, device='cpu').dtype,
input_units=X.shape[1],
)
for key, val in X_dict.items():
if key == 'X':
continue
tensor = to_tensor(val, device='cpu')
nunique = len(torch.unique(tensor))
signature[key] = dict(
dtype=tensor.dtype,
input_units=nunique,
)
return signature
|
[
"torch.unique",
"numpy.flatnonzero",
"numpy.stack",
"numpy.issubdtype",
"skorch.utils.to_tensor",
"functools.partial",
"skorch.utils.is_torch_data_type"
] |
[((8580, 8618), 'functools.partial', 'partial', (['_make_split'], {'valid_ds': 'dataset'}), '(_make_split, valid_ds=dataset)\n', (8587, 8618), False, 'from functools import partial\n'), ((15120, 15140), 'numpy.stack', 'np.stack', (['Xf'], {'axis': '(1)'}), '(Xf, axis=1)\n', (15128, 15140), True, 'import numpy as np\n'), ((3407, 3430), 'skorch.utils.is_torch_data_type', 'is_torch_data_type', (['val'], {}), '(val)\n', (3425, 3430), False, 'from skorch.utils import is_torch_data_type\n'), ((13092, 13124), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (13105, 13124), True, 'import numpy as np\n'), ((13166, 13199), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.floating'], {}), '(dtype, np.floating)\n', (13179, 13199), True, 'import numpy as np\n'), ((16702, 16730), 'skorch.utils.to_tensor', 'to_tensor', (['val'], {'device': '"""cpu"""'}), "(val, device='cpu')\n", (16711, 16730), False, 'from skorch.utils import to_tensor\n'), ((8141, 8158), 'numpy.flatnonzero', 'np.flatnonzero', (['i'], {}), '(i)\n', (8155, 8158), True, 'import numpy as np\n'), ((14707, 14739), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (14720, 14739), True, 'import numpy as np\n'), ((16757, 16777), 'torch.unique', 'torch.unique', (['tensor'], {}), '(tensor)\n', (16769, 16777), False, 'import torch\n'), ((3455, 3484), 'skorch.utils.is_torch_data_type', 'is_torch_data_type', (['val_other'], {}), '(val_other)\n', (3473, 3484), False, 'from skorch.utils import is_torch_data_type\n'), ((16499, 16525), 'skorch.utils.to_tensor', 'to_tensor', (['X'], {'device': '"""cpu"""'}), "(X, device='cpu')\n", (16508, 16525), False, 'from skorch.utils import to_tensor\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
dataset classes
"""
import cv2
import numpy as np
import src.utils.img
from src.dataset.MPIIDataLoader import flipped_parts
class GenerateHeatmap:
"""
    Generate the training target heatmaps (one Gaussian peak per keypoint).
"""
def __init__(self, output_res, num_parts):
self.output_res = output_res
self.num_parts = num_parts
sigma = self.output_res / 64
self.sigma = sigma
size = 6 * sigma + 3
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0, y0 = 3 * sigma + 1, 3 * sigma + 1
self.g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
def __call__(self, keypoints):
hms = np.zeros(shape=(self.num_parts, self.output_res, self.output_res), dtype=np.float32)
sigma = self.sigma
for p in keypoints:
for idx, pt in enumerate(p):
if pt[0] > 0:
x, y = int(pt[0]), int(pt[1])
if x < 0 or y < 0 or x >= self.output_res or y >= self.output_res:
continue
ul = int(x - 3 * sigma - 1), int(y - 3 * sigma - 1)
br = int(x + 3 * sigma + 2), int(y + 3 * sigma + 2)
c, d = max(0, -ul[0]), min(br[0], self.output_res) - ul[0]
a, b = max(0, -ul[1]), min(br[1], self.output_res) - ul[1]
cc, dd = max(0, ul[0]), min(br[0], self.output_res)
aa, bb = max(0, ul[1]), min(br[1], self.output_res)
hms[idx, aa:bb, cc:dd] = np.maximum(hms[idx, aa:bb, cc:dd], self.g[a:b, c:d])
return hms
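# Usage sketch (added; not part of the original file):
#   gen = GenerateHeatmap(output_res=64, num_parts=16)
#   hms = gen(keypoints)  # keypoints: iterable over people, each an array of 16 (x, y, ...) rows
# `hms` is a (16, 64, 64) float32 array with one Gaussian peak per annotated joint
# (joints with x <= 0 or falling outside the output resolution are skipped).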
class DatasetGenerator:
"""
mindspore general dataset generator
"""
def __init__(self, input_res, output_res, ds, index):
self.input_res = input_res
self.output_res = output_res
self.generateHeatmap = GenerateHeatmap(self.output_res, 16)
self.ds = ds
self.index = index
def __len__(self):
return len(self.index)
def __getitem__(self, idx):
# print(f"loading...{idx}")
return self.loadImage(self.index[idx])
def loadImage(self, idx):
"""
load and preprocess image
"""
ds = self.ds
# Load + Crop
orig_img = ds.get_img(idx)
orig_keypoints = ds.get_kps(idx)
kptmp = orig_keypoints.copy()
c = ds.get_center(idx)
s = ds.get_scale(idx)
cropped = src.utils.img.crop(orig_img, c, s, (self.input_res, self.input_res))
for i in range(np.shape(orig_keypoints)[1]):
if orig_keypoints[0, i, 0] > 0:
orig_keypoints[0, i, :2] = src.utils.img.transform(
orig_keypoints[0, i, :2], c, s, (self.input_res, self.input_res)
)
keypoints = np.copy(orig_keypoints)
# Random Crop
height, width = cropped.shape[0:2]
center = np.array((width / 2, height / 2))
scale = max(height, width) / 200
aug_rot = 0
aug_rot = (np.random.random() * 2 - 1) * 30.0
aug_scale = np.random.random() * (1.25 - 0.75) + 0.75
scale *= aug_scale
mat_mask = src.utils.img.get_transform(center, scale, (self.output_res, self.output_res), aug_rot)[:2]
mat = src.utils.img.get_transform(center, scale, (self.input_res, self.input_res), aug_rot)[:2]
inp = cv2.warpAffine(cropped, mat, (self.input_res, self.input_res)).astype(np.float32) / 255
keypoints[:, :, 0:2] = src.utils.img.kpt_affine(keypoints[:, :, 0:2], mat_mask)
if np.random.randint(2) == 0:
inp = self.preprocess(inp)
inp = inp[:, ::-1]
keypoints = keypoints[:, flipped_parts["mpii"]]
keypoints[:, :, 0] = self.output_res - keypoints[:, :, 0]
orig_keypoints = orig_keypoints[:, flipped_parts["mpii"]]
orig_keypoints[:, :, 0] = self.input_res - orig_keypoints[:, :, 0]
# If keypoint is invisible, set to 0
for i in range(np.shape(orig_keypoints)[1]):
if kptmp[0, i, 0] == 0 and kptmp[0, i, 1] == 0:
keypoints[0, i, 0] = 0
keypoints[0, i, 1] = 0
orig_keypoints[0, i, 0] = 0
orig_keypoints[0, i, 1] = 0
# Generate target heatmap
heatmaps = self.generateHeatmap(keypoints)
return inp.astype(np.float32), heatmaps.astype(np.float32)
def preprocess(self, data):
"""
preprocess images
"""
# Random hue and saturation
data = cv2.cvtColor(data, cv2.COLOR_RGB2HSV)
delta = (np.random.random() * 2 - 1) * 0.2
data[:, :, 0] = np.mod(data[:, :, 0] + (delta * 360 + 360.0), 360.0)
delta_sature = np.random.random() + 0.5
data[:, :, 1] *= delta_sature
data[:, :, 1] = np.maximum(np.minimum(data[:, :, 1], 1), 0)
data = cv2.cvtColor(data, cv2.COLOR_HSV2RGB)
# Random brightness
delta = (np.random.random() * 2 - 1) * 0.3
data += delta
# Random contrast
mean = data.mean(axis=2, keepdims=True)
data = (data - mean) * (np.random.random() + 0.5) + mean
data = np.minimum(np.maximum(data, 0), 1)
return data
|
[
"numpy.copy",
"numpy.shape",
"cv2.warpAffine",
"numpy.minimum",
"numpy.random.random",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"cv2.cvtColor",
"numpy.maximum",
"numpy.mod",
"numpy.arange"
] |
[((1091, 1119), 'numpy.arange', 'np.arange', (['(0)', 'size', '(1)', 'float'], {}), '(0, size, 1, float)\n', (1100, 1119), True, 'import numpy as np\n'), ((1212, 1271), 'numpy.exp', 'np.exp', (['(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))'], {}), '(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))\n', (1218, 1271), True, 'import numpy as np\n'), ((1322, 1411), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_parts, self.output_res, self.output_res)', 'dtype': 'np.float32'}), '(shape=(self.num_parts, self.output_res, self.output_res), dtype=np\n .float32)\n', (1330, 1411), True, 'import numpy as np\n'), ((3452, 3475), 'numpy.copy', 'np.copy', (['orig_keypoints'], {}), '(orig_keypoints)\n', (3459, 3475), True, 'import numpy as np\n'), ((3559, 3592), 'numpy.array', 'np.array', (['(width / 2, height / 2)'], {}), '((width / 2, height / 2))\n', (3567, 3592), True, 'import numpy as np\n'), ((5206, 5243), 'cv2.cvtColor', 'cv2.cvtColor', (['data', 'cv2.COLOR_RGB2HSV'], {}), '(data, cv2.COLOR_RGB2HSV)\n', (5218, 5243), False, 'import cv2\n'), ((5319, 5371), 'numpy.mod', 'np.mod', (['(data[:, :, 0] + (delta * 360 + 360.0))', '(360.0)'], {}), '(data[:, :, 0] + (delta * 360 + 360.0), 360.0)\n', (5325, 5371), True, 'import numpy as np\n'), ((5542, 5579), 'cv2.cvtColor', 'cv2.cvtColor', (['data', 'cv2.COLOR_HSV2RGB'], {}), '(data, cv2.COLOR_HSV2RGB)\n', (5554, 5579), False, 'import cv2\n'), ((4217, 4237), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (4234, 4237), True, 'import numpy as np\n'), ((5396, 5414), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5412, 5414), True, 'import numpy as np\n'), ((5494, 5522), 'numpy.minimum', 'np.minimum', (['data[:, :, 1]', '(1)'], {}), '(data[:, :, 1], 1)\n', (5504, 5522), True, 'import numpy as np\n'), ((5848, 5867), 'numpy.maximum', 'np.maximum', (['data', '(0)'], {}), '(data, 0)\n', (5858, 5867), True, 'import numpy as np\n'), ((3187, 3211), 'numpy.shape', 'np.shape', (['orig_keypoints'], {}), '(orig_keypoints)\n', (3195, 3211), True, 'import numpy as np\n'), ((3730, 3748), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3746, 3748), True, 'import numpy as np\n'), ((4662, 4686), 'numpy.shape', 'np.shape', (['orig_keypoints'], {}), '(orig_keypoints)\n', (4670, 4686), True, 'import numpy as np\n'), ((2196, 2248), 'numpy.maximum', 'np.maximum', (['hms[idx, aa:bb, cc:dd]', 'self.g[a:b, c:d]'], {}), '(hms[idx, aa:bb, cc:dd], self.g[a:b, c:d])\n', (2206, 2248), True, 'import numpy as np\n'), ((3675, 3693), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3691, 3693), True, 'import numpy as np\n'), ((4030, 4092), 'cv2.warpAffine', 'cv2.warpAffine', (['cropped', 'mat', '(self.input_res, self.input_res)'], {}), '(cropped, mat, (self.input_res, self.input_res))\n', (4044, 4092), False, 'import cv2\n'), ((5261, 5279), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5277, 5279), True, 'import numpy as np\n'), ((5626, 5644), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5642, 5644), True, 'import numpy as np\n'), ((5789, 5807), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5805, 5807), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
# from tomo_encoders.misc_utils.feature_maps_vis import view_midplanes
import cupy as cp
import time
import h5py
#from recon_subvol import fbp_filter, recon_patch
# from tomo_encoders import DataFile
import os
fpath = '/data02/MyArchive/AM_part_Xuan/data/mli_L206_HT_650_L3_rec_1x1_uint16.hdf5'
binning = 1
def _rescale_data(data, min_val, max_val):
'''
    Rescales data from the range [min_val, max_val] into [0, 1]. Data can be any numpy or cupy array of any shape.
'''
xp = cp.get_array_module(data) # 'xp' is a standard usage in the community
eps = 1e-12
data = (data - min_val) / (max_val - min_val + eps)
return data
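# Worked example (added comment): with min_val = 2.0 and max_val = 10.0, a voxel
# value of 4.0 maps to (4.0 - 2.0) / (10.0 - 2.0 + eps) ~= 0.25.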
def _find_min_max(vol, sampling_factor):
ss = slice(None, None, sampling_factor)
xp = cp.get_array_module(vol[ss,ss,ss]) # 'xp' is a standard usage in the community
max_val = xp.max(vol[ss,ss,ss])
min_val = xp.min(vol[ss,ss,ss])
return max_val, min_val
def normalize_volume_gpu(vol, chunk_size = 64, normalize_sampling_factor = 1):
'''
    Normalizes volume values into the range [0, 1]
'''
tot_len = vol.shape[0]
nchunks = int(np.ceil(tot_len/chunk_size))
max_val, min_val = _find_min_max(vol, normalize_sampling_factor)
proc_times = []
copy_to_times = []
copy_from_times = []
stream1 = cp.cuda.Stream()
t0 = time.time()
vol_gpu = cp.zeros((chunk_size, vol.shape[1], vol.shape[2]), dtype = cp.float32)
for jj in range(nchunks):
t01 = time.time()
sz = slice(jj*chunk_size, min((jj+1)*chunk_size, tot_len))
## copy to gpu from cpu
with stream1:
vol_gpu.set(vol[sz,...])
stream1.synchronize()
t02 = time.time()
copy_to_times.append(t02-t01)
## process
with stream1:
vol_gpu = _rescale_data(vol_gpu, min_val, max_val)
stream1.synchronize()
t03 = time.time()
proc_times.append(t03-t02)
## copy from gpu to cpu
with stream1:
vol[sz,...] = vol_gpu.get()
stream1.synchronize()
t04 = time.time()
copy_from_times.append(t04 - t03)
print("copy to gpu time per %i size chunk: %.2f ms"%(chunk_size,np.mean(copy_to_times)*1000.0))
print("processing time per %i size chunk: %.2f ms"%(chunk_size,np.mean(proc_times)*1000.0))
print("copy from gpu time per %i size chunk: %.2f ms"%(chunk_size,np.mean(copy_from_times)*1000.0))
print("total time: ", time.time() - t0)
return vol
if len(sys.argv) > 1:
chunk_size = int(sys.argv[1])
else:
chunk_size = 64
if __name__ == "__main__":
vol_shape = (512,1224,1224)
vol = np.random.normal(0.0, 1.0, vol_shape).astype(np.float32)
print("input volume: ", vol.shape)
vol = normalize_volume_gpu(vol, chunk_size = chunk_size, normalize_sampling_factor = 4)
|
[
"numpy.random.normal",
"numpy.mean",
"numpy.ceil",
"cupy.cuda.Stream",
"cupy.get_array_module",
"time.time",
"cupy.zeros"
] |
[((621, 646), 'cupy.get_array_module', 'cp.get_array_module', (['data'], {}), '(data)\n', (640, 646), True, 'import cupy as cp\n'), ((877, 913), 'cupy.get_array_module', 'cp.get_array_module', (['vol[ss, ss, ss]'], {}), '(vol[ss, ss, ss])\n', (896, 913), True, 'import cupy as cp\n'), ((1436, 1452), 'cupy.cuda.Stream', 'cp.cuda.Stream', ([], {}), '()\n', (1450, 1452), True, 'import cupy as cp\n'), ((1462, 1473), 'time.time', 'time.time', ([], {}), '()\n', (1471, 1473), False, 'import time\n'), ((1493, 1561), 'cupy.zeros', 'cp.zeros', (['(chunk_size, vol.shape[1], vol.shape[2])'], {'dtype': 'cp.float32'}), '((chunk_size, vol.shape[1], vol.shape[2]), dtype=cp.float32)\n', (1501, 1561), True, 'import cupy as cp\n'), ((1251, 1280), 'numpy.ceil', 'np.ceil', (['(tot_len / chunk_size)'], {}), '(tot_len / chunk_size)\n', (1258, 1280), True, 'import numpy as np\n'), ((1608, 1619), 'time.time', 'time.time', ([], {}), '()\n', (1617, 1619), False, 'import time\n'), ((1848, 1859), 'time.time', 'time.time', ([], {}), '()\n', (1857, 1859), False, 'import time\n'), ((2056, 2067), 'time.time', 'time.time', ([], {}), '()\n', (2065, 2067), False, 'import time\n'), ((2266, 2277), 'time.time', 'time.time', ([], {}), '()\n', (2275, 2277), False, 'import time\n'), ((2651, 2662), 'time.time', 'time.time', ([], {}), '()\n', (2660, 2662), False, 'import time\n'), ((2843, 2880), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', 'vol_shape'], {}), '(0.0, 1.0, vol_shape)\n', (2859, 2880), True, 'import numpy as np\n'), ((2393, 2415), 'numpy.mean', 'np.mean', (['copy_to_times'], {}), '(copy_to_times)\n', (2400, 2415), True, 'import numpy as np\n'), ((2492, 2511), 'numpy.mean', 'np.mean', (['proc_times'], {}), '(proc_times)\n', (2499, 2511), True, 'import numpy as np\n'), ((2591, 2615), 'numpy.mean', 'np.mean', (['copy_from_times'], {}), '(copy_from_times)\n', (2598, 2615), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Extract MFCC and filterbank features for the Buckeye dataset.
Author: <NAME>
Contact: <EMAIL>
Date: 2019, 2021
"""
from datetime import datetime
from os import path
from tqdm import tqdm
import argparse
import numpy as np
import os
import sys
sys.path.append("..")
from paths import buckeye_datadir
import features
import utils
def extract_features_for_subset(subset, feat_type, output_fn):
"""
Extract specified features for a subset.
The `feat_type` parameter can be "mfcc" or "fbank".
"""
# Speakers for subset
speaker_fn = path.join(
"..", "data", "buckeye_" + subset + "_speakers.list"
)
print("Reading:", speaker_fn)
speakers = set()
with open(speaker_fn) as f:
for line in f:
speakers.add(line.strip())
print("Speakers:", ", ".join(sorted(speakers)))
# Raw features
feat_dict = {}
print("Extracting features per speaker:")
for speaker in sorted(speakers):
if feat_type == "mfcc":
speaker_feat_dict = features.extract_mfcc_dir(
path.join(buckeye_datadir, speaker)
)
elif feat_type == "fbank":
speaker_feat_dict = features.extract_fbank_dir(
path.join(buckeye_datadir, speaker)
)
else:
assert False, "invalid feature type"
for wav_key in speaker_feat_dict:
feat_dict[speaker + "_" + wav_key[3:]] = speaker_feat_dict[wav_key]
# Read voice activity regions
fa_fn = path.join("..", "data", "buckeye_english.wrd")
print("Reading:", fa_fn)
vad_dict = utils.read_vad_from_fa(fa_fn)
# Only keep voice active regions
print("Extracting VAD regions:")
feat_dict = features.extract_vad(feat_dict, vad_dict)
# Perform per speaker mean and variance normalisation
print("Per speaker mean and variance normalisation:")
feat_dict = features.speaker_mvn(feat_dict)
# Write output
print("Writing:", output_fn)
np.savez_compressed(output_fn, **feat_dict)
def main():
print(datetime.now())
# RAW FEATURES
# Extract MFCCs for the different sets
mfcc_dir = path.join("mfcc", "buckeye")
for subset in ["devpart1", "devpart2", "zs"]:
if not path.isdir(mfcc_dir):
os.makedirs(mfcc_dir)
output_fn = path.join(mfcc_dir, subset + ".dd.npz")
if not path.isfile(output_fn):
print("Extracting MFCCs:", subset)
extract_features_for_subset(subset, "mfcc", output_fn)
else:
print("Using existing file:", output_fn)
# # Extract filterbanks for the different sets
# fbank_dir = path.join("fbank", "buckeye")
# for subset in ["devpart1", "devpart2", "zs"]:
# if not path.isdir(fbank_dir):
# os.makedirs(fbank_dir)
# output_fn = path.join(fbank_dir, subset + ".npz")
# if not path.isfile(output_fn):
# print("Extracting filterbanks:", subset)
# extract_features_for_subset(subset, "fbank", output_fn)
# else:
# print("Using existing file:", output_fn)
# GROUND TRUTH WORD SEGMENTS
# Create a ground truth word list of at least 50 frames and 5 characters
fa_fn = path.join("..", "data", "buckeye_english.wrd")
list_dir = "lists"
if not path.isdir(list_dir):
os.makedirs(list_dir)
list_fn = path.join(list_dir, "buckeye.samediff.list")
if not path.isfile(list_fn):
utils.write_samediff_words(fa_fn, list_fn)
else:
print("Using existing file:", list_fn)
# Extract word segments from the MFCC NumPy archives
for subset in ["devpart1", "devpart2", "zs"]:
input_npz_fn = path.join(mfcc_dir, subset + ".dd.npz")
output_npz_fn = path.join(mfcc_dir, subset + ".samediff.dd.npz")
if not path.isfile(output_npz_fn):
print("Extracting MFCCs for same-different word tokens:", subset)
utils.segments_from_npz(input_npz_fn, list_fn, output_npz_fn)
else:
print("Using existing file:", output_npz_fn)
print(datetime.now())
if __name__ == "__main__":
main()
|
[
"features.extract_vad",
"features.speaker_mvn",
"os.makedirs",
"os.path.join",
"utils.segments_from_npz",
"utils.read_vad_from_fa",
"datetime.datetime.now",
"os.path.isfile",
"os.path.isdir",
"utils.write_samediff_words",
"numpy.savez_compressed",
"sys.path.append"
] |
[((273, 294), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (288, 294), False, 'import sys\n'), ((586, 649), 'os.path.join', 'path.join', (['""".."""', '"""data"""', "('buckeye_' + subset + '_speakers.list')"], {}), "('..', 'data', 'buckeye_' + subset + '_speakers.list')\n", (595, 649), False, 'from os import path\n'), ((1549, 1595), 'os.path.join', 'path.join', (['""".."""', '"""data"""', '"""buckeye_english.wrd"""'], {}), "('..', 'data', 'buckeye_english.wrd')\n", (1558, 1595), False, 'from os import path\n'), ((1640, 1669), 'utils.read_vad_from_fa', 'utils.read_vad_from_fa', (['fa_fn'], {}), '(fa_fn)\n', (1662, 1669), False, 'import utils\n'), ((1761, 1802), 'features.extract_vad', 'features.extract_vad', (['feat_dict', 'vad_dict'], {}), '(feat_dict, vad_dict)\n', (1781, 1802), False, 'import features\n'), ((1936, 1967), 'features.speaker_mvn', 'features.speaker_mvn', (['feat_dict'], {}), '(feat_dict)\n', (1956, 1967), False, 'import features\n'), ((2025, 2068), 'numpy.savez_compressed', 'np.savez_compressed', (['output_fn'], {}), '(output_fn, **feat_dict)\n', (2044, 2068), True, 'import numpy as np\n'), ((2189, 2217), 'os.path.join', 'path.join', (['"""mfcc"""', '"""buckeye"""'], {}), "('mfcc', 'buckeye')\n", (2198, 2217), False, 'from os import path\n'), ((3270, 3316), 'os.path.join', 'path.join', (['""".."""', '"""data"""', '"""buckeye_english.wrd"""'], {}), "('..', 'data', 'buckeye_english.wrd')\n", (3279, 3316), False, 'from os import path\n'), ((3417, 3461), 'os.path.join', 'path.join', (['list_dir', '"""buckeye.samediff.list"""'], {}), "(list_dir, 'buckeye.samediff.list')\n", (3426, 3461), False, 'from os import path\n'), ((2094, 2108), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2106, 2108), False, 'from datetime import datetime\n'), ((2359, 2398), 'os.path.join', 'path.join', (['mfcc_dir', "(subset + '.dd.npz')"], {}), "(mfcc_dir, subset + '.dd.npz')\n", (2368, 2398), False, 'from os import path\n'), ((3351, 3371), 'os.path.isdir', 'path.isdir', (['list_dir'], {}), '(list_dir)\n', (3361, 3371), False, 'from os import path\n'), ((3381, 3402), 'os.makedirs', 'os.makedirs', (['list_dir'], {}), '(list_dir)\n', (3392, 3402), False, 'import os\n'), ((3473, 3493), 'os.path.isfile', 'path.isfile', (['list_fn'], {}), '(list_fn)\n', (3484, 3493), False, 'from os import path\n'), ((3503, 3545), 'utils.write_samediff_words', 'utils.write_samediff_words', (['fa_fn', 'list_fn'], {}), '(fa_fn, list_fn)\n', (3529, 3545), False, 'import utils\n'), ((3734, 3773), 'os.path.join', 'path.join', (['mfcc_dir', "(subset + '.dd.npz')"], {}), "(mfcc_dir, subset + '.dd.npz')\n", (3743, 3773), False, 'from os import path\n'), ((3798, 3846), 'os.path.join', 'path.join', (['mfcc_dir', "(subset + '.samediff.dd.npz')"], {}), "(mfcc_dir, subset + '.samediff.dd.npz')\n", (3807, 3846), False, 'from os import path\n'), ((4124, 4138), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4136, 4138), False, 'from datetime import datetime\n'), ((2283, 2303), 'os.path.isdir', 'path.isdir', (['mfcc_dir'], {}), '(mfcc_dir)\n', (2293, 2303), False, 'from os import path\n'), ((2317, 2338), 'os.makedirs', 'os.makedirs', (['mfcc_dir'], {}), '(mfcc_dir)\n', (2328, 2338), False, 'import os\n'), ((2414, 2436), 'os.path.isfile', 'path.isfile', (['output_fn'], {}), '(output_fn)\n', (2425, 2436), False, 'from os import path\n'), ((3862, 3888), 'os.path.isfile', 'path.isfile', (['output_npz_fn'], {}), '(output_npz_fn)\n', (3873, 3888), False, 'from os import path\n'), ((3980, 4041), 
'utils.segments_from_npz', 'utils.segments_from_npz', (['input_npz_fn', 'list_fn', 'output_npz_fn'], {}), '(input_npz_fn, list_fn, output_npz_fn)\n', (4003, 4041), False, 'import utils\n'), ((1098, 1133), 'os.path.join', 'path.join', (['buckeye_datadir', 'speaker'], {}), '(buckeye_datadir, speaker)\n', (1107, 1133), False, 'from os import path\n'), ((1263, 1298), 'os.path.join', 'path.join', (['buckeye_datadir', 'speaker'], {}), '(buckeye_datadir, speaker)\n', (1272, 1298), False, 'from os import path\n')]
|
# Mini-batch learning
import numpy as np
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) =\
load_mnist(normalize=True, one_hot_label=True)
print(x_train.shape)
print(t_train.shape)
train_size = x_train.shape[0]
batch_size = 10
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
print(batch_mask)
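# A small sanity check on the sampled mini-batch; with the MNIST loader above
# (flattened, one-hot labels), the expected shapes are (10, 784) for inputs
# and (10, 10) for labels.
print(x_batch.shape)  # (10, 784)
print(t_batch.shape)  # (10, 10)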
|
[
"numpy.random.choice",
"dataset.mnist.load_mnist"
] |
[((110, 156), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'normalize': '(True)', 'one_hot_label': '(True)'}), '(normalize=True, one_hot_label=True)\n', (120, 156), False, 'from dataset.mnist import load_mnist\n'), ((260, 300), 'numpy.random.choice', 'np.random.choice', (['train_size', 'batch_size'], {}), '(train_size, batch_size)\n', (276, 300), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from PIL import Image
import ThinPlateSpline as TPS
# 2048x2048.jpg size: 2048 x 2048
def on_press(event):
p = np.array([
[693.55, 531.26],
[1069.85, 1243.04],
[1243.74, 1238.69],
[472.82, 664.85],
[552.50, 1460.07],
[1021.03, 368.02],
[1260.78, 1571.90],
[93.16, 911.26],
[234.85, 914.14],
[383.34, 1140.97],
[375.46, 853.36],
[256.73, 597.61],
[338.32, 502.28],
[754.67, 337.95],
[1120.42, 1797.99],
[1521.97, 1655.66],
[1371.15, 1832.87],
[1522.78, 1315.94],
[1116.38, 754.82],
[1165.72, 1162.44],
[1024.00, 1024.00]])
v = np.array([
[121.52, 25.00],
[142.31, -10.74],
[150.81, -10.63],
[109.60, 18.24],
[113.58, -22.72],
[139.92, 34.87],
[153.25, -28.63],
[45.29, -25.83],
[95.26, 5.30],
[105.86, -6.01],
[104.90, 8.46],
[96.95, 16.70],
[96.81, 27.64],
[122.71, 37.11],
[147.14, -43.12],
[172.68, -34.63],
[167.75, -42.28],
[166.68, -14.63],
[144.68, 13.25],
[146.93, -6.96],
[141.01, 0.09]])
p = torch.Tensor(p.reshape([1, p.shape[0], 2]))
v = torch.Tensor(v.reshape([1, v.shape[0], 2]))
T = TPS.solve_system(p, v)
point = np.array([event.xdata, event.ydata])
point_T = TPS.point_transform(point, T, p)
print("Longitude:", point_T[0, 0, 0])
print("Latitude:", point_T[0, 1, 0])
if __name__ == '__main__':
print("It is suggested that clicking on the image close to the middle position will be more accurate.")
fig = plt.figure()
img = Image.open('2048x2048.jpg')
plt.imshow(img, animated= True)
fig.canvas.mpl_connect('button_press_event', on_press)
plt.show()
|
[
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"numpy.array",
"matplotlib.pyplot.figure",
"ThinPlateSpline.solve_system",
"ThinPlateSpline.point_transform",
"matplotlib.pyplot.show"
] |
[((224, 651), 'numpy.array', 'np.array', (['[[693.55, 531.26], [1069.85, 1243.04], [1243.74, 1238.69], [472.82, 664.85],\n [552.5, 1460.07], [1021.03, 368.02], [1260.78, 1571.9], [93.16, 911.26],\n [234.85, 914.14], [383.34, 1140.97], [375.46, 853.36], [256.73, 597.61],\n [338.32, 502.28], [754.67, 337.95], [1120.42, 1797.99], [1521.97, \n 1655.66], [1371.15, 1832.87], [1522.78, 1315.94], [1116.38, 754.82], [\n 1165.72, 1162.44], [1024.0, 1024.0]]'], {}), '([[693.55, 531.26], [1069.85, 1243.04], [1243.74, 1238.69], [472.82,\n 664.85], [552.5, 1460.07], [1021.03, 368.02], [1260.78, 1571.9], [93.16,\n 911.26], [234.85, 914.14], [383.34, 1140.97], [375.46, 853.36], [256.73,\n 597.61], [338.32, 502.28], [754.67, 337.95], [1120.42, 1797.99], [\n 1521.97, 1655.66], [1371.15, 1832.87], [1522.78, 1315.94], [1116.38, \n 754.82], [1165.72, 1162.44], [1024.0, 1024.0]])\n', (232, 651), True, 'import numpy as np\n'), ((812, 1200), 'numpy.array', 'np.array', (['[[121.52, 25.0], [142.31, -10.74], [150.81, -10.63], [109.6, 18.24], [\n 113.58, -22.72], [139.92, 34.87], [153.25, -28.63], [45.29, -25.83], [\n 95.26, 5.3], [105.86, -6.01], [104.9, 8.46], [96.95, 16.7], [96.81, \n 27.64], [122.71, 37.11], [147.14, -43.12], [172.68, -34.63], [167.75, -\n 42.28], [166.68, -14.63], [144.68, 13.25], [146.93, -6.96], [141.01, 0.09]]'], {}), '([[121.52, 25.0], [142.31, -10.74], [150.81, -10.63], [109.6, 18.24\n ], [113.58, -22.72], [139.92, 34.87], [153.25, -28.63], [45.29, -25.83],\n [95.26, 5.3], [105.86, -6.01], [104.9, 8.46], [96.95, 16.7], [96.81, \n 27.64], [122.71, 37.11], [147.14, -43.12], [172.68, -34.63], [167.75, -\n 42.28], [166.68, -14.63], [144.68, 13.25], [146.93, -6.96], [141.01, 0.09]]\n )\n', (820, 1200), True, 'import numpy as np\n'), ((1465, 1487), 'ThinPlateSpline.solve_system', 'TPS.solve_system', (['p', 'v'], {}), '(p, v)\n', (1481, 1487), True, 'import ThinPlateSpline as TPS\n'), ((1500, 1536), 'numpy.array', 'np.array', (['[event.xdata, event.ydata]'], {}), '([event.xdata, event.ydata])\n', (1508, 1536), True, 'import numpy as np\n'), ((1551, 1583), 'ThinPlateSpline.point_transform', 'TPS.point_transform', (['point', 'T', 'p'], {}), '(point, T, p)\n', (1570, 1583), True, 'import ThinPlateSpline as TPS\n'), ((1813, 1825), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1823, 1825), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1863), 'PIL.Image.open', 'Image.open', (['"""2048x2048.jpg"""'], {}), "('2048x2048.jpg')\n", (1846, 1863), False, 'from PIL import Image\n'), ((1868, 1898), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'animated': '(True)'}), '(img, animated=True)\n', (1878, 1898), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1973), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1971, 1973), True, 'import matplotlib.pyplot as plt\n')]
|
import collections
import os
from itertools import product
from pathlib import Path
from typing import Dict, Iterator, List, NamedTuple, Optional, OrderedDict, Sequence, Tuple, Union
import numpy as np
import xarray as xr
from tqdm import tqdm
from bioimageio.core import image_helper
from bioimageio.core import load_resource_description
from bioimageio.core.prediction_pipeline import PredictionPipeline, create_prediction_pipeline
from bioimageio.core.resource_io.nodes import ImplicitOutputShape, Model, ResourceDescription
from bioimageio.spec.shared import raw_nodes
from bioimageio.spec.shared.raw_nodes import ResourceDescription as RawResourceDescription
def _apply_crop(data, crop):
crop = tuple(crop[ax] for ax in data.dims)
return data[crop]
class TileDef(NamedTuple):
outer: Dict[str, slice]
inner: Dict[str, slice]
local: Dict[str, slice]
def get_tiling(
shape: Sequence[int], tile_shape: Dict[str, int], halo: Dict[str, int], input_axes: Sequence[str]
) -> Iterator[TileDef]:
assert len(shape) == len(input_axes)
shape_ = [sh for sh, ax in zip(shape, input_axes) if ax in "xyz"]
spatial_axes = [ax for ax in input_axes if ax in "xyz"]
inner_tile_shape_ = [tile_shape[ax] - 2 * halo[ax] for ax in spatial_axes]
halo_ = [halo[ax] for ax in spatial_axes]
assert len(shape_) == len(inner_tile_shape_) == len(spatial_axes) == len(halo_)
ranges = [range(sh // tsh if sh % tsh == 0 else sh // tsh + 1) for sh, tsh in zip(shape_, inner_tile_shape_)]
start_points = product(*ranges)
for start_point in start_points:
positions = [sp * tsh for sp, tsh in zip(start_point, inner_tile_shape_)]
inner_tile = {
ax: slice(pos, min(pos + tsh, sh))
for ax, pos, tsh, sh in zip(spatial_axes, positions, inner_tile_shape_, shape_)
}
inner_tile["b"] = slice(None)
inner_tile["c"] = slice(None)
outer_tile = {
ax: slice(max(pos - ha, 0), min(pos + tsh + ha, sh))
for ax, pos, tsh, sh, ha in zip(spatial_axes, positions, inner_tile_shape_, shape_, halo_)
}
outer_tile["b"] = slice(None)
outer_tile["c"] = slice(None)
local_tile = {
ax: slice(
inner_tile[ax].start - outer_tile[ax].start,
-(outer_tile[ax].stop - inner_tile[ax].stop) if outer_tile[ax].stop != inner_tile[ax].stop else None,
)
for ax in spatial_axes
}
local_tile["b"] = slice(None)
local_tile["c"] = slice(None)
yield TileDef(outer_tile, inner_tile, local_tile)
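# A hedged worked example of what get_tiling yields, assuming axes "bcyx",
# shape (1, 1, 256, 256), tile_shape {"x": 128, "y": 128} and halo {"x": 16, "y": 16}:
# the inner tile is 128 - 2*16 = 96 per spatial axis, so 3 x 3 = 9 tiles are
# produced. The first tile is
#   outer = {"y": slice(0, 112), "x": slice(0, 112), "b"/"c": slice(None)}
#   inner = {"y": slice(0, 96),  "x": slice(0, 96),  "b"/"c": slice(None)}
#   local = {"y": slice(0, -16), "x": slice(0, -16), "b"/"c": slice(None)}
# i.e. `outer` is the region read from the input (inner tile plus halo, clipped
# to the image), `inner` is where the tile's result lands in the output, and
# `local` crops the halo off the per-tile prediction.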
def _predict_with_tiling_impl(
prediction_pipeline: PredictionPipeline,
inputs: Sequence[xr.DataArray],
outputs: Sequence[xr.DataArray],
tile_shapes: Sequence[Dict[str, int]],
halos: Sequence[Dict[str, int]],
verbose: bool = False,
):
if len(inputs) > 1:
raise NotImplementedError("Tiling with multiple inputs not implemented yet")
if len(outputs) > 1:
raise NotImplementedError("Tiling with multiple outputs not implemented yet")
assert len(tile_shapes) == len(outputs)
assert len(halos) == len(outputs)
input_ = inputs[0]
output = outputs[0]
tile_shape = tile_shapes[0]
halo = halos[0]
tiles = get_tiling(shape=input_.shape, tile_shape=tile_shape, halo=halo, input_axes=input_.dims)
assert all(isinstance(ax, str) for ax in input_.dims)
input_axes: Tuple[str, ...] = input_.dims # noqa
def load_tile(tile):
inp = input_[tile]
# whether to pad on the right or left of the dim for the spatial dims
# + placeholders for batch and axis dimension, where we don't pad
pad_right = [tile[ax].start == 0 if ax in "xyz" else None for ax in input_axes]
return inp, pad_right
if verbose:
shape = {ax: sh for ax, sh in zip(prediction_pipeline.input_specs[0].axes, input_.shape)}
n_tiles = int(np.prod([np.ceil(float(shape[ax]) / (tsh - 2 * halo[ax])) for ax, tsh in tile_shape.items()]))
tiles = tqdm(tiles, total=n_tiles, desc="prediction with tiling")
# we need to use padded prediction for the individual tiles in case the
# border tiles don't match the requested tile shape
padding = {ax: tile_shape[ax] for ax in input_axes if ax in "xyz"}
padding["mode"] = "fixed"
for outer_tile, inner_tile, local_tile in tiles:
inp, pad_right = load_tile(outer_tile)
out = predict_with_padding(prediction_pipeline, inp, padding, pad_right)
assert len(out) == 1
out = out[0]
output[inner_tile] = out[local_tile]
#
# prediction functions
#
def predict(
prediction_pipeline: PredictionPipeline,
inputs: Union[xr.DataArray, List[xr.DataArray], Tuple[xr.DataArray]],
) -> List[xr.DataArray]:
"""Run prediction for a single set of input(s) with a bioimage.io model
Args:
prediction_pipeline: the prediction pipeline for the input model.
inputs: the input(s) for this model represented as xarray data.
"""
if not isinstance(inputs, (tuple, list)):
inputs = [inputs]
assert len(inputs) == len(prediction_pipeline.input_specs)
tagged_data = [
xr.DataArray(ipt, dims=ipt_spec.axes) for ipt, ipt_spec in zip(inputs, prediction_pipeline.input_specs)
]
return prediction_pipeline.forward(*tagged_data)
def _parse_padding(padding, input_specs):
if padding is None: # no padding
return padding
if len(input_specs) > 1:
raise NotImplementedError("Padding for multiple inputs not yet implemented")
input_spec = input_specs[0]
pad_keys = tuple(input_spec.axes) + ("mode",)
def check_padding(padding):
assert all(k in pad_keys for k in padding.keys())
if isinstance(padding, dict): # pre-defined padding
check_padding(padding)
elif isinstance(padding, bool): # determine padding from spec
if padding:
axes = input_spec.axes
shape = input_spec.shape
if isinstance(shape, list): # fixed padding
padding = {ax: sh for ax, sh in zip(axes, shape) if ax in "xyz"}
padding["mode"] = "fixed"
else: # dynamic padding
step = shape.step
padding = {ax: st for ax, st in zip(axes, step) if ax in "xyz"}
padding["mode"] = "dynamic"
check_padding(padding)
else: # no padding
padding = None
else:
raise ValueError(f"Invalid argument for padding: {padding}")
return padding
def predict_with_padding(
prediction_pipeline: PredictionPipeline,
inputs: Union[xr.DataArray, List[xr.DataArray], Tuple[xr.DataArray]],
padding: Union[bool, Dict[str, int]] = True,
pad_right: bool = True,
) -> List[xr.DataArray]:
"""Run prediction with padding for a single set of input(s) with a bioimage.io model.
Args:
prediction_pipeline: the prediction pipeline for the input model.
inputs: the input(s) for this model represented as xarray data.
padding: the padding settings. Pass True to derive from the model spec.
        pad_right: whether to apply padding to the right or to the left of the input.
"""
if not padding:
raise ValueError
assert len(inputs) == len(prediction_pipeline.input_specs)
output_spec = prediction_pipeline.output_specs[0]
if hasattr(output_spec.shape, "scale"):
scale = dict(zip(output_spec.axes, output_spec.shape.scale))
offset = dict(zip(output_spec.axes, output_spec.shape.offset))
network_resizes = any(sc != 1 for ax, sc in scale.items() if ax in "xyz") or any(
off != 0 for ax, off in offset.items() if ax in "xyz"
)
else:
network_resizes = False
padding = _parse_padding(padding, prediction_pipeline.input_specs)
if not isinstance(inputs, (tuple, list)):
inputs = [inputs]
if not isinstance(padding, (tuple, list)):
padding = [padding]
assert len(padding) == len(prediction_pipeline.input_specs)
inputs, crops = zip(
*[
image_helper.pad(inp, spec.axes, p, pad_right=pad_right)
for inp, spec, p in zip(inputs, prediction_pipeline.input_specs, padding)
]
)
result = predict(prediction_pipeline, inputs)
if network_resizes:
crops = tuple(
{
ax: slice(int(crp.start * scale[ax] + 2 * offset[ax]), int(crp.stop * scale[ax] + 2 * offset[ax]))
if ax in "xyz"
else crp
for ax, crp in crop.items()
}
for crop in crops
)
return [_apply_crop(res, crop) for res, crop in zip(result, crops)]
# simple heuristic to determine suitable shape from min and step
def _determine_shape(min_shape, step, axes):
is3d = "z" in axes
min_len = 64 if is3d else 256
shape = []
for ax, min_ax, step_ax in zip(axes, min_shape, step):
if ax in "zyx" and step_ax > 0:
len_ax = min_ax
while len_ax < min_len:
len_ax += step_ax
shape.append(len_ax)
else:
shape.append(min_ax)
return shape
def _parse_tiling(tiling, input_specs, output_specs):
if tiling is None: # no tiling
return tiling
if len(input_specs) > 1:
raise NotImplementedError("Tiling for multiple inputs not yet implemented")
if len(output_specs) > 1:
raise NotImplementedError("Tiling for multiple outputs not yet implemented")
input_spec = input_specs[0]
output_spec = output_specs[0]
axes = input_spec.axes
def check_tiling(tiling):
assert "halo" in tiling and "tile" in tiling
spatial_axes = [ax for ax in axes if ax in "xyz"]
halo = tiling["halo"]
tile = tiling["tile"]
assert all(halo.get(ax, 0) >= 0 for ax in spatial_axes)
assert all(tile.get(ax, 0) > 0 for ax in spatial_axes)
if isinstance(tiling, dict):
check_tiling(tiling)
elif isinstance(tiling, bool):
if tiling:
# NOTE we assume here that shape in input and output are the same
# for different input and output shapes, we should actually tile in the
# output space and then request the corresponding input tiles
# so we would need to apply the output scale and offset to the
# input shape to compute the tile size and halo here
shape = input_spec.shape
if not isinstance(shape, list):
shape = _determine_shape(shape.min, shape.step, axes)
assert isinstance(shape, list)
assert len(shape) == len(axes)
halo = output_spec.halo
if halo is None:
halo = [0] * len(axes)
assert len(halo) == len(axes)
tiling = {
"halo": {ax: ha for ax, ha in zip(axes, halo) if ax in "xyz"},
"tile": {ax: sh for ax, sh in zip(axes, shape) if ax in "xyz"},
}
check_tiling(tiling)
else:
tiling = None
else:
raise ValueError(f"Invalid argument for tiling: {tiling}")
return tiling
def predict_with_tiling(
prediction_pipeline: PredictionPipeline,
inputs: Union[xr.DataArray, List[xr.DataArray], Tuple[xr.DataArray]],
tiling: Union[bool, Dict[str, Dict[str, int]]] = True,
verbose: bool = False,
) -> List[xr.DataArray]:
"""Run prediction with tiling for a single set of input(s) with a bioimage.io model.
Args:
prediction_pipeline: the prediction pipeline for the input model.
inputs: the input(s) for this model represented as xarray data.
tiling: the tiling settings. Pass True to derive from the model spec.
verbose: whether to print the prediction progress.
"""
if not tiling:
raise ValueError
assert len(inputs) == len(prediction_pipeline.input_specs)
tiling = _parse_tiling(tiling, prediction_pipeline.input_specs, prediction_pipeline.output_specs)
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
named_inputs: OrderedDict[str, xr.DataArray] = collections.OrderedDict(
**{
ipt_spec.name: xr.DataArray(ipt_data, dims=tuple(ipt_spec.axes))
for ipt_data, ipt_spec in zip(inputs, prediction_pipeline.input_specs)
}
)
outputs = []
for output_spec in prediction_pipeline.output_specs:
if isinstance(output_spec.shape, ImplicitOutputShape):
scale = dict(zip(output_spec.axes, output_spec.shape.scale))
offset = dict(zip(output_spec.axes, output_spec.shape.offset))
if any(sc != 1 for ax, sc in scale.items() if ax in "xyz") or any(
off != 0 for ax, off in offset.items() if ax in "xyz"
):
raise NotImplementedError("Tiling with a different output shape is not yet supported")
ref_input = named_inputs[output_spec.shape.reference_tensor]
ref_input_shape = dict(zip(ref_input.dims, ref_input.shape))
output_shape = tuple(int(scale[ax] * ref_input_shape[ax] + 2 * offset[ax]) for ax in output_spec.axes)
else:
if len(inputs) > 1:
raise NotImplementedError
input_spec = prediction_pipeline.input_specs[0]
if input_spec.axes != output_spec.axes:
raise NotImplementedError("Tiling with a different output shape is not yet supported")
out_axes = output_spec.axes
fixed_shape = tuple(output_spec.shape)
if not all(fixed_shape[out_axes.index(ax)] == tile_shape for ax, tile_shape in tiling["tile"].items()):
raise NotImplementedError("Tiling with a different output shape is not yet supported")
output_shape = list(inputs[0].shape)
chan_id = out_axes.index("c")
if fixed_shape[chan_id] != output_shape[chan_id]:
output_shape[chan_id] = fixed_shape[chan_id]
output_shape = tuple(output_shape)
outputs.append(xr.DataArray(np.zeros(output_shape, dtype=output_spec.data_type), dims=tuple(output_spec.axes)))
_predict_with_tiling_impl(
prediction_pipeline,
list(named_inputs.values()),
outputs,
tile_shapes=[tiling["tile"]], # todo: update tiling for multiple inputs/outputs
halos=[tiling["halo"]],
verbose=verbose,
)
return outputs
def _predict_sample(prediction_pipeline, inputs, outputs, padding, tiling):
if padding and tiling:
raise ValueError("Only one of padding or tiling is supported")
input_data = image_helper.load_tensors(inputs, prediction_pipeline.input_specs)
if padding is not None:
result = predict_with_padding(prediction_pipeline, input_data, padding)
elif tiling is not None:
result = predict_with_tiling(prediction_pipeline, input_data, tiling)
else:
result = predict(prediction_pipeline, input_data)
assert isinstance(result, list)
assert len(result) == len(outputs)
for res, out in zip(result, outputs):
image_helper.save_image(out, res)
def predict_image(
model_rdf: Union[RawResourceDescription, ResourceDescription, os.PathLike, str, dict, raw_nodes.URI],
inputs: Union[Tuple[Path, ...], List[Path], Path],
outputs: Union[Tuple[Path, ...], List[Path], Path],
padding: Optional[Union[bool, Dict[str, int]]] = None,
tiling: Optional[Union[bool, Dict[str, Dict[str, int]]]] = None,
weight_format: Optional[str] = None,
devices: Optional[List[str]] = None,
verbose: bool = False,
):
"""Run prediction for a single set of input image(s) with a bioimage.io model.
Args:
model_rdf: the bioimageio model.
inputs: the filepaths for the input images.
        outputs: the filepaths for saving the output images.
padding: the padding settings for prediction. By default no padding is used.
tiling: the tiling settings for prediction. By default no tiling is used.
weight_format: the weight format to use for predictions.
devices: the devices to use for prediction.
verbose: run prediction in verbose mode.
"""
if not isinstance(inputs, (tuple, list)):
inputs = [inputs]
if not isinstance(outputs, (tuple, list)):
outputs = [outputs]
model = load_resource_description(model_rdf)
assert isinstance(model, Model)
if len(model.inputs) != len(inputs):
raise ValueError
if len(model.outputs) != len(outputs):
raise ValueError
with create_prediction_pipeline(
bioimageio_model=model, weight_format=weight_format, devices=devices
) as prediction_pipeline:
_predict_sample(prediction_pipeline, inputs, outputs, padding, tiling)
def predict_images(
model_rdf: Union[RawResourceDescription, ResourceDescription, os.PathLike, str, dict, raw_nodes.URI],
inputs: Sequence[Union[Tuple[Path, ...], List[Path], Path]],
outputs: Sequence[Union[Tuple[Path, ...], List[Path], Path]],
padding: Optional[Union[bool, Dict[str, int]]] = None,
tiling: Optional[Union[bool, Dict[str, Dict[str, int]]]] = None,
weight_format: Optional[str] = None,
devices: Optional[List[str]] = None,
verbose: bool = False,
):
"""Predict multiple input images with a bioimage.io model.
Args:
model_rdf: the bioimageio model.
inputs: the filepaths for the input images.
        outputs: the filepaths for saving the output images.
padding: the padding settings for prediction. By default no padding is used.
tiling: the tiling settings for prediction. By default no tiling is used.
weight_format: the weight format to use for predictions.
devices: the devices to use for prediction.
verbose: run prediction in verbose mode.
"""
model = load_resource_description(model_rdf)
assert isinstance(model, Model)
with create_prediction_pipeline(
bioimageio_model=model, weight_format=weight_format, devices=devices
) as prediction_pipeline:
prog = zip(inputs, outputs)
if verbose:
prog = tqdm(prog, total=len(inputs))
for inp, outp in prog:
if not isinstance(inp, (tuple, list)):
inp = [inp]
if not isinstance(outp, (tuple, list)):
outp = [outp]
_predict_sample(prediction_pipeline, inp, outp, padding, tiling)
|
[
"bioimageio.core.load_resource_description",
"bioimageio.core.image_helper.load_tensors",
"bioimageio.core.prediction_pipeline.create_prediction_pipeline",
"itertools.product",
"tqdm.tqdm",
"bioimageio.core.image_helper.save_image",
"numpy.zeros",
"xarray.DataArray",
"bioimageio.core.image_helper.pad"
] |
[((1538, 1554), 'itertools.product', 'product', (['*ranges'], {}), '(*ranges)\n', (1545, 1554), False, 'from itertools import product\n'), ((14736, 14802), 'bioimageio.core.image_helper.load_tensors', 'image_helper.load_tensors', (['inputs', 'prediction_pipeline.input_specs'], {}), '(inputs, prediction_pipeline.input_specs)\n', (14761, 14802), False, 'from bioimageio.core import image_helper\n'), ((16473, 16509), 'bioimageio.core.load_resource_description', 'load_resource_description', (['model_rdf'], {}), '(model_rdf)\n', (16498, 16509), False, 'from bioimageio.core import load_resource_description\n'), ((17984, 18020), 'bioimageio.core.load_resource_description', 'load_resource_description', (['model_rdf'], {}), '(model_rdf)\n', (18009, 18020), False, 'from bioimageio.core import load_resource_description\n'), ((4073, 4130), 'tqdm.tqdm', 'tqdm', (['tiles'], {'total': 'n_tiles', 'desc': '"""prediction with tiling"""'}), "(tiles, total=n_tiles, desc='prediction with tiling')\n", (4077, 4130), False, 'from tqdm import tqdm\n'), ((5234, 5271), 'xarray.DataArray', 'xr.DataArray', (['ipt'], {'dims': 'ipt_spec.axes'}), '(ipt, dims=ipt_spec.axes)\n', (5246, 5271), True, 'import xarray as xr\n'), ((15212, 15245), 'bioimageio.core.image_helper.save_image', 'image_helper.save_image', (['out', 'res'], {}), '(out, res)\n', (15235, 15245), False, 'from bioimageio.core import image_helper\n'), ((16690, 16791), 'bioimageio.core.prediction_pipeline.create_prediction_pipeline', 'create_prediction_pipeline', ([], {'bioimageio_model': 'model', 'weight_format': 'weight_format', 'devices': 'devices'}), '(bioimageio_model=model, weight_format=\n weight_format, devices=devices)\n', (16716, 16791), False, 'from bioimageio.core.prediction_pipeline import PredictionPipeline, create_prediction_pipeline\n'), ((18067, 18168), 'bioimageio.core.prediction_pipeline.create_prediction_pipeline', 'create_prediction_pipeline', ([], {'bioimageio_model': 'model', 'weight_format': 'weight_format', 'devices': 'devices'}), '(bioimageio_model=model, weight_format=\n weight_format, devices=devices)\n', (18093, 18168), False, 'from bioimageio.core.prediction_pipeline import PredictionPipeline, create_prediction_pipeline\n'), ((8153, 8209), 'bioimageio.core.image_helper.pad', 'image_helper.pad', (['inp', 'spec.axes', 'p'], {'pad_right': 'pad_right'}), '(inp, spec.axes, p, pad_right=pad_right)\n', (8169, 8209), False, 'from bioimageio.core import image_helper\n'), ((14171, 14222), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': 'output_spec.data_type'}), '(output_shape, dtype=output_spec.data_type)\n', (14179, 14222), True, 'import numpy as np\n')]
|
import random
import numpy as np
def set_seed(random_state: int = 42) -> None:
"""Function fixes random state to ensure results are reproducible"""
np.random.seed(random_state)
random.seed(random_state)
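# A minimal sketch of checking reproducibility with set_seed; the seed value
# and array size below are arbitrary assumptions.
if __name__ == "__main__":
    set_seed(0)
    first_draw = np.random.rand(3)
    set_seed(0)
    second_draw = np.random.rand(3)
    # Re-seeding with the same value should reproduce the identical draw.
    assert np.allclose(first_draw, second_draw)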
|
[
"numpy.random.seed",
"random.seed"
] |
[((159, 187), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (173, 187), True, 'import numpy as np\n'), ((192, 217), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (203, 217), False, 'import random\n')]
|
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from anim_utils.animation_data import SkeletonBuilder, SKELETON_NODE_TYPE_END_SITE, LEN_EULER, LEN_ROOT,\
LEN_QUAT
import numpy as np
from transformations import euler_matrix, euler_from_matrix
from .motion_plane import Plane
from anim_utils.animation_data.utils import pose_orientation_euler, check_quat, convert_quat_frame_value_to_array,\
euler_to_quaternion, convert_euler_frames_to_quaternion_frames
from anim_utils.utilities.custom_math import angle_between_vectors
class BVHAnalyzer():
def __init__(self, bvhreader):
self.skeleton = SkeletonBuilder().load_from_bvh(bvhreader)
self.bvhreader = bvhreader
self.quat_frames = []
self.euler_frames = bvhreader.frames
self.n_frames = len(self.euler_frames)
self.body_plane = None
def get_global_pos(self, joint_name, frame_index):
joint_chain = self.get_joint_chain(joint_name)
global_trans = np.eye(4)
global_trans[:3, 3] = self.euler_frames[frame_index][:LEN_ROOT]
for joint in joint_chain:
offset = joint.offset
if 'EndSite' in joint.node_name: # end site joint
rot_mat = np.eye(4)
rot_mat[:3, 3] = offset
else:
rot_angles_euler = self.get_relative_orientation_euler(joint.node_name, frame_index)
rot_angles_rad = np.deg2rad(rot_angles_euler)
rot_mat = euler_matrix(rot_angles_rad[0],
rot_angles_rad[1],
rot_angles_rad[2],
'rxyz')
rot_mat[:3, 3] = offset
global_trans = np.dot(global_trans, rot_mat)
return global_trans[:3, 3]
def get_global_joint_positions(self, joint_name):
'''
Get joint positions for the sequence of frames
:param joint_name: str
:return: numpy.array<3d>
'''
joint_pos = np.zeros((self.n_frames, LEN_ROOT))
for i in range(self.n_frames):
joint_pos[i] = self.get_global_pos(joint_name, i)
return joint_pos
def get_relative_joint_position(self, joint_name, frame_index):
"""
relative joint position to Hips
:param joint_name: str
:param frame_index: int
:return:
"""
joint_global_pos = self.get_global_pos(joint_name, frame_index)
root_global_pos = self.get_global_pos('Hips', frame_index)
return joint_global_pos - root_global_pos
def get_filtered_joint_index(self, joint_name):
        return list(self.skeleton.node_name_frame_map.keys()).index(joint_name)
def get_parent_joint_name(self, joint_name):
node = self.get_joint_by_joint_name(joint_name)
if node.parent is not None:
return node.parent.node_name
else:
return None
def get_filtered_joint_param_range(self, joint_name):
reduced_joint_index = self.get_filtered_joint_index(joint_name)
start_index = LEN_ROOT + reduced_joint_index * LEN_QUAT
end_index = LEN_ROOT + (reduced_joint_index + 1) * LEN_QUAT
return start_index, end_index
def get_joint_speed_at_frame_each_dim(self, joint_name, frame_idx):
assert frame_idx != 0, ("Index starts from 1")
return self.get_global_pos(joint_name, frame_idx) - self.get_global_pos(joint_name, frame_idx-1)
def get_joint_speed_each_dim(self, joint_name):
speed = [np.zeros(3)]
for i in range(1, self.n_frames):
speed.append(self.get_joint_speed_at_frame_each_dim(joint_name, i))
return np.asarray(speed)
def get_joint_speed(self, joint_name):
speed = []
for i in range(1, self.n_frames):
speed.append(self.get_joint_speed_at_frame(joint_name, i))
return np.asarray(speed)
def get_joint_speed_at_frame(self, joint_name, frame_idx):
assert frame_idx != 0, ("Index starts from 1")
return np.linalg.norm(self.get_global_pos(joint_name, frame_idx) - self.get_global_pos(joint_name, frame_idx-1))
def get_joint_acceleration_at_frame(self, joint_name, frame_idx):
assert frame_idx != self.n_frames - 1 and frame_idx != 0, ("frame index is out of range!")
return self.get_global_pos(joint_name, frame_idx + 1) + self.get_global_pos(joint_name, frame_idx - 1) - \
2 * self.get_global_pos(joint_name, frame_idx)
def get_joint_acceleration(self, joint_name):
acc = [np.zeros(3)]
for i in range(1, self.n_frames-1):
acc.append(self.get_joint_acceleration_at_frame(joint_name, i))
acc.append(np.zeros(3))
return np.asarray(acc)
def get_global_pos_for_all_frames(self, joint_name):
pos = np.zeros((self.n_frames, 3))
for i in range(self.n_frames):
pos[i] = self.get_global_pos(joint_name, i)
return pos
def get_joint_chain(self, joint_name):
joint = self.get_joint_by_joint_name(joint_name)
joint_chain = []
while joint.parent is not None:
joint_chain.append(joint)
joint = joint.parent
joint_chain.append(joint)
joint_chain.reverse()
return joint_chain
def get_relative_pos(self, joint_name, frame_index):
joint_chain = self.get_joint_chain(joint_name)
if len(joint_chain) == 1:
raise ValueError('Root joint has no relative position')
pos = self.get_global_pos(joint_name, frame_index)
parent_pos = self.get_global_pos(joint_chain[-2].node_name, frame_index)
return pos - parent_pos
def get_joint_offset(self, joint_name):
return self.skeleton.nodes[joint_name].offset
def _get_nodes_without_endsite(self):
animated_nodes = self.skeleton.nodes.values()
nodes_without_endsite = [node for node in animated_nodes if node.node_type != SKELETON_NODE_TYPE_END_SITE]
return nodes_without_endsite
def get_relative_orientation_euler(self, joint_name, frame_index):
# assert frame_index in range(self.n_frames), ('Frame index is invalid!')
nodes_without_endsite = self._get_nodes_without_endsite()
# assert (len(nodes_without_endsite)+1) * 3 == len(self.euler_frames[0]), \
# ('The length of euler frame is not corresponding to length of modeled joints')
joint = self.get_joint_by_joint_name(joint_name)
assert joint in nodes_without_endsite, ("The joint is not modeled!")
joint_index = nodes_without_endsite.index(joint)
start_channel_index = joint_index * 3 + LEN_ROOT
end_channel_index = start_channel_index + LEN_EULER
return self.euler_frames[frame_index][start_channel_index: end_channel_index]
def get_global_transform(self, joint_name, frame_index):
joint_chain = self.get_joint_chain(joint_name)
global_trans = np.eye(4)
global_trans[:3, 3] = self.euler_frames[frame_index][:LEN_ROOT]
for joint in joint_chain:
offset = joint.offset
if 'EndSite' in joint.node_name: # end site joint
rot_mat = np.eye(4)
rot_mat[:3, 3] = offset
else:
rot_angles_euler = self.get_relative_orientation_euler(joint.node_name, frame_index)
rot_angles_rad = np.deg2rad(rot_angles_euler)
rot_mat = euler_matrix(rot_angles_rad[0],
rot_angles_rad[1],
rot_angles_rad[2],
'rxyz')
rot_mat[:3, 3] = offset
global_trans = np.dot(global_trans, rot_mat)
return global_trans
def get_global_orientation_euler(self, joint_name, frame_index):
joint_chain = self.get_joint_chain(joint_name)
global_trans = np.eye(4)
global_trans[:3, 3] = self.euler_frames[frame_index][:LEN_ROOT]
for joint in joint_chain:
offset = joint.offset
rot_angles_euler = self.get_relative_orientation_euler(joint.node_name, frame_index)
rot_angles_rad = np.deg2rad(rot_angles_euler)
rot_mat = euler_matrix(rot_angles_rad[0],
rot_angles_rad[1],
rot_angles_rad[2],
'rxyz')
rot_mat[:3, 3] = offset
global_trans = np.dot(global_trans, rot_mat)
global_angles_rad = euler_from_matrix(global_trans,
'rxyz')
return np.rad2deg(global_angles_rad)
def get_global_orientation_quat(self, joint_name, frame_index):
return euler_to_quaternion(self.get_global_orientation_euler(joint_name,
frame_index))
def set_relative_orientation_euler(self, joint_name, frame_index, euler_angles):
"""
:param joint_name: str
:param frame_index: int
:param euler_angles: array<float> degree
:return:
"""
# assert frame_index in range(self.n_frames), ('Frame index is invalid!')
animated_nodes = self.skeleton.nodes.values()
nodes_without_endsite = [node for node in animated_nodes if node.node_type != SKELETON_NODE_TYPE_END_SITE]
assert (len(nodes_without_endsite)+1) * 3 == len(self.euler_frames[0]), \
('The length of euler frame is not corresponding to length of modeled joints')
joint_index = 0
for node in nodes_without_endsite:
if node.node_name == joint_name:
break
else:
joint_index += 1
start_channel_index = (joint_index + 1) * 3
end_channel_index = start_channel_index + LEN_EULER
self.euler_frames[frame_index][start_channel_index: end_channel_index] = euler_angles
def get_joint_index(self, joint_name):
        joint_name_list = list(self.skeleton.nodes.keys())
if joint_name not in joint_name_list:
raise ValueError('joint name is not found!')
return joint_name_list.index(joint_name)
def set_joint_offset(self, joint_name, offset):
assert len(offset) == 3, ('The length of joint is not correct')
joint = self.get_joint_by_joint_name(joint_name)
joint.offset = [offset[0], offset[1], offset[2]]
def get_joint_by_joint_name(self, joint_name):
if joint_name not in self.skeleton.nodes.keys():
print(joint_name)
raise KeyError('Joint name is not found!')
return self.skeleton.nodes[joint_name]
def to_quaternion(self, filter_joints=True):
self.quat_frames = np.array(convert_euler_frames_to_quaternion_frames(self.bvhreader,
self.euler_frames,
filter_joints))
def get_joint_channel_in_full_euler_frame(self, joint):
"""
:param joint: str, joint name
:return:
"""
return self.skeleton.node_channels.index((joint, 'Xrotation'))
def get_closure_kinematic_chain(self, joint):
joint_chain = []
if joint.parent is not None:
joint_chain.append(joint)
        joint_chain.reverse()
        return joint_chain
def get_body_plane(self, frame_idx):
body_plane_joints = ['Hips', 'Spine', 'LeftShoulder', 'RightShoulder', 'LeftUpLeg', 'RightUpLeg']
points = []
for joint in body_plane_joints:
points.append(self.get_relative_joint_position(joint, frame_idx))
points = np.asarray(points)
return Plane(points)
def get_left_elbow_angle(self, frame_idx):
left_arm_pos = self.get_global_pos('LeftArm', frame_idx)
left_forearm_pos = self.get_global_pos('LeftForeArm', frame_idx)
left_hand_pos = self.get_global_pos('LeftHand', frame_idx)
upper_arm = left_forearm_pos - left_arm_pos
lower_arm = left_forearm_pos - left_hand_pos
theta = np.arccos(np.dot(upper_arm, lower_arm)/(np.linalg.norm(upper_arm) * np.linalg.norm(lower_arm)))
theta = np.rad2deg(theta)
return theta
def get_left_elbow_angles(self):
        left_elbow_angles = []
        for i in range(self.n_frames):
            left_elbow_angles.append(self.get_left_elbow_angle(i))
        return left_elbow_angles
def get_right_elbow_angle(self, frame_idx):
right_arm_pos = self.get_global_pos('RightArm', frame_idx)
right_forearm_pos = self.get_global_pos('RightForeArm', frame_idx)
right_hand_pos = self.get_global_pos('RightHand', frame_idx)
upper_arm = right_forearm_pos - right_arm_pos
lower_arm = right_forearm_pos - right_hand_pos
theta = np.arccos(np.dot(upper_arm, lower_arm)/(np.linalg.norm(upper_arm) * np.linalg.norm(lower_arm)))
theta = np.rad2deg(theta)
return theta
def get_right_elbow_anlges(self):
right_elbow_angles = []
for i in range(self.n_frames):
right_elbow_angles.append(self.get_right_elbow_angle(i))
return right_elbow_angles
def right_hand_forward(self):
relative_right_hand_pos = np.zeros((self.n_frames, 3))
for i in range(self.n_frames):
relative_right_hand_pos[i] = self.get_global_pos('RightHand', i) - self.get_global_pos('Hips', i)
moving_offsets = relative_right_hand_pos[1:] - relative_right_hand_pos[:-1]
annotation = [False]
for i in range(self.n_frames-1):
body_dir = pose_orientation_euler(self.euler_frames[i+1])
if np.dot(body_dir, np.array([moving_offsets[i, 0], moving_offsets[i, 2]])) > 0.5:
annotation.append(True)
else:
annotation.append(False)
return annotation
def left_hand_forward(self):
left_hand_pos = np.zeros((self.n_frames, 3))
for i in range(self.n_frames):
left_hand_pos[i] = self.get_global_pos('LeftHand', i)
moving_offsets = left_hand_pos[1:] - left_hand_pos[:-1]
annotation = [False]
for i in range(self.n_frames-1):
body_dir = pose_orientation_euler(self.euler_frames[i+1])
if np.dot(body_dir, np.array([moving_offsets[i, 0], moving_offsets[i, 2]])) > 0.1:
annotation.append(True)
else:
annotation.append(False)
return annotation
def feet_distance_on_ground(self):
left_foot_pos = self.get_global_joint_positions('LeftFoot')
right_foot_pos = self.get_global_joint_positions('RightFoot')
feet_distance = []
for i in range(self.n_frames):
feet_distance.append(np.linalg.norm(left_foot_pos[i, [0, 2]] - right_foot_pos[i, [0, 2]]))
return np.asarray(feet_distance)
def rfoot_behind_lleg(self, frame_index, jointlist=['LeftUpLeg', 'RightUpLeg', 'LeftFoot', 'RightFoot']):
"""
        involved joints: LeftUpLeg, RightUpLeg, LeftFoot, RightFoot
:return:
"""
points = []
for joint in jointlist:
points.append(self.get_global_pos(joint, frame_index))
# determine the last point is before the body plane defined by the other three joints or behind
# reverse the list of joints, because the direction of the plane is decided by the right-hand rule
body_plane = Plane(points[:3])
return not body_plane.is_before_plane(points[-1])
def lfoot_behind_rleg(self, frame_index, jointlist=['LeftUpLeg', 'RightUpLeg', 'RightFoot', 'LeftFoot']):
"""
        involved joints: LeftUpLeg, RightUpLeg, RightFoot, LeftFoot
:param frame_index:
:return:
"""
points = []
for joint in jointlist:
points.append(self.get_global_pos(joint, frame_index))
body_plane = Plane(points[:3])
return not body_plane.is_before_plane(points[-1])
def rhand_moving_forwards(self, frameIndex):
"""
involved joints: body plane and RightHand
:param frameIndex:
:return:
"""
if self.body_plane is None:
            self.body_plane = self.get_body_plane(frameIndex)
if frameIndex == self.n_frames - 1:
return False
else:
current_distance = self.joint_disntace_to_body('RightHand', frameIndex)
next_distance = self.joint_disntace_to_body('RightHand', frameIndex + 1)
if next_distance - current_distance > 0.1:
return True
else:
return False
def lhand_moving_forwards(self, frameIndex):
"""
involved joints: body plane and LeftHand
:param frameIndex:
:return:
"""
if self.body_plane is None:
            self.body_plane = self.get_body_plane(frameIndex)
left_hand_pos = self.get_relative_joint_position('LeftHand', frameIndex)
if frameIndex == self.n_frames - 1:
return False
else:
next_pos = self.get_relative_joint_position('LeftHand', frameIndex + 1)
current_distance = self.body_plane.distance(left_hand_pos)
next_distance = self.body_plane.distance(next_pos)
if next_distance - current_distance > 0.1:
return True
else:
return False
def lhand_moving_forwards_one_frame(self, frameIndex):
threshold = 0.1
if frameIndex <= 0:
return False
else:
current_pos = self.get_relative_joint_position('LeftHand', frameIndex)
            previous_pos = self.get_relative_joint_position('LeftHand', frameIndex - 1)
if self.body_plane is None:
                self.body_plane = self.get_body_plane(frameIndex)
current_dist = self.body_plane.distance(current_pos)
previous_dist = self.body_plane.distance(previous_pos)
if current_dist - previous_dist > threshold:
return True
else:
return False
def lhand_moving_forwards2(self, frameIndex, windowSize=10):
if frameIndex < windowSize:
max_frame = frameIndex
elif self.n_frames - frameIndex < windowSize:
max_frame = self.n_frames - frameIndex - 1
else:
max_frame = windowSize
w = 1
while w <= max_frame:
prev_frame = self.lhand_moving_forwards_one_frame(frameIndex - w)
next_frame = self.lhand_moving_forwards_one_frame(frameIndex + w)
if prev_frame and next_frame:
return 1
elif not prev_frame and not next_frame:
return -1
else:
w += 1
return 0
def joint_disntace_to_body(self, jointname, frameIndex):
body_plane = self.get_body_plane(frameIndex)
joint_pos = self.get_relative_joint_position(jointname, frameIndex)
return body_plane.distance(joint_pos)
def rhand_moving_forwards_one_frame(self, frameIndex):
threshold = 0.1
if frameIndex <= 0:
return False
else:
current_dist = self.joint_disntace_to_body('RightHand', frameIndex)
previous_dist = self.joint_disntace_to_body('RightHand', frameIndex - 1)
# print('current distance: ', current_dist)
# print('previous distance: ', previous_dist)
if current_dist - previous_dist > threshold:
return True
else:
return False
def rhand_moving_forwards2(self, frameIndex, windowSize=10):
if frameIndex < windowSize:
max_frame = frameIndex
elif self.n_frames - frameIndex < windowSize:
max_frame = self.n_frames - frameIndex - 1
else:
max_frame = windowSize
# print("test1 max_frame: ", max_frame)
w = 1
while w <= max_frame:
prev_frame = self.rhand_moving_forwards_one_frame(frameIndex - w)
next_frame = self.rhand_moving_forwards_one_frame(frameIndex + w)
# print("w: ", w)
# print("prev_frame: ", prev_frame)
# print("next_frame: ", next_frame)
if prev_frame and next_frame:
return 1
elif not prev_frame and not next_frame:
return -1
else:
w += 1
return 0
def lknee_angle(self, frameIndex):
"""
involved joints: LeftUpLeg, LeftLeg, LeftFoot
:param frameIndex:
:return:
"""
leftUpLeg_position = self.get_relative_joint_position('LeftUpLeg', frameIndex)
leftLeg_position = self.get_relative_joint_position('LeftLeg', frameIndex)
leftFoot_position = self.get_relative_joint_position('LeftFoot', frameIndex)
upLegBone = leftLeg_position - leftUpLeg_position
lowLegBone = leftFoot_position - leftLeg_position
return angle_between_vectors(upLegBone, lowLegBone)
def rknee_angle(self, frameIndex):
"""
involved joints: RightUpLeg, RightLeg, RightFoot
:param frameIndex:
:return:
"""
rightUpLeg_position = self.get_relative_joint_position('RightUpLeg', frameIndex)
rightLeg_position = self.get_relative_joint_position('RightLeg', frameIndex)
rightFoot_position = self.get_relative_joint_position('RightFoot', frameIndex)
upLegBone = rightLeg_position - rightUpLeg_position
lowLegBone = rightFoot_position - rightLeg_position
return angle_between_vectors(upLegBone, lowLegBone)
def lleg_bending(self, frameIndex):
"""
involved joints: LeftUpLeg, LeftLeg, LeftFoot
:param frameIndex:
:param w (int): window size
:return:
reverse indexing is not supported
"""
angle_threshold = 0.001
if frameIndex <= 0:
return False
else:
previous_angle = self.lknee_angle(frameIndex - 1)
angle = self.lknee_angle(frameIndex)
if angle - previous_angle < -angle_threshold:
return True
else:
return False
def lleg_stretching(self, frameIndex):
"""
involved joints: LeftUpLeg, LeftLeg, LeftFoot
:param frameIndex:
:param w (int): window size
:return:
reverse indexing is not supported
"""
angle_threshold = 0.01
if frameIndex <= 0:
return False
else:
previous_angle = self.lknee_angle(frameIndex - 1)
angle = self.lknee_angle(frameIndex)
if angle - previous_angle >angle_threshold:
return True
else:
return False
def rleg_bending(self, frameIndex):
"""
involved joints: RightUpLeg, RightLeg, RightFoot
:param frameIndex:
:param w (int): window size
:return:
reverse indexing is not supported
"""
angle_threshold = 0.001
if frameIndex <= 0:
return False
else:
previous_angle = self.rknee_angle(frameIndex - 1)
angle = self.rknee_angle(frameIndex)
if angle - previous_angle < -angle_threshold:
return True
else:
return False
def rleg_stretching(self, frameIndex):
"""
involved joints: RightUpLeg, RightLeg, RightFoot
:param frameIndex:
:param w (int): window size
:return:
reverse indexing is not supported
"""
angle_threshold = 0.01
if frameIndex <= 0:
return False
else:
previous_angle = self.rknee_angle(frameIndex - 1)
angle = self.rknee_angle(frameIndex)
if angle - previous_angle > angle_threshold:
return True
else:
return False
def rtoe_before_lleg(self, frameIndex):
"""
involved joints: Hips, LeftUpLeg, LeftLeg, Bip01_R_Toe0
:param frameIndex:
:return:
"""
jointList = ['Hips', 'LeftUpLeg', 'LeftLeg', 'Bip01_R_Toe0']
points = []
for joint in jointList:
points.append(self.get_relative_joint_position(joint, frameIndex))
points.reverse()
relative_plane = Plane(points[1:])
return relative_plane.is_before_plane(points[0])
def ltoe_before_rleg(self, frameIndex):
"""
involved joints: Hips, RightUpLeg, RightLeg, Bip01_L_Toe0
:param frameIndex:
:return:
"""
jointlist = ['Hips', 'RightUpLeg', 'RightLeg', 'Bip01_L_Toe0']
points = []
for joint in jointlist:
points.append(self.get_relative_joint_position(joint, frameIndex))
relative_plane = Plane(points[:3])
return relative_plane.is_before_plane(points[-1])
def spine_horizontal(self, frameIndex):
"""
involved joints:
:param frameIndex:
:return:
"""
pass
def feet_moving_towards_each_other(self):
'''
Feature: Distance between two feet on the ground
involved joints:
:return Boolean: status
'''
pass
def process(self, frame_idx):
'''
use a list of signal processor to process given frame
:return:
'''
pass
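# A hedged usage sketch, kept as comments: it assumes anim_utils exposes a
# BVHReader and that an "example.bvh" file containing the joint names used
# above (Hips, LeftHand, ...) is available.
#
#   from anim_utils.animation_data import BVHReader
#   analyzer = BVHAnalyzer(BVHReader("example.bvh"))
#   print(analyzer.get_global_pos("Hips", 0))        # root position at frame 0
#   print(analyzer.get_left_elbow_angle(0))          # elbow angle in degrees
#   print(analyzer.feet_distance_on_ground().shape)  # one distance per frame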
|
[
"numpy.eye",
"transformations.euler_from_matrix",
"transformations.euler_matrix",
"anim_utils.animation_data.SkeletonBuilder",
"numpy.asarray",
"anim_utils.utilities.custom_math.angle_between_vectors",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.deg2rad",
"numpy.linalg.norm",
"numpy.rad2deg",
"anim_utils.animation_data.utils.pose_orientation_euler",
"anim_utils.animation_data.utils.convert_euler_frames_to_quaternion_frames"
] |
[((2074, 2083), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2080, 2083), True, 'import numpy as np\n'), ((3114, 3149), 'numpy.zeros', 'np.zeros', (['(self.n_frames, LEN_ROOT)'], {}), '((self.n_frames, LEN_ROOT))\n', (3122, 3149), True, 'import numpy as np\n'), ((4780, 4797), 'numpy.asarray', 'np.asarray', (['speed'], {}), '(speed)\n', (4790, 4797), True, 'import numpy as np\n'), ((4989, 5006), 'numpy.asarray', 'np.asarray', (['speed'], {}), '(speed)\n', (4999, 5006), True, 'import numpy as np\n'), ((5840, 5855), 'numpy.asarray', 'np.asarray', (['acc'], {}), '(acc)\n', (5850, 5855), True, 'import numpy as np\n'), ((5928, 5956), 'numpy.zeros', 'np.zeros', (['(self.n_frames, 3)'], {}), '((self.n_frames, 3))\n', (5936, 5956), True, 'import numpy as np\n'), ((8065, 8074), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8071, 8074), True, 'import numpy as np\n'), ((9028, 9037), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9034, 9037), True, 'import numpy as np\n'), ((9659, 9698), 'transformations.euler_from_matrix', 'euler_from_matrix', (['global_trans', '"""rxyz"""'], {}), "(global_trans, 'rxyz')\n", (9676, 9698), False, 'from transformations import euler_matrix, euler_from_matrix\n'), ((9760, 9789), 'numpy.rad2deg', 'np.rad2deg', (['global_angles_rad'], {}), '(global_angles_rad)\n', (9770, 9789), True, 'import numpy as np\n'), ((12845, 12863), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (12855, 12863), True, 'import numpy as np\n'), ((13379, 13396), 'numpy.rad2deg', 'np.rad2deg', (['theta'], {}), '(theta)\n', (13389, 13396), True, 'import numpy as np\n'), ((14123, 14140), 'numpy.rad2deg', 'np.rad2deg', (['theta'], {}), '(theta)\n', (14133, 14140), True, 'import numpy as np\n'), ((14444, 14472), 'numpy.zeros', 'np.zeros', (['(self.n_frames, 3)'], {}), '((self.n_frames, 3))\n', (14452, 14472), True, 'import numpy as np\n'), ((15124, 15152), 'numpy.zeros', 'np.zeros', (['(self.n_frames, 3)'], {}), '((self.n_frames, 3))\n', (15132, 15152), True, 'import numpy as np\n'), ((16044, 16069), 'numpy.asarray', 'np.asarray', (['feet_distance'], {}), '(feet_distance)\n', (16054, 16069), True, 'import numpy as np\n'), ((22143, 22187), 'anim_utils.utilities.custom_math.angle_between_vectors', 'angle_between_vectors', (['upLegBone', 'lowLegBone'], {}), '(upLegBone, lowLegBone)\n', (22164, 22187), False, 'from anim_utils.utilities.custom_math import angle_between_vectors\n'), ((22749, 22793), 'anim_utils.utilities.custom_math.angle_between_vectors', 'angle_between_vectors', (['upLegBone', 'lowLegBone'], {}), '(upLegBone, lowLegBone)\n', (22770, 22793), False, 'from anim_utils.utilities.custom_math import angle_between_vectors\n'), ((2831, 2860), 'numpy.dot', 'np.dot', (['global_trans', 'rot_mat'], {}), '(global_trans, rot_mat)\n', (2837, 2860), True, 'import numpy as np\n'), ((4630, 4641), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4638, 4641), True, 'import numpy as np\n'), ((5660, 5671), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5668, 5671), True, 'import numpy as np\n'), ((5812, 5823), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5820, 5823), True, 'import numpy as np\n'), ((8822, 8851), 'numpy.dot', 'np.dot', (['global_trans', 'rot_mat'], {}), '(global_trans, rot_mat)\n', (8828, 8851), True, 'import numpy as np\n'), ((9304, 9332), 'numpy.deg2rad', 'np.deg2rad', (['rot_angles_euler'], {}), '(rot_angles_euler)\n', (9314, 9332), True, 'import numpy as np\n'), ((9355, 9432), 'transformations.euler_matrix', 'euler_matrix', (['rot_angles_rad[0]', 
'rot_angles_rad[1]', 'rot_angles_rad[2]', '"""rxyz"""'], {}), "(rot_angles_rad[0], rot_angles_rad[1], rot_angles_rad[2], 'rxyz')\n", (9367, 9432), False, 'from transformations import euler_matrix, euler_from_matrix\n'), ((9601, 9630), 'numpy.dot', 'np.dot', (['global_trans', 'rot_mat'], {}), '(global_trans, rot_mat)\n', (9607, 9630), True, 'import numpy as np\n'), ((11893, 11988), 'anim_utils.animation_data.utils.convert_euler_frames_to_quaternion_frames', 'convert_euler_frames_to_quaternion_frames', (['self.bvhreader', 'self.euler_frames', 'filter_joints'], {}), '(self.bvhreader, self.euler_frames,\n filter_joints)\n', (11934, 11988), False, 'from anim_utils.animation_data.utils import pose_orientation_euler, check_quat, convert_quat_frame_value_to_array, euler_to_quaternion, convert_euler_frames_to_quaternion_frames\n'), ((14799, 14847), 'anim_utils.animation_data.utils.pose_orientation_euler', 'pose_orientation_euler', (['self.euler_frames[i + 1]'], {}), '(self.euler_frames[i + 1])\n', (14821, 14847), False, 'from anim_utils.animation_data.utils import pose_orientation_euler, check_quat, convert_quat_frame_value_to_array, euler_to_quaternion, convert_euler_frames_to_quaternion_frames\n'), ((15415, 15463), 'anim_utils.animation_data.utils.pose_orientation_euler', 'pose_orientation_euler', (['self.euler_frames[i + 1]'], {}), '(self.euler_frames[i + 1])\n', (15437, 15463), False, 'from anim_utils.animation_data.utils import pose_orientation_euler, check_quat, convert_quat_frame_value_to_array, euler_to_quaternion, convert_euler_frames_to_quaternion_frames\n'), ((1709, 1726), 'anim_utils.animation_data.SkeletonBuilder', 'SkeletonBuilder', ([], {}), '()\n', (1724, 1726), False, 'from anim_utils.animation_data import SkeletonBuilder, SKELETON_NODE_TYPE_END_SITE, LEN_EULER, LEN_ROOT, LEN_QUAT\n'), ((2312, 2321), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2318, 2321), True, 'import numpy as np\n'), ((2514, 2542), 'numpy.deg2rad', 'np.deg2rad', (['rot_angles_euler'], {}), '(rot_angles_euler)\n', (2524, 2542), True, 'import numpy as np\n'), ((2569, 2646), 'transformations.euler_matrix', 'euler_matrix', (['rot_angles_rad[0]', 'rot_angles_rad[1]', 'rot_angles_rad[2]', '"""rxyz"""'], {}), "(rot_angles_rad[0], rot_angles_rad[1], rot_angles_rad[2], 'rxyz')\n", (2581, 2646), False, 'from transformations import euler_matrix, euler_from_matrix\n'), ((8303, 8312), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8309, 8312), True, 'import numpy as np\n'), ((8505, 8533), 'numpy.deg2rad', 'np.deg2rad', (['rot_angles_euler'], {}), '(rot_angles_euler)\n', (8515, 8533), True, 'import numpy as np\n'), ((8560, 8637), 'transformations.euler_matrix', 'euler_matrix', (['rot_angles_rad[0]', 'rot_angles_rad[1]', 'rot_angles_rad[2]', '"""rxyz"""'], {}), "(rot_angles_rad[0], rot_angles_rad[1], rot_angles_rad[2], 'rxyz')\n", (8572, 8637), False, 'from transformations import euler_matrix, euler_from_matrix\n'), ((13277, 13305), 'numpy.dot', 'np.dot', (['upper_arm', 'lower_arm'], {}), '(upper_arm, lower_arm)\n', (13283, 13305), True, 'import numpy as np\n'), ((14021, 14049), 'numpy.dot', 'np.dot', (['upper_arm', 'lower_arm'], {}), '(upper_arm, lower_arm)\n', (14027, 14049), True, 'import numpy as np\n'), ((15959, 16027), 'numpy.linalg.norm', 'np.linalg.norm', (['(left_foot_pos[i, [0, 2]] - right_foot_pos[i, [0, 2]])'], {}), '(left_foot_pos[i, [0, 2]] - right_foot_pos[i, [0, 2]])\n', (15973, 16027), True, 'import numpy as np\n'), ((13307, 13332), 'numpy.linalg.norm', 'np.linalg.norm', (['upper_arm'], {}), 
'(upper_arm)\n', (13321, 13332), True, 'import numpy as np\n'), ((13335, 13360), 'numpy.linalg.norm', 'np.linalg.norm', (['lower_arm'], {}), '(lower_arm)\n', (13349, 13360), True, 'import numpy as np\n'), ((14051, 14076), 'numpy.linalg.norm', 'np.linalg.norm', (['upper_arm'], {}), '(upper_arm)\n', (14065, 14076), True, 'import numpy as np\n'), ((14079, 14104), 'numpy.linalg.norm', 'np.linalg.norm', (['lower_arm'], {}), '(lower_arm)\n', (14093, 14104), True, 'import numpy as np\n'), ((14878, 14932), 'numpy.array', 'np.array', (['[moving_offsets[i, 0], moving_offsets[i, 2]]'], {}), '([moving_offsets[i, 0], moving_offsets[i, 2]])\n', (14886, 14932), True, 'import numpy as np\n'), ((15494, 15548), 'numpy.array', 'np.array', (['[moving_offsets[i, 0], moving_offsets[i, 2]]'], {}), '([moving_offsets[i, 0], moving_offsets[i, 2]])\n', (15502, 15548), True, 'import numpy as np\n')]
|
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import compute_unary, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax
def dense_crf(img, prob):
'''
input:
img: numpy array of shape (num of channels, height, width)
        prob: numpy array of shape (9, height, width), per-pixel class probabilities from the network's last layer for img (consumed by unary_from_softmax below)
output:
res: (height, width)
Modified from:
http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/18/image-segmentation-with-tensorflow-using-cnns-and-conditional-random-fields/
https://github.com/yt605155624/tensorflow-deeplab-resnet/blob/e81482d7bb1ae674f07eae32b0953fe09ff1c9d1/inference_crf.py
'''
img = np.swapaxes(img, 0, 2)
    # img.shape: (width, height, num of channels), e.g. (224, 224, 3)
num_iter = 50
    prob = np.swapaxes(prob, 1, 2) # shape: (num_classes, width, height), e.g. (9, 224, 224)
num_classes = 9 #2
d = dcrf.DenseCRF2D(img.shape[0] , img.shape[1], num_classes)
unary = unary_from_softmax(prob) # shape: (num_classes, width * height)
unary = np.ascontiguousarray(unary)
img = np.ascontiguousarray(img,dtype=np.uint8)
d.setUnaryEnergy(unary)
d.addPairwiseBilateral(sxy=5, srgb=3, rgbim=img, compat=3)
Q = d.inference(num_iter) # set the number of iterations
res = np.argmax(Q, axis=0).reshape((img.shape[0], img.shape[1]))
# res.shape: (width, height)
res = np.swapaxes(res, 0, 1) # res.shape: (height, width)
# res = res[np.newaxis, :, :] # res.shape: (1, height, width)
# func_end = time.time()
# print('{:.2f} sec spent on CRF with {} iterations'.format(func_end - func_start, num_iter))
# about 2 sec for a 1280 * 960 image with 5 iterations
return res
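
# Minimal usage sketch with synthetic data (illustrative assumption only; the
# original calling code is not part of this file). It demonstrates the shapes
# dense_crf expects: a (channels, H, W) uint8 image and a (9, H, W) map of
# per-pixel class probabilities that is strictly positive and sums to 1 over axis 0.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    img = rng.integers(0, 256, size=(3, 224, 224), dtype=np.uint8)  # (channels, H, W)
    logits = rng.normal(size=(9, 224, 224)).astype(np.float32)
    e = np.exp(logits - logits.max(axis=0, keepdims=True))  # numerically stable softmax
    prob = e / e.sum(axis=0, keepdims=True)  # (9, H, W), positive, sums to 1 per pixel
    labels = dense_crf(img, prob)
    print(labels.shape)  # expected: (224, 224)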
|
[
"pydensecrf.densecrf.DenseCRF2D",
"numpy.argmax",
"numpy.ascontiguousarray",
"numpy.swapaxes",
"pydensecrf.utils.unary_from_softmax"
] |
[((733, 755), 'numpy.swapaxes', 'np.swapaxes', (['img', '(0)', '(2)'], {}), '(img, 0, 2)\n', (744, 755), True, 'import numpy as np\n'), ((848, 871), 'numpy.swapaxes', 'np.swapaxes', (['prob', '(1)', '(2)'], {}), '(prob, 1, 2)\n', (859, 871), True, 'import numpy as np\n'), ((945, 1001), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['img.shape[0]', 'img.shape[1]', 'num_classes'], {}), '(img.shape[0], img.shape[1], num_classes)\n', (960, 1001), True, 'import pydensecrf.densecrf as dcrf\n'), ((1016, 1040), 'pydensecrf.utils.unary_from_softmax', 'unary_from_softmax', (['prob'], {}), '(prob)\n', (1034, 1040), False, 'from pydensecrf.utils import compute_unary, create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n'), ((1093, 1120), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['unary'], {}), '(unary)\n', (1113, 1120), True, 'import numpy as np\n'), ((1131, 1172), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (1151, 1172), True, 'import numpy as np\n'), ((1440, 1462), 'numpy.swapaxes', 'np.swapaxes', (['res', '(0)', '(1)'], {}), '(res, 0, 1)\n', (1451, 1462), True, 'import numpy as np\n'), ((1337, 1357), 'numpy.argmax', 'np.argmax', (['Q'], {'axis': '(0)'}), '(Q, axis=0)\n', (1346, 1357), True, 'import numpy as np\n')]
|
import os
import numpy
import scipy
import scipy.optimize
from cryspy.A_functions_base.symmetry_elements import \
calc_asymmetric_unit_cell_indexes
from cryspy.A_functions_base.mempy import \
calc_mem_col, \
calc_mem_chi, \
calc_symm_elem_points_by_index_points, \
get_uniform_density_col, \
renormailize_density_col, \
save_spin_density_into_file,\
form_basins,\
calc_point_susceptibility, \
get_uniform_density_chi,\
renormailize_density_chi, \
calc_model_value_by_precalculated_data, \
calc_chi_atoms
from cryspy.A_functions_base.unit_cell import \
calc_volume_uc_by_unit_cell_parameters, \
calc_sthovl_by_unit_cell_parameters, \
calc_eq_ccs_by_unit_cell_parameters
from cryspy.A_functions_base.structure_factor import \
calc_f_nucl_by_dictionary
from cryspy.A_functions_base.flip_ratio import \
calc_iint, calc_flip_ratio_by_iint, \
calc_asymmetry_by_iint
from cryspy.A_functions_base.extinction import \
calc_extinction_sphere
from cryspy.A_functions_base.orbital_functions import \
calc_density_spherical
from cryspy.A_functions_base.matrix_operations import \
calc_vv_as_v1_v2_v1
from cryspy.A_functions_base.function_1_error_simplex import \
error_estimation_simplex
def mempy_reconstruction_by_dictionary(dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out,
parameter_lambda:float=1.e-5, iteration_max:int=1000, parameter_lambda_min:float=1.e-9, delta_density:float=1.e-5):
# **Input information about mem parameters**
print("*******************************************")
print("MEM reconstruction by CrysPy (module MEMPy)")
print("*******************************************\n")
print("MEM iteration parameters")
print("------------------------")
print(f" starting lambda parameter: {parameter_lambda*1e6:.3f}*10^-6")
print(f" maximal number of iterations: {iteration_max:}")
print(f" minimal lambda parameter: {parameter_lambda_min*1e6:}*10^-6")
print(f" delta_density: {delta_density*1e5:}*10^-5\n")
dict_in_out_keys = dict_in_out.keys()
print("Density reconstruction")
print("----------------------")
n_abc = dict_mem_parameters["points_abc"]
print(f"Unit cell is devided on points {n_abc[0]:} x {n_abc[1]:} x {n_abc[2]:}.")
channel_plus_minus = dict_mem_parameters["channel_plus_minus"]
channel_chi = dict_mem_parameters["channel_chi"]
if channel_plus_minus:
magnetization_plus = dict_mem_parameters["magnetization_plus"]
magnetization_minus = dict_mem_parameters["magnetization_minus"]
file_spin_density = dict_mem_parameters["file_spin_density"]
dict_in_out["magnetization_plus"] = magnetization_plus
dict_in_out["magnetization_minus"] = magnetization_minus
if channel_chi:
flag_uniform_prior_density = dict_mem_parameters["flag_uniform_prior_density"]
flag_only_magnetic_basins = dict_mem_parameters["flag_only_magnetic_basins"]
file_magnetization_density = dict_mem_parameters["file_magnetization_density"]
flag_asymmetry = dict_mem_parameters["flag_asymmetry"]
gof_desired = dict_mem_parameters["gof_desired"]
# **Input information about crystal**
unit_cell_parameters = dict_crystal["unit_cell_parameters"]
full_symm_elems = dict_crystal["full_symm_elems"]
volume_unit_cell = calc_volume_uc_by_unit_cell_parameters(unit_cell_parameters, flag_unit_cell_parameters=False)[0]
reduced_symm_elems = dict_crystal["reduced_symm_elems"]
centrosymmetry = dict_crystal["centrosymmetry"]
if centrosymmetry:
centrosymmetry_position = dict_crystal["centrosymmetry_position"]
else:
centrosymmetry_position = None
translation_elems = dict_crystal["translation_elems"]
atom_label = dict_crystal["atom_label"]
atom_fract_xyz = dict_crystal["atom_fract_xyz"]
atom_multiplicity = dict_crystal["atom_multiplicity"]
if channel_chi:
atom_para_label = dict_crystal["atom_para_label"]
atom_para_susceptibility = dict_crystal["atom_para_susceptibility"]
atom_para_sc_chi = dict_crystal["atom_para_sc_chi"]
# **Index in asymmetric unit cell**
print("Calculation of asymmetric unit cell...", end="\r")
index_auc, point_multiplicity = calc_asymmetric_unit_cell_indexes(n_abc, full_symm_elems)
symm_elem_auc = calc_symm_elem_points_by_index_points(index_auc, n_abc)
print(f"Number of points in asymmetric unit cell is {index_auc.shape[1]:}.", end="\n")
    # **Basin division**
if channel_chi and flag_only_magnetic_basins:
print("Devision of asymmetric unit cell on bassins...", end="\r")
flag_atom_para = numpy.any(numpy.expand_dims(atom_label, axis=1) == numpy.expand_dims(atom_para_label, axis=0), axis=1)
flag_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, atom_distance_auc_chi, atom_symm_elems_auc_chi = \
form_basins(symm_elem_auc, full_symm_elems, unit_cell_parameters, atom_label[flag_atom_para],
atom_fract_xyz[:,flag_atom_para], atom_multiplicity[flag_atom_para], atom_para_label)
dict_in_out["atom_multiplicity_channel_chi"] = atom_multiplicity_auc_chi
print(f"Magnetic basins occupy entire unit cell. \n(flag_only_magnetic_basins: {flag_only_magnetic_basins:})\n")
elif channel_chi:
print("Devision of asymmetric unit cell on bassins...", end="\r")
flag_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, atom_distance_auc_chi, atom_symm_elems_auc_chi = \
form_basins(symm_elem_auc, full_symm_elems, unit_cell_parameters, atom_label,
atom_fract_xyz, atom_multiplicity, atom_para_label)
dict_in_out["atom_multiplicity_channel_chi"] = atom_multiplicity_auc_chi
print(f"Magnetic basins occupy area around magnetic atoms. \n(flag_only_magnetic_basins: {flag_only_magnetic_basins:})\n")
if channel_chi:
index_auc_chi = index_auc[:, flag_chi]
point_multiplicity_chi = point_multiplicity[flag_chi]
dict_in_out["point_multiplicity_channel_chi"] = point_multiplicity_chi
symm_elem_auc_chi = symm_elem_auc[:, flag_chi]
dict_in_out["symm_elem_channel_chi"] = symm_elem_auc_chi
if channel_plus_minus and channel_chi:
flag_col = numpy.logical_not(flag_chi)
index_auc_col = index_auc[:, flag_col]
point_multiplicity_col = point_multiplicity[flag_col]
symm_elem_auc_col = symm_elem_auc[:, flag_col]
dict_in_out["point_multiplicity_channel_plus_minus"] = point_multiplicity_col
dict_in_out["symm_elem_channel_plus_minus"] = symm_elem_auc_col
elif channel_plus_minus:
index_auc_col = numpy.copy(index_auc)
point_multiplicity_col = numpy.copy(point_multiplicity)
symm_elem_auc_col = numpy.copy(symm_elem_auc)
dict_in_out["point_multiplicity_channel_plus_minus"] = point_multiplicity_col
dict_in_out["symm_elem_channel_plus_minus"] = symm_elem_auc_col
print(f"channel_plus_minus: {channel_plus_minus:}")
print(f"channel_chi: {channel_chi:}\n")
if channel_plus_minus:
print(f"Magnetization of unit cell: {magnetization_plus+magnetization_minus:.3f} mu_B")
print(f"(positive channel {magnetization_plus:.3f} mu_B, negative channel {magnetization_minus:.3f} mu_B)")
print(f"\nNumber of density points for channel_plus_minus is {index_auc_col.shape[1]}.")
if channel_chi:
print(f"Number of density points for channel_chi is {index_auc_chi.shape[1]}.")
# **Susceptibility tensor $(3\times 3)$ for each point in magnetic basin**
if channel_chi:
print("Calculation of restriction on susceptibility...", end="\r")
point_susceptibility = calc_point_susceptibility(
unit_cell_parameters, atom_symm_elems_auc_chi, atom_label_auc_chi,
atom_para_label, atom_para_susceptibility, atom_para_sc_chi, full_symm_elems, symm_elem_auc_chi)
dict_in_out["susceptibility_channel_chi"] = point_susceptibility
print(80*" ", end="\r")
# **Prior density**
number_unit_cell = numpy.prod(n_abc)
print("\nCalculation of prior density... ", end="\r")
if channel_chi:
if flag_uniform_prior_density:
density_chi_prior = get_uniform_density_chi(point_multiplicity_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)
print("Prior density in channel chi is uniform. ")
else:
density_chi_prior = numpy.zeros_like(atom_distance_auc_chi)
for label in atom_para_label:
flag_atom = atom_label_auc_chi==label
dict_shell = dict_crystal[f"shell_{label:}"]
kappa = float(dict_crystal["mag_atom_kappa"][dict_crystal["mag_atom_label"] == label])
den_atom = calc_density_spherical(
atom_distance_auc_chi[flag_atom], dict_shell["core_population"], dict_shell["core_coeff"], dict_shell["core_zeta"],
dict_shell["core_n"], kappa)
density_chi_prior[flag_atom] = den_atom
density_chi_prior = renormailize_density_chi(density_chi_prior, point_multiplicity_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)
print("Prior density in channel chi is core. ")
if channel_plus_minus:
density_col_prior = get_uniform_density_col(point_multiplicity_col, volume_unit_cell, number_unit_cell)
print("Prior density in channel plus-minus is uniform. ")
# **Input information about experiments**
flag_use_precalculated_data = False
l_exp_value_sigma = []
l_mem_chi, l_mem_col = [], []
print(f"Number of experiments is {len(l_dict_diffrn):}. ")
for dict_diffrn in l_dict_diffrn:
if "dict_in_out_"+dict_diffrn["type_name"] in dict_in_out_keys:
diffrn_dict_in_out = dict_in_out["dict_in_out_"+dict_diffrn["type_name"]]
else:
diffrn_dict_in_out = {}
dict_in_out["dict_in_out_"+dict_diffrn["type_name"]] = diffrn_dict_in_out
index_hkl = dict_diffrn["index_hkl"]
h_ccs = dict_diffrn["magnetic_field"]
eh_ccs = dict_diffrn["matrix_u"][6:]
print(f"Preliminary calculation for experiment {dict_diffrn['name']:}...", end="\r")
diffrn_dict_in_out["index_hkl"] = index_hkl
diffrn_dict_in_out_keys = diffrn_dict_in_out.keys()
if channel_plus_minus:
if "dict_in_out_col" in diffrn_dict_in_out_keys:
dict_in_out_col = diffrn_dict_in_out["dict_in_out_col"]
else:
dict_in_out_col = {}
diffrn_dict_in_out["dict_in_out_col"] = dict_in_out_col
mem_col = calc_mem_col(
index_hkl, unit_cell_parameters, eh_ccs, full_symm_elems, symm_elem_auc_col,
volume_unit_cell, number_unit_cell,
point_multiplicity=point_multiplicity_col,
dict_in_out=dict_in_out_col, flag_use_precalculated_data=flag_use_precalculated_data)
diffrn_dict_in_out["mem_col"] = mem_col
l_mem_col.append(mem_col)
if channel_chi:
if "dict_in_out_chi" in diffrn_dict_in_out_keys:
dict_in_out_chi = diffrn_dict_in_out["dict_in_out_chi"]
else:
dict_in_out_chi = {}
diffrn_dict_in_out["dict_in_out_chi"] = dict_in_out_chi
mem_chi = calc_mem_chi(
index_hkl, unit_cell_parameters, h_ccs, full_symm_elems, symm_elem_auc_chi,
point_susceptibility, volume_unit_cell, number_unit_cell,
point_multiplicity=point_multiplicity_chi,
dict_in_out=dict_in_out_chi, flag_use_precalculated_data=flag_use_precalculated_data)
diffrn_dict_in_out["mem_chi"] = mem_chi
l_mem_chi.append(mem_chi)
f_nucl, dder = calc_f_nucl_by_dictionary(
dict_crystal, diffrn_dict_in_out, flag_use_precalculated_data=flag_use_precalculated_data)
diffrn_dict_in_out["f_nucl"] = f_nucl
flip_ratio_es = dict_diffrn["flip_ratio_es"]
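        # Convert the measured flip ratios R +/- sigma_R into asymmetries
        # A = (R - 1)/(R + 1); the propagated uncertainty implemented below is
        # sigma_A = sqrt(2*(R**2 + 1)) * sigma_R / (R + 1)**2.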
if flag_asymmetry:
asymmetry_e = (flip_ratio_es[0] -1.)/(flip_ratio_es[0] + 1.)
asymmetry_s = numpy.sqrt(2.)*flip_ratio_es[1] * numpy.sqrt(numpy.square(flip_ratio_es[0]) + 1.)/numpy.square(flip_ratio_es[0] + 1.)
asymmetry_es = numpy.stack([asymmetry_e, asymmetry_s], axis=0)
l_exp_value_sigma.append(asymmetry_es)
else:
l_exp_value_sigma.append(flip_ratio_es)
exp_value_sigma = numpy.concatenate(l_exp_value_sigma, axis=1)
if channel_plus_minus:
mem_col = numpy.concatenate(l_mem_col, axis=1)
if channel_chi:
mem_chi = numpy.concatenate(l_mem_chi, axis=1)
print(f"Total number of reflections is {exp_value_sigma.shape[1]: }. ")
if flag_asymmetry:
print("Density reconstruction is based on asymmetry parameters.")
else:
print("Density reconstruction is based on flip ratios. ")
    # **Preparation for the MEM iteration procedure**
if channel_plus_minus:
density_col = numpy.copy(density_col_prior)
density_col_next = numpy.copy(density_col_prior)
if channel_chi:
density_chi = numpy.copy(density_chi_prior)
density_chi_next = numpy.copy(density_chi_prior)
# **MEM iteration**
print("\nMEM iteration procedure")
print("-----------------------")
print(f"Desired GoF is {gof_desired:.2f}.")
c_desired = gof_desired
c_previous = numpy.inf
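    # Here c is the goodness of fit chi^2/n = mean(((exp - model)/sigma)^2) over
    # all reflections; the loop below stops once c <= gof_desired, the iteration
    # limit is reached, or parameter_lambda falls below parameter_lambda_min.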
if channel_plus_minus:
der_c_den_col_previous = numpy.zeros_like(density_col_prior)
if channel_chi:
der_c_den_chi_previous = numpy.zeros_like(density_chi_prior)
iteration = 0
flag_next = True
while flag_next:
iteration += 1
if channel_plus_minus:
density_col = numpy.copy(density_col_next)
if channel_chi:
density_chi = numpy.copy(density_chi_next)
l_model_value = []
l_der_model_den_pm, l_der_model_den_chi = [], []
for dict_diffrn in l_dict_diffrn:
diffrn_dict_in_out = dict_in_out["dict_in_out_"+dict_diffrn['type_name']]
index_hkl = diffrn_dict_in_out["index_hkl"]
f_m_perp = numpy.zeros(index_hkl.shape, dtype=complex)
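            # The perpendicular magnetic structure factor is assembled as a linear
            # combination of the precalculated per-point kernels (mem_col, mem_chi)
            # weighted by the current density values of the corresponding channel.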
if channel_plus_minus:
mem_col_exp = diffrn_dict_in_out["mem_col"]
hh = numpy.expand_dims(numpy.expand_dims(magnetization_plus * density_col[0] + magnetization_minus * density_col[1], axis=0), axis=1)
f_m_perp_col = (hh*mem_col_exp).sum(axis=2)
f_m_perp += f_m_perp_col
if channel_chi:
mem_chi_exp = diffrn_dict_in_out["mem_chi"]
f_m_perp_chi = (density_chi*mem_chi_exp).sum(axis=2)
f_m_perp += f_m_perp_chi
beam_polarization = dict_diffrn["beam_polarization"]
flipper_efficiency = dict_diffrn["flipper_efficiency"]
matrix_u = dict_diffrn["matrix_u"]
flip_ratio_es = dict_diffrn["flip_ratio_es"]
f_nucl = diffrn_dict_in_out["f_nucl"]
wavelength = dict_diffrn["wavelength"]
sthovl = calc_sthovl_by_unit_cell_parameters(index_hkl, unit_cell_parameters, flag_unit_cell_parameters=False)[0]
cos_2theta = numpy.cos(2*numpy.arcsin(sthovl*wavelength))
extinction_model = dict_diffrn["extinction_model"]
extinction_radius = dict_diffrn["extinction_radius"]
extinction_mosaicity = dict_diffrn["extinction_mosaicity"]
func_extinction = lambda f_sq, flag_f_sq: calc_extinction_sphere(
f_sq, extinction_radius, extinction_mosaicity, volume_unit_cell, cos_2theta, wavelength,
extinction_model, flag_f_sq=False, flag_radius=False,
flag_mosaicity=False,
flag_volume_unit_cell=False,
flag_cos_2theta=False,
flag_wavelength=False)
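            # The closure fixes every derivative flag of calc_extinction_sphere to
            # False, so the flag_f_sq argument it receives is not forwarded.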
iint_plus, iint_minus, dder_plus, dder_minus = calc_iint(
beam_polarization, flipper_efficiency, f_nucl, f_m_perp, matrix_u, func_extinction = func_extinction,
flag_beam_polarization = False, flag_flipper_efficiency = False,
flag_f_nucl = False, flag_f_m_perp = True,
dict_in_out = dict_in_out, flag_use_precalculated_data = flag_use_precalculated_data)
diffrn_dict_in_out["flip_ratio"] = iint_plus/iint_minus
der_int_plus_fm_perp_real = dder_plus["f_m_perp_real"]
der_int_plus_fm_perp_imag = dder_plus["f_m_perp_imag"]
der_int_minus_fm_perp_real = dder_minus["f_m_perp_real"]
der_int_minus_fm_perp_imag = dder_minus["f_m_perp_imag"]
if flag_asymmetry:
model_exp, dder_model_exp = calc_asymmetry_by_iint(
iint_plus, iint_minus, c_lambda2=None, iint_2hkl=None,
flag_iint_plus=True, flag_iint_minus=True,
flag_c_lambda2=False, flag_iint_2hkl=False)
else:
model_exp, dder_model_exp = calc_flip_ratio_by_iint(
iint_plus, iint_minus, c_lambda2=None, iint_2hkl=None,
flag_iint_plus=True, flag_iint_minus=True,
flag_c_lambda2=False, flag_iint_2hkl=False)
l_model_value.append(model_exp)
der_model_int_plus = numpy.expand_dims(dder_model_exp["iint_plus"], axis=0)
der_model_int_minus = numpy.expand_dims(dder_model_exp["iint_minus"], axis=0)
if channel_plus_minus:
der_model_den_pm_exp = (
(mem_col_exp.real*numpy.expand_dims(
der_model_int_plus*der_int_plus_fm_perp_real +
der_model_int_minus*der_int_minus_fm_perp_real, axis=2)
).sum(axis=0) +
(mem_col_exp.imag*numpy.expand_dims(
der_model_int_plus*der_int_plus_fm_perp_imag +
der_model_int_minus*der_int_minus_fm_perp_imag, axis=2)
).sum(axis=0))
l_der_model_den_pm.append(der_model_den_pm_exp)
if channel_chi:
der_model_den_chi_exp = (
(mem_chi_exp.real*numpy.expand_dims(
der_model_int_plus*der_int_plus_fm_perp_real +
der_model_int_minus*der_int_minus_fm_perp_real, axis=2)
).sum(axis=0) +
(mem_chi_exp.imag*numpy.expand_dims(
der_model_int_plus*der_int_plus_fm_perp_imag +
der_model_int_minus*der_int_minus_fm_perp_imag, axis=2)
).sum(axis=0))
l_der_model_den_chi.append(der_model_den_chi_exp)
model_value = numpy.concatenate(l_model_value, axis=0)
diff_value = (exp_value_sigma[0]-model_value)/exp_value_sigma[1]
c = numpy.square(diff_value).sum(axis=0)/diff_value.shape[0]
if channel_plus_minus:
der_model_den_pm = numpy.concatenate(l_der_model_den_pm, axis=0)
der_c_den_pm = (-2.)/diff_value.shape[0] * (
numpy.expand_dims((diff_value/exp_value_sigma[1]),axis=1) *
der_model_den_pm).sum(axis=0)
der_c_den_col = numpy.stack([magnetization_plus * der_c_den_pm, magnetization_minus * der_c_den_pm], axis=0)
if channel_chi:
der_model_den_chi = numpy.concatenate(l_der_model_den_chi, axis=0)
der_c_den_chi = (-2.)/diff_value.shape[0] * (
numpy.expand_dims((diff_value/exp_value_sigma[1]),axis=1) *
der_model_den_chi).sum(axis=0)
if c > c_previous:
parameter_lambda = 0.5 * parameter_lambda
c = c_previous
if channel_plus_minus:
density_col = numpy.copy(density_col_previous)
der_c_den_col = der_c_den_col_previous
if channel_chi:
density_chi = numpy.copy(density_chi_previous)
der_c_den_chi = der_c_den_chi_previous
else:
c_previous = c
parameter_lambda = 1.03 * parameter_lambda
if channel_plus_minus:
density_col_previous = numpy.copy(density_col)
der_c_den_col_previous = der_c_den_col
if channel_chi:
density_chi_previous = numpy.copy(density_chi)
der_c_den_chi_previous = der_c_den_chi
print(f"Iteration {iteration:5}, lambda {parameter_lambda*1e6:.3f}*10^-6, chi_sq: {c:.2f} ", end='\r')
if channel_plus_minus:
coeff = (parameter_lambda*number_unit_cell/(c_desired*volume_unit_cell))/point_multiplicity_col
hh = (density_col+delta_density)*numpy.exp(-coeff*der_c_den_col)-delta_density
hh = numpy.where(hh>0, hh, 0)
density_col_next = renormailize_density_col(hh, point_multiplicity_col, volume_unit_cell, number_unit_cell)
if channel_chi:
coeff = (parameter_lambda*number_unit_cell/(c_desired*volume_unit_cell))*atom_multiplicity_auc_chi/point_multiplicity_chi
hh = (density_chi+delta_density)*numpy.exp(-coeff*der_c_den_chi)-delta_density
hh = numpy.where(hh>0, hh, 0)
density_chi_next = renormailize_density_chi(hh, point_multiplicity_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)
if iteration >= iteration_max:
flag_next = False
print(f"Maximal number of iteration is reached ({iteration:}). ", end='\n')
if parameter_lambda < parameter_lambda_min:
flag_next = False
print(f"Minimal value of parameter lambda {parameter_lambda*1e6:.3f}*10^-6 is reached at iteration {iteration:}. ", end='\n')
if c <= c_desired:
flag_next = False
print(f"Desired value is reached at iteration {iteration:}. ", end='\n')
c_best = c_previous
print(f"Chi_sq best is {c_best:.2f}")
if channel_plus_minus:
density_col_best = numpy.copy(density_col_previous)
dict_in_out["density_channel_plus_minus"] = density_col_best
if channel_chi:
density_chi_best = numpy.copy(density_chi_previous)
dict_in_out["density_channel_chi"] = density_chi
# **Save to .den file**
if channel_plus_minus and (file_spin_density is not None):
spin_density = density_col_best * numpy.array([[magnetization_plus, ], [magnetization_minus, ]], dtype=float)
save_spin_density_into_file(file_spin_density, index_auc_col, spin_density, n_abc, unit_cell_parameters,
reduced_symm_elems, translation_elems, centrosymmetry, centrosymmetry_position)
print(f"\nReconstructed spin density is written in file '{file_spin_density:}'.")
if channel_chi and (file_magnetization_density is not None):
spin_density = numpy.stack([density_chi_best, numpy.zeros_like(density_chi_best)], axis=0)
save_spin_density_into_file(file_magnetization_density, index_auc_chi, spin_density, n_abc, unit_cell_parameters,
reduced_symm_elems, translation_elems, centrosymmetry, centrosymmetry_position)
print(f"\nReconstructed magnetization density is written in file '{file_magnetization_density:}'.")
def mempy_susceptibility_refinement(dict_channel_chi, dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out):
print("****************************************")
print("Susceptibility refinement (module MEMPy)")
print("****************************************")
number_points = numpy.prod(dict_mem_parameters["points_abc"])
flag_asymmetry = dict_mem_parameters["flag_asymmetry"]
channel_plus_minus = dict_mem_parameters["channel_plus_minus"]
channel_chi = dict_mem_parameters["channel_chi"]
print(f"Channel plus/minus is {channel_plus_minus:}")
print("ATTENTION: Channel plus/minus is not taken into account.")
print(f"Channel chi is {channel_chi:}")
print(f"Flag asymmetry is {flag_asymmetry:}")
if channel_plus_minus:
magnetization_plus = dict_mem_parameters["magnetization_plus"]
magnetization_minus = dict_mem_parameters["magnetization_minus"]
symm_elem_channel_chi = dict_channel_chi["symm_elem_channel_chi"]
atom_multiplicity_channel_chi = dict_channel_chi["atom_multiplicity_channel_chi"]
density_channel_chi = dict_channel_chi["density_channel_chi"]
point_multiplicity_channel_chi = dict_channel_chi["point_multiplicity_channel_chi"]
unit_cell_parameters = dict_crystal["unit_cell_parameters"]
full_symm_elems = dict_crystal["full_symm_elems"]
atom_fract_xyz = dict_crystal["atom_fract_xyz"]
atom_para_sc_chi = dict_crystal["atom_para_sc_chi"]
atom_para_index = dict_crystal["atom_para_index"]
atom_para_label = dict_crystal["atom_para_label"]
atom_para_susceptibility = dict_crystal["atom_para_susceptibility"]
flags_atom_para_susceptibility = dict_crystal["flags_atom_para_susceptibility"]
print(f"Number of refined parameters is {flags_atom_para_susceptibility.sum():}.")
if flags_atom_para_susceptibility.sum() == 0:
print("There is no refined susceptibility parameters.")
return
atom_para_fract_xyz = atom_fract_xyz[:, atom_para_index]
n_atom_para = atom_para_susceptibility.shape[1]
print("Preliminary calculations of chi atoms ...", end="\r")
l_exp_value_sigma = []
for dict_diffrn in l_dict_diffrn:
flag_use_precalculated_data = False
index_hkl = dict_diffrn["index_hkl"]
diffrn_dict_in_out = {"index_hkl": index_hkl}
chi_atoms = calc_chi_atoms(
unit_cell_parameters, number_points, full_symm_elems,
index_hkl, atom_para_fract_xyz, atom_para_sc_chi,
symm_elem_channel_chi, point_multiplicity_channel_chi, density_channel_chi)
diffrn_dict_in_out["chi_atoms"] = chi_atoms
eq_ccs, dder = calc_eq_ccs_by_unit_cell_parameters(index_hkl, unit_cell_parameters)
vp, dder = calc_vv_as_v1_v2_v1(eq_ccs)
diffrn_dict_in_out["vp"] = vp
f_nucl, dder = calc_f_nucl_by_dictionary(
dict_crystal, diffrn_dict_in_out, flag_use_precalculated_data=flag_use_precalculated_data)
diffrn_dict_in_out["f_nucl"] = f_nucl
dict_in_out["dict_in_out_"+dict_diffrn['type_name']] = diffrn_dict_in_out
flip_ratio_es = dict_diffrn["flip_ratio_es"]
if flag_asymmetry:
asymmetry_e = (flip_ratio_es[0] -1.)/(flip_ratio_es[0] + 1.)
asymmetry_s = numpy.sqrt(2.)*flip_ratio_es[1] * numpy.sqrt(numpy.square(flip_ratio_es[0]) + 1.)/numpy.square(flip_ratio_es[0] + 1.)
asymmetry_es = numpy.stack([asymmetry_e, asymmetry_s], axis=0)
l_exp_value_sigma.append(asymmetry_es)
else:
l_exp_value_sigma.append(flip_ratio_es)
exp_value_sigma = numpy.concatenate(l_exp_value_sigma, axis=1)
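    # Objective function for the fit: the trial parameters are written in place
    # into atom_para_susceptibility through the refinement-flag mask, and the
    # unreduced chi^2 summed over all reflections of all experiments is returned.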
def calc_chi_sq(param):
atom_para_susceptibility[flags_atom_para_susceptibility] = param
model_value = calc_model_value_by_precalculated_data(atom_para_susceptibility, unit_cell_parameters, flag_asymmetry, dict_in_out, l_dict_diffrn)
chi_sq = numpy.square((model_value-exp_value_sigma[0])/exp_value_sigma[1]).sum()
return chi_sq
param_0 = atom_para_susceptibility[flags_atom_para_susceptibility]
chi_sq_per_n = calc_chi_sq(param_0)/exp_value_sigma.shape[1]
print(70*" ")
print("Before susceptibility refinement")
print("Susceptibility tensor:")
for ind_at, label in enumerate(atom_para_label):
print(f"{label:5} {atom_para_susceptibility[0, ind_at]:.5f} {atom_para_susceptibility[1, ind_at]:.5f} {atom_para_susceptibility[2, ind_at]:.5f} {atom_para_susceptibility[3, ind_at]:.5f} {atom_para_susceptibility[4, ind_at]:.5f} {atom_para_susceptibility[5, ind_at]:.5f}")
print(f"chi_sq_per_n is {chi_sq_per_n:.2f}.")
print("Minimization procedure ...", end="\r")
res = scipy.optimize.minimize(calc_chi_sq, param_0, method="Nelder-Mead")
apss = None
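    # The error estimate depends on what the optimizer reports: gradient-based
    # methods expose "hess_inv", whereas Nelder-Mead (used above) returns
    # "final_simplex", so the simplex-based branch below is normally taken.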
if "hess_inv" in res.keys():
hess_inv = res["hess_inv"]
dict_in_out["hess_inv"] = hess_inv
sigma_p = numpy.sqrt(numpy.abs(numpy.diag(hess_inv)))
atom_para_susceptibility_sigma = numpy.zeros_like(atom_para_susceptibility)
atom_para_susceptibility_sigma[flags_atom_para_susceptibility] = sigma_p
apss = (atom_para_sc_chi * numpy.expand_dims(atom_para_susceptibility_sigma, axis=0)).sum(axis=1)
dict_in_out["atom_para_susceptibility_sigma"] = apss
elif "final_simplex" in res.keys():
n = exp_value_sigma.shape[1]
m_error, dist_hh = error_estimation_simplex(
res["final_simplex"][0], res["final_simplex"][1], calc_chi_sq)
l_sigma = []
for i, val_2 in zip(range(m_error.shape[0]), dist_hh):
            # slightly changed definition: n is used here instead of (n - k)
error = (abs(m_error[i, i])*1./n)**0.5
if m_error[i, i] < 0.:
pass
# warn("Negative diagonal elements of Hessian.", UserWarning)
if val_2 > error:
pass
# warn("Minimum is not found.", UserWarning)
l_sigma.append(max(error, val_2))
sigma_p = numpy.array(l_sigma)
atom_para_susceptibility_sigma = numpy.zeros_like(atom_para_susceptibility)
atom_para_susceptibility_sigma[flags_atom_para_susceptibility] = sigma_p
apss = (atom_para_sc_chi * numpy.expand_dims(atom_para_susceptibility_sigma, axis=0)).sum(axis=1)
dict_in_out["atom_para_susceptibility_sigma"] = apss
print(sigma_p)
print(70*" ")
chi_sq_per_n = calc_chi_sq(res.x)/exp_value_sigma.shape[1]
atom_para_susceptibility[flags_atom_para_susceptibility] = res.x
atom_para_susceptibility = (atom_para_sc_chi * numpy.expand_dims(atom_para_susceptibility, axis=0)).sum(axis=1)
dict_crystal["atom_para_susceptibility"] = atom_para_susceptibility
print("After susceptibility refinement")
print("Susceptibility tensor:")
for ind_at, label in enumerate(atom_para_label):
print(f"{label:5} {atom_para_susceptibility[0, ind_at]:8.5f} {atom_para_susceptibility[1, ind_at]:8.5f} {atom_para_susceptibility[2, ind_at]:8.5f} {atom_para_susceptibility[3, ind_at]:8.5f} {atom_para_susceptibility[4, ind_at]:8.5f} {atom_para_susceptibility[5, ind_at]:8.5f}")
if apss is not None:
print(f"sigma {apss[0, ind_at]:8.5f} {apss[1, ind_at]:8.5f} {apss[2, ind_at]:8.5f} {apss[3, ind_at]:8.5f} {apss[4, ind_at]:8.5f} {apss[5, ind_at]:8.5f}")
print(f"chi_sq_per_n is {chi_sq_per_n:.2f}.")
print(70*"*")
print("End of MEMPy procedure for susceptibility refinement")
print(70*"*")
return
def mempy_cycle_density_susceptibility(dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out,
parameter_lambda:float=1.e-5, iteration_max:int=1000, parameter_lambda_min:float=1.e-9, delta_density:float=1.e-5, n_cycle:int=10):
print(70*"*")
print("MEMPy: cycle iteration")
print(70*"*")
print(f"Number of cycles is {n_cycle:}")
print(70*" ")
for i_cycle in range(n_cycle):
print(f"Cycle {i_cycle+1:}")
print(len(f"Cycle {i_cycle+1:}")*"-")
dict_in_out_den = {}
mempy_reconstruction_by_dictionary(dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out_den,
parameter_lambda=parameter_lambda, iteration_max=iteration_max, parameter_lambda_min=parameter_lambda_min, delta_density=delta_density)
dict_channel_chi = {
'atom_multiplicity_channel_chi': dict_in_out_den['atom_multiplicity_channel_chi'],
'point_multiplicity_channel_chi': dict_in_out_den['point_multiplicity_channel_chi'],
'symm_elem_channel_chi': dict_in_out_den['symm_elem_channel_chi'],
'susceptibility_channel_chi': dict_in_out_den['susceptibility_channel_chi'],
'density_channel_chi': dict_in_out_den['density_channel_chi'],
}
dict_in_out_susc = {}
mempy_susceptibility_refinement(dict_channel_chi, dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out_susc)
print(70*" ")
dict_in_out["dict_in_out_den"] = dict_in_out_den
dict_in_out["dict_in_out_susc"] = dict_in_out_susc
return
|
[
"numpy.prod",
"cryspy.A_functions_base.orbital_functions.calc_density_spherical",
"cryspy.A_functions_base.flip_ratio.calc_flip_ratio_by_iint",
"numpy.sqrt",
"cryspy.A_functions_base.matrix_operations.calc_vv_as_v1_v2_v1",
"numpy.logical_not",
"numpy.array",
"cryspy.A_functions_base.flip_ratio.calc_asymmetry_by_iint",
"cryspy.A_functions_base.mempy.renormailize_density_col",
"cryspy.A_functions_base.mempy.calc_mem_col",
"cryspy.A_functions_base.flip_ratio.calc_iint",
"cryspy.A_functions_base.mempy.calc_model_value_by_precalculated_data",
"numpy.where",
"numpy.exp",
"numpy.stack",
"cryspy.A_functions_base.unit_cell.calc_volume_uc_by_unit_cell_parameters",
"numpy.concatenate",
"cryspy.A_functions_base.extinction.calc_extinction_sphere",
"cryspy.A_functions_base.mempy.form_basins",
"cryspy.A_functions_base.mempy.calc_point_susceptibility",
"cryspy.A_functions_base.mempy.get_uniform_density_col",
"cryspy.A_functions_base.mempy.calc_chi_atoms",
"scipy.optimize.minimize",
"numpy.square",
"cryspy.A_functions_base.unit_cell.calc_eq_ccs_by_unit_cell_parameters",
"cryspy.A_functions_base.structure_factor.calc_f_nucl_by_dictionary",
"cryspy.A_functions_base.symmetry_elements.calc_asymmetric_unit_cell_indexes",
"cryspy.A_functions_base.mempy.calc_symm_elem_points_by_index_points",
"numpy.copy",
"cryspy.A_functions_base.unit_cell.calc_sthovl_by_unit_cell_parameters",
"cryspy.A_functions_base.mempy.calc_mem_chi",
"numpy.arcsin",
"cryspy.A_functions_base.mempy.get_uniform_density_chi",
"numpy.diag",
"cryspy.A_functions_base.mempy.renormailize_density_chi",
"numpy.zeros",
"cryspy.A_functions_base.function_1_error_simplex.error_estimation_simplex",
"numpy.expand_dims",
"cryspy.A_functions_base.mempy.save_spin_density_into_file",
"numpy.zeros_like"
] |
[((4346, 4403), 'cryspy.A_functions_base.symmetry_elements.calc_asymmetric_unit_cell_indexes', 'calc_asymmetric_unit_cell_indexes', (['n_abc', 'full_symm_elems'], {}), '(n_abc, full_symm_elems)\n', (4379, 4403), False, 'from cryspy.A_functions_base.symmetry_elements import calc_asymmetric_unit_cell_indexes\n'), ((4424, 4479), 'cryspy.A_functions_base.mempy.calc_symm_elem_points_by_index_points', 'calc_symm_elem_points_by_index_points', (['index_auc', 'n_abc'], {}), '(index_auc, n_abc)\n', (4461, 4479), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((8220, 8237), 'numpy.prod', 'numpy.prod', (['n_abc'], {}), '(n_abc)\n', (8230, 8237), False, 'import numpy\n'), ((12814, 12858), 'numpy.concatenate', 'numpy.concatenate', (['l_exp_value_sigma'], {'axis': '(1)'}), '(l_exp_value_sigma, axis=1)\n', (12831, 12858), False, 'import numpy\n'), ((24009, 24054), 'numpy.prod', 'numpy.prod', (["dict_mem_parameters['points_abc']"], {}), "(dict_mem_parameters['points_abc'])\n", (24019, 24054), False, 'import numpy\n'), ((27374, 27418), 'numpy.concatenate', 'numpy.concatenate', (['l_exp_value_sigma'], {'axis': '(1)'}), '(l_exp_value_sigma, axis=1)\n', (27391, 27418), False, 'import numpy\n'), ((28490, 28557), 'scipy.optimize.minimize', 'scipy.optimize.minimize', (['calc_chi_sq', 'param_0'], {'method': '"""Nelder-Mead"""'}), "(calc_chi_sq, param_0, method='Nelder-Mead')\n", (28513, 28557), False, 'import scipy\n'), ((3423, 3520), 'cryspy.A_functions_base.unit_cell.calc_volume_uc_by_unit_cell_parameters', 'calc_volume_uc_by_unit_cell_parameters', (['unit_cell_parameters'], {'flag_unit_cell_parameters': '(False)'}), '(unit_cell_parameters,\n flag_unit_cell_parameters=False)\n', (3461, 3520), False, 'from cryspy.A_functions_base.unit_cell import calc_volume_uc_by_unit_cell_parameters, calc_sthovl_by_unit_cell_parameters, calc_eq_ccs_by_unit_cell_parameters\n'), ((4978, 5166), 'cryspy.A_functions_base.mempy.form_basins', 'form_basins', (['symm_elem_auc', 'full_symm_elems', 'unit_cell_parameters', 'atom_label[flag_atom_para]', 'atom_fract_xyz[:, flag_atom_para]', 'atom_multiplicity[flag_atom_para]', 'atom_para_label'], {}), '(symm_elem_auc, full_symm_elems, unit_cell_parameters,\n atom_label[flag_atom_para], atom_fract_xyz[:, flag_atom_para],\n atom_multiplicity[flag_atom_para], atom_para_label)\n', (4989, 5166), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((6388, 6415), 'numpy.logical_not', 'numpy.logical_not', (['flag_chi'], {}), '(flag_chi)\n', (6405, 6415), False, 'import numpy\n'), ((7852, 8049), 'cryspy.A_functions_base.mempy.calc_point_susceptibility', 'calc_point_susceptibility', (['unit_cell_parameters', 'atom_symm_elems_auc_chi', 'atom_label_auc_chi', 'atom_para_label', 'atom_para_susceptibility', 'atom_para_sc_chi', 'full_symm_elems', 'symm_elem_auc_chi'], {}), '(unit_cell_parameters, atom_symm_elems_auc_chi,\n atom_label_auc_chi, atom_para_label, atom_para_susceptibility,\n atom_para_sc_chi, full_symm_elems, 
symm_elem_auc_chi)\n', (7877, 8049), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((9544, 9631), 'cryspy.A_functions_base.mempy.get_uniform_density_col', 'get_uniform_density_col', (['point_multiplicity_col', 'volume_unit_cell', 'number_unit_cell'], {}), '(point_multiplicity_col, volume_unit_cell,\n number_unit_cell)\n', (9567, 9631), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((12117, 12237), 'cryspy.A_functions_base.structure_factor.calc_f_nucl_by_dictionary', 'calc_f_nucl_by_dictionary', (['dict_crystal', 'diffrn_dict_in_out'], {'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(dict_crystal, diffrn_dict_in_out,\n flag_use_precalculated_data=flag_use_precalculated_data)\n', (12142, 12237), False, 'from cryspy.A_functions_base.structure_factor import calc_f_nucl_by_dictionary\n'), ((12908, 12944), 'numpy.concatenate', 'numpy.concatenate', (['l_mem_col'], {'axis': '(1)'}), '(l_mem_col, axis=1)\n', (12925, 12944), False, 'import numpy\n'), ((12983, 13019), 'numpy.concatenate', 'numpy.concatenate', (['l_mem_chi'], {'axis': '(1)'}), '(l_mem_chi, axis=1)\n', (13000, 13019), False, 'import numpy\n'), ((13390, 13419), 'numpy.copy', 'numpy.copy', (['density_col_prior'], {}), '(density_col_prior)\n', (13400, 13419), False, 'import numpy\n'), ((13447, 13476), 'numpy.copy', 'numpy.copy', (['density_col_prior'], {}), '(density_col_prior)\n', (13457, 13476), False, 'import numpy\n'), ((13519, 13548), 'numpy.copy', 'numpy.copy', (['density_chi_prior'], {}), '(density_chi_prior)\n', (13529, 13548), False, 'import numpy\n'), ((13576, 13605), 'numpy.copy', 'numpy.copy', (['density_chi_prior'], {}), '(density_chi_prior)\n', (13586, 13605), False, 'import numpy\n'), ((13871, 13906), 'numpy.zeros_like', 'numpy.zeros_like', (['density_col_prior'], {}), '(density_col_prior)\n', (13887, 13906), False, 'import numpy\n'), ((13960, 13995), 'numpy.zeros_like', 'numpy.zeros_like', (['density_chi_prior'], {}), '(density_chi_prior)\n', (13976, 13995), False, 'import numpy\n'), ((19134, 19174), 'numpy.concatenate', 'numpy.concatenate', (['l_model_value'], {'axis': '(0)'}), '(l_model_value, axis=0)\n', (19151, 19174), False, 'import numpy\n'), ((22472, 22504), 'numpy.copy', 'numpy.copy', (['density_col_previous'], {}), '(density_col_previous)\n', (22482, 22504), False, 'import numpy\n'), ((22621, 22653), 'numpy.copy', 'numpy.copy', (['density_chi_previous'], {}), '(density_chi_previous)\n', (22631, 22653), False, 'import numpy\n'), ((22929, 23121), 'cryspy.A_functions_base.mempy.save_spin_density_into_file', 'save_spin_density_into_file', (['file_spin_density', 'index_auc_col', 'spin_density', 'n_abc', 'unit_cell_parameters', 'reduced_symm_elems', 'translation_elems', 'centrosymmetry', 'centrosymmetry_position'], {}), '(file_spin_density, index_auc_col, spin_density,\n n_abc, unit_cell_parameters, reduced_symm_elems, translation_elems,\n centrosymmetry, centrosymmetry_position)\n', (22956, 23121), False, 
'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((23392, 23593), 'cryspy.A_functions_base.mempy.save_spin_density_into_file', 'save_spin_density_into_file', (['file_magnetization_density', 'index_auc_chi', 'spin_density', 'n_abc', 'unit_cell_parameters', 'reduced_symm_elems', 'translation_elems', 'centrosymmetry', 'centrosymmetry_position'], {}), '(file_magnetization_density, index_auc_chi,\n spin_density, n_abc, unit_cell_parameters, reduced_symm_elems,\n translation_elems, centrosymmetry, centrosymmetry_position)\n', (23419, 23593), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((26097, 26299), 'cryspy.A_functions_base.mempy.calc_chi_atoms', 'calc_chi_atoms', (['unit_cell_parameters', 'number_points', 'full_symm_elems', 'index_hkl', 'atom_para_fract_xyz', 'atom_para_sc_chi', 'symm_elem_channel_chi', 'point_multiplicity_channel_chi', 'density_channel_chi'], {}), '(unit_cell_parameters, number_points, full_symm_elems,\n index_hkl, atom_para_fract_xyz, atom_para_sc_chi, symm_elem_channel_chi,\n point_multiplicity_channel_chi, density_channel_chi)\n', (26111, 26299), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((26415, 26483), 'cryspy.A_functions_base.unit_cell.calc_eq_ccs_by_unit_cell_parameters', 'calc_eq_ccs_by_unit_cell_parameters', (['index_hkl', 'unit_cell_parameters'], {}), '(index_hkl, unit_cell_parameters)\n', (26450, 26483), False, 'from cryspy.A_functions_base.unit_cell import calc_volume_uc_by_unit_cell_parameters, calc_sthovl_by_unit_cell_parameters, calc_eq_ccs_by_unit_cell_parameters\n'), ((26503, 26530), 'cryspy.A_functions_base.matrix_operations.calc_vv_as_v1_v2_v1', 'calc_vv_as_v1_v2_v1', (['eq_ccs'], {}), '(eq_ccs)\n', (26522, 26530), False, 'from cryspy.A_functions_base.matrix_operations import calc_vv_as_v1_v2_v1\n'), ((26597, 26717), 'cryspy.A_functions_base.structure_factor.calc_f_nucl_by_dictionary', 'calc_f_nucl_by_dictionary', (['dict_crystal', 'diffrn_dict_in_out'], {'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(dict_crystal, diffrn_dict_in_out,\n flag_use_precalculated_data=flag_use_precalculated_data)\n', (26622, 26717), False, 'from cryspy.A_functions_base.structure_factor import calc_f_nucl_by_dictionary\n'), ((27549, 27683), 'cryspy.A_functions_base.mempy.calc_model_value_by_precalculated_data', 'calc_model_value_by_precalculated_data', (['atom_para_susceptibility', 'unit_cell_parameters', 'flag_asymmetry', 'dict_in_out', 'l_dict_diffrn'], {}), '(atom_para_susceptibility,\n unit_cell_parameters, flag_asymmetry, dict_in_out, l_dict_diffrn)\n', (27587, 27683), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, 
calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((28788, 28830), 'numpy.zeros_like', 'numpy.zeros_like', (['atom_para_susceptibility'], {}), '(atom_para_susceptibility)\n', (28804, 28830), False, 'import numpy\n'), ((5618, 5751), 'cryspy.A_functions_base.mempy.form_basins', 'form_basins', (['symm_elem_auc', 'full_symm_elems', 'unit_cell_parameters', 'atom_label', 'atom_fract_xyz', 'atom_multiplicity', 'atom_para_label'], {}), '(symm_elem_auc, full_symm_elems, unit_cell_parameters,\n atom_label, atom_fract_xyz, atom_multiplicity, atom_para_label)\n', (5629, 5751), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((6793, 6814), 'numpy.copy', 'numpy.copy', (['index_auc'], {}), '(index_auc)\n', (6803, 6814), False, 'import numpy\n'), ((6848, 6878), 'numpy.copy', 'numpy.copy', (['point_multiplicity'], {}), '(point_multiplicity)\n', (6858, 6878), False, 'import numpy\n'), ((6907, 6932), 'numpy.copy', 'numpy.copy', (['symm_elem_auc'], {}), '(symm_elem_auc)\n', (6917, 6932), False, 'import numpy\n'), ((8395, 8529), 'cryspy.A_functions_base.mempy.get_uniform_density_chi', 'get_uniform_density_chi', (['point_multiplicity_chi', 'atom_label_auc_chi', 'atom_multiplicity_auc_chi', 'volume_unit_cell', 'number_unit_cell'], {}), '(point_multiplicity_chi, atom_label_auc_chi,\n atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)\n', (8418, 8529), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((8642, 8681), 'numpy.zeros_like', 'numpy.zeros_like', (['atom_distance_auc_chi'], {}), '(atom_distance_auc_chi)\n', (8658, 8681), False, 'import numpy\n'), ((9267, 9425), 'cryspy.A_functions_base.mempy.renormailize_density_chi', 'renormailize_density_chi', (['density_chi_prior', 'point_multiplicity_chi', 'atom_label_auc_chi', 'atom_multiplicity_auc_chi', 'volume_unit_cell', 'number_unit_cell'], {}), '(density_chi_prior, point_multiplicity_chi,\n atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell,\n number_unit_cell)\n', (9291, 9425), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((10929, 11195), 'cryspy.A_functions_base.mempy.calc_mem_col', 'calc_mem_col', (['index_hkl', 'unit_cell_parameters', 'eh_ccs', 'full_symm_elems', 'symm_elem_auc_col', 'volume_unit_cell', 'number_unit_cell'], {'point_multiplicity': 'point_multiplicity_col', 'dict_in_out': 'dict_in_out_col', 'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(index_hkl, unit_cell_parameters, eh_ccs, full_symm_elems,\n 
symm_elem_auc_col, volume_unit_cell, number_unit_cell,\n point_multiplicity=point_multiplicity_col, dict_in_out=dict_in_out_col,\n flag_use_precalculated_data=flag_use_precalculated_data)\n', (10941, 11195), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((11661, 11953), 'cryspy.A_functions_base.mempy.calc_mem_chi', 'calc_mem_chi', (['index_hkl', 'unit_cell_parameters', 'h_ccs', 'full_symm_elems', 'symm_elem_auc_chi', 'point_susceptibility', 'volume_unit_cell', 'number_unit_cell'], {'point_multiplicity': 'point_multiplicity_chi', 'dict_in_out': 'dict_in_out_chi', 'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(index_hkl, unit_cell_parameters, h_ccs, full_symm_elems,\n symm_elem_auc_chi, point_susceptibility, volume_unit_cell,\n number_unit_cell, point_multiplicity=point_multiplicity_chi,\n dict_in_out=dict_in_out_chi, flag_use_precalculated_data=\n flag_use_precalculated_data)\n', (11673, 11953), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((12616, 12663), 'numpy.stack', 'numpy.stack', (['[asymmetry_e, asymmetry_s]'], {'axis': '(0)'}), '([asymmetry_e, asymmetry_s], axis=0)\n', (12627, 12663), False, 'import numpy\n'), ((14137, 14165), 'numpy.copy', 'numpy.copy', (['density_col_next'], {}), '(density_col_next)\n', (14147, 14165), False, 'import numpy\n'), ((14216, 14244), 'numpy.copy', 'numpy.copy', (['density_chi_next'], {}), '(density_chi_next)\n', (14226, 14244), False, 'import numpy\n'), ((14537, 14580), 'numpy.zeros', 'numpy.zeros', (['index_hkl.shape'], {'dtype': 'complex'}), '(index_hkl.shape, dtype=complex)\n', (14548, 14580), False, 'import numpy\n'), ((16362, 16670), 'cryspy.A_functions_base.flip_ratio.calc_iint', 'calc_iint', (['beam_polarization', 'flipper_efficiency', 'f_nucl', 'f_m_perp', 'matrix_u'], {'func_extinction': 'func_extinction', 'flag_beam_polarization': '(False)', 'flag_flipper_efficiency': '(False)', 'flag_f_nucl': '(False)', 'flag_f_m_perp': '(True)', 'dict_in_out': 'dict_in_out', 'flag_use_precalculated_data': 'flag_use_precalculated_data'}), '(beam_polarization, flipper_efficiency, f_nucl, f_m_perp, matrix_u,\n func_extinction=func_extinction, flag_beam_polarization=False,\n flag_flipper_efficiency=False, flag_f_nucl=False, flag_f_m_perp=True,\n dict_in_out=dict_in_out, flag_use_precalculated_data=\n flag_use_precalculated_data)\n', (16371, 16670), False, 'from cryspy.A_functions_base.flip_ratio import calc_iint, calc_flip_ratio_by_iint, calc_asymmetry_by_iint\n'), ((17746, 17800), 'numpy.expand_dims', 'numpy.expand_dims', (["dder_model_exp['iint_plus']"], {'axis': '(0)'}), "(dder_model_exp['iint_plus'], axis=0)\n", (17763, 17800), False, 'import numpy\n'), ((17835, 17890), 'numpy.expand_dims', 'numpy.expand_dims', (["dder_model_exp['iint_minus']"], {'axis': '(0)'}), "(dder_model_exp['iint_minus'], axis=0)\n", (17852, 17890), False, 'import numpy\n'), ((19381, 19426), 'numpy.concatenate', 'numpy.concatenate', (['l_der_model_den_pm'], {'axis': 
'(0)'}), '(l_der_model_den_pm, axis=0)\n', (19398, 19426), False, 'import numpy\n'), ((19635, 19731), 'numpy.stack', 'numpy.stack', (['[magnetization_plus * der_c_den_pm, magnetization_minus * der_c_den_pm]'], {'axis': '(0)'}), '([magnetization_plus * der_c_den_pm, magnetization_minus *\n der_c_den_pm], axis=0)\n', (19646, 19731), False, 'import numpy\n'), ((19785, 19831), 'numpy.concatenate', 'numpy.concatenate', (['l_der_model_den_chi'], {'axis': '(0)'}), '(l_der_model_den_chi, axis=0)\n', (19802, 19831), False, 'import numpy\n'), ((21193, 21219), 'numpy.where', 'numpy.where', (['(hh > 0)', 'hh', '(0)'], {}), '(hh > 0, hh, 0)\n', (21204, 21219), False, 'import numpy\n'), ((21249, 21341), 'cryspy.A_functions_base.mempy.renormailize_density_col', 'renormailize_density_col', (['hh', 'point_multiplicity_col', 'volume_unit_cell', 'number_unit_cell'], {}), '(hh, point_multiplicity_col, volume_unit_cell,\n number_unit_cell)\n', (21273, 21341), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((21605, 21631), 'numpy.where', 'numpy.where', (['(hh > 0)', 'hh', '(0)'], {}), '(hh > 0, hh, 0)\n', (21616, 21631), False, 'import numpy\n'), ((21661, 21800), 'cryspy.A_functions_base.mempy.renormailize_density_chi', 'renormailize_density_chi', (['hh', 'point_multiplicity_chi', 'atom_label_auc_chi', 'atom_multiplicity_auc_chi', 'volume_unit_cell', 'number_unit_cell'], {}), '(hh, point_multiplicity_chi, atom_label_auc_chi,\n atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)\n', (21685, 21800), False, 'from cryspy.A_functions_base.mempy import calc_mem_col, calc_mem_chi, calc_symm_elem_points_by_index_points, get_uniform_density_col, renormailize_density_col, save_spin_density_into_file, form_basins, calc_point_susceptibility, get_uniform_density_chi, renormailize_density_chi, calc_model_value_by_precalculated_data, calc_chi_atoms\n'), ((22845, 22916), 'numpy.array', 'numpy.array', (['[[magnetization_plus], [magnetization_minus]]'], {'dtype': 'float'}), '([[magnetization_plus], [magnetization_minus]], dtype=float)\n', (22856, 22916), False, 'import numpy\n'), ((27187, 27234), 'numpy.stack', 'numpy.stack', (['[asymmetry_e, asymmetry_s]'], {'axis': '(0)'}), '([asymmetry_e, asymmetry_s], axis=0)\n', (27198, 27234), False, 'import numpy\n'), ((29183, 29274), 'cryspy.A_functions_base.function_1_error_simplex.error_estimation_simplex', 'error_estimation_simplex', (["res['final_simplex'][0]", "res['final_simplex'][1]", 'calc_chi_sq'], {}), "(res['final_simplex'][0], res['final_simplex'][1],\n calc_chi_sq)\n", (29207, 29274), False, 'from cryspy.A_functions_base.function_1_error_simplex import error_estimation_simplex\n'), ((29798, 29818), 'numpy.array', 'numpy.array', (['l_sigma'], {}), '(l_sigma)\n', (29809, 29818), False, 'import numpy\n'), ((29860, 29902), 'numpy.zeros_like', 'numpy.zeros_like', (['atom_para_susceptibility'], {}), '(atom_para_susceptibility)\n', (29876, 29902), False, 'import numpy\n'), ((4756, 4793), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_label'], {'axis': '(1)'}), '(atom_label, axis=1)\n', (4773, 4793), False, 'import numpy\n'), ((4797, 4839), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_para_label'], {'axis': '(0)'}), '(atom_para_label, axis=0)\n', (4814, 4839), False, 
'import numpy\n'), ((8970, 9146), 'cryspy.A_functions_base.orbital_functions.calc_density_spherical', 'calc_density_spherical', (['atom_distance_auc_chi[flag_atom]', "dict_shell['core_population']", "dict_shell['core_coeff']", "dict_shell['core_zeta']", "dict_shell['core_n']", 'kappa'], {}), "(atom_distance_auc_chi[flag_atom], dict_shell[\n 'core_population'], dict_shell['core_coeff'], dict_shell['core_zeta'],\n dict_shell['core_n'], kappa)\n", (8992, 9146), False, 'from cryspy.A_functions_base.orbital_functions import calc_density_spherical\n'), ((12553, 12589), 'numpy.square', 'numpy.square', (['(flip_ratio_es[0] + 1.0)'], {}), '(flip_ratio_es[0] + 1.0)\n', (12565, 12589), False, 'import numpy\n'), ((15488, 15593), 'cryspy.A_functions_base.unit_cell.calc_sthovl_by_unit_cell_parameters', 'calc_sthovl_by_unit_cell_parameters', (['index_hkl', 'unit_cell_parameters'], {'flag_unit_cell_parameters': '(False)'}), '(index_hkl, unit_cell_parameters,\n flag_unit_cell_parameters=False)\n', (15523, 15593), False, 'from cryspy.A_functions_base.unit_cell import calc_volume_uc_by_unit_cell_parameters, calc_sthovl_by_unit_cell_parameters, calc_eq_ccs_by_unit_cell_parameters\n'), ((15918, 16194), 'cryspy.A_functions_base.extinction.calc_extinction_sphere', 'calc_extinction_sphere', (['f_sq', 'extinction_radius', 'extinction_mosaicity', 'volume_unit_cell', 'cos_2theta', 'wavelength', 'extinction_model'], {'flag_f_sq': '(False)', 'flag_radius': '(False)', 'flag_mosaicity': '(False)', 'flag_volume_unit_cell': '(False)', 'flag_cos_2theta': '(False)', 'flag_wavelength': '(False)'}), '(f_sq, extinction_radius, extinction_mosaicity,\n volume_unit_cell, cos_2theta, wavelength, extinction_model, flag_f_sq=\n False, flag_radius=False, flag_mosaicity=False, flag_volume_unit_cell=\n False, flag_cos_2theta=False, flag_wavelength=False)\n', (15940, 16194), False, 'from cryspy.A_functions_base.extinction import calc_extinction_sphere\n'), ((17151, 17324), 'cryspy.A_functions_base.flip_ratio.calc_asymmetry_by_iint', 'calc_asymmetry_by_iint', (['iint_plus', 'iint_minus'], {'c_lambda2': 'None', 'iint_2hkl': 'None', 'flag_iint_plus': '(True)', 'flag_iint_minus': '(True)', 'flag_c_lambda2': '(False)', 'flag_iint_2hkl': '(False)'}), '(iint_plus, iint_minus, c_lambda2=None, iint_2hkl=\n None, flag_iint_plus=True, flag_iint_minus=True, flag_c_lambda2=False,\n flag_iint_2hkl=False)\n', (17173, 17324), False, 'from cryspy.A_functions_base.flip_ratio import calc_iint, calc_flip_ratio_by_iint, calc_asymmetry_by_iint\n'), ((17440, 17614), 'cryspy.A_functions_base.flip_ratio.calc_flip_ratio_by_iint', 'calc_flip_ratio_by_iint', (['iint_plus', 'iint_minus'], {'c_lambda2': 'None', 'iint_2hkl': 'None', 'flag_iint_plus': '(True)', 'flag_iint_minus': '(True)', 'flag_c_lambda2': '(False)', 'flag_iint_2hkl': '(False)'}), '(iint_plus, iint_minus, c_lambda2=None, iint_2hkl=\n None, flag_iint_plus=True, flag_iint_minus=True, flag_c_lambda2=False,\n flag_iint_2hkl=False)\n', (17463, 17614), False, 'from cryspy.A_functions_base.flip_ratio import calc_iint, calc_flip_ratio_by_iint, calc_asymmetry_by_iint\n'), ((20187, 20219), 'numpy.copy', 'numpy.copy', (['density_col_previous'], {}), '(density_col_previous)\n', (20197, 20219), False, 'import numpy\n'), ((20333, 20365), 'numpy.copy', 'numpy.copy', (['density_chi_previous'], {}), '(density_chi_previous)\n', (20343, 20365), False, 'import numpy\n'), ((20591, 20614), 'numpy.copy', 'numpy.copy', (['density_col'], {}), '(density_col)\n', (20601, 20614), False, 'import numpy\n'), ((20737, 20760), 
'numpy.copy', 'numpy.copy', (['density_chi'], {}), '(density_chi)\n', (20747, 20760), False, 'import numpy\n'), ((23339, 23373), 'numpy.zeros_like', 'numpy.zeros_like', (['density_chi_best'], {}), '(density_chi_best)\n', (23355, 23373), False, 'import numpy\n'), ((27124, 27160), 'numpy.square', 'numpy.square', (['(flip_ratio_es[0] + 1.0)'], {}), '(flip_ratio_es[0] + 1.0)\n', (27136, 27160), False, 'import numpy\n'), ((27702, 27771), 'numpy.square', 'numpy.square', (['((model_value - exp_value_sigma[0]) / exp_value_sigma[1])'], {}), '((model_value - exp_value_sigma[0]) / exp_value_sigma[1])\n', (27714, 27771), False, 'import numpy\n'), ((28724, 28744), 'numpy.diag', 'numpy.diag', (['hess_inv'], {}), '(hess_inv)\n', (28734, 28744), False, 'import numpy\n'), ((30380, 30431), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_para_susceptibility'], {'axis': '(0)'}), '(atom_para_susceptibility, axis=0)\n', (30397, 30431), False, 'import numpy\n'), ((14715, 14820), 'numpy.expand_dims', 'numpy.expand_dims', (['(magnetization_plus * density_col[0] + magnetization_minus * density_col[1])'], {'axis': '(0)'}), '(magnetization_plus * density_col[0] + magnetization_minus *\n density_col[1], axis=0)\n', (14732, 14820), False, 'import numpy\n'), ((15630, 15663), 'numpy.arcsin', 'numpy.arcsin', (['(sthovl * wavelength)'], {}), '(sthovl * wavelength)\n', (15642, 15663), False, 'import numpy\n'), ((19261, 19285), 'numpy.square', 'numpy.square', (['diff_value'], {}), '(diff_value)\n', (19273, 19285), False, 'import numpy\n'), ((21130, 21163), 'numpy.exp', 'numpy.exp', (['(-coeff * der_c_den_col)'], {}), '(-coeff * der_c_den_col)\n', (21139, 21163), False, 'import numpy\n'), ((21542, 21575), 'numpy.exp', 'numpy.exp', (['(-coeff * der_c_den_chi)'], {}), '(-coeff * der_c_den_chi)\n', (21551, 21575), False, 'import numpy\n'), ((28947, 29004), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_para_susceptibility_sigma'], {'axis': '(0)'}), '(atom_para_susceptibility_sigma, axis=0)\n', (28964, 29004), False, 'import numpy\n'), ((12471, 12486), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (12481, 12486), False, 'import numpy\n'), ((27042, 27057), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (27052, 27057), False, 'import numpy\n'), ((30019, 30076), 'numpy.expand_dims', 'numpy.expand_dims', (['atom_para_susceptibility_sigma'], {'axis': '(0)'}), '(atom_para_susceptibility_sigma, axis=0)\n', (30036, 30076), False, 'import numpy\n'), ((12516, 12546), 'numpy.square', 'numpy.square', (['flip_ratio_es[0]'], {}), '(flip_ratio_es[0])\n', (12528, 12546), False, 'import numpy\n'), ((19500, 19558), 'numpy.expand_dims', 'numpy.expand_dims', (['(diff_value / exp_value_sigma[1])'], {'axis': '(1)'}), '(diff_value / exp_value_sigma[1], axis=1)\n', (19517, 19558), False, 'import numpy\n'), ((19906, 19964), 'numpy.expand_dims', 'numpy.expand_dims', (['(diff_value / exp_value_sigma[1])'], {'axis': '(1)'}), '(diff_value / exp_value_sigma[1], axis=1)\n', (19923, 19964), False, 'import numpy\n'), ((27087, 27117), 'numpy.square', 'numpy.square', (['flip_ratio_es[0]'], {}), '(flip_ratio_es[0])\n', (27099, 27117), False, 'import numpy\n'), ((18006, 18135), 'numpy.expand_dims', 'numpy.expand_dims', (['(der_model_int_plus * der_int_plus_fm_perp_real + der_model_int_minus *\n der_int_minus_fm_perp_real)'], {'axis': '(2)'}), '(der_model_int_plus * der_int_plus_fm_perp_real + \n der_model_int_minus * der_int_minus_fm_perp_real, axis=2)\n', (18023, 18135), False, 'import numpy\n'), ((18242, 18371), 'numpy.expand_dims', 
'numpy.expand_dims', (['(der_model_int_plus * der_int_plus_fm_perp_imag + der_model_int_minus *\n der_int_minus_fm_perp_imag)'], {'axis': '(2)'}), '(der_model_int_plus * der_int_plus_fm_perp_imag + \n der_model_int_minus * der_int_minus_fm_perp_imag, axis=2)\n', (18259, 18371), False, 'import numpy\n'), ((18612, 18741), 'numpy.expand_dims', 'numpy.expand_dims', (['(der_model_int_plus * der_int_plus_fm_perp_real + der_model_int_minus *\n der_int_minus_fm_perp_real)'], {'axis': '(2)'}), '(der_model_int_plus * der_int_plus_fm_perp_real + \n der_model_int_minus * der_int_minus_fm_perp_real, axis=2)\n', (18629, 18741), False, 'import numpy\n'), ((18848, 18977), 'numpy.expand_dims', 'numpy.expand_dims', (['(der_model_int_plus * der_int_plus_fm_perp_imag + der_model_int_minus *\n der_int_minus_fm_perp_imag)'], {'axis': '(2)'}), '(der_model_int_plus * der_int_plus_fm_perp_imag + \n der_model_int_minus * der_int_minus_fm_perp_imag, axis=2)\n', (18865, 18977), False, 'import numpy\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from model.model import BaseNet
from model.config import arguments
from dataset.dataset import FlowerData
def get_cm_each_round(args, device, dataloader_test, round_num: int=None, all_classes: bool=False):
"""confusion matrix and probabilities each round"""
network = BaseNet(num_class=args.class_num)
if all_classes:
network.load_state_dict(torch.load('../checkpoint/all_class.pth'))
else:
network.load_state_dict(torch.load(
'../checkpoint/round%.2d_epoch%.4d.pth' % (round_num, args.epochs)))
network = network.to(device).half()
network.eval()
prob = np.zeros((args.class_num * args.num_image_per_class // 2, args.class_num))
cm = np.zeros((args.class_num, args.class_num))
with torch.no_grad():
for batch, (data, target) in enumerate(tqdm(dataloader_test)):
data = data.to(device).half()
target = target.to(device).long()
output = network(data)
_, pred = torch.max(output, 1)
target = target.cpu().numpy()
pred = pred.cpu().numpy()
output = F.softmax(output, 1).cpu().numpy()
idx1 = batch * args.test_batch_size
idx2 = idx1 + args.test_batch_size
prob[idx1: idx2, :] = output
for i, j in zip(target, pred):
cm[i, j] += 1
return cm, prob
def get_confidence(cms, normalization: bool=False, save: bool=False):
"""accuracy of each classifier on each class
normalization: weighted by precision
normalization = False: weighted by accuracy
"""
confidences = np.zeros((cms.shape[0], cms.shape[1])) # (10, 17)
for i in range(confidences.shape[0]):
        if normalization:
            cms[i] /= cms[i].sum(0, keepdims=True)
        else:
            cms[i] /= cms[i].sum(1, keepdims=True)
confidences[i] = cms[i].diagonal()
suffix = 'confidences'
if normalization:
suffix += '_normalized'
if save:
np.save('../log/cm/' + suffix, confidences)
return confidences
def plot_cm(matrix, round_num: int=None, suffix=''):
"""draw confusion matrix"""
classes = ['%d' % j for j in range(matrix.shape[0])]
# Normalize by row
    matrix = matrix.astype(float)
linesum = matrix.sum(1)
linesum = np.dot(linesum.reshape(-1, 1), np.ones((1, matrix.shape[1])))
matrix /= linesum
# plot
plt.switch_backend('agg')
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(matrix)
fig.colorbar(cax)
ax.xaxis.set_major_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1))
for i in range(matrix.shape[0]):
ax.text(i, i, str('%.2f' % (matrix[i, i] * 100)), va='center', ha='center', fontsize=5.5)
ax.set_xticklabels([''] + classes, rotation=90)
ax.set_yticklabels([''] + classes)
    if round_num is not None:
suffix += '_round_%.2d' % round_num
plt.savefig('../log/cm/cm%s.png' % suffix, dpi=200)
plt.close()
def get_cm_assemble_prob(confusion_all, probs_all, confidences_all, targets, save: bool=False,
classifier_num=None, use_weight: bool=False, classifier_list=None,
normalization: bool=False):
"""
soft vote
cms: (10, 17, 17)
probs: (10, 680, 17)
confidences: (10, 17)
targets: (680,)
save: save confusion matrix as .npy
classifier_num: use the first `classifier_num` classifiers to assemble a new classifier
"""
cms = confusion_all
probs = probs_all
confidences = confidences_all
if normalization:
confidences = get_confidence(cms, normalization=normalization)
if classifier_num:
cms = cms[:classifier_num]
probs = probs[:classifier_num]
confidences = confidences[:classifier_num]
if classifier_list:
cms = cms[classifier_list]
probs = probs[classifier_list]
confidences = confidences[classifier_list]
cm_assemble = np.zeros(cms.shape[1:])
probs = probs.transpose((1, 0, 2)) # 680 * 10 * 17
if use_weight:
probs = probs * confidences # 680 * 10 * 17
probs = probs.sum(1) # 680 * 17
predictions = probs.argmax(1)
for target, prediction in zip(targets, predictions):
cm_assemble[int(target), prediction] += 1
if save:
if classifier_num:
if use_weight:
np.save('../log/cm/cm_assemble_prob_weight_%.2dclassifiers' % classifier_num, cm_assemble)
else:
np.save('../log/cm/cm_assemble_prob_%.2dclassifiers' % classifier_num, cm_assemble)
acc = cm_assemble.diagonal().sum() / cm_assemble.sum()
suffix = ', soft vote'
if use_weight:
suffix += ', use weight'
else:
suffix += ', no weight'
if classifier_num:
suffix += ', %d classifiers' % classifier_num
if classifier_list:
suffix += ', selected list'
if normalization:
suffix += ', normalization'
print('accuracy of assemble method' + suffix + ' : %.4f' % acc)
return cm_assemble
def get_cm_assemble_vote(confusion_all, probs_all, confidences_all, targets, save: bool=False,
classifier_num: int=None, use_weight: bool=False, classifier_list=None,
normalization: bool = False):
"""
hard vote
cms: (10, 17, 17)
probs: (10, 680, 17)
confidences: (10, 17)
targets: (680,)
save: save confusion matrix as .npy
classifier_num: use the first `classifier_num` classifiers to assemble a new classifier
"""
cms = confusion_all
probs = probs_all
confidences = confidences_all
if normalization:
confidences = get_confidence(cms, normalization=normalization)
if classifier_num:
cms = cms[:classifier_num]
probs = probs[:classifier_num]
confidences = confidences[:classifier_num]
if classifier_list:
cms = cms[classifier_list]
probs = probs[classifier_list]
confidences = confidences[classifier_list]
cm_assemble = np.zeros(cms.shape[1:])
probs = probs.transpose((1, 0, 2)) # 680 * 10 * 17
probs = probs.argmax(2) # 680 * 10, the vote of each classifier
votes = np.zeros((probs.shape[0], cms.shape[2])) # 680 * 17, the vote of each class
for i in range(probs.shape[0]):
for j in range(probs.shape[1]):
if use_weight:
votes[i, probs[i, j]] += confidences[j, probs[i, j]]
else:
votes[i, probs[i, j]] += 1
predictions = votes.argmax(1)
for target, prediction in zip(targets, predictions):
cm_assemble[int(target), prediction] += 1
if save:
if classifier_num:
if use_weight:
np.save('../log/cm/cm_assemble_vote_weight_%.2dclassifiers' % classifier_num, cm_assemble)
else:
np.save('../log/cm/cm_assemble_vote_%.2dclassifiers' % classifier_num, cm_assemble)
acc = cm_assemble.diagonal().sum() / cm_assemble.sum()
suffix = ', hard vote'
if use_weight:
suffix += ', use weight'
else:
suffix += ', no weight'
if classifier_num:
suffix += ', %d classifiers' % classifier_num
if classifier_list:
suffix += ', selected list'
if normalization:
suffix += ', normalization'
print('accuracy of assemble method' + suffix + ' : %.4f' % acc)
return cm_assemble
def main(args, matrix_from_file: bool = False):
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
if not matrix_from_file:
cms = np.zeros((10, args.class_num, args.class_num)) # (10, 17, 17)
probs = np.zeros((10, args.class_num * args.num_image_per_class // 2, args.class_num)) # (10, 680, 17)
dataset_test = FlowerData(args, split='test')
dataloader_test = DataLoader(dataset_test, batch_size=args.test_batch_size,
shuffle=False, num_workers=10)
for i in range(10):
cm, prob = get_cm_each_round(args, device, dataloader_test, round_num=i)
cms[i], probs[i] = cm, prob
confidences = get_confidence(cms)
np.save('../log/cm/cms.npy', cms)
np.save('../log/cm/probabilities.npy', probs)
else:
cms = np.load('../log/cm/cms.npy')
probs = np.load('../log/cm/probabilities.npy')
confidences = np.load('../log/cm/confidences.npy')
targets = np.load('../log/cm/targets.npy')
# for i in range(1, 11):
cm = get_cm_assemble_vote(cms, probs, confidences, targets)
plot_cm(cm, suffix='_hard_no_weight')
# get_cm_assemble_vote(cms, probs, confidences, targets, use_weight=True)
cm = get_cm_assemble_vote(cms, probs, confidences, targets, use_weight=True, normalization=True)
plot_cm(cm, suffix='_hard_weight')
cm = get_cm_assemble_prob(cms, probs, confidences, targets)
plot_cm(cm, suffix='_soft_no_weight')
# get_cm_assemble_prob(cms, probs, confidences, targets, use_weight=True)
cm = get_cm_assemble_prob(cms, probs, confidences, targets, use_weight=True, normalization=True)
plot_cm(cm, suffix='_soft_weight')
# for i in range(10):
# # plot confusion matrix
# plot_cm(cms[i], round_num=i)
if __name__ == '__main__':
argument = arguments()
main(argument, matrix_from_file=True)
# args = argument
# use_cuda = not argument.no_cuda and torch.cuda.is_available()
# device = torch.device("cuda:0" if use_cuda else "cpu")
# dataset_test = FlowerData(args, split='test')
# dataloader_test = DataLoader(dataset_test, batch_size=args.test_batch_size,
# shuffle=False, num_workers=10)
# cm, _ = get_cm_each_round(args, device, dataloader_test, all_classes=True)
# plot_cm(cm, suffix='all_classes')
# print(cm.diagonal().sum() / cm.sum())
|
[
"torch.max",
"dataset.dataset.FlowerData",
"torch.cuda.is_available",
"matplotlib.pyplot.switch_backend",
"model.config.arguments",
"torch.nn.functional.softmax",
"numpy.save",
"matplotlib.pyplot.close",
"model.model.BaseNet",
"matplotlib.pyplot.savefig",
"numpy.ones",
"torch.device",
"matplotlib.ticker.MultipleLocator",
"torch.load",
"tqdm.tqdm",
"numpy.zeros",
"matplotlib.pyplot.figure",
"torch.utils.data.DataLoader",
"torch.no_grad",
"numpy.load"
] |
[((485, 518), 'model.model.BaseNet', 'BaseNet', ([], {'num_class': 'args.class_num'}), '(num_class=args.class_num)\n', (492, 518), False, 'from model.model import BaseNet\n'), ((820, 894), 'numpy.zeros', 'np.zeros', (['(args.class_num * args.num_image_per_class // 2, args.class_num)'], {}), '((args.class_num * args.num_image_per_class // 2, args.class_num))\n', (828, 894), True, 'import numpy as np\n'), ((904, 946), 'numpy.zeros', 'np.zeros', (['(args.class_num, args.class_num)'], {}), '((args.class_num, args.class_num))\n', (912, 946), True, 'import numpy as np\n'), ((1819, 1857), 'numpy.zeros', 'np.zeros', (['(cms.shape[0], cms.shape[1])'], {}), '((cms.shape[0], cms.shape[1]))\n', (1827, 1857), True, 'import numpy as np\n'), ((2588, 2613), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (2606, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2624, 2636), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2634, 2636), True, 'import matplotlib.pyplot as plt\n'), ((3114, 3165), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../log/cm/cm%s.png' % suffix)"], {'dpi': '(200)'}), "('../log/cm/cm%s.png' % suffix, dpi=200)\n", (3125, 3165), True, 'import matplotlib.pyplot as plt\n'), ((3171, 3182), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3180, 3182), True, 'import matplotlib.pyplot as plt\n'), ((4172, 4195), 'numpy.zeros', 'np.zeros', (['cms.shape[1:]'], {}), '(cms.shape[1:])\n', (4180, 4195), True, 'import numpy as np\n'), ((6269, 6292), 'numpy.zeros', 'np.zeros', (['cms.shape[1:]'], {}), '(cms.shape[1:])\n', (6277, 6292), True, 'import numpy as np\n'), ((6431, 6471), 'numpy.zeros', 'np.zeros', (['(probs.shape[0], cms.shape[2])'], {}), '((probs.shape[0], cms.shape[2]))\n', (6439, 6471), True, 'import numpy as np\n'), ((7776, 7821), 'torch.device', 'torch.device', (["('cuda:0' if use_cuda else 'cpu')"], {}), "('cuda:0' if use_cuda else 'cpu')\n", (7788, 7821), False, 'import torch\n'), ((9627, 9638), 'model.config.arguments', 'arguments', ([], {}), '()\n', (9636, 9638), False, 'from model.config import arguments\n'), ((957, 972), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (970, 972), False, 'import torch\n'), ((2174, 2217), 'numpy.save', 'np.save', (["('../log/cm/' + suffix)", 'confidences'], {}), "('../log/cm/' + suffix, confidences)\n", (2181, 2217), True, 'import numpy as np\n'), ((2520, 2549), 'numpy.ones', 'np.ones', (['(1, matrix.shape[1])'], {}), '((1, matrix.shape[1]))\n', (2527, 2549), True, 'import numpy as np\n'), ((2749, 2767), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (2764, 2767), False, 'from matplotlib.ticker import MultipleLocator\n'), ((2800, 2818), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(1)'], {}), '(1)\n', (2815, 2818), False, 'from matplotlib.ticker import MultipleLocator\n'), ((7737, 7762), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7760, 7762), False, 'import torch\n'), ((7866, 7912), 'numpy.zeros', 'np.zeros', (['(10, args.class_num, args.class_num)'], {}), '((10, args.class_num, args.class_num))\n', (7874, 7912), True, 'import numpy as np\n'), ((7945, 8023), 'numpy.zeros', 'np.zeros', (['(10, args.class_num * args.num_image_per_class // 2, args.class_num)'], {}), '((10, args.class_num * args.num_image_per_class // 2, args.class_num))\n', (7953, 8023), True, 'import numpy as np\n'), ((8065, 8095), 'dataset.dataset.FlowerData', 'FlowerData', (['args'], {'split': '"""test"""'}), "(args, 
split='test')\n", (8075, 8095), False, 'from dataset.dataset import FlowerData\n'), ((8122, 8214), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_test'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)', 'num_workers': '(10)'}), '(dataset_test, batch_size=args.test_batch_size, shuffle=False,\n num_workers=10)\n', (8132, 8214), False, 'from torch.utils.data import DataLoader\n'), ((8454, 8487), 'numpy.save', 'np.save', (['"""../log/cm/cms.npy"""', 'cms'], {}), "('../log/cm/cms.npy', cms)\n", (8461, 8487), True, 'import numpy as np\n'), ((8496, 8541), 'numpy.save', 'np.save', (['"""../log/cm/probabilities.npy"""', 'probs'], {}), "('../log/cm/probabilities.npy', probs)\n", (8503, 8541), True, 'import numpy as np\n'), ((8566, 8594), 'numpy.load', 'np.load', (['"""../log/cm/cms.npy"""'], {}), "('../log/cm/cms.npy')\n", (8573, 8594), True, 'import numpy as np\n'), ((8611, 8649), 'numpy.load', 'np.load', (['"""../log/cm/probabilities.npy"""'], {}), "('../log/cm/probabilities.npy')\n", (8618, 8649), True, 'import numpy as np\n'), ((8672, 8708), 'numpy.load', 'np.load', (['"""../log/cm/confidences.npy"""'], {}), "('../log/cm/confidences.npy')\n", (8679, 8708), True, 'import numpy as np\n'), ((8727, 8759), 'numpy.load', 'np.load', (['"""../log/cm/targets.npy"""'], {}), "('../log/cm/targets.npy')\n", (8734, 8759), True, 'import numpy as np\n'), ((571, 612), 'torch.load', 'torch.load', (['"""../checkpoint/all_class.pth"""'], {}), "('../checkpoint/all_class.pth')\n", (581, 612), False, 'import torch\n'), ((656, 734), 'torch.load', 'torch.load', (["('../checkpoint/round%.2d_epoch%.4d.pth' % (round_num, args.epochs))"], {}), "('../checkpoint/round%.2d_epoch%.4d.pth' % (round_num, args.epochs))\n", (666, 734), False, 'import torch\n'), ((1021, 1042), 'tqdm.tqdm', 'tqdm', (['dataloader_test'], {}), '(dataloader_test)\n', (1025, 1042), False, 'from tqdm import tqdm\n'), ((1191, 1211), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (1200, 1211), False, 'import torch\n'), ((4590, 4684), 'numpy.save', 'np.save', (["('../log/cm/cm_assemble_prob_weight_%.2dclassifiers' % classifier_num)", 'cm_assemble'], {}), "('../log/cm/cm_assemble_prob_weight_%.2dclassifiers' %\n classifier_num, cm_assemble)\n", (4597, 4684), True, 'import numpy as np\n'), ((4715, 4802), 'numpy.save', 'np.save', (["('../log/cm/cm_assemble_prob_%.2dclassifiers' % classifier_num)", 'cm_assemble'], {}), "('../log/cm/cm_assemble_prob_%.2dclassifiers' % classifier_num,\n cm_assemble)\n", (4722, 4802), True, 'import numpy as np\n'), ((6969, 7063), 'numpy.save', 'np.save', (["('../log/cm/cm_assemble_vote_weight_%.2dclassifiers' % classifier_num)", 'cm_assemble'], {}), "('../log/cm/cm_assemble_vote_weight_%.2dclassifiers' %\n classifier_num, cm_assemble)\n", (6976, 7063), True, 'import numpy as np\n'), ((7094, 7181), 'numpy.save', 'np.save', (["('../log/cm/cm_assemble_vote_%.2dclassifiers' % classifier_num)", 'cm_assemble'], {}), "('../log/cm/cm_assemble_vote_%.2dclassifiers' % classifier_num,\n cm_assemble)\n", (7101, 7181), True, 'import numpy as np\n'), ((1314, 1334), 'torch.nn.functional.softmax', 'F.softmax', (['output', '(1)'], {}), '(output, 1)\n', (1323, 1334), True, 'import torch.nn.functional as F\n')]
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Synthetic dataset generated from the PlasmaSpectroscopy model.
This was generated using the following snippet:
```python
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
from inference_gym.internal import array_to_source
from inference_gym import using_tensorflow as gym
import numpy as np
num_sensors = 40
num_wavelengths = 40
wavelengths = np.linspace(0.01, 0.2, num_wavelengths)
center_wavelength = wavelengths.mean()
model = gym.targets.PlasmaSpectroscopy(
tf.zeros((num_wavelengths, num_sensors)),
wavelengths=wavelengths,
center_wavelength=center_wavelength)
sample, dataset = model._sample_dataset(seed=(0, 8))
sources = []
for k, v in sample._asdict().items():
sources.append(
array_to_source.array_to_source(
k.upper(), v))
for k, v in dataset.items():
sources.append(
array_to_source.array_to_source(
k.upper(), v))
with open('/tmp/synthetic_plasma_spectroscopy.py', 'w') as f:
f.write("\n\n".join(sources))
```
Note that the final `_sample_dataset` is not reproducible across
software versions, hence the output is checked in.
"""
import numpy as np
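# The constants below are the latent sample (AMPLITUDE, TEMPERATURE, VELOCITY,
# SHIFT) and dataset (WAVELENGTHS, CENTER_WAVELENGTH, MEASUREMENTS) written out
# by the snippet in the module docstring. As a sketch, they can be fed back
# into the model the same way the docstring constructs it, e.g.
#   model = gym.targets.PlasmaSpectroscopy(
#       MEASUREMENTS, wavelengths=WAVELENGTHS,
#       center_wavelength=CENTER_WAVELENGTH)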
AMPLITUDE = np.array([
1.4802036,
1.8915913,
-0.011120212,
1.1328301,
1.2841645,
0.6033605,
-1.887041,
-2.012894,
0.046582267,
1.5555662,
0.4305847,
-1.7179363,
-1.1399889,
-0.4432812,
-1.4721184,
0.35457477,
]).reshape((16,))
TEMPERATURE = np.array([
1.2321296,
-0.020694781,
-1.3441145,
-0.51342154,
-0.6282792,
-0.22180416,
-1.0089059,
1.4475185,
-1.8519154,
0.5540126,
-1.3644233,
1.5542297,
-0.4033564,
-0.029513652,
-0.14812116,
0.93214256,
]).reshape((16,))
VELOCITY = np.array([
0.010279292,
-1.6109133,
0.85784495,
0.8826037,
0.19365458,
-0.36963812,
1.2059057,
-0.93545884,
0.38819882,
1.6983186,
-1.8130875,
0.94406796,
-0.79738003,
-1.0478632,
-0.38848934,
-0.48529625,
]).reshape((16,))
SHIFT = np.array([
-0.5514385,
]).reshape(())
WAVELENGTHS = np.array([
0.01,
0.014871794871794873,
0.019743589743589744,
0.024615384615384615,
0.029487179487179487,
0.03435897435897436,
0.039230769230769236,
0.04410256410256411,
0.04897435897435898,
0.05384615384615385,
0.05871794871794872,
0.06358974358974359,
0.06846153846153846,
0.07333333333333333,
0.0782051282051282,
0.08307692307692308,
0.08794871794871795,
0.09282051282051282,
0.09769230769230769,
0.10256410256410256,
0.10743589743589743,
0.1123076923076923,
0.11717948717948717,
0.12205128205128205,
0.12692307692307694,
0.13179487179487182,
0.1366666666666667,
0.14153846153846156,
0.14641025641025643,
0.1512820512820513,
0.15615384615384617,
0.16102564102564104,
0.1658974358974359,
0.17076923076923078,
0.17564102564102566,
0.18051282051282053,
0.1853846153846154,
0.19025641025641027,
0.19512820512820514,
0.2,
]).reshape((40,))
CENTER_WAVELENGTH = np.array([
0.10500000000000001,
]).reshape(())
MEASUREMENTS = np.array([
-0.66101485,
0.31644753,
-0.5896422,
0.4764485,
2.1545932,
15.793148,
8.2264805,
6.457074,
5.7062893,
6.1811686,
8.777044,
6.9074125,
7.9522552,
7.701313,
8.559349,
8.296498,
6.1969037,
6.4804926,
6.8852997,
8.830744,
14.376627,
0.54612935,
0.124028,
0.44405863,
0.5131382,
0.5987899,
0.008983987,
-0.24756075,
0.7618118,
-0.21146192,
0.4546959,
0.09494688,
-0.26813537,
0.5798886,
-0.10784844,
0.18372172,
0.8161483,
-0.3787802,
0.61460984,
-0.41957632,
0.13647377,
-0.3481221,
0.03326019,
1.7144626,
3.8620698,
14.40822,
9.046495,
7.6838465,
7.2554746,
8.057631,
11.189637,
9.038466,
8.125581,
8.294034,
10.172681,
11.90528,
7.1925435,
6.708079,
7.6085744,
9.414239,
14.608672,
1.5265317,
1.09792,
0.29970562,
0.29824358,
0.36030084,
-0.37960574,
0.47860667,
0.91203105,
-0.6904322,
-0.2722036,
0.23733543,
-0.6658274,
0.62095886,
0.73466265,
-0.8475226,
-0.1700871,
0.9261157,
0.422822,
0.32836267,
0.58122945,
-0.83155084,
-0.20049855,
-0.040298104,
4.014356,
16.160791,
7.2828264,
7.3377733,
6.665611,
8.653453,
11.973017,
9.656379,
10.9801235,
9.05112,
10.565474,
11.942185,
7.2904882,
7.4630857,
6.514908,
9.644132,
14.969957,
0.07107994,
0.11467081,
0.92357284,
0.04355552,
0.6726098,
-0.15279476,
0.713554,
0.5466241,
-0.38109347,
0.5590394,
0.08306945,
0.9525252,
0.6713458,
0.51892877,
-0.1279359,
-0.15663871,
0.020156374,
-0.060285714,
-1.0264076,
-0.53699505,
-0.9786586,
0.015289649,
1.5724823,
4.0689135,
13.646254,
8.417458,
7.3368583,
6.966266,
8.73208,
14.498494,
10.2102165,
11.423929,
11.351579,
12.9430065,
15.01266,
9.051174,
7.077483,
6.785291,
9.483119,
15.76488,
1.1677985,
1.6693239,
-0.21604359,
0.32284033,
-0.22243214,
0.60323435,
-0.11199745,
0.29957047,
0.006062749,
0.7996792,
0.3094816,
-0.7718058,
0.503415,
0.07231447,
-0.2853677,
0.4330218,
0.844616,
-0.19574685,
-0.3879851,
0.5901966,
0.051313907,
-0.29432508,
1.2537544,
3.1426716,
14.615546,
8.347049,
7.4366584,
6.4491363,
9.865336,
15.843064,
12.469691,
11.894229,
12.133173,
14.63979,
16.16245,
9.504371,
8.017702,
7.867693,
9.518961,
14.380217,
0.66953653,
0.60293055,
0.00082825124,
-0.28320992,
0.8367502,
0.12513764,
0.22053392,
-0.10229007,
-0.20082277,
0.63717407,
0.32739908,
-0.093239225,
-0.80318755,
0.9917766,
0.24838758,
-0.07330545,
0.15537623,
0.09008534,
-0.06607497,
1.0962121,
0.55644095,
0.6913326,
0.9021442,
3.8921309,
14.102233,
7.184174,
7.315026,
7.334084,
10.787065,
19.485243,
13.958044,
14.3500805,
13.616628,
15.63192,
17.07027,
9.131023,
6.8167133,
6.970449,
8.922994,
14.361785,
1.7793398,
0.94775784,
0.105669454,
-0.18747061,
0.6676264,
-0.3883816,
-0.6202498,
-0.0833843,
-0.5216094,
1.1268811,
-0.59910476,
0.39042526,
0.47714886,
-0.7111677,
-0.5756576,
0.9333002,
0.1010186,
0.13677923,
-0.75147396,
1.2583244,
-0.23063457,
0.7901664,
0.24705392,
3.6259048,
12.530731,
6.9297647,
7.079164,
7.2256374,
11.940973,
20.025602,
14.700426,
13.519883,
14.241193,
17.55714,
17.386055,
10.167002,
7.536337,
7.0136056,
9.326938,
12.228463,
0.17775005,
0.8319777,
-0.8991761,
-0.01412341,
0.61705685,
-0.14188325,
-0.41435227,
-0.316557,
-0.5893145,
-0.010637931,
0.20675054,
0.44020182,
-0.7080041,
0.16052538,
-0.48142046,
0.9052833,
0.432698,
0.03338314,
0.35594848,
1.1689888,
0.36019892,
0.23971666,
1.4662509,
3.3352752,
11.360069,
8.300535,
7.5611286,
7.2111707,
17.327162,
20.148909,
17.380922,
17.596447,
14.160338,
19.188683,
17.219112,
10.499862,
8.309862,
6.1963353,
7.3864193,
12.878287,
1.4184926,
1.7496321,
-0.082713336,
0.23216072,
0.20258206,
1.0141679,
0.14271286,
-0.29340488,
-0.055605985,
-0.5336929,
-0.54352623,
0.19902669,
0.12139763,
-0.018293247,
-0.20558693,
-0.8606704,
0.22833318,
0.4463366,
0.20494421,
0.7066752,
-0.62247527,
0.117985666,
1.831157,
3.299585,
9.63925,
7.483565,
7.1289496,
6.4751153,
15.985568,
21.507505,
18.539736,
16.699535,
16.726501,
19.698357,
22.443224,
11.952675,
7.005475,
6.2864413,
8.778635,
10.89195,
0.66351974,
1.1440128,
-0.25076824,
0.66586065,
1.0526825,
0.015522989,
0.07891381,
1.104366,
0.7747889,
0.15351877,
-0.12182697,
-0.59052014,
-0.12581429,
0.5053382,
0.17305401,
0.67090386,
1.036633,
0.05909565,
0.28418896,
0.86726683,
0.1763895,
0.33444333,
1.7197226,
2.5705223,
9.934082,
6.614648,
5.9702163,
7.0940704,
18.322672,
24.886862,
18.648033,
19.174364,
17.071978,
18.935146,
20.495438,
13.39125,
7.1744776,
5.476832,
7.2689962,
10.46958,
1.1804211,
1.0994785,
0.64040864,
0.021063149,
0.75519574,
0.40024444,
-0.48553574,
0.87461084,
-0.23675112,
0.1914608,
-0.49892142,
0.2618199,
0.6261685,
-1.4913763,
0.41756257,
0.5763335,
-0.45616063,
0.38227928,
-0.6692691,
1.8232274,
0.7977414,
0.40125495,
2.787939,
3.2074018,
8.831141,
6.6602535,
7.500632,
8.793667,
18.995548,
23.698793,
18.186054,
17.543282,
18.392523,
20.788607,
24.634804,
14.188387,
8.168461,
5.5740485,
6.8008204,
8.531001,
1.4529983,
2.276989,
1.0289037,
0.9468033,
-0.038641334,
-0.39401633,
-1.1387177,
0.49660775,
0.5171432,
-0.6254447,
1.2226907,
-0.13812594,
0.11419458,
-0.36041245,
0.16572447,
-0.2501292,
-0.95744544,
0.6987992,
0.3099944,
1.108943,
0.41807377,
1.350997,
1.2673455,
3.2821457,
8.0927515,
5.9851384,
4.8361425,
8.642136,
20.54146,
23.320255,
20.936903,
19.881096,
18.084406,
20.986282,
22.538109,
15.849695,
7.59143,
5.759286,
7.9955835,
7.542832,
1.5869404,
2.191163,
-0.0054766536,
0.38372415,
1.4580531,
-0.6341528,
-0.20307654,
-0.82046396,
0.30573404,
0.59632486,
-0.12896755,
-0.42806864,
-0.47942856,
-0.7036555,
0.075889945,
0.29308736,
-1.4974035,
-0.036708307,
-0.43896213,
0.54672736,
1.3562044,
1.5058006,
2.0175235,
3.2622445,
7.817541,
6.1968045,
5.7298784,
8.535798,
22.878216,
23.569859,
21.438442,
20.779306,
18.338245,
23.335554,
23.656643,
16.534071,
7.0056953,
5.3699074,
6.2035737,
6.91238,
1.8461741,
2.0328891,
0.6284174,
0.07324934,
0.72266495,
0.43248987,
0.55657876,
-0.36850226,
0.2892055,
0.120979175,
-0.3255677,
0.18210961,
-0.13677588,
-0.79952997,
-0.16948017,
0.27382505,
0.011414817,
-0.002753294,
0.1875501,
1.7294772,
0.86453336,
0.8789885,
2.0237687,
2.686733,
7.0931683,
6.7965593,
5.703301,
9.106176,
19.852842,
22.134148,
24.209602,
20.48003,
19.87589,
22.650255,
24.67572,
17.161873,
7.185769,
5.12218,
5.9893394,
5.907269,
2.1844404,
1.9687537,
1.0286644,
0.052360654,
1.7644687,
0.5339646,
-0.53046066,
-0.2281848,
-1.2462859,
0.6778776,
0.5408989,
-0.14820653,
0.38658077,
-0.65733767,
0.014478714,
0.45866382,
0.47466084,
0.48330665,
0.52647215,
1.6572766,
-0.093874216,
1.0939939,
2.8252633,
3.250628,
7.286972,
5.736179,
5.5879693,
9.545634,
22.925808,
23.213871,
23.39594,
21.748808,
22.024412,
24.974943,
23.57301,
18.065563,
8.397812,
4.8709254,
7.626314,
4.6410003,
1.8595266,
3.0831103,
1.4402436,
1.2672244,
1.312456,
-0.18201214,
0.21097422,
-0.026861114,
0.18476872,
0.7252849,
-0.002409873,
-0.29303908,
1.3546691,
-0.04322617,
-0.053203642,
-0.30067968,
-0.12050266,
-0.5528519,
0.057745364,
1.3053449,
1.8519605,
1.8503615,
2.5469666,
4.2060847,
5.5301046,
7.0553675,
5.9386334,
11.875089,
23.438046,
20.363987,
23.725615,
20.967691,
21.432257,
24.202627,
19.774887,
18.783188,
7.98809,
6.2239876,
7.760503,
5.212336,
2.9735184,
2.7213335,
2.0156252,
1.814288,
2.2770615,
0.01533184,
0.58220863,
-0.49351138,
0.31417957,
-0.36469758,
0.45743746,
0.66627234,
0.3081961,
0.828259,
-0.31382263,
0.26520026,
0.22944771,
-0.6709603,
-0.07570245,
1.5327783,
1.7784487,
2.6468341,
3.198592,
3.7656205,
5.9252257,
6.9020658,
4.9581833,
12.047751,
22.348654,
20.17518,
24.174393,
21.535011,
19.05106,
22.163195,
21.497072,
18.43445,
8.682917,
5.3132563,
7.030179,
3.717919,
2.0626392,
2.4575338,
2.2717822,
0.8625143,
2.4770658,
-0.786061,
1.2881083,
-0.2518999,
0.72405684,
-0.122574806,
-0.34197915,
0.13918422,
0.26873538,
-0.47515658,
-0.54810023,
0.89566797,
-0.54384357,
-0.12311963,
0.567525,
2.7046611,
1.5512958,
1.7786896,
3.8791292,
3.9559023,
4.788476,
8.228316,
5.3946,
12.281274,
21.967098,
20.923243,
23.913458,
20.710938,
19.420635,
25.138704,
18.289383,
19.177135,
8.415327,
4.8929396,
8.965305,
4.3885813,
3.4578655,
3.0384607,
1.5863328,
1.91974,
2.4258208,
0.5892152,
0.048560977,
-0.13528748,
-0.21397328,
0.16264682,
-0.57951355,
-0.40301454,
0.21641892,
-0.22450455,
0.38177252,
-0.967473,
-0.35485935,
0.062246032,
-0.03395147,
2.1338463,
1.9084859,
3.1863737,
1.9375713,
3.4518764,
6.570703,
6.878443,
5.679476,
13.351213,
22.931889,
19.282558,
22.36135,
23.796984,
21.032475,
23.09803,
20.966232,
20.72223,
6.7338567,
6.4885483,
7.190284,
4.9310346,
3.1236634,
3.5150487,
2.9693668,
2.2454295,
1.82249,
-0.09966546,
0.72314006,
-0.79027426,
0.41793302,
-0.14793015,
0.45988762,
0.8456978,
-0.5273398,
0.1830612,
-1.0828326,
-1.0117317,
-0.3019783,
0.17001551,
-0.62556803,
2.961217,
2.6823378,
2.9682546,
5.2445164,
4.9527783,
6.309333,
7.7392774,
6.2129936,
15.35368,
20.683935,
20.589102,
22.10926,
20.185204,
20.562426,
22.645317,
18.869568,
20.659521,
8.880328,
6.4410696,
9.769155,
5.5935693,
5.527752,
4.5683465,
3.4019177,
3.3163903,
2.244741,
0.38402623,
0.2960868,
-0.4828044,
0.13759217,
0.25681636,
0.11657055,
-0.330115,
0.4011577,
-0.7654019,
0.14916949,
-0.6228205,
-0.96823233,
-0.022868,
-0.49047035,
3.20636,
2.6912642,
2.9050756,
4.912674,
5.7441964,
6.489336,
9.632326,
6.2825303,
16.68777,
21.077969,
17.172966,
18.92938,
23.38385,
20.251026,
22.16378,
18.001736,
20.24098,
11.019654,
6.6073513,
8.655663,
6.298364,
6.4654784,
3.6983974,
3.1087956,
2.226927,
2.6668777,
-0.35526595,
1.4488825,
0.20488043,
0.047601122,
-0.6924504,
0.57495445,
0.5399022,
-0.47663862,
0.8161736,
-0.36598107,
-0.59101355,
0.20327158,
0.41677478,
0.27029967,
3.7847342,
3.2484818,
3.747693,
4.7734656,
6.716756,
8.185982,
9.418276,
7.493696,
14.704602,
17.729408,
17.48148,
19.855602,
20.371563,
18.5821,
18.155266,
16.968113,
17.100256,
10.015516,
7.8247633,
8.993816,
6.4911056,
6.2132425,
4.3434267,
3.7000012,
3.7377622,
3.1024928,
-0.30869377,
0.051026687,
-0.34078225,
0.7479868,
0.03696166,
-0.75611556,
1.1542099,
-0.028129257,
0.08181842,
0.09559424,
0.8364861,
0.096545294,
0.5584201,
-0.5194905,
3.589691,
4.05453,
3.794124,
4.707637,
9.231918,
8.564278,
9.2333975,
7.006125,
16.20831,
19.324417,
15.819074,
19.356344,
17.93927,
18.384487,
18.001207,
16.142382,
21.02356,
9.986794,
6.614442,
10.657583,
6.6237283,
8.433239,
4.4907804,
4.2819304,
3.7269611,
3.5132716,
0.4662154,
0.30799574,
0.96793664,
-0.23279454,
-0.65458816,
0.3362532,
-0.25408295,
0.06732974,
0.4873681,
0.51199776,
0.14874719,
-0.29994798,
0.4666868,
0.33490536,
3.3489285,
2.9599032,
3.7671084,
5.274986,
11.143537,
9.2554245,
9.07235,
9.138557,
17.255503,
18.355011,
15.364281,
17.336935,
18.85955,
17.050003,
15.608138,
15.812602,
18.231024,
11.6336155,
6.9478188,
11.149977,
7.419574,
10.250601,
4.7022414,
3.971905,
4.7929826,
3.3438401,
-0.39000547,
-0.28059074,
0.6398243,
0.54544014,
0.6069346,
-0.17257981,
0.22857136,
0.5565434,
0.004583537,
-1.6335539,
-0.8888735,
-0.51765877,
0.25269827,
-0.01876194,
3.6656997,
3.8518455,
5.484056,
6.189166,
12.860901,
9.803692,
10.184517,
8.937886,
17.70772,
18.956602,
15.036017,
18.585073,
18.892986,
18.184309,
15.378883,
13.1691475,
16.713081,
11.373385,
10.050861,
11.757488,
10.44355,
12.29941,
4.694755,
5.29064,
3.8482742,
3.204164,
0.0923521,
0.023937136,
0.1471634,
0.6328977,
0.086753555,
0.4752982,
-0.6725007,
0.39593527,
0.22832835,
-0.27118513,
-0.8305444,
0.61332023,
-0.46385112,
-0.07130288,
3.392937,
5.612763,
5.2056,
5.706025,
15.220109,
11.131699,
11.811647,
9.684384,
18.768026,
16.84839,
13.052551,
16.32535,
17.554602,
17.395172,
14.127713,
12.6871,
17.62177,
11.645812,
8.629343,
11.129438,
11.581531,
14.195255,
4.8469067,
5.1938415,
4.0862703,
3.181031,
-1.0452468,
-0.25019166,
-0.7914238,
0.12144237,
-0.41462633,
0.54280686,
-0.69631076,
0.3511648,
0.004874259,
-0.06835556,
0.8735261,
0.24838078,
-0.31527227,
0.52716863,
3.9399889,
6.0550613,
6.129095,
6.861085,
18.186186,
11.700109,
9.944186,
8.473949,
16.194746,
15.487744,
11.69865,
15.148699,
17.62606,
18.724825,
14.773164,
12.397501,
17.29195,
12.904611,
10.236364,
9.858109,
12.551205,
17.244278,
5.081826,
5.861555,
4.532901,
2.9011462,
-0.6339103,
-0.14527631,
-0.34604034,
0.16419859,
-0.21205892,
1.0102317,
-0.6850754,
-0.35831228,
0.2243401,
-0.12707797,
0.12315286,
0.75053287,
-0.30611196,
0.946708,
3.2013948,
5.563331,
4.7585716,
7.213843,
20.686522,
11.607341,
12.30799,
10.50174,
15.599098,
14.504682,
13.629604,
13.69594,
17.019728,
16.432478,
13.931328,
13.392891,
16.40223,
12.716988,
10.136288,
11.304484,
14.544636,
18.359613,
5.5700507,
5.302722,
5.3971443,
4.0632043,
0.34419727,
-0.43536162,
0.2166448,
-0.95898896,
0.54851377,
0.7104762,
0.73580873,
-0.025371978,
-0.42447037,
-0.055623855,
-0.057257153,
-0.042765763,
-0.32910374,
0.110769786,
4.9113693,
6.042119,
5.789901,
8.213889,
21.399662,
13.620898,
12.268165,
12.022924,
15.812675,
14.541431,
11.235446,
13.432023,
16.380638,
17.424328,
13.075844,
13.108509,
16.125572,
12.70376,
9.833503,
12.167731,
15.966658,
19.35662,
4.726227,
5.754112,
5.277654,
3.513394,
0.27682012,
-0.6424214,
0.63972783,
0.052361738,
0.6900285,
0.8120001,
0.13217215,
-0.06418637,
-0.34938893,
-0.1332957,
-0.14414565,
0.13367409,
0.2113514,
0.013457297,
5.1611977,
5.566288,
5.6893077,
6.982988,
20.4595,
14.453565,
13.59946,
10.934562,
16.137613,
14.927114,
11.994792,
13.434463,
17.021969,
17.274439,
13.322607,
11.919087,
16.481926,
12.076119,
10.847066,
11.398886,
16.077639,
19.727343,
4.5308523,
6.236413,
4.8869467,
3.9474933,
0.5430834,
-0.16916445,
1.1437705,
0.16070405,
0.31188658,
0.8880989,
-0.14495048,
-0.5266939,
0.22656989,
0.3505556,
0.015732061,
-0.005636345,
-0.56870633,
0.40287915,
4.4800043,
4.970619,
4.5086727,
7.2337227,
21.180979,
13.984755,
12.418574,
10.579776,
14.925623,
11.359912,
10.660921,
12.467203,
17.208267,
17.148045,
11.586628,
11.8577,
13.493896,
13.254265,
10.851606,
13.149869,
17.053873,
19.849815,
4.9660897,
5.8460274,
3.998473,
3.6802619,
0.8031087,
-0.013905935,
0.3503995,
0.31186494,
-0.038673762,
-0.07608058,
0.21588215,
-0.23191574,
-0.3952367,
-0.09744672,
0.10716237,
-1.3977432,
-0.2775279,
0.28267142,
3.4341362,
5.5165367,
4.798283,
5.5223513,
23.267078,
15.076336,
13.030845,
10.9562845,
13.846566,
11.140822,
10.528686,
12.319912,
15.81127,
17.356304,
10.330765,
10.917309,
11.82135,
11.22828,
9.395469,
12.859789,
15.528548,
18.173409,
4.9549546,
7.068773,
5.830448,
2.882567,
-0.47524917,
-0.3299339,
0.19532575,
-0.5605442,
-0.05505767,
-0.22165492,
-0.4325593,
0.13398468,
-0.34254703,
0.0140561955,
-0.31874263,
-0.14240773,
-0.91078305,
0.69452536,
4.23155,
5.7011547,
6.0003905,
6.377488,
20.312622,
13.978043,
11.040157,
11.176402,
13.108543,
9.652381,
9.632209,
11.781593,
14.856762,
15.745179,
9.215103,
9.966311,
12.876652,
11.37008,
10.591258,
10.1424675,
14.367625,
19.73172,
3.84762,
7.103483,
3.7233605,
2.376824,
0.5252924,
0.38380843,
0.99321234,
-0.46900645,
0.12149067,
0.42257598,
0.0632253,
-0.6670193,
0.03464376,
0.452787,
0.29236665,
-0.017891373,
-0.075127214,
0.9828477,
2.3365817,
5.2860856,
4.3626456,
5.785785,
20.600492,
12.966171,
11.047343,
9.063554,
10.454045,
10.47048,
9.218836,
11.104739,
15.136548,
14.689532,
10.122101,
9.4212675,
11.134829,
8.617753,
9.327736,
11.278048,
13.085438,
18.43459,
3.9763334,
5.9072723,
3.9930198,
3.4963682,
0.2813723,
1.0457343,
0.31889322,
0.37867522,
1.2037315,
-0.47904515,
0.582204,
0.68306595,
-0.088313825,
-0.107233785,
-0.53984404,
0.39104667,
1.1425363,
0.51777375,
2.9267018,
5.183814,
4.495046,
4.6087675,
18.143732,
12.06679,
8.621597,
7.8071413,
9.6548195,
8.168409,
7.199488,
7.962524,
13.9421425,
12.19501,
8.027851,
8.022394,
8.449041,
8.428407,
7.2122917,
9.045476,
12.2283,
16.851568,
4.1475954,
5.7582254,
3.977257,
1.8516432,
-0.32922924,
-0.12237206,
-0.072756164,
-0.6167613,
0.5225413,
0.37072095,
-0.6287377,
-0.7166235,
-0.37311992,
0.81874573,
0.17337193,
0.17729722,
0.40824133,
-0.3479744,
2.9783738,
4.5450144,
3.9617758,
4.9179983,
15.7159395,
10.0808935,
7.922992,
6.9472337,
9.000638,
7.62391,
6.7539964,
8.514194,
12.004702,
12.731859,
7.173314,
7.301387,
7.240425,
7.4015136,
7.516923,
8.6178665,
9.913477,
14.592376,
4.5969114,
5.9667635,
2.2334886,
2.1020658,
-0.9194653,
0.43381432,
-0.74259335,
-0.8438142,
0.01724637,
-0.6245163,
0.34715256,
-0.24820891,
-0.6074153,
-0.066010244,
-0.05560958,
-0.32758415,
0.3784681,
-0.09629097,
2.7877793,
4.203103,
3.26329,
4.44158,
12.650619,
8.000976,
5.2695656,
5.8276386,
7.0067124,
6.36843,
5.256174,
7.340733,
9.230904,
13.014863,
5.453347,
6.2923303,
6.518343,
6.5802903,
5.615034,
7.000242,
8.82858,
11.683347,
3.8504424,
4.365258,
3.2354295,
2.2202947,
0.5615039,
0.41533247,
0.21722497,
0.3176445,
0.2709266,
-0.2929376,
0.090651914,
-0.32017383,
-0.30647907,
0.15408067,
-0.3604456,
0.6241022,
0.42943946,
0.30790985,
2.0098479,
3.1669462,
3.8518548,
4.0607076,
11.639872,
5.7104745,
7.125849,
5.09103,
5.6111135,
3.951972,
4.0356493,
7.02897,
11.430392,
11.738871,
4.115266,
5.621048,
5.3278913,
5.120655,
5.990115,
5.7664003,
5.7767644,
9.013329,
2.9515538,
5.6055756,
4.1827626,
1.7799046,
-0.21542077,
0.24031225,
-0.6824815,
-0.6190339,
0.6256524,
-0.48574805,
0.09997501,
0.3266095,
0.07135873,
-0.3254111,
-0.047491744,
-0.014772129,
-0.38849118,
0.286563,
2.9551277,
3.957588,
3.0914695,
3.1707056,
8.462824,
4.728864,
5.0381837,
4.0804534,
5.1110387,
4.62399,
4.415538,
6.1308045,
10.654469,
10.723281,
4.4972973,
3.627521,
3.8499038,
4.373936,
4.0010695,
4.3314424,
6.3237967,
7.2798166,
2.3315697,
4.04032,
3.2531312,
2.022844,
-0.5356632,
0.52645034,
0.11135009,
-0.26490784,
0.39241284,
0.13336958,
-0.15545088,
-0.048340384,
0.6705195,
-0.14051451,
-0.7617515,
0.11379189,
0.21909207,
0.63809645,
1.5451268,
4.243852,
3.2245193,
3.3400161,
6.511011,
4.033045,
2.8604522,
3.6116364,
3.5580635,
3.1904101,
2.9593391,
4.813459,
8.871713,
8.875507,
2.922824,
2.6118903,
3.5907378,
2.6278322,
3.5242443,
3.0563798,
4.969574,
5.5496926,
3.3797112,
3.520721,
2.3572729,
1.7771024,
-0.43368375,
-0.6439688,
-0.56648374,
0.25869504,
-0.13318418,
-0.25542453,
-1.2330167,
0.34627095,
1.5127228,
-0.6055812,
0.6232876,
0.23605451,
-0.5616809,
0.500821,
]).reshape((40, 40))
|
[
"numpy.array"
] |
[((1821, 2027), 'numpy.array', 'np.array', (['[1.4802036, 1.8915913, -0.011120212, 1.1328301, 1.2841645, 0.6033605, -\n 1.887041, -2.012894, 0.046582267, 1.5555662, 0.4305847, -1.7179363, -\n 1.1399889, -0.4432812, -1.4721184, 0.35457477]'], {}), '([1.4802036, 1.8915913, -0.011120212, 1.1328301, 1.2841645, \n 0.6033605, -1.887041, -2.012894, 0.046582267, 1.5555662, 0.4305847, -\n 1.7179363, -1.1399889, -0.4432812, -1.4721184, 0.35457477])\n', (1829, 2027), True, 'import numpy as np\n'), ((2116, 2331), 'numpy.array', 'np.array', (['[1.2321296, -0.020694781, -1.3441145, -0.51342154, -0.6282792, -0.22180416,\n -1.0089059, 1.4475185, -1.8519154, 0.5540126, -1.3644233, 1.5542297, -\n 0.4033564, -0.029513652, -0.14812116, 0.93214256]'], {}), '([1.2321296, -0.020694781, -1.3441145, -0.51342154, -0.6282792, -\n 0.22180416, -1.0089059, 1.4475185, -1.8519154, 0.5540126, -1.3644233, \n 1.5542297, -0.4033564, -0.029513652, -0.14812116, 0.93214256])\n', (2124, 2331), True, 'import numpy as np\n'), ((2417, 2632), 'numpy.array', 'np.array', (['[0.010279292, -1.6109133, 0.85784495, 0.8826037, 0.19365458, -0.36963812, \n 1.2059057, -0.93545884, 0.38819882, 1.6983186, -1.8130875, 0.94406796, \n -0.79738003, -1.0478632, -0.38848934, -0.48529625]'], {}), '([0.010279292, -1.6109133, 0.85784495, 0.8826037, 0.19365458, -\n 0.36963812, 1.2059057, -0.93545884, 0.38819882, 1.6983186, -1.8130875, \n 0.94406796, -0.79738003, -1.0478632, -0.38848934, -0.48529625])\n', (2425, 2632), True, 'import numpy as np\n'), ((2715, 2737), 'numpy.array', 'np.array', (['[-0.5514385]'], {}), '([-0.5514385])\n', (2723, 2737), True, 'import numpy as np\n'), ((2773, 3651), 'numpy.array', 'np.array', (['[0.01, 0.014871794871794873, 0.019743589743589744, 0.024615384615384615, \n 0.029487179487179487, 0.03435897435897436, 0.039230769230769236, \n 0.04410256410256411, 0.04897435897435898, 0.05384615384615385, \n 0.05871794871794872, 0.06358974358974359, 0.06846153846153846, \n 0.07333333333333333, 0.0782051282051282, 0.08307692307692308, \n 0.08794871794871795, 0.09282051282051282, 0.09769230769230769, \n 0.10256410256410256, 0.10743589743589743, 0.1123076923076923, \n 0.11717948717948717, 0.12205128205128205, 0.12692307692307694, \n 0.13179487179487182, 0.1366666666666667, 0.14153846153846156, \n 0.14641025641025643, 0.1512820512820513, 0.15615384615384617, \n 0.16102564102564104, 0.1658974358974359, 0.17076923076923078, \n 0.17564102564102566, 0.18051282051282053, 0.1853846153846154, \n 0.19025641025641027, 0.19512820512820514, 0.2]'], {}), '([0.01, 0.014871794871794873, 0.019743589743589744, \n 0.024615384615384615, 0.029487179487179487, 0.03435897435897436, \n 0.039230769230769236, 0.04410256410256411, 0.04897435897435898, \n 0.05384615384615385, 0.05871794871794872, 0.06358974358974359, \n 0.06846153846153846, 0.07333333333333333, 0.0782051282051282, \n 0.08307692307692308, 0.08794871794871795, 0.09282051282051282, \n 0.09769230769230769, 0.10256410256410256, 0.10743589743589743, \n 0.1123076923076923, 0.11717948717948717, 0.12205128205128205, \n 0.12692307692307694, 0.13179487179487182, 0.1366666666666667, \n 0.14153846153846156, 0.14641025641025643, 0.1512820512820513, \n 0.15615384615384617, 0.16102564102564104, 0.1658974358974359, \n 0.17076923076923078, 0.17564102564102566, 0.18051282051282053, \n 0.1853846153846154, 0.19025641025641027, 0.19512820512820514, 0.2])\n', (2781, 3651), True, 'import numpy as np\n'), ((3792, 3823), 'numpy.array', 'np.array', (['[0.10500000000000001]'], {}), '([0.10500000000000001])\n', (3800, 3823), 
True, 'import numpy as np\n'), ((3860, 23159), 'numpy.array', 'np.array', (['[-0.66101485, 0.31644753, -0.5896422, 0.4764485, 2.1545932, 15.793148, \n 8.2264805, 6.457074, 5.7062893, 6.1811686, 8.777044, 6.9074125, \n 7.9522552, 7.701313, 8.559349, 8.296498, 6.1969037, 6.4804926, \n 6.8852997, 8.830744, 14.376627, 0.54612935, 0.124028, 0.44405863, \n 0.5131382, 0.5987899, 0.008983987, -0.24756075, 0.7618118, -0.21146192,\n 0.4546959, 0.09494688, -0.26813537, 0.5798886, -0.10784844, 0.18372172,\n 0.8161483, -0.3787802, 0.61460984, -0.41957632, 0.13647377, -0.3481221,\n 0.03326019, 1.7144626, 3.8620698, 14.40822, 9.046495, 7.6838465, \n 7.2554746, 8.057631, 11.189637, 9.038466, 8.125581, 8.294034, 10.172681,\n 11.90528, 7.1925435, 6.708079, 7.6085744, 9.414239, 14.608672, \n 1.5265317, 1.09792, 0.29970562, 0.29824358, 0.36030084, -0.37960574, \n 0.47860667, 0.91203105, -0.6904322, -0.2722036, 0.23733543, -0.6658274,\n 0.62095886, 0.73466265, -0.8475226, -0.1700871, 0.9261157, 0.422822, \n 0.32836267, 0.58122945, -0.83155084, -0.20049855, -0.040298104, \n 4.014356, 16.160791, 7.2828264, 7.3377733, 6.665611, 8.653453, \n 11.973017, 9.656379, 10.9801235, 9.05112, 10.565474, 11.942185, \n 7.2904882, 7.4630857, 6.514908, 9.644132, 14.969957, 0.07107994, \n 0.11467081, 0.92357284, 0.04355552, 0.6726098, -0.15279476, 0.713554, \n 0.5466241, -0.38109347, 0.5590394, 0.08306945, 0.9525252, 0.6713458, \n 0.51892877, -0.1279359, -0.15663871, 0.020156374, -0.060285714, -\n 1.0264076, -0.53699505, -0.9786586, 0.015289649, 1.5724823, 4.0689135, \n 13.646254, 8.417458, 7.3368583, 6.966266, 8.73208, 14.498494, \n 10.2102165, 11.423929, 11.351579, 12.9430065, 15.01266, 9.051174, \n 7.077483, 6.785291, 9.483119, 15.76488, 1.1677985, 1.6693239, -\n 0.21604359, 0.32284033, -0.22243214, 0.60323435, -0.11199745, \n 0.29957047, 0.006062749, 0.7996792, 0.3094816, -0.7718058, 0.503415, \n 0.07231447, -0.2853677, 0.4330218, 0.844616, -0.19574685, -0.3879851, \n 0.5901966, 0.051313907, -0.29432508, 1.2537544, 3.1426716, 14.615546, \n 8.347049, 7.4366584, 6.4491363, 9.865336, 15.843064, 12.469691, \n 11.894229, 12.133173, 14.63979, 16.16245, 9.504371, 8.017702, 7.867693,\n 9.518961, 14.380217, 0.66953653, 0.60293055, 0.00082825124, -0.28320992,\n 0.8367502, 0.12513764, 0.22053392, -0.10229007, -0.20082277, 0.63717407,\n 0.32739908, -0.093239225, -0.80318755, 0.9917766, 0.24838758, -\n 0.07330545, 0.15537623, 0.09008534, -0.06607497, 1.0962121, 0.55644095,\n 0.6913326, 0.9021442, 3.8921309, 14.102233, 7.184174, 7.315026, \n 7.334084, 10.787065, 19.485243, 13.958044, 14.3500805, 13.616628, \n 15.63192, 17.07027, 9.131023, 6.8167133, 6.970449, 8.922994, 14.361785,\n 1.7793398, 0.94775784, 0.105669454, -0.18747061, 0.6676264, -0.3883816,\n -0.6202498, -0.0833843, -0.5216094, 1.1268811, -0.59910476, 0.39042526,\n 0.47714886, -0.7111677, -0.5756576, 0.9333002, 0.1010186, 0.13677923, -\n 0.75147396, 1.2583244, -0.23063457, 0.7901664, 0.24705392, 3.6259048, \n 12.530731, 6.9297647, 7.079164, 7.2256374, 11.940973, 20.025602, \n 14.700426, 13.519883, 14.241193, 17.55714, 17.386055, 10.167002, \n 7.536337, 7.0136056, 9.326938, 12.228463, 0.17775005, 0.8319777, -\n 0.8991761, -0.01412341, 0.61705685, -0.14188325, -0.41435227, -0.316557,\n -0.5893145, -0.010637931, 0.20675054, 0.44020182, -0.7080041, \n 0.16052538, -0.48142046, 0.9052833, 0.432698, 0.03338314, 0.35594848, \n 1.1689888, 0.36019892, 0.23971666, 1.4662509, 3.3352752, 11.360069, \n 8.300535, 7.5611286, 7.2111707, 17.327162, 20.148909, 17.380922, \n 17.596447, 14.160338, 
19.188683, 17.219112, 10.499862, 8.309862, \n 6.1963353, 7.3864193, 12.878287, 1.4184926, 1.7496321, -0.082713336, \n 0.23216072, 0.20258206, 1.0141679, 0.14271286, -0.29340488, -\n 0.055605985, -0.5336929, -0.54352623, 0.19902669, 0.12139763, -\n 0.018293247, -0.20558693, -0.8606704, 0.22833318, 0.4463366, 0.20494421,\n 0.7066752, -0.62247527, 0.117985666, 1.831157, 3.299585, 9.63925, \n 7.483565, 7.1289496, 6.4751153, 15.985568, 21.507505, 18.539736, \n 16.699535, 16.726501, 19.698357, 22.443224, 11.952675, 7.005475, \n 6.2864413, 8.778635, 10.89195, 0.66351974, 1.1440128, -0.25076824, \n 0.66586065, 1.0526825, 0.015522989, 0.07891381, 1.104366, 0.7747889, \n 0.15351877, -0.12182697, -0.59052014, -0.12581429, 0.5053382, \n 0.17305401, 0.67090386, 1.036633, 0.05909565, 0.28418896, 0.86726683, \n 0.1763895, 0.33444333, 1.7197226, 2.5705223, 9.934082, 6.614648, \n 5.9702163, 7.0940704, 18.322672, 24.886862, 18.648033, 19.174364, \n 17.071978, 18.935146, 20.495438, 13.39125, 7.1744776, 5.476832, \n 7.2689962, 10.46958, 1.1804211, 1.0994785, 0.64040864, 0.021063149, \n 0.75519574, 0.40024444, -0.48553574, 0.87461084, -0.23675112, 0.1914608,\n -0.49892142, 0.2618199, 0.6261685, -1.4913763, 0.41756257, 0.5763335, -\n 0.45616063, 0.38227928, -0.6692691, 1.8232274, 0.7977414, 0.40125495, \n 2.787939, 3.2074018, 8.831141, 6.6602535, 7.500632, 8.793667, 18.995548,\n 23.698793, 18.186054, 17.543282, 18.392523, 20.788607, 24.634804, \n 14.188387, 8.168461, 5.5740485, 6.8008204, 8.531001, 1.4529983, \n 2.276989, 1.0289037, 0.9468033, -0.038641334, -0.39401633, -1.1387177, \n 0.49660775, 0.5171432, -0.6254447, 1.2226907, -0.13812594, 0.11419458, \n -0.36041245, 0.16572447, -0.2501292, -0.95744544, 0.6987992, 0.3099944,\n 1.108943, 0.41807377, 1.350997, 1.2673455, 3.2821457, 8.0927515, \n 5.9851384, 4.8361425, 8.642136, 20.54146, 23.320255, 20.936903, \n 19.881096, 18.084406, 20.986282, 22.538109, 15.849695, 7.59143, \n 5.759286, 7.9955835, 7.542832, 1.5869404, 2.191163, -0.0054766536, \n 0.38372415, 1.4580531, -0.6341528, -0.20307654, -0.82046396, 0.30573404,\n 0.59632486, -0.12896755, -0.42806864, -0.47942856, -0.7036555, \n 0.075889945, 0.29308736, -1.4974035, -0.036708307, -0.43896213, \n 0.54672736, 1.3562044, 1.5058006, 2.0175235, 3.2622445, 7.817541, \n 6.1968045, 5.7298784, 8.535798, 22.878216, 23.569859, 21.438442, \n 20.779306, 18.338245, 23.335554, 23.656643, 16.534071, 7.0056953, \n 5.3699074, 6.2035737, 6.91238, 1.8461741, 2.0328891, 0.6284174, \n 0.07324934, 0.72266495, 0.43248987, 0.55657876, -0.36850226, 0.2892055,\n 0.120979175, -0.3255677, 0.18210961, -0.13677588, -0.79952997, -\n 0.16948017, 0.27382505, 0.011414817, -0.002753294, 0.1875501, 1.7294772,\n 0.86453336, 0.8789885, 2.0237687, 2.686733, 7.0931683, 6.7965593, \n 5.703301, 9.106176, 19.852842, 22.134148, 24.209602, 20.48003, 19.87589,\n 22.650255, 24.67572, 17.161873, 7.185769, 5.12218, 5.9893394, 5.907269,\n 2.1844404, 1.9687537, 1.0286644, 0.052360654, 1.7644687, 0.5339646, -\n 0.53046066, -0.2281848, -1.2462859, 0.6778776, 0.5408989, -0.14820653, \n 0.38658077, -0.65733767, 0.014478714, 0.45866382, 0.47466084, \n 0.48330665, 0.52647215, 1.6572766, -0.093874216, 1.0939939, 2.8252633, \n 3.250628, 7.286972, 5.736179, 5.5879693, 9.545634, 22.925808, 23.213871,\n 23.39594, 21.748808, 22.024412, 24.974943, 23.57301, 18.065563, \n 8.397812, 4.8709254, 7.626314, 4.6410003, 1.8595266, 3.0831103, \n 1.4402436, 1.2672244, 1.312456, -0.18201214, 0.21097422, -0.026861114, \n 0.18476872, 0.7252849, -0.002409873, -0.29303908, 1.3546691, 
-\n 0.04322617, -0.053203642, -0.30067968, -0.12050266, -0.5528519, \n 0.057745364, 1.3053449, 1.8519605, 1.8503615, 2.5469666, 4.2060847, \n 5.5301046, 7.0553675, 5.9386334, 11.875089, 23.438046, 20.363987, \n 23.725615, 20.967691, 21.432257, 24.202627, 19.774887, 18.783188, \n 7.98809, 6.2239876, 7.760503, 5.212336, 2.9735184, 2.7213335, 2.0156252,\n 1.814288, 2.2770615, 0.01533184, 0.58220863, -0.49351138, 0.31417957, -\n 0.36469758, 0.45743746, 0.66627234, 0.3081961, 0.828259, -0.31382263, \n 0.26520026, 0.22944771, -0.6709603, -0.07570245, 1.5327783, 1.7784487, \n 2.6468341, 3.198592, 3.7656205, 5.9252257, 6.9020658, 4.9581833, \n 12.047751, 22.348654, 20.17518, 24.174393, 21.535011, 19.05106, \n 22.163195, 21.497072, 18.43445, 8.682917, 5.3132563, 7.030179, 3.717919,\n 2.0626392, 2.4575338, 2.2717822, 0.8625143, 2.4770658, -0.786061, \n 1.2881083, -0.2518999, 0.72405684, -0.122574806, -0.34197915, \n 0.13918422, 0.26873538, -0.47515658, -0.54810023, 0.89566797, -\n 0.54384357, -0.12311963, 0.567525, 2.7046611, 1.5512958, 1.7786896, \n 3.8791292, 3.9559023, 4.788476, 8.228316, 5.3946, 12.281274, 21.967098,\n 20.923243, 23.913458, 20.710938, 19.420635, 25.138704, 18.289383, \n 19.177135, 8.415327, 4.8929396, 8.965305, 4.3885813, 3.4578655, \n 3.0384607, 1.5863328, 1.91974, 2.4258208, 0.5892152, 0.048560977, -\n 0.13528748, -0.21397328, 0.16264682, -0.57951355, -0.40301454, \n 0.21641892, -0.22450455, 0.38177252, -0.967473, -0.35485935, \n 0.062246032, -0.03395147, 2.1338463, 1.9084859, 3.1863737, 1.9375713, \n 3.4518764, 6.570703, 6.878443, 5.679476, 13.351213, 22.931889, \n 19.282558, 22.36135, 23.796984, 21.032475, 23.09803, 20.966232, \n 20.72223, 6.7338567, 6.4885483, 7.190284, 4.9310346, 3.1236634, \n 3.5150487, 2.9693668, 2.2454295, 1.82249, -0.09966546, 0.72314006, -\n 0.79027426, 0.41793302, -0.14793015, 0.45988762, 0.8456978, -0.5273398,\n 0.1830612, -1.0828326, -1.0117317, -0.3019783, 0.17001551, -0.62556803,\n 2.961217, 2.6823378, 2.9682546, 5.2445164, 4.9527783, 6.309333, \n 7.7392774, 6.2129936, 15.35368, 20.683935, 20.589102, 22.10926, \n 20.185204, 20.562426, 22.645317, 18.869568, 20.659521, 8.880328, \n 6.4410696, 9.769155, 5.5935693, 5.527752, 4.5683465, 3.4019177, \n 3.3163903, 2.244741, 0.38402623, 0.2960868, -0.4828044, 0.13759217, \n 0.25681636, 0.11657055, -0.330115, 0.4011577, -0.7654019, 0.14916949, -\n 0.6228205, -0.96823233, -0.022868, -0.49047035, 3.20636, 2.6912642, \n 2.9050756, 4.912674, 5.7441964, 6.489336, 9.632326, 6.2825303, 16.68777,\n 21.077969, 17.172966, 18.92938, 23.38385, 20.251026, 22.16378, \n 18.001736, 20.24098, 11.019654, 6.6073513, 8.655663, 6.298364, \n 6.4654784, 3.6983974, 3.1087956, 2.226927, 2.6668777, -0.35526595, \n 1.4488825, 0.20488043, 0.047601122, -0.6924504, 0.57495445, 0.5399022, \n -0.47663862, 0.8161736, -0.36598107, -0.59101355, 0.20327158, \n 0.41677478, 0.27029967, 3.7847342, 3.2484818, 3.747693, 4.7734656, \n 6.716756, 8.185982, 9.418276, 7.493696, 14.704602, 17.729408, 17.48148,\n 19.855602, 20.371563, 18.5821, 18.155266, 16.968113, 17.100256, \n 10.015516, 7.8247633, 8.993816, 6.4911056, 6.2132425, 4.3434267, \n 3.7000012, 3.7377622, 3.1024928, -0.30869377, 0.051026687, -0.34078225,\n 0.7479868, 0.03696166, -0.75611556, 1.1542099, -0.028129257, 0.08181842,\n 0.09559424, 0.8364861, 0.096545294, 0.5584201, -0.5194905, 3.589691, \n 4.05453, 3.794124, 4.707637, 9.231918, 8.564278, 9.2333975, 7.006125, \n 16.20831, 19.324417, 15.819074, 19.356344, 17.93927, 18.384487, \n 18.001207, 16.142382, 21.02356, 9.986794, 6.614442, 
10.657583, \n 6.6237283, 8.433239, 4.4907804, 4.2819304, 3.7269611, 3.5132716, \n 0.4662154, 0.30799574, 0.96793664, -0.23279454, -0.65458816, 0.3362532,\n -0.25408295, 0.06732974, 0.4873681, 0.51199776, 0.14874719, -0.29994798,\n 0.4666868, 0.33490536, 3.3489285, 2.9599032, 3.7671084, 5.274986, \n 11.143537, 9.2554245, 9.07235, 9.138557, 17.255503, 18.355011, \n 15.364281, 17.336935, 18.85955, 17.050003, 15.608138, 15.812602, \n 18.231024, 11.6336155, 6.9478188, 11.149977, 7.419574, 10.250601, \n 4.7022414, 3.971905, 4.7929826, 3.3438401, -0.39000547, -0.28059074, \n 0.6398243, 0.54544014, 0.6069346, -0.17257981, 0.22857136, 0.5565434, \n 0.004583537, -1.6335539, -0.8888735, -0.51765877, 0.25269827, -\n 0.01876194, 3.6656997, 3.8518455, 5.484056, 6.189166, 12.860901, \n 9.803692, 10.184517, 8.937886, 17.70772, 18.956602, 15.036017, \n 18.585073, 18.892986, 18.184309, 15.378883, 13.1691475, 16.713081, \n 11.373385, 10.050861, 11.757488, 10.44355, 12.29941, 4.694755, 5.29064,\n 3.8482742, 3.204164, 0.0923521, 0.023937136, 0.1471634, 0.6328977, \n 0.086753555, 0.4752982, -0.6725007, 0.39593527, 0.22832835, -0.27118513,\n -0.8305444, 0.61332023, -0.46385112, -0.07130288, 3.392937, 5.612763, \n 5.2056, 5.706025, 15.220109, 11.131699, 11.811647, 9.684384, 18.768026,\n 16.84839, 13.052551, 16.32535, 17.554602, 17.395172, 14.127713, 12.6871,\n 17.62177, 11.645812, 8.629343, 11.129438, 11.581531, 14.195255, \n 4.8469067, 5.1938415, 4.0862703, 3.181031, -1.0452468, -0.25019166, -\n 0.7914238, 0.12144237, -0.41462633, 0.54280686, -0.69631076, 0.3511648,\n 0.004874259, -0.06835556, 0.8735261, 0.24838078, -0.31527227, \n 0.52716863, 3.9399889, 6.0550613, 6.129095, 6.861085, 18.186186, \n 11.700109, 9.944186, 8.473949, 16.194746, 15.487744, 11.69865, \n 15.148699, 17.62606, 18.724825, 14.773164, 12.397501, 17.29195, \n 12.904611, 10.236364, 9.858109, 12.551205, 17.244278, 5.081826, \n 5.861555, 4.532901, 2.9011462, -0.6339103, -0.14527631, -0.34604034, \n 0.16419859, -0.21205892, 1.0102317, -0.6850754, -0.35831228, 0.2243401,\n -0.12707797, 0.12315286, 0.75053287, -0.30611196, 0.946708, 3.2013948, \n 5.563331, 4.7585716, 7.213843, 20.686522, 11.607341, 12.30799, 10.50174,\n 15.599098, 14.504682, 13.629604, 13.69594, 17.019728, 16.432478, \n 13.931328, 13.392891, 16.40223, 12.716988, 10.136288, 11.304484, \n 14.544636, 18.359613, 5.5700507, 5.302722, 5.3971443, 4.0632043, \n 0.34419727, -0.43536162, 0.2166448, -0.95898896, 0.54851377, 0.7104762,\n 0.73580873, -0.025371978, -0.42447037, -0.055623855, -0.057257153, -\n 0.042765763, -0.32910374, 0.110769786, 4.9113693, 6.042119, 5.789901, \n 8.213889, 21.399662, 13.620898, 12.268165, 12.022924, 15.812675, \n 14.541431, 11.235446, 13.432023, 16.380638, 17.424328, 13.075844, \n 13.108509, 16.125572, 12.70376, 9.833503, 12.167731, 15.966658, \n 19.35662, 4.726227, 5.754112, 5.277654, 3.513394, 0.27682012, -\n 0.6424214, 0.63972783, 0.052361738, 0.6900285, 0.8120001, 0.13217215, -\n 0.06418637, -0.34938893, -0.1332957, -0.14414565, 0.13367409, 0.2113514,\n 0.013457297, 5.1611977, 5.566288, 5.6893077, 6.982988, 20.4595, \n 14.453565, 13.59946, 10.934562, 16.137613, 14.927114, 11.994792, \n 13.434463, 17.021969, 17.274439, 13.322607, 11.919087, 16.481926, \n 12.076119, 10.847066, 11.398886, 16.077639, 19.727343, 4.5308523, \n 6.236413, 4.8869467, 3.9474933, 0.5430834, -0.16916445, 1.1437705, \n 0.16070405, 0.31188658, 0.8880989, -0.14495048, -0.5266939, 0.22656989,\n 0.3505556, 0.015732061, -0.005636345, -0.56870633, 0.40287915, \n 4.4800043, 4.970619, 
4.5086727, 7.2337227, 21.180979, 13.984755, \n 12.418574, 10.579776, 14.925623, 11.359912, 10.660921, 12.467203, \n 17.208267, 17.148045, 11.586628, 11.8577, 13.493896, 13.254265, \n 10.851606, 13.149869, 17.053873, 19.849815, 4.9660897, 5.8460274, \n 3.998473, 3.6802619, 0.8031087, -0.013905935, 0.3503995, 0.31186494, -\n 0.038673762, -0.07608058, 0.21588215, -0.23191574, -0.3952367, -\n 0.09744672, 0.10716237, -1.3977432, -0.2775279, 0.28267142, 3.4341362, \n 5.5165367, 4.798283, 5.5223513, 23.267078, 15.076336, 13.030845, \n 10.9562845, 13.846566, 11.140822, 10.528686, 12.319912, 15.81127, \n 17.356304, 10.330765, 10.917309, 11.82135, 11.22828, 9.395469, \n 12.859789, 15.528548, 18.173409, 4.9549546, 7.068773, 5.830448, \n 2.882567, -0.47524917, -0.3299339, 0.19532575, -0.5605442, -0.05505767,\n -0.22165492, -0.4325593, 0.13398468, -0.34254703, 0.0140561955, -\n 0.31874263, -0.14240773, -0.91078305, 0.69452536, 4.23155, 5.7011547, \n 6.0003905, 6.377488, 20.312622, 13.978043, 11.040157, 11.176402, \n 13.108543, 9.652381, 9.632209, 11.781593, 14.856762, 15.745179, \n 9.215103, 9.966311, 12.876652, 11.37008, 10.591258, 10.1424675, \n 14.367625, 19.73172, 3.84762, 7.103483, 3.7233605, 2.376824, 0.5252924,\n 0.38380843, 0.99321234, -0.46900645, 0.12149067, 0.42257598, 0.0632253,\n -0.6670193, 0.03464376, 0.452787, 0.29236665, -0.017891373, -\n 0.075127214, 0.9828477, 2.3365817, 5.2860856, 4.3626456, 5.785785, \n 20.600492, 12.966171, 11.047343, 9.063554, 10.454045, 10.47048, \n 9.218836, 11.104739, 15.136548, 14.689532, 10.122101, 9.4212675, \n 11.134829, 8.617753, 9.327736, 11.278048, 13.085438, 18.43459, \n 3.9763334, 5.9072723, 3.9930198, 3.4963682, 0.2813723, 1.0457343, \n 0.31889322, 0.37867522, 1.2037315, -0.47904515, 0.582204, 0.68306595, -\n 0.088313825, -0.107233785, -0.53984404, 0.39104667, 1.1425363, \n 0.51777375, 2.9267018, 5.183814, 4.495046, 4.6087675, 18.143732, \n 12.06679, 8.621597, 7.8071413, 9.6548195, 8.168409, 7.199488, 7.962524,\n 13.9421425, 12.19501, 8.027851, 8.022394, 8.449041, 8.428407, 7.2122917,\n 9.045476, 12.2283, 16.851568, 4.1475954, 5.7582254, 3.977257, 1.8516432,\n -0.32922924, -0.12237206, -0.072756164, -0.6167613, 0.5225413, \n 0.37072095, -0.6287377, -0.7166235, -0.37311992, 0.81874573, 0.17337193,\n 0.17729722, 0.40824133, -0.3479744, 2.9783738, 4.5450144, 3.9617758, \n 4.9179983, 15.7159395, 10.0808935, 7.922992, 6.9472337, 9.000638, \n 7.62391, 6.7539964, 8.514194, 12.004702, 12.731859, 7.173314, 7.301387,\n 7.240425, 7.4015136, 7.516923, 8.6178665, 9.913477, 14.592376, \n 4.5969114, 5.9667635, 2.2334886, 2.1020658, -0.9194653, 0.43381432, -\n 0.74259335, -0.8438142, 0.01724637, -0.6245163, 0.34715256, -0.24820891,\n -0.6074153, -0.066010244, -0.05560958, -0.32758415, 0.3784681, -\n 0.09629097, 2.7877793, 4.203103, 3.26329, 4.44158, 12.650619, 8.000976,\n 5.2695656, 5.8276386, 7.0067124, 6.36843, 5.256174, 7.340733, 9.230904,\n 13.014863, 5.453347, 6.2923303, 6.518343, 6.5802903, 5.615034, 7.000242,\n 8.82858, 11.683347, 3.8504424, 4.365258, 3.2354295, 2.2202947, \n 0.5615039, 0.41533247, 0.21722497, 0.3176445, 0.2709266, -0.2929376, \n 0.090651914, -0.32017383, -0.30647907, 0.15408067, -0.3604456, \n 0.6241022, 0.42943946, 0.30790985, 2.0098479, 3.1669462, 3.8518548, \n 4.0607076, 11.639872, 5.7104745, 7.125849, 5.09103, 5.6111135, 3.951972,\n 4.0356493, 7.02897, 11.430392, 11.738871, 4.115266, 5.621048, 5.3278913,\n 5.120655, 5.990115, 5.7664003, 5.7767644, 9.013329, 2.9515538, \n 5.6055756, 4.1827626, 1.7799046, -0.21542077, 0.24031225, 
-0.6824815, -\n 0.6190339, 0.6256524, -0.48574805, 0.09997501, 0.3266095, 0.07135873, -\n 0.3254111, -0.047491744, -0.014772129, -0.38849118, 0.286563, 2.9551277,\n 3.957588, 3.0914695, 3.1707056, 8.462824, 4.728864, 5.0381837, \n 4.0804534, 5.1110387, 4.62399, 4.415538, 6.1308045, 10.654469, \n 10.723281, 4.4972973, 3.627521, 3.8499038, 4.373936, 4.0010695, \n 4.3314424, 6.3237967, 7.2798166, 2.3315697, 4.04032, 3.2531312, \n 2.022844, -0.5356632, 0.52645034, 0.11135009, -0.26490784, 0.39241284, \n 0.13336958, -0.15545088, -0.048340384, 0.6705195, -0.14051451, -\n 0.7617515, 0.11379189, 0.21909207, 0.63809645, 1.5451268, 4.243852, \n 3.2245193, 3.3400161, 6.511011, 4.033045, 2.8604522, 3.6116364, \n 3.5580635, 3.1904101, 2.9593391, 4.813459, 8.871713, 8.875507, 2.922824,\n 2.6118903, 3.5907378, 2.6278322, 3.5242443, 3.0563798, 4.969574, \n 5.5496926, 3.3797112, 3.520721, 2.3572729, 1.7771024, -0.43368375, -\n 0.6439688, -0.56648374, 0.25869504, -0.13318418, -0.25542453, -\n 1.2330167, 0.34627095, 1.5127228, -0.6055812, 0.6232876, 0.23605451, -\n 0.5616809, 0.500821]'], {}), '([-0.66101485, 0.31644753, -0.5896422, 0.4764485, 2.1545932, \n 15.793148, 8.2264805, 6.457074, 5.7062893, 6.1811686, 8.777044, \n 6.9074125, 7.9522552, 7.701313, 8.559349, 8.296498, 6.1969037, \n 6.4804926, 6.8852997, 8.830744, 14.376627, 0.54612935, 0.124028, \n 0.44405863, 0.5131382, 0.5987899, 0.008983987, -0.24756075, 0.7618118, \n -0.21146192, 0.4546959, 0.09494688, -0.26813537, 0.5798886, -0.10784844,\n 0.18372172, 0.8161483, -0.3787802, 0.61460984, -0.41957632, 0.13647377,\n -0.3481221, 0.03326019, 1.7144626, 3.8620698, 14.40822, 9.046495, \n 7.6838465, 7.2554746, 8.057631, 11.189637, 9.038466, 8.125581, 8.294034,\n 10.172681, 11.90528, 7.1925435, 6.708079, 7.6085744, 9.414239, \n 14.608672, 1.5265317, 1.09792, 0.29970562, 0.29824358, 0.36030084, -\n 0.37960574, 0.47860667, 0.91203105, -0.6904322, -0.2722036, 0.23733543,\n -0.6658274, 0.62095886, 0.73466265, -0.8475226, -0.1700871, 0.9261157, \n 0.422822, 0.32836267, 0.58122945, -0.83155084, -0.20049855, -\n 0.040298104, 4.014356, 16.160791, 7.2828264, 7.3377733, 6.665611, \n 8.653453, 11.973017, 9.656379, 10.9801235, 9.05112, 10.565474, \n 11.942185, 7.2904882, 7.4630857, 6.514908, 9.644132, 14.969957, \n 0.07107994, 0.11467081, 0.92357284, 0.04355552, 0.6726098, -0.15279476,\n 0.713554, 0.5466241, -0.38109347, 0.5590394, 0.08306945, 0.9525252, \n 0.6713458, 0.51892877, -0.1279359, -0.15663871, 0.020156374, -\n 0.060285714, -1.0264076, -0.53699505, -0.9786586, 0.015289649, \n 1.5724823, 4.0689135, 13.646254, 8.417458, 7.3368583, 6.966266, 8.73208,\n 14.498494, 10.2102165, 11.423929, 11.351579, 12.9430065, 15.01266, \n 9.051174, 7.077483, 6.785291, 9.483119, 15.76488, 1.1677985, 1.6693239,\n -0.21604359, 0.32284033, -0.22243214, 0.60323435, -0.11199745, \n 0.29957047, 0.006062749, 0.7996792, 0.3094816, -0.7718058, 0.503415, \n 0.07231447, -0.2853677, 0.4330218, 0.844616, -0.19574685, -0.3879851, \n 0.5901966, 0.051313907, -0.29432508, 1.2537544, 3.1426716, 14.615546, \n 8.347049, 7.4366584, 6.4491363, 9.865336, 15.843064, 12.469691, \n 11.894229, 12.133173, 14.63979, 16.16245, 9.504371, 8.017702, 7.867693,\n 9.518961, 14.380217, 0.66953653, 0.60293055, 0.00082825124, -0.28320992,\n 0.8367502, 0.12513764, 0.22053392, -0.10229007, -0.20082277, 0.63717407,\n 0.32739908, -0.093239225, -0.80318755, 0.9917766, 0.24838758, -\n 0.07330545, 0.15537623, 0.09008534, -0.06607497, 1.0962121, 0.55644095,\n 0.6913326, 0.9021442, 3.8921309, 14.102233, 7.184174, 7.315026, \n 
7.334084, 10.787065, 19.485243, 13.958044, 14.3500805, 13.616628, \n 15.63192, 17.07027, 9.131023, 6.8167133, 6.970449, 8.922994, 14.361785,\n 1.7793398, 0.94775784, 0.105669454, -0.18747061, 0.6676264, -0.3883816,\n -0.6202498, -0.0833843, -0.5216094, 1.1268811, -0.59910476, 0.39042526,\n 0.47714886, -0.7111677, -0.5756576, 0.9333002, 0.1010186, 0.13677923, -\n 0.75147396, 1.2583244, -0.23063457, 0.7901664, 0.24705392, 3.6259048, \n 12.530731, 6.9297647, 7.079164, 7.2256374, 11.940973, 20.025602, \n 14.700426, 13.519883, 14.241193, 17.55714, 17.386055, 10.167002, \n 7.536337, 7.0136056, 9.326938, 12.228463, 0.17775005, 0.8319777, -\n 0.8991761, -0.01412341, 0.61705685, -0.14188325, -0.41435227, -0.316557,\n -0.5893145, -0.010637931, 0.20675054, 0.44020182, -0.7080041, \n 0.16052538, -0.48142046, 0.9052833, 0.432698, 0.03338314, 0.35594848, \n 1.1689888, 0.36019892, 0.23971666, 1.4662509, 3.3352752, 11.360069, \n 8.300535, 7.5611286, 7.2111707, 17.327162, 20.148909, 17.380922, \n 17.596447, 14.160338, 19.188683, 17.219112, 10.499862, 8.309862, \n 6.1963353, 7.3864193, 12.878287, 1.4184926, 1.7496321, -0.082713336, \n 0.23216072, 0.20258206, 1.0141679, 0.14271286, -0.29340488, -\n 0.055605985, -0.5336929, -0.54352623, 0.19902669, 0.12139763, -\n 0.018293247, -0.20558693, -0.8606704, 0.22833318, 0.4463366, 0.20494421,\n 0.7066752, -0.62247527, 0.117985666, 1.831157, 3.299585, 9.63925, \n 7.483565, 7.1289496, 6.4751153, 15.985568, 21.507505, 18.539736, \n 16.699535, 16.726501, 19.698357, 22.443224, 11.952675, 7.005475, \n 6.2864413, 8.778635, 10.89195, 0.66351974, 1.1440128, -0.25076824, \n 0.66586065, 1.0526825, 0.015522989, 0.07891381, 1.104366, 0.7747889, \n 0.15351877, -0.12182697, -0.59052014, -0.12581429, 0.5053382, \n 0.17305401, 0.67090386, 1.036633, 0.05909565, 0.28418896, 0.86726683, \n 0.1763895, 0.33444333, 1.7197226, 2.5705223, 9.934082, 6.614648, \n 5.9702163, 7.0940704, 18.322672, 24.886862, 18.648033, 19.174364, \n 17.071978, 18.935146, 20.495438, 13.39125, 7.1744776, 5.476832, \n 7.2689962, 10.46958, 1.1804211, 1.0994785, 0.64040864, 0.021063149, \n 0.75519574, 0.40024444, -0.48553574, 0.87461084, -0.23675112, 0.1914608,\n -0.49892142, 0.2618199, 0.6261685, -1.4913763, 0.41756257, 0.5763335, -\n 0.45616063, 0.38227928, -0.6692691, 1.8232274, 0.7977414, 0.40125495, \n 2.787939, 3.2074018, 8.831141, 6.6602535, 7.500632, 8.793667, 18.995548,\n 23.698793, 18.186054, 17.543282, 18.392523, 20.788607, 24.634804, \n 14.188387, 8.168461, 5.5740485, 6.8008204, 8.531001, 1.4529983, \n 2.276989, 1.0289037, 0.9468033, -0.038641334, -0.39401633, -1.1387177, \n 0.49660775, 0.5171432, -0.6254447, 1.2226907, -0.13812594, 0.11419458, \n -0.36041245, 0.16572447, -0.2501292, -0.95744544, 0.6987992, 0.3099944,\n 1.108943, 0.41807377, 1.350997, 1.2673455, 3.2821457, 8.0927515, \n 5.9851384, 4.8361425, 8.642136, 20.54146, 23.320255, 20.936903, \n 19.881096, 18.084406, 20.986282, 22.538109, 15.849695, 7.59143, \n 5.759286, 7.9955835, 7.542832, 1.5869404, 2.191163, -0.0054766536, \n 0.38372415, 1.4580531, -0.6341528, -0.20307654, -0.82046396, 0.30573404,\n 0.59632486, -0.12896755, -0.42806864, -0.47942856, -0.7036555, \n 0.075889945, 0.29308736, -1.4974035, -0.036708307, -0.43896213, \n 0.54672736, 1.3562044, 1.5058006, 2.0175235, 3.2622445, 7.817541, \n 6.1968045, 5.7298784, 8.535798, 22.878216, 23.569859, 21.438442, \n 20.779306, 18.338245, 23.335554, 23.656643, 16.534071, 7.0056953, \n 5.3699074, 6.2035737, 6.91238, 1.8461741, 2.0328891, 0.6284174, \n 0.07324934, 0.72266495, 0.43248987, 
0.55657876, -0.36850226, 0.2892055,\n 0.120979175, -0.3255677, 0.18210961, -0.13677588, -0.79952997, -\n 0.16948017, 0.27382505, 0.011414817, -0.002753294, 0.1875501, 1.7294772,\n 0.86453336, 0.8789885, 2.0237687, 2.686733, 7.0931683, 6.7965593, \n 5.703301, 9.106176, 19.852842, 22.134148, 24.209602, 20.48003, 19.87589,\n 22.650255, 24.67572, 17.161873, 7.185769, 5.12218, 5.9893394, 5.907269,\n 2.1844404, 1.9687537, 1.0286644, 0.052360654, 1.7644687, 0.5339646, -\n 0.53046066, -0.2281848, -1.2462859, 0.6778776, 0.5408989, -0.14820653, \n 0.38658077, -0.65733767, 0.014478714, 0.45866382, 0.47466084, \n 0.48330665, 0.52647215, 1.6572766, -0.093874216, 1.0939939, 2.8252633, \n 3.250628, 7.286972, 5.736179, 5.5879693, 9.545634, 22.925808, 23.213871,\n 23.39594, 21.748808, 22.024412, 24.974943, 23.57301, 18.065563, \n 8.397812, 4.8709254, 7.626314, 4.6410003, 1.8595266, 3.0831103, \n 1.4402436, 1.2672244, 1.312456, -0.18201214, 0.21097422, -0.026861114, \n 0.18476872, 0.7252849, -0.002409873, -0.29303908, 1.3546691, -\n 0.04322617, -0.053203642, -0.30067968, -0.12050266, -0.5528519, \n 0.057745364, 1.3053449, 1.8519605, 1.8503615, 2.5469666, 4.2060847, \n 5.5301046, 7.0553675, 5.9386334, 11.875089, 23.438046, 20.363987, \n 23.725615, 20.967691, 21.432257, 24.202627, 19.774887, 18.783188, \n 7.98809, 6.2239876, 7.760503, 5.212336, 2.9735184, 2.7213335, 2.0156252,\n 1.814288, 2.2770615, 0.01533184, 0.58220863, -0.49351138, 0.31417957, -\n 0.36469758, 0.45743746, 0.66627234, 0.3081961, 0.828259, -0.31382263, \n 0.26520026, 0.22944771, -0.6709603, -0.07570245, 1.5327783, 1.7784487, \n 2.6468341, 3.198592, 3.7656205, 5.9252257, 6.9020658, 4.9581833, \n 12.047751, 22.348654, 20.17518, 24.174393, 21.535011, 19.05106, \n 22.163195, 21.497072, 18.43445, 8.682917, 5.3132563, 7.030179, 3.717919,\n 2.0626392, 2.4575338, 2.2717822, 0.8625143, 2.4770658, -0.786061, \n 1.2881083, -0.2518999, 0.72405684, -0.122574806, -0.34197915, \n 0.13918422, 0.26873538, -0.47515658, -0.54810023, 0.89566797, -\n 0.54384357, -0.12311963, 0.567525, 2.7046611, 1.5512958, 1.7786896, \n 3.8791292, 3.9559023, 4.788476, 8.228316, 5.3946, 12.281274, 21.967098,\n 20.923243, 23.913458, 20.710938, 19.420635, 25.138704, 18.289383, \n 19.177135, 8.415327, 4.8929396, 8.965305, 4.3885813, 3.4578655, \n 3.0384607, 1.5863328, 1.91974, 2.4258208, 0.5892152, 0.048560977, -\n 0.13528748, -0.21397328, 0.16264682, -0.57951355, -0.40301454, \n 0.21641892, -0.22450455, 0.38177252, -0.967473, -0.35485935, \n 0.062246032, -0.03395147, 2.1338463, 1.9084859, 3.1863737, 1.9375713, \n 3.4518764, 6.570703, 6.878443, 5.679476, 13.351213, 22.931889, \n 19.282558, 22.36135, 23.796984, 21.032475, 23.09803, 20.966232, \n 20.72223, 6.7338567, 6.4885483, 7.190284, 4.9310346, 3.1236634, \n 3.5150487, 2.9693668, 2.2454295, 1.82249, -0.09966546, 0.72314006, -\n 0.79027426, 0.41793302, -0.14793015, 0.45988762, 0.8456978, -0.5273398,\n 0.1830612, -1.0828326, -1.0117317, -0.3019783, 0.17001551, -0.62556803,\n 2.961217, 2.6823378, 2.9682546, 5.2445164, 4.9527783, 6.309333, \n 7.7392774, 6.2129936, 15.35368, 20.683935, 20.589102, 22.10926, \n 20.185204, 20.562426, 22.645317, 18.869568, 20.659521, 8.880328, \n 6.4410696, 9.769155, 5.5935693, 5.527752, 4.5683465, 3.4019177, \n 3.3163903, 2.244741, 0.38402623, 0.2960868, -0.4828044, 0.13759217, \n 0.25681636, 0.11657055, -0.330115, 0.4011577, -0.7654019, 0.14916949, -\n 0.6228205, -0.96823233, -0.022868, -0.49047035, 3.20636, 2.6912642, \n 2.9050756, 4.912674, 5.7441964, 6.489336, 9.632326, 6.2825303, 16.68777,\n 
21.077969, 17.172966, 18.92938, 23.38385, 20.251026, 22.16378, \n 18.001736, 20.24098, 11.019654, 6.6073513, 8.655663, 6.298364, \n 6.4654784, 3.6983974, 3.1087956, 2.226927, 2.6668777, -0.35526595, \n 1.4488825, 0.20488043, 0.047601122, -0.6924504, 0.57495445, 0.5399022, \n -0.47663862, 0.8161736, -0.36598107, -0.59101355, 0.20327158, \n 0.41677478, 0.27029967, 3.7847342, 3.2484818, 3.747693, 4.7734656, \n 6.716756, 8.185982, 9.418276, 7.493696, 14.704602, 17.729408, 17.48148,\n 19.855602, 20.371563, 18.5821, 18.155266, 16.968113, 17.100256, \n 10.015516, 7.8247633, 8.993816, 6.4911056, 6.2132425, 4.3434267, \n 3.7000012, 3.7377622, 3.1024928, -0.30869377, 0.051026687, -0.34078225,\n 0.7479868, 0.03696166, -0.75611556, 1.1542099, -0.028129257, 0.08181842,\n 0.09559424, 0.8364861, 0.096545294, 0.5584201, -0.5194905, 3.589691, \n 4.05453, 3.794124, 4.707637, 9.231918, 8.564278, 9.2333975, 7.006125, \n 16.20831, 19.324417, 15.819074, 19.356344, 17.93927, 18.384487, \n 18.001207, 16.142382, 21.02356, 9.986794, 6.614442, 10.657583, \n 6.6237283, 8.433239, 4.4907804, 4.2819304, 3.7269611, 3.5132716, \n 0.4662154, 0.30799574, 0.96793664, -0.23279454, -0.65458816, 0.3362532,\n -0.25408295, 0.06732974, 0.4873681, 0.51199776, 0.14874719, -0.29994798,\n 0.4666868, 0.33490536, 3.3489285, 2.9599032, 3.7671084, 5.274986, \n 11.143537, 9.2554245, 9.07235, 9.138557, 17.255503, 18.355011, \n 15.364281, 17.336935, 18.85955, 17.050003, 15.608138, 15.812602, \n 18.231024, 11.6336155, 6.9478188, 11.149977, 7.419574, 10.250601, \n 4.7022414, 3.971905, 4.7929826, 3.3438401, -0.39000547, -0.28059074, \n 0.6398243, 0.54544014, 0.6069346, -0.17257981, 0.22857136, 0.5565434, \n 0.004583537, -1.6335539, -0.8888735, -0.51765877, 0.25269827, -\n 0.01876194, 3.6656997, 3.8518455, 5.484056, 6.189166, 12.860901, \n 9.803692, 10.184517, 8.937886, 17.70772, 18.956602, 15.036017, \n 18.585073, 18.892986, 18.184309, 15.378883, 13.1691475, 16.713081, \n 11.373385, 10.050861, 11.757488, 10.44355, 12.29941, 4.694755, 5.29064,\n 3.8482742, 3.204164, 0.0923521, 0.023937136, 0.1471634, 0.6328977, \n 0.086753555, 0.4752982, -0.6725007, 0.39593527, 0.22832835, -0.27118513,\n -0.8305444, 0.61332023, -0.46385112, -0.07130288, 3.392937, 5.612763, \n 5.2056, 5.706025, 15.220109, 11.131699, 11.811647, 9.684384, 18.768026,\n 16.84839, 13.052551, 16.32535, 17.554602, 17.395172, 14.127713, 12.6871,\n 17.62177, 11.645812, 8.629343, 11.129438, 11.581531, 14.195255, \n 4.8469067, 5.1938415, 4.0862703, 3.181031, -1.0452468, -0.25019166, -\n 0.7914238, 0.12144237, -0.41462633, 0.54280686, -0.69631076, 0.3511648,\n 0.004874259, -0.06835556, 0.8735261, 0.24838078, -0.31527227, \n 0.52716863, 3.9399889, 6.0550613, 6.129095, 6.861085, 18.186186, \n 11.700109, 9.944186, 8.473949, 16.194746, 15.487744, 11.69865, \n 15.148699, 17.62606, 18.724825, 14.773164, 12.397501, 17.29195, \n 12.904611, 10.236364, 9.858109, 12.551205, 17.244278, 5.081826, \n 5.861555, 4.532901, 2.9011462, -0.6339103, -0.14527631, -0.34604034, \n 0.16419859, -0.21205892, 1.0102317, -0.6850754, -0.35831228, 0.2243401,\n -0.12707797, 0.12315286, 0.75053287, -0.30611196, 0.946708, 3.2013948, \n 5.563331, 4.7585716, 7.213843, 20.686522, 11.607341, 12.30799, 10.50174,\n 15.599098, 14.504682, 13.629604, 13.69594, 17.019728, 16.432478, \n 13.931328, 13.392891, 16.40223, 12.716988, 10.136288, 11.304484, \n 14.544636, 18.359613, 5.5700507, 5.302722, 5.3971443, 4.0632043, \n 0.34419727, -0.43536162, 0.2166448, -0.95898896, 0.54851377, 0.7104762,\n 0.73580873, -0.025371978, -0.42447037, 
-0.055623855, -0.057257153, -\n 0.042765763, -0.32910374, 0.110769786, 4.9113693, 6.042119, 5.789901, \n 8.213889, 21.399662, 13.620898, 12.268165, 12.022924, 15.812675, \n 14.541431, 11.235446, 13.432023, 16.380638, 17.424328, 13.075844, \n 13.108509, 16.125572, 12.70376, 9.833503, 12.167731, 15.966658, \n 19.35662, 4.726227, 5.754112, 5.277654, 3.513394, 0.27682012, -\n 0.6424214, 0.63972783, 0.052361738, 0.6900285, 0.8120001, 0.13217215, -\n 0.06418637, -0.34938893, -0.1332957, -0.14414565, 0.13367409, 0.2113514,\n 0.013457297, 5.1611977, 5.566288, 5.6893077, 6.982988, 20.4595, \n 14.453565, 13.59946, 10.934562, 16.137613, 14.927114, 11.994792, \n 13.434463, 17.021969, 17.274439, 13.322607, 11.919087, 16.481926, \n 12.076119, 10.847066, 11.398886, 16.077639, 19.727343, 4.5308523, \n 6.236413, 4.8869467, 3.9474933, 0.5430834, -0.16916445, 1.1437705, \n 0.16070405, 0.31188658, 0.8880989, -0.14495048, -0.5266939, 0.22656989,\n 0.3505556, 0.015732061, -0.005636345, -0.56870633, 0.40287915, \n 4.4800043, 4.970619, 4.5086727, 7.2337227, 21.180979, 13.984755, \n 12.418574, 10.579776, 14.925623, 11.359912, 10.660921, 12.467203, \n 17.208267, 17.148045, 11.586628, 11.8577, 13.493896, 13.254265, \n 10.851606, 13.149869, 17.053873, 19.849815, 4.9660897, 5.8460274, \n 3.998473, 3.6802619, 0.8031087, -0.013905935, 0.3503995, 0.31186494, -\n 0.038673762, -0.07608058, 0.21588215, -0.23191574, -0.3952367, -\n 0.09744672, 0.10716237, -1.3977432, -0.2775279, 0.28267142, 3.4341362, \n 5.5165367, 4.798283, 5.5223513, 23.267078, 15.076336, 13.030845, \n 10.9562845, 13.846566, 11.140822, 10.528686, 12.319912, 15.81127, \n 17.356304, 10.330765, 10.917309, 11.82135, 11.22828, 9.395469, \n 12.859789, 15.528548, 18.173409, 4.9549546, 7.068773, 5.830448, \n 2.882567, -0.47524917, -0.3299339, 0.19532575, -0.5605442, -0.05505767,\n -0.22165492, -0.4325593, 0.13398468, -0.34254703, 0.0140561955, -\n 0.31874263, -0.14240773, -0.91078305, 0.69452536, 4.23155, 5.7011547, \n 6.0003905, 6.377488, 20.312622, 13.978043, 11.040157, 11.176402, \n 13.108543, 9.652381, 9.632209, 11.781593, 14.856762, 15.745179, \n 9.215103, 9.966311, 12.876652, 11.37008, 10.591258, 10.1424675, \n 14.367625, 19.73172, 3.84762, 7.103483, 3.7233605, 2.376824, 0.5252924,\n 0.38380843, 0.99321234, -0.46900645, 0.12149067, 0.42257598, 0.0632253,\n -0.6670193, 0.03464376, 0.452787, 0.29236665, -0.017891373, -\n 0.075127214, 0.9828477, 2.3365817, 5.2860856, 4.3626456, 5.785785, \n 20.600492, 12.966171, 11.047343, 9.063554, 10.454045, 10.47048, \n 9.218836, 11.104739, 15.136548, 14.689532, 10.122101, 9.4212675, \n 11.134829, 8.617753, 9.327736, 11.278048, 13.085438, 18.43459, \n 3.9763334, 5.9072723, 3.9930198, 3.4963682, 0.2813723, 1.0457343, \n 0.31889322, 0.37867522, 1.2037315, -0.47904515, 0.582204, 0.68306595, -\n 0.088313825, -0.107233785, -0.53984404, 0.39104667, 1.1425363, \n 0.51777375, 2.9267018, 5.183814, 4.495046, 4.6087675, 18.143732, \n 12.06679, 8.621597, 7.8071413, 9.6548195, 8.168409, 7.199488, 7.962524,\n 13.9421425, 12.19501, 8.027851, 8.022394, 8.449041, 8.428407, 7.2122917,\n 9.045476, 12.2283, 16.851568, 4.1475954, 5.7582254, 3.977257, 1.8516432,\n -0.32922924, -0.12237206, -0.072756164, -0.6167613, 0.5225413, \n 0.37072095, -0.6287377, -0.7166235, -0.37311992, 0.81874573, 0.17337193,\n 0.17729722, 0.40824133, -0.3479744, 2.9783738, 4.5450144, 3.9617758, \n 4.9179983, 15.7159395, 10.0808935, 7.922992, 6.9472337, 9.000638, \n 7.62391, 6.7539964, 8.514194, 12.004702, 12.731859, 7.173314, 7.301387,\n 7.240425, 7.4015136, 7.516923, 
8.6178665, 9.913477, 14.592376, \n 4.5969114, 5.9667635, 2.2334886, 2.1020658, -0.9194653, 0.43381432, -\n 0.74259335, -0.8438142, 0.01724637, -0.6245163, 0.34715256, -0.24820891,\n -0.6074153, -0.066010244, -0.05560958, -0.32758415, 0.3784681, -\n 0.09629097, 2.7877793, 4.203103, 3.26329, 4.44158, 12.650619, 8.000976,\n 5.2695656, 5.8276386, 7.0067124, 6.36843, 5.256174, 7.340733, 9.230904,\n 13.014863, 5.453347, 6.2923303, 6.518343, 6.5802903, 5.615034, 7.000242,\n 8.82858, 11.683347, 3.8504424, 4.365258, 3.2354295, 2.2202947, \n 0.5615039, 0.41533247, 0.21722497, 0.3176445, 0.2709266, -0.2929376, \n 0.090651914, -0.32017383, -0.30647907, 0.15408067, -0.3604456, \n 0.6241022, 0.42943946, 0.30790985, 2.0098479, 3.1669462, 3.8518548, \n 4.0607076, 11.639872, 5.7104745, 7.125849, 5.09103, 5.6111135, 3.951972,\n 4.0356493, 7.02897, 11.430392, 11.738871, 4.115266, 5.621048, 5.3278913,\n 5.120655, 5.990115, 5.7664003, 5.7767644, 9.013329, 2.9515538, \n 5.6055756, 4.1827626, 1.7799046, -0.21542077, 0.24031225, -0.6824815, -\n 0.6190339, 0.6256524, -0.48574805, 0.09997501, 0.3266095, 0.07135873, -\n 0.3254111, -0.047491744, -0.014772129, -0.38849118, 0.286563, 2.9551277,\n 3.957588, 3.0914695, 3.1707056, 8.462824, 4.728864, 5.0381837, \n 4.0804534, 5.1110387, 4.62399, 4.415538, 6.1308045, 10.654469, \n 10.723281, 4.4972973, 3.627521, 3.8499038, 4.373936, 4.0010695, \n 4.3314424, 6.3237967, 7.2798166, 2.3315697, 4.04032, 3.2531312, \n 2.022844, -0.5356632, 0.52645034, 0.11135009, -0.26490784, 0.39241284, \n 0.13336958, -0.15545088, -0.048340384, 0.6705195, -0.14051451, -\n 0.7617515, 0.11379189, 0.21909207, 0.63809645, 1.5451268, 4.243852, \n 3.2245193, 3.3400161, 6.511011, 4.033045, 2.8604522, 3.6116364, \n 3.5580635, 3.1904101, 2.9593391, 4.813459, 8.871713, 8.875507, 2.922824,\n 2.6118903, 3.5907378, 2.6278322, 3.5242443, 3.0563798, 4.969574, \n 5.5496926, 3.3797112, 3.520721, 2.3572729, 1.7771024, -0.43368375, -\n 0.6439688, -0.56648374, 0.25869504, -0.13318418, -0.25542453, -\n 1.2330167, 0.34627095, 1.5127228, -0.6055812, 0.6232876, 0.23605451, -\n 0.5616809, 0.500821])\n', (3868, 23159), True, 'import numpy as np\n')]
|
import pytest
import numpy as np
import pandas as pd
import xarray as xr
import bmorph
from bmorph.util import mizuroute_utils as mizutil
reference = xr.open_dataset("./bmorph/tests/data/test_reference.nc")
routed = xr.open_dataset("./bmorph/tests/data/test_routed.nc")
topo = xr.open_dataset("./bmorph/tests/data/test_topo.nc")
true_fill = xr.open_dataset("./bmorph/tests/data/true_fill_segs.nc")
true_results = xr.open_dataset("./bmorph/tests/data/true_results.nc")
test_fill_methods = ['kge', 'kldiv', 'r2', 'leave_null']
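# Reference gauge flows reshaped to (seg, time) so they can be compared directly against the routed flows in the tests below.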
gauge_flows = xr.Dataset(
{
'reference_flow' : (('seg', 'time'), reference['reference_flow'].transpose().values)
},
{"seg": reference['seg'].values, "time": reference['time'].values},
)
def test_map_headwater_sites(routed=routed.copy()):
routed['down_seg'] = true_results['down_seg']
test_routed = mizutil.map_headwater_sites(routed)
assert 'is_headwaters' in test_routed.var()
for truth, test in zip(true_results['is_headwaters'].values, test_routed['is_headwaters']):
assert truth == test
def test_find_up(routed=routed.copy()):
test_routed = routed
test_routed['down_seg'] = true_results['down_seg']
test_routed['is_headwaters'] = true_results['is_headwaters']
for seg, true_up_seg in zip(test_routed['seg'].values, true_results['up_seg'].values):
test_up_seg = mizutil.find_up(test_routed, seg)
if np.isnan(true_up_seg):
assert np.isnan(test_up_seg)
else:
assert true_up_seg == test_up_seg
def test_find_max_r2(routed=routed.copy()):
true_r2_fill = true_fill.sel(fill_method='r2')['true_seg']
for true_fill_seg, test_flow in zip(true_r2_fill.values, routed['flow'].values):
test_fill_seg = mizutil.find_max_r2(gauge_flows['reference_flow'], test_flow)[1]
assert true_fill_seg == test_fill_seg
def test_find_max_kge(routed=routed.copy()):
true_kge_fill = true_fill.sel(fill_method='kge')['true_seg']
for true_fill_seg, test_flow in zip(true_kge_fill.values, routed['flow'].values):
test_fill_seg = mizutil.find_max_kge(gauge_flows['reference_flow'], test_flow)[1]
assert true_fill_seg == test_fill_seg
def test_find_min_kldiv(routed=routed.copy()):
true_kldiv_fill = true_fill.sel(fill_method='kldiv')['true_seg']
for true_fill_seg, test_flow in zip(true_kldiv_fill.values, routed['flow'].values):
test_fill_seg = mizutil.find_min_kldiv(gauge_flows['reference_flow'], test_flow)[1]
assert true_fill_seg == test_fill_seg
def test_map_ref_sites(routed=routed.copy(), fill_methods=test_fill_methods):
test_routed = routed
test_routed['down_seg'] = true_results['down_seg']
test_routed['is_headwaters'] = true_results['is_headwaters']
for fill_method in fill_methods:
test_routed = mizutil.map_ref_sites(routed=test_routed, gauge_reference=reference,
route_var = 'flow', fill_method = fill_method
)
for true_up_ref_seg, test_up_ref_seg in zip(true_fill.sel(fill_method=f"{fill_method}_up")['true_seg'].values,
test_routed['up_ref_seg'].values):
assert true_up_ref_seg == test_up_ref_seg
for true_down_ref_seg, test_down_ref_seg in zip(true_fill.sel(fill_method=f"{fill_method}_down")['true_seg'].values,
test_routed['down_ref_seg'].values):
assert true_down_ref_seg == test_down_ref_seg
|
[
"bmorph.util.mizuroute_utils.find_up",
"bmorph.util.mizuroute_utils.find_max_kge",
"bmorph.util.mizuroute_utils.find_min_kldiv",
"bmorph.util.mizuroute_utils.map_headwater_sites",
"bmorph.util.mizuroute_utils.find_max_r2",
"numpy.isnan",
"bmorph.util.mizuroute_utils.map_ref_sites",
"xarray.open_dataset"
] |
[((153, 209), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/test_reference.nc"""'], {}), "('./bmorph/tests/data/test_reference.nc')\n", (168, 209), True, 'import xarray as xr\n'), ((219, 272), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/test_routed.nc"""'], {}), "('./bmorph/tests/data/test_routed.nc')\n", (234, 272), True, 'import xarray as xr\n'), ((280, 331), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/test_topo.nc"""'], {}), "('./bmorph/tests/data/test_topo.nc')\n", (295, 331), True, 'import xarray as xr\n'), ((344, 400), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/true_fill_segs.nc"""'], {}), "('./bmorph/tests/data/true_fill_segs.nc')\n", (359, 400), True, 'import xarray as xr\n'), ((416, 470), 'xarray.open_dataset', 'xr.open_dataset', (['"""./bmorph/tests/data/true_results.nc"""'], {}), "('./bmorph/tests/data/true_results.nc')\n", (431, 470), True, 'import xarray as xr\n'), ((857, 892), 'bmorph.util.mizuroute_utils.map_headwater_sites', 'mizutil.map_headwater_sites', (['routed'], {}), '(routed)\n', (884, 892), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((1373, 1406), 'bmorph.util.mizuroute_utils.find_up', 'mizutil.find_up', (['test_routed', 'seg'], {}), '(test_routed, seg)\n', (1388, 1406), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((1418, 1439), 'numpy.isnan', 'np.isnan', (['true_up_seg'], {}), '(true_up_seg)\n', (1426, 1439), True, 'import numpy as np\n'), ((2861, 2976), 'bmorph.util.mizuroute_utils.map_ref_sites', 'mizutil.map_ref_sites', ([], {'routed': 'test_routed', 'gauge_reference': 'reference', 'route_var': '"""flow"""', 'fill_method': 'fill_method'}), "(routed=test_routed, gauge_reference=reference,\n route_var='flow', fill_method=fill_method)\n", (2882, 2976), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((1460, 1481), 'numpy.isnan', 'np.isnan', (['test_up_seg'], {}), '(test_up_seg)\n', (1468, 1481), True, 'import numpy as np\n'), ((1767, 1828), 'bmorph.util.mizuroute_utils.find_max_r2', 'mizutil.find_max_r2', (["gauge_flows['reference_flow']", 'test_flow'], {}), "(gauge_flows['reference_flow'], test_flow)\n", (1786, 1828), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((2107, 2169), 'bmorph.util.mizuroute_utils.find_max_kge', 'mizutil.find_max_kge', (["gauge_flows['reference_flow']", 'test_flow'], {}), "(gauge_flows['reference_flow'], test_flow)\n", (2127, 2169), True, 'from bmorph.util import mizuroute_utils as mizutil\n'), ((2456, 2520), 'bmorph.util.mizuroute_utils.find_min_kldiv', 'mizutil.find_min_kldiv', (["gauge_flows['reference_flow']", 'test_flow'], {}), "(gauge_flows['reference_flow'], test_flow)\n", (2478, 2520), True, 'from bmorph.util import mizuroute_utils as mizutil\n')]
|
'''
Entry point invoked by the HTTP Cloud Function.
It adds training data to the Datastore for later model generation
and feature studies.
'''
import sys
import os
import time
import numpy as np
from google.cloud import datastore
from google.cloud import storage
from google.api_core import retry
from urllib3.exceptions import ProtocolError
sys.path.insert(0, 'imports')
from imports.video_asset_processor import VideoAssetProcessor
DATASTORE_CLIENT = datastore.Client()
STORAGE_CLIENT = storage.Client()
SOURCES_BUCKET = 'livepeer-verifier-originals'
RENDITIONS_BUCKET = 'livepeer-verifier-renditions'
ENTITY_NAME = 'features_input_60_540'
def download_to_local(bucket_name, local_folder, local_file, origin_blob_name):
"""
Downloads a file from the bucket.
"""
predicate = retry.if_exception_type(ConnectionResetError, ProtocolError)
reset_retry = retry.Retry(predicate)
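    # Retry the blob download when the connection is reset mid-transfer.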
bucket = STORAGE_CLIENT.get_bucket(bucket_name)
blob = bucket.blob('{}'.format(origin_blob_name))
# print('Downloading blob {} from bucket {}'.format(origin_blob_name, bucket_name))
# print('File download Started…. Wait for the job to complete.')
# Create this folder locally if not exists
if not os.path.exists(local_folder):
os.makedirs(local_folder)
local_path = '{}/{}'.format(local_folder, local_file)
# print('Downloading {} to {}'.format(origin_blob_name, local_path))
reset_retry(blob.download_to_filename(local_path))
# print('Downloaded {} to {}'.format(origin_blob_name, local_path))
def compute_metrics(asset, renditions):
'''
Function that instantiates the VideoAssetProcessor class with a list
of metrics to be computed.
The feature_list argument is left void as every descriptor of each
temporal metric is potentially used for model training
'''
start_time = time.time()
source_asset = asset
max_samples = 30
renditions_list = renditions
metrics_list = ['temporal_ssim',
'temporal_psnr',
'temporal_dct',
'temporal_gaussian_mse',
'temporal_gaussian_difference',
'temporal_threshold_gaussian_difference'
]
asset_processor = VideoAssetProcessor(source_asset,
renditions_list,
metrics_list,
False,
max_samples,
features_list=None)
metrics_df, _, _ = asset_processor.process()
for _, row in metrics_df.iterrows():
line = row.to_dict()
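        # Per-frame series metrics are rounded and stored as strings alongside the scalar metrics.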
for column in metrics_df.columns:
if 'series' in column:
line[column] = np.array2string(np.around(line[column], decimals=5))
add_asset_input(DATASTORE_CLIENT, '{}/{}'.format(row['title'], row['attack']), line)
elapsed_time = time.time() - start_time
print('Computation time:', elapsed_time)
def add_asset_input(client, title, input_data):
"""
Function to add the asset's computed data to the database
"""
key = client.key(ENTITY_NAME, title, namespace='livepeer-verifier-QoE')
video = datastore.Entity(key)
video.update(input_data)
client.put(video)
def dataset_generator_http(request):
"""HTTP Cloud Function.
Args:
request (flask.Request): The request object, containing the name
of the source asset
Returns:
The response text, or any set of values that can be turned into a
Response object using `make_response`
"""
request_json = request.get_json(silent=True)
request_args = request.args
if request_json and 'name' in request_json:
source_name = request_json['name']
resolution_list = request_json['resolution_list'].split(',')
elif request_args and 'name' in request_args:
source_name = request_args['name']
resolution_list = request_args['resolution_list'].split(',')
print(resolution_list)
# Create the folder for the source asset
source_folder = '/tmp/1080p'
# if not os.path.exists(source_folder):
# os.makedirs(source_folder)
# Get the file that has been uploaded to GCS
asset_path = {'path': '{}/{}'.format(source_folder, source_name)}
renditions_paths = []
# Check if the source is not already in the path
if not os.path.exists(asset_path['path']):
download_to_local(SOURCES_BUCKET, source_folder, source_name, source_name)
#Bring the attacks to be processed locally
# resolution_list = ['1080p', '720p', '480p', '360p', '240p', '144p']
attack_names = ['watermark',
'watermark-345x114',
'watermark-856x856',
'vignette',
# 'rotate_90_clockwise',
'black_and_white',
'low_bitrate_4',
'low_bitrate_8']
# Create a comprehension list with all the possible attacks
attacks_list = ['{}_{}'.format(resolution, attack)
for resolution in resolution_list
for attack in attack_names
]
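    # The 1080p source itself is not a rendition; any remaining resolutions are also fetched without an attack suffix.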
if '1080p' in resolution_list:
resolution_list.remove('1080p')
attacks_list += resolution_list
for attack in attacks_list:
remote_file = '{}/{}'.format(attack, source_name)
local_folder = '/tmp/{}'.format(attack)
try:
download_to_local(RENDITIONS_BUCKET,
local_folder,
source_name,
remote_file)
local_file = '{}/{}'.format(local_folder, source_name)
renditions_paths.append({'path': local_file})
except Exception as err:
print('Unable to download {}/{}: {}'.format(attack, source_name, err))
if len(renditions_paths) > 0:
print('Processing the following renditions: {}'.format(renditions_paths))
compute_metrics(asset_path, renditions_paths)
else:
print('Empty renditions list. No renditions to process')
# Cleanup
if os.path.exists(asset_path['path']):
os.remove(asset_path['path'])
for rendition in attacks_list:
rendition_folder = '/tmp/{}'.format(rendition)
local_path = '{}/{}'.format(rendition_folder, source_name)
if os.path.exists(local_path):
os.remove(local_path)
return 'Process completed: {}'.format(asset_path['path'])
|
[
"google.cloud.storage.Client",
"os.path.exists",
"sys.path.insert",
"os.makedirs",
"google.cloud.datastore.Entity",
"google.cloud.datastore.Client",
"imports.video_asset_processor.VideoAssetProcessor",
"numpy.around",
"time.time",
"google.api_core.retry.if_exception_type",
"google.api_core.retry.Retry",
"os.remove"
] |
[((381, 410), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""imports"""'], {}), "(0, 'imports')\n", (396, 410), False, 'import sys\n'), ((494, 512), 'google.cloud.datastore.Client', 'datastore.Client', ([], {}), '()\n', (510, 512), False, 'from google.cloud import datastore\n'), ((530, 546), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (544, 546), False, 'from google.cloud import storage\n'), ((836, 896), 'google.api_core.retry.if_exception_type', 'retry.if_exception_type', (['ConnectionResetError', 'ProtocolError'], {}), '(ConnectionResetError, ProtocolError)\n', (859, 896), False, 'from google.api_core import retry\n'), ((915, 937), 'google.api_core.retry.Retry', 'retry.Retry', (['predicate'], {}), '(predicate)\n', (926, 937), False, 'from google.api_core import retry\n'), ((1891, 1902), 'time.time', 'time.time', ([], {}), '()\n', (1900, 1902), False, 'import time\n'), ((2297, 2405), 'imports.video_asset_processor.VideoAssetProcessor', 'VideoAssetProcessor', (['source_asset', 'renditions_list', 'metrics_list', '(False)', 'max_samples'], {'features_list': 'None'}), '(source_asset, renditions_list, metrics_list, False,\n max_samples, features_list=None)\n', (2316, 2405), False, 'from imports.video_asset_processor import VideoAssetProcessor\n'), ((3294, 3315), 'google.cloud.datastore.Entity', 'datastore.Entity', (['key'], {}), '(key)\n', (3310, 3315), False, 'from google.cloud import datastore\n'), ((6237, 6271), 'os.path.exists', 'os.path.exists', (["asset_path['path']"], {}), "(asset_path['path'])\n", (6251, 6271), False, 'import os\n'), ((1260, 1288), 'os.path.exists', 'os.path.exists', (['local_folder'], {}), '(local_folder)\n', (1274, 1288), False, 'import os\n'), ((1298, 1323), 'os.makedirs', 'os.makedirs', (['local_folder'], {}), '(local_folder)\n', (1309, 1323), False, 'import os\n'), ((3007, 3018), 'time.time', 'time.time', ([], {}), '()\n', (3016, 3018), False, 'import time\n'), ((4490, 4524), 'os.path.exists', 'os.path.exists', (["asset_path['path']"], {}), "(asset_path['path'])\n", (4504, 4524), False, 'import os\n'), ((6281, 6310), 'os.remove', 'os.remove', (["asset_path['path']"], {}), "(asset_path['path'])\n", (6290, 6310), False, 'import os\n'), ((6479, 6505), 'os.path.exists', 'os.path.exists', (['local_path'], {}), '(local_path)\n', (6493, 6505), False, 'import os\n'), ((6519, 6540), 'os.remove', 'os.remove', (['local_path'], {}), '(local_path)\n', (6528, 6540), False, 'import os\n'), ((2857, 2892), 'numpy.around', 'np.around', (['line[column]'], {'decimals': '(5)'}), '(line[column], decimals=5)\n', (2866, 2892), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3.5
import sys
import os
import logging
import numpy as np
import musm
from sklearn.utils import check_random_state
from textwrap import dedent
# 1. Social Choice
_LOG = musm.get_logger('adt17')
PROBLEMS = {
'synthetic': musm.Synthetic,
'pc': musm.PC,
}
USERS = {
'noiseless': musm.NoiselessUser,
'pl': musm.PlackettLuceUser,
}
def get_results_path(args):
properties = [
args['problem'], args['num_groups'], args['num_clusters_per_group'],
args['num_users_per_group'], args['max_iters'], args['set_size'],
args['pick'], args['transform'], args['tau'], args['lmbda'],
args['enable_cv'], args['min_regret'], args['distrib'],
args['density'], args['response_model'], args['noise'], args['seed'],
]
return os.path.join('results', '_'.join(map(str, properties)) + '.pickle')
def _sparsify(w, density, rng):
if not (0 < density <= 1):
raise ValueError('density must be in (0, 1], got {}'.format(density))
w = np.array(w, copy=True)
perm = rng.permutation(w.shape[1])
num_zeros = round((1 - density) * w.shape[1])
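    # Zero out a random subset of weight columns, but always leave at least one weight nonzero.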
w[:,perm[:min(num_zeros, w.shape[1] - 1)]] = 0
return w
def sample_cluster(problem, num_users=5, distrib='normal', density=1, rng=0):
num_attributes = problem.num_attributes
if hasattr(problem, 'cost_matrix'):
num_attributes += problem.cost_matrix.shape[0]
if distrib == 'uniform':
w_mean = rng.uniform(0, 1, size=num_attributes)
elif distrib == 'normal':
w_mean = rng.uniform(-1, 1, size=num_attributes)
else:
raise ValueError('invalid distrib, got {}'.format(distrib))
if True: # XXX
w = w_mean + np.zeros((num_users, num_attributes))
else:
w = w_mean + rng.uniform(0, 25, size=(num_users, num_attributes))
return _sparsify(np.abs(w), density, rng)
def generate_user_groups(problem, args):
User = USERS[args['response_model']]
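    # A fixed seed keeps the sampled user groups identical across runs.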
rng = check_random_state(0)
num_users_per_cluster = max(1, round(args['num_users_per_group'] /
args['num_clusters_per_group']))
user_groups = []
for gid in range(args['num_groups']):
w_star = []
for cid in range(1, args['num_clusters_per_group'] + 1):
if cid == args['num_clusters_per_group']:
num_users_in_cluster = args['num_users_per_group'] - len(w_star)
else:
num_users_in_cluster = num_users_per_cluster
temp = sample_cluster(problem,
num_users=num_users_in_cluster,
distrib=args['distrib'],
density=args['density'],
rng=rng)
ttemp = temp
if hasattr(problem, 'cost_matrix'):
num_costs = problem.cost_matrix.shape[0]
temp_bools = temp[:, :-num_costs]
temp_costs = temp[:, -num_costs:]
ttemp = temp_bools + np.dot(temp_costs, problem.cost_matrix)
_LOG.debug(dedent('''\
CLUSTER {cid}:
true user weights =
{temp}
true user weights transformed by cost matrix =
{ttemp}
''').format(**locals()))
if len(w_star) == 0:
w_star = ttemp
else:
w_star = np.append(w_star, ttemp, axis=0)
user_groups.append([User(problem,
w_star[uid],
min_regret=args['min_regret'],
noise=args['noise'],
rng=rng)
for uid in range(args['num_users_per_group'])])
return user_groups
def run(args):
problem = PROBLEMS[args['problem']]()
try:
user_groups = musm.load(args['groups'])
except:
user_groups = generate_user_groups(problem,
musm.subdict(args, nokeys={'problem'}))
if args['groups'] is not None:
musm.dump(args['groups'], user_groups)
rng = check_random_state(args['seed'])
traces = []
for gid in range(args['num_groups']):
traces.append(musm.musm(problem,
user_groups[gid],
gid,
set_size=args['set_size'],
max_iters=args['max_iters'],
enable_cv=args['enable_cv'],
pick=args['pick'],
transform=args['transform'],
tau=args['tau'],
lmbda=args['lmbda'],
rng=0))
musm.dump(get_results_path(args), {'args': args, 'traces': traces})
def main():
import argparse
np.seterr(all='raise')
np.set_printoptions(precision=2, linewidth=1000000)
fmt = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(formatter_class=fmt)
group = parser.add_argument_group('Experiment')
group.add_argument('problem', type=str,
help='the problem, any of {}'
.format(sorted(PROBLEMS.keys())))
group.add_argument('-N', '--num-groups', type=int, default=20,
help='number of user groups')
group.add_argument('-C', '--num-clusters-per-group', type=int, default=1,
help='number of clusters in a group')
group.add_argument('-M', '--num-users-per-group', type=int, default=5,
help='number of users in a group')
group.add_argument('-T', '--max-iters', type=int, default=100,
help='maximum number of elicitation iterations')
group.add_argument('-s', '--seed', type=int, default=0,
help='RNG seed')
group.add_argument('-v', '--verbose', action='store_true',
help='enable debug spew')
group = parser.add_argument_group('Algorithm')
group.add_argument('-K', '--set-size', type=int, default=2,
help='set size')
group.add_argument('-P', '--pick', type=str, default='maxvar',
                       help='criterion used for picking users')
group.add_argument('-F', '--transform', type=str, default='indep',
help='user-user transformation to use')
group.add_argument('-t', '--tau', type=float, default=0.25,
help='kernel inverse temperature parameter')
group.add_argument('-L', '--lmbda', type=float, default=0.5,
help='transform importance')
group.add_argument('-X', '--enable-cv', action='store_true',
help='enable hyperparameter cross-validation')
group = parser.add_argument_group('User Simulation')
group.add_argument('--min-regret', type=float, default=0,
help='minimum regret for satisfaction')
group.add_argument('-G', '--groups', type=str, default=None,
help='path to pickle with user weights')
group.add_argument('-u', '--distrib', type=str, default='normal',
help='distribution of user weights')
group.add_argument('-d', '--density', type=float, default=1,
help='proportion of non-zero user weights')
group.add_argument('-R', '--response-model', type=str, default='pl',
help='user response model for choice queries')
group.add_argument('-n', '--noise', type=float, default=1,
help='amount of user response noise')
args = parser.parse_args()
handlers = []
if args.verbose:
handlers.append(logging.StreamHandler(sys.stdout))
logging.basicConfig(level=logging.DEBUG, handlers=handlers,
format='%(levelname)-6s %(name)-6s %(funcName)-12s: %(message)s')
run(vars(args))
if __name__ == '__main__':
main()
|
[
"logging.basicConfig",
"numpy.abs",
"textwrap.dedent",
"sklearn.utils.check_random_state",
"logging.StreamHandler",
"argparse.ArgumentParser",
"musm.musm",
"musm.dump",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"musm.get_logger",
"numpy.seterr",
"musm.subdict",
"musm.load",
"numpy.set_printoptions"
] |
[((191, 215), 'musm.get_logger', 'musm.get_logger', (['"""adt17"""'], {}), "('adt17')\n", (206, 215), False, 'import musm\n'), ((1014, 1036), 'numpy.array', 'np.array', (['w'], {'copy': '(True)'}), '(w, copy=True)\n', (1022, 1036), True, 'import numpy as np\n'), ((1965, 1986), 'sklearn.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (1983, 1986), False, 'from sklearn.utils import check_random_state\n'), ((4204, 4236), 'sklearn.utils.check_random_state', 'check_random_state', (["args['seed']"], {}), "(args['seed'])\n", (4222, 4236), False, 'from sklearn.utils import check_random_state\n'), ((4972, 4994), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (4981, 4994), True, 'import numpy as np\n'), ((4999, 5050), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'linewidth': '(1000000)'}), '(precision=2, linewidth=1000000)\n', (5018, 5050), True, 'import numpy as np\n'), ((5114, 5158), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'fmt'}), '(formatter_class=fmt)\n', (5137, 5158), False, 'import argparse\n'), ((7898, 8028), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'handlers': 'handlers', 'format': '"""%(levelname)-6s %(name)-6s %(funcName)-12s: %(message)s"""'}), "(level=logging.DEBUG, handlers=handlers, format=\n '%(levelname)-6s %(name)-6s %(funcName)-12s: %(message)s')\n", (7917, 8028), False, 'import logging\n'), ((1845, 1854), 'numpy.abs', 'np.abs', (['w'], {}), '(w)\n', (1851, 1854), True, 'import numpy as np\n'), ((3930, 3955), 'musm.load', 'musm.load', (["args['groups']"], {}), "(args['groups'])\n", (3939, 3955), False, 'import musm\n'), ((1701, 1738), 'numpy.zeros', 'np.zeros', (['(num_users, num_attributes)'], {}), '((num_users, num_attributes))\n', (1709, 1738), True, 'import numpy as np\n'), ((4318, 4551), 'musm.musm', 'musm.musm', (['problem', 'user_groups[gid]', 'gid'], {'set_size': "args['set_size']", 'max_iters': "args['max_iters']", 'enable_cv': "args['enable_cv']", 'pick': "args['pick']", 'transform': "args['transform']", 'tau': "args['tau']", 'lmbda': "args['lmbda']", 'rng': '(0)'}), "(problem, user_groups[gid], gid, set_size=args['set_size'],\n max_iters=args['max_iters'], enable_cv=args['enable_cv'], pick=args[\n 'pick'], transform=args['transform'], tau=args['tau'], lmbda=args[\n 'lmbda'], rng=0)\n", (4327, 4551), False, 'import musm\n'), ((7859, 7892), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (7880, 7892), False, 'import logging\n'), ((3458, 3490), 'numpy.append', 'np.append', (['w_star', 'ttemp'], {'axis': '(0)'}), '(w_star, ttemp, axis=0)\n', (3467, 3490), True, 'import numpy as np\n'), ((4063, 4101), 'musm.subdict', 'musm.subdict', (['args'], {'nokeys': "{'problem'}"}), "(args, nokeys={'problem'})\n", (4075, 4101), False, 'import musm\n'), ((4154, 4192), 'musm.dump', 'musm.dump', (["args['groups']", 'user_groups'], {}), "(args['groups'], user_groups)\n", (4163, 4192), False, 'import musm\n'), ((3036, 3075), 'numpy.dot', 'np.dot', (['temp_costs', 'problem.cost_matrix'], {}), '(temp_costs, problem.cost_matrix)\n', (3042, 3075), True, 'import numpy as np\n'), ((3100, 3337), 'textwrap.dedent', 'dedent', (['""" CLUSTER {cid}:\n true user weights =\n {temp}\n true user weights transformed by cost matrix =\n {ttemp}\n """'], {}), '(\n """ CLUSTER {cid}:\n true user weights =\n {temp}\n true user weights transformed by cost matrix =\n {ttemp}\n """\n )\n', (3106, 3337), False, 
'from textwrap import dedent\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 4 15:57:47 2017
@author: wangronin
"""
import pdb
import numpy as np
from deap import benchmarks
from GaussianProcess_old import GaussianProcess_extra as GaussianProcess
from BayesOpt import BayesOpt, RandomForest, RrandomForest
np.random.seed(1)
dim = 2
n_step = 20
n_init_sample = 10
obj_func = lambda x: benchmarks.himmelblau(x)[0]
lb = np.array([-6] * dim)
ub = np.array([6] * dim)
x1 = {'name' : "x1",
'type' : 'R',
'bounds': [lb[0], ub[0]]}
x2 = {'name' : "x2",
'type' : 'R',
'bounds': [lb[1], ub[1]]}
thetaL = 1e-3 * (ub - lb) * np.ones(dim)
thetaU = 10 * (ub - lb) * np.ones(dim)
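# theta0 is drawn uniformly between thetaL and thetaU as the starting point for hyperparameter optimisation.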
theta0 = np.random.rand(dim) * (thetaU - thetaL) + thetaL
model = GaussianProcess(regr='constant', corr='matern',
theta0=theta0, thetaL=thetaL,
thetaU=thetaU, nugget=None,
nugget_estim=False, normalize=False,
verbose=False, random_start=15 * dim,
random_state=None, optimizer='BFGS')
# min_samples_leaf = max(1, int(n_init_sample / 20.))
# max_features = int(np.ceil(dim * 5 / 6.))
# model = RandomForest(n_estimators=100,
# max_features=max_features,
# min_samples_leaf=min_samples_leaf)
# model = RrandomForest()
search_space = [x1, x2]
opt = BayesOpt(search_space, obj_func, model, max_iter=n_step, random_seed=None,
n_init_sample=n_init_sample, minimize=True, verbose=False, debug=True,
optimizer='MIES')
opt.run()
|
[
"numpy.ones",
"numpy.random.rand",
"GaussianProcess_old.GaussianProcess_extra",
"deap.benchmarks.himmelblau",
"numpy.array",
"numpy.random.seed",
"BayesOpt.BayesOpt"
] |
[((304, 321), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (318, 321), True, 'import numpy as np\n'), ((416, 436), 'numpy.array', 'np.array', (['([-6] * dim)'], {}), '([-6] * dim)\n', (424, 436), True, 'import numpy as np\n'), ((442, 461), 'numpy.array', 'np.array', (['([6] * dim)'], {}), '([6] * dim)\n', (450, 461), True, 'import numpy as np\n'), ((758, 984), 'GaussianProcess_old.GaussianProcess_extra', 'GaussianProcess', ([], {'regr': '"""constant"""', 'corr': '"""matern"""', 'theta0': 'theta0', 'thetaL': 'thetaL', 'thetaU': 'thetaU', 'nugget': 'None', 'nugget_estim': '(False)', 'normalize': '(False)', 'verbose': '(False)', 'random_start': '(15 * dim)', 'random_state': 'None', 'optimizer': '"""BFGS"""'}), "(regr='constant', corr='matern', theta0=theta0, thetaL=\n thetaL, thetaU=thetaU, nugget=None, nugget_estim=False, normalize=False,\n verbose=False, random_start=15 * dim, random_state=None, optimizer='BFGS')\n", (773, 984), True, 'from GaussianProcess_old import GaussianProcess_extra as GaussianProcess\n'), ((1402, 1573), 'BayesOpt.BayesOpt', 'BayesOpt', (['search_space', 'obj_func', 'model'], {'max_iter': 'n_step', 'random_seed': 'None', 'n_init_sample': 'n_init_sample', 'minimize': '(True)', 'verbose': '(False)', 'debug': '(True)', 'optimizer': '"""MIES"""'}), "(search_space, obj_func, model, max_iter=n_step, random_seed=None,\n n_init_sample=n_init_sample, minimize=True, verbose=False, debug=True,\n optimizer='MIES')\n", (1410, 1573), False, 'from BayesOpt import BayesOpt, RandomForest, RrandomForest\n'), ((639, 651), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (646, 651), True, 'import numpy as np\n'), ((678, 690), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (685, 690), True, 'import numpy as np\n'), ((383, 407), 'deap.benchmarks.himmelblau', 'benchmarks.himmelblau', (['x'], {}), '(x)\n', (404, 407), False, 'from deap import benchmarks\n'), ((700, 719), 'numpy.random.rand', 'np.random.rand', (['dim'], {}), '(dim)\n', (714, 719), True, 'import numpy as np\n')]
|
import numpy as np
from compmech.stiffpanelbay import StiffPanelBay
from compmech.analysis import freq, lb
def test_freq_models():
print('Testing frequency analysis for StiffPanelBay with 2 plates')
# From Table 4 of
# Lee and Lee. "Vibration analysis of anisotropic plates with eccentric
# stiffeners". Computers & Structures, Vol. 57, No. 1, pp. 99-105,
# 1995.
for model in ['plate_clt_donnell_bardell',
'cpanel_clt_donnell_bardell',
'kpanel_clt_donnell_bardell']:
spb = StiffPanelBay()
spb.a = 0.5
spb.b = 0.250
spb.plyt = 0.00013
spb.laminaprop = (128.e9, 11.e9, 0.25, 4.48e9, 1.53e9, 1.53e9)
spb.stack = [0, -45, +45, 90, 90, +45, -45, 0]
spb.model = model
spb.r = 1.e6
spb.alphadeg = 0.
spb.mu = 1.5e3
spb.m = 9
spb.n = 10
# clamping
spb.w1rx = 0.
spb.w2rx = 0.
spb.w1ry = 0.
spb.w2ry = 0.
spb.add_panel(0, spb.b/2., plyt=spb.plyt)
spb.add_panel(spb.b/2., spb.b, plyt=spb.plyt)
k0 = spb.calc_k0(silent=True)
M = spb.calc_kM(silent=True)
eigvals, eigvecs = freq(k0, M, silent=True)
ref = [85.12907802-0.j, 134.16422850-0.j, 206.77295186-0.j,
216.45992453-0.j, 252.24546171-0.j]
assert np.allclose(eigvals[:5]/2/np.pi, ref, atol=0.1, rtol=0)
def test_lb_Stiffener1D():
print('Testing linear buckling for StiffPanelBay with a 1D Stiffener')
spb = StiffPanelBay()
spb.a = 1.
spb.b = 0.5
spb.stack = [0, 90, 90, 0]
spb.plyt = 1e-3*0.125
spb.laminaprop = (142.5e9, 8.7e9, 0.28, 5.1e9, 5.1e9, 5.1e9)
spb.model = 'plate_clt_donnell_bardell'
spb.mu = 1.3e3
spb.m = 15
spb.n = 16
spb.add_panel(y1=0, y2=spb.b/2., plyt=spb.plyt, Nxx=-1.)
spb.add_panel(y1=spb.b/2., y2=spb.b, plyt=spb.plyt, Nxx_cte=1000.)
spb.add_bladestiff1d(ys=spb.b/2., Fx=0., bf=0.05, fstack=[0, 90, 90, 0],
fplyt=spb.plyt, flaminaprop=spb.laminaprop)
k0 = spb.calc_k0(silent=True)
kG = spb.calc_kG0(silent=True)
eigvals, eigvecs = lb(k0, kG, silent=True)
assert np.isclose(eigvals[0].real, 297.54633, atol=0.1, rtol=0)
def test_lb_Stiffener2D():
print('Testing linear buckling for StiffPanelBay with a 2D Stiffener')
spb = StiffPanelBay()
spb.a = 1.
spb.b = 0.5
spb.stack = [0, 90, 90, 0]
spb.plyt = 1e-3*0.125
spb.laminaprop = (142.5e9, 8.7e9, 0.28, 5.1e9, 5.1e9, 5.1e9)
spb.model = 'plate_clt_donnell_bardell'
spb.mu = 1.3e3
spb.m = 15
spb.n = 16
spb.add_panel(y1=0, y2=spb.b/2., plyt=spb.plyt, Nxx=-1.)
spb.add_panel(y1=spb.b/2., y2=spb.b, plyt=spb.plyt, Nxx_cte=1000.)
spb.add_bladestiff2d(ys=spb.b/2., m1=14, n1=11, bf=0.05,
fstack=[0, 90, 90, 0],
fplyt=spb.plyt, flaminaprop=spb.laminaprop)
k0 = spb.calc_k0(silent=True)
kG = spb.calc_kG0(silent=True)
eigvals, eigvecs = lb(k0, kG, silent=True)
assert np.isclose(eigvals[0].real, 301.0825234, atol=0.1, rtol=0)
def test_freq_Stiffener1D():
print('Testing frequency analysis for StiffPanelBay with a 1D Stiffener')
spb = StiffPanelBay()
spb.a = 2.
spb.b = 0.5
spb.stack = [0, 90, 90, 0]
spb.plyt = 1e-3*0.125
spb.laminaprop = (142.5e9, 8.7e9, 0.28, 5.1e9, 5.1e9, 5.1e9)
spb.model = 'plate_clt_donnell_bardell'
spb.mu = 1.3e3
spb.m = 15
spb.n = 16
spb.add_panel(y1=0, y2=spb.b/2., plyt=spb.plyt)
spb.add_panel(y1=spb.b/2., y2=spb.b, plyt=spb.plyt)
spb.add_bladestiff1d(ys=spb.b/2., Fx=0., bf=0.08, fstack=[0, 90, 90, 0]*5,
fplyt=spb.plyt, flaminaprop=spb.laminaprop)
k0 = spb.calc_k0(silent=True)
M = spb.calc_kM(silent=True)
eigvals, eigvecs = freq(k0, M, silent=True, num_eigvalues=10)
assert np.isclose(eigvals[0].real, 79.5906673583, atol=0.1, rtol=0)
def test_freq_Stiffener2D():
print('Testing frequency analysis for StiffPanelBay with a 2D Stiffener')
spb = StiffPanelBay()
spb.a = 1.
spb.b = 0.5
spb.stack = [0, 90, 90, 0]
spb.plyt = 1e-3*0.125
spb.laminaprop = (142.5e9, 8.7e9, 0.28, 5.1e9, 5.1e9, 5.1e9)
spb.model = 'plate_clt_donnell_bardell'
spb.mu = 1.3e3
spb.m = 11
spb.n = 12
spb.add_panel(y1=0, y2=spb.b/2., plyt=spb.plyt)
spb.add_panel(y1=spb.b/2., y2=spb.b, plyt=spb.plyt)
spb.add_bladestiff2d(ys=spb.b/2., m1=14, n1=11, bf=0.08,
fstack=[0, 90, 90, 0]*5, fplyt=spb.plyt,
flaminaprop=spb.laminaprop)
k0 = spb.calc_k0(silent=True)
M = spb.calc_kM(silent=True)
eigvals, eigvecs = freq(k0, M, silent=True)
assert np.isclose(eigvals[0].real, 137.97927190657148, atol=0.01, rtol=0)
def test_Lee_and_Lee_table4():
print('Testing Lee and Lee Table 4')
# Lee and Lee. "Vibration analysis of anisotropic plates with eccentric
# stiffeners". Computers & Structures, Vol. 57, No. 1, pp. 99-105,
# 1995.
models = (
('model4', 0.00208, 0.0060, 138.99917796302756),
('model5', 0.00260, 0.0075, 175.00597239286196),
('model7', 0.00364, 0.0105, 205.433509024))
for model, hf, bf, value in models:
spb = StiffPanelBay()
spb.model = 'plate_clt_donnell_bardell'
spb.mu = 1.500e3 # plate material density in kg / m^3
spb.laminaprop = (128.e9, 11.e9, 0.25, 4.48e9, 1.53e9, 1.53e9)
spb.stack = [0, -45, +45, 90, 90, +45, -45, 0]
plyt = 0.00013
spb.plyt = plyt
spb.a = 0.5
spb.b = 0.250
spb.m = 14
spb.n = 15
hf = hf
bf = bf
n = int(hf/plyt)
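        # Flange laminate: n plies arranged as a symmetric [0/90/90/0] stack (groups of n//4 plies each).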
fstack = [0]*(n//4) + [90]*(n//4) + [90]*(n//4) + [0]*(n//4)
# clamping
spb.w1rx = 0.
spb.w2rx = 0.
spb.w1ry = 0.
spb.w2ry = 0.
spb.add_panel(y1=0, y2=spb.b/2.)
spb.add_panel(y1=spb.b/2., y2=spb.b)
spb.add_bladestiff1d(mu=spb.mu, ys=spb.b/2., bb=0., bf=bf,
fstack=fstack, fplyt=plyt, flaminaprop=spb.laminaprop)
k0 = spb.calc_k0(silent=True)
M = spb.calc_kM(silent=True)
eigvals, eigvecs = freq(k0, M, silent=True)
herz = eigvals[0].real/2/np.pi
assert np.isclose(herz, value, atol=0.001, rtol=0.001)
|
[
"compmech.stiffpanelbay.StiffPanelBay",
"numpy.allclose",
"compmech.analysis.lb",
"numpy.isclose",
"compmech.analysis.freq"
] |
[((1542, 1557), 'compmech.stiffpanelbay.StiffPanelBay', 'StiffPanelBay', ([], {}), '()\n', (1555, 1557), False, 'from compmech.stiffpanelbay import StiffPanelBay\n'), ((2164, 2187), 'compmech.analysis.lb', 'lb', (['k0', 'kG'], {'silent': '(True)'}), '(k0, kG, silent=True)\n', (2166, 2187), False, 'from compmech.analysis import freq, lb\n'), ((2200, 2256), 'numpy.isclose', 'np.isclose', (['eigvals[0].real', '(297.54633)'], {'atol': '(0.1)', 'rtol': '(0)'}), '(eigvals[0].real, 297.54633, atol=0.1, rtol=0)\n', (2210, 2256), True, 'import numpy as np\n'), ((2371, 2386), 'compmech.stiffpanelbay.StiffPanelBay', 'StiffPanelBay', ([], {}), '()\n', (2384, 2386), False, 'from compmech.stiffpanelbay import StiffPanelBay\n'), ((3036, 3059), 'compmech.analysis.lb', 'lb', (['k0', 'kG'], {'silent': '(True)'}), '(k0, kG, silent=True)\n', (3038, 3059), False, 'from compmech.analysis import freq, lb\n'), ((3072, 3130), 'numpy.isclose', 'np.isclose', (['eigvals[0].real', '(301.0825234)'], {'atol': '(0.1)', 'rtol': '(0)'}), '(eigvals[0].real, 301.0825234, atol=0.1, rtol=0)\n', (3082, 3130), True, 'import numpy as np\n'), ((3250, 3265), 'compmech.stiffpanelbay.StiffPanelBay', 'StiffPanelBay', ([], {}), '()\n', (3263, 3265), False, 'from compmech.stiffpanelbay import StiffPanelBay\n'), ((3848, 3890), 'compmech.analysis.freq', 'freq', (['k0', 'M'], {'silent': '(True)', 'num_eigvalues': '(10)'}), '(k0, M, silent=True, num_eigvalues=10)\n', (3852, 3890), False, 'from compmech.analysis import freq, lb\n'), ((3903, 3963), 'numpy.isclose', 'np.isclose', (['eigvals[0].real', '(79.5906673583)'], {'atol': '(0.1)', 'rtol': '(0)'}), '(eigvals[0].real, 79.5906673583, atol=0.1, rtol=0)\n', (3913, 3963), True, 'import numpy as np\n'), ((4083, 4098), 'compmech.stiffpanelbay.StiffPanelBay', 'StiffPanelBay', ([], {}), '()\n', (4096, 4098), False, 'from compmech.stiffpanelbay import StiffPanelBay\n'), ((4724, 4748), 'compmech.analysis.freq', 'freq', (['k0', 'M'], {'silent': '(True)'}), '(k0, M, silent=True)\n', (4728, 4748), False, 'from compmech.analysis import freq, lb\n'), ((4761, 4827), 'numpy.isclose', 'np.isclose', (['eigvals[0].real', '(137.97927190657148)'], {'atol': '(0.01)', 'rtol': '(0)'}), '(eigvals[0].real, 137.97927190657148, atol=0.01, rtol=0)\n', (4771, 4827), True, 'import numpy as np\n'), ((551, 566), 'compmech.stiffpanelbay.StiffPanelBay', 'StiffPanelBay', ([], {}), '()\n', (564, 566), False, 'from compmech.stiffpanelbay import StiffPanelBay\n'), ((1211, 1235), 'compmech.analysis.freq', 'freq', (['k0', 'M'], {'silent': '(True)'}), '(k0, M, silent=True)\n', (1215, 1235), False, 'from compmech.analysis import freq, lb\n'), ((1372, 1431), 'numpy.allclose', 'np.allclose', (['(eigvals[:5] / 2 / np.pi)', 'ref'], {'atol': '(0.1)', 'rtol': '(0)'}), '(eigvals[:5] / 2 / np.pi, ref, atol=0.1, rtol=0)\n', (1383, 1431), True, 'import numpy as np\n'), ((5302, 5317), 'compmech.stiffpanelbay.StiffPanelBay', 'StiffPanelBay', ([], {}), '()\n', (5315, 5317), False, 'from compmech.stiffpanelbay import StiffPanelBay\n'), ((6247, 6271), 'compmech.analysis.freq', 'freq', (['k0', 'M'], {'silent': '(True)'}), '(k0, M, silent=True)\n', (6251, 6271), False, 'from compmech.analysis import freq, lb\n'), ((6327, 6374), 'numpy.isclose', 'np.isclose', (['herz', 'value'], {'atol': '(0.001)', 'rtol': '(0.001)'}), '(herz, value, atol=0.001, rtol=0.001)\n', (6337, 6374), True, 'import numpy as np\n')]
|
"""
Script to export a PyTorch-based Pyrado policy to C++
"""
import numpy as np
import torch as to
from rcsenv import ControlPolicy
from pyrado.policies.linear import LinearPolicy
from pyrado.policies.rnn import RNNPolicy
from pyrado.spaces.box import BoxSpace
from pyrado.utils.data_types import EnvSpec
from pyrado.policies.features import FeatureStack, squared_feat, identity_feat, const_feat
def create_nonrecurrent_policy():
return LinearPolicy(
EnvSpec(
BoxSpace(-1, 1, 4),
BoxSpace(-1, 1, 3),
),
FeatureStack([
const_feat,
identity_feat,
squared_feat
])
)
def create_recurrent_policy():
return RNNPolicy(
EnvSpec(
BoxSpace(-1, 1, 4),
BoxSpace(-1, 1, 3),
),
hidden_size=32, num_recurrent_layers=1, hidden_nonlin='tanh'
)
if __name__ == '__main__':
tmpfile = '/tmp/torchscriptsaved.pt'
to.set_default_dtype(to.double)
# Create a Pyrado policy
model = create_nonrecurrent_policy()
# model = create_recurrent_policy()
# Trace the Pyrado policy (inherits from PyTorch module)
traced_script_module = model.trace()
print(traced_script_module.graph)
# Save the scripted module
traced_script_module.save(tmpfile)
# Load in C++
cp = ControlPolicy('torch', tmpfile)
# Print more digits
to.set_printoptions(precision=8, linewidth=200)
np.set_printoptions(precision=8, linewidth=200)
print(f'manual: {model(to.tensor([1, 2, 3, 4], dtype=to.get_default_dtype()))}')
print(f'script: {traced_script_module(to.tensor([1, 2, 3, 4], dtype=to.get_default_dtype()))}')
print(f'cpp: {cp(np.array([1, 2, 3, 4]), 3)}')
|
[
"torch.get_default_dtype",
"torch.set_printoptions",
"rcsenv.ControlPolicy",
"torch.set_default_dtype",
"numpy.array",
"pyrado.policies.features.FeatureStack",
"pyrado.spaces.box.BoxSpace",
"numpy.set_printoptions"
] |
[((963, 994), 'torch.set_default_dtype', 'to.set_default_dtype', (['to.double'], {}), '(to.double)\n', (983, 994), True, 'import torch as to\n'), ((1346, 1377), 'rcsenv.ControlPolicy', 'ControlPolicy', (['"""torch"""', 'tmpfile'], {}), "('torch', tmpfile)\n", (1359, 1377), False, 'from rcsenv import ControlPolicy\n'), ((1407, 1454), 'torch.set_printoptions', 'to.set_printoptions', ([], {'precision': '(8)', 'linewidth': '(200)'}), '(precision=8, linewidth=200)\n', (1426, 1454), True, 'import torch as to\n'), ((1459, 1506), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(8)', 'linewidth': '(200)'}), '(precision=8, linewidth=200)\n', (1478, 1506), True, 'import numpy as np\n'), ((559, 614), 'pyrado.policies.features.FeatureStack', 'FeatureStack', (['[const_feat, identity_feat, squared_feat]'], {}), '([const_feat, identity_feat, squared_feat])\n', (571, 614), False, 'from pyrado.policies.features import FeatureStack, squared_feat, identity_feat, const_feat\n'), ((488, 506), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)', '(4)'], {}), '(-1, 1, 4)\n', (496, 506), False, 'from pyrado.spaces.box import BoxSpace\n'), ((520, 538), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (528, 538), False, 'from pyrado.spaces.box import BoxSpace\n'), ((751, 769), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)', '(4)'], {}), '(-1, 1, 4)\n', (759, 769), False, 'from pyrado.spaces.box import BoxSpace\n'), ((783, 801), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (791, 801), False, 'from pyrado.spaces.box import BoxSpace\n'), ((1717, 1739), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1725, 1739), True, 'import numpy as np\n'), ((1565, 1587), 'torch.get_default_dtype', 'to.get_default_dtype', ([], {}), '()\n', (1585, 1587), True, 'import torch as to\n'), ((1665, 1687), 'torch.get_default_dtype', 'to.get_default_dtype', ([], {}), '()\n', (1685, 1687), True, 'import torch as to\n')]
|
# Author: <NAME>
'''
Hierarchy of subroutines::
hh_param_from_profile
|--- _calc_rho()
|--- helper_site_response.thk2dep()
|--- _calc_Gmax()
|--- _calc_vertical_stress()
|--- _calc_OCR()
|--- _calc_K0()
|--- _calc_PI()
|--- _calc_shear_strength()
|--- _calc_K0()
|--- produce_HH_G_param()
|--- _calc_mean_confining_stress()
|--- produce_Darendeli_curves()
|--- helper_generic.assert_1D_numpy_array()
|--- helper_generic.check_length_or_extend_to_array()
|--- _calc_K0()
|--- _calc_mean_confining_stress()
|--- helper_mkz_model.fit_MKZ()
|--- _optimization_kernel()
|
|--- helper_mkz_model.tau_MKZ()
|--- helper_generic.find_closest_index()
|--- __calc_area()
|--- helper_hh_model.tau_FKZ()
|--- helper_generic.find_closest_index()
|--- __find_x_t_and_d()
|--- helper_hh_model.tau_FKZ()
|--- helper_generic.find_closest_index()
'''
import os
import numpy as np
import matplotlib.pyplot as plt
from . import helper_generic as hlp
from . import helper_site_response as sr
from . import helper_mkz_model as mkz
from . import helper_hh_model as hh
#%%----------------------------------------------------------------------------
def hh_param_from_profile(vs_profile, *, Tmax=None, show_fig=False,
save_fig=False, fig_output_dir=None,
save_HH_G_file=False, HH_G_file_dir=None,
profile_name=None, verbose=True):
'''
Get HH parameters of each soil layer from the Vs values of every layer.
Parameters
----------
vs_profile : numpy.ndarray
Shear-wave velocity profile, as a 2D numpy array. It should have the
following columns:
+---------------+----------+---------+---------+-----------------+
| Thickness (m) | Vs (m/s) | Damping | Density | Material Number |
+===============+==========+=========+=========+=================+
| ... | ... | ... | ... | ... |
+---------------+----------+---------+---------+-----------------+
Tmax : numpy.ndarray or ``None``
Shear strength of each layer of soil. If ``None``, it will be
calculated using a combination of Ladd (1991) and Mohr-Coulomb criteria.
show_fig : bool
Whether or not to show figures G/Gmax and stress-strain curves of MKZ,
FKZ, and HH for each layer.
save_fig : bool
Whether or not to save the figures to the hard drive. Only effective
if ``show_fig`` is set to ``True``.
fig_output_dir : str
The output directory for the figures. Only effective if ``show_fig``
and ``save_fig`` are both ``True``.
save_HH_G_file : bool
Whether or not to save the HH parameters to the hard drive (as a
"HH_G" file).
HH_G_file_dir : str
The output directory for the "HH_G" file. Only effective if
``save_HH_G_file`` is ``True``.
profile_name : str or ``None``
The name of the Vs profile, such as "CE.12345". If ``None``, a string
of current date and time will be used as the profile name.
verbose : bool
Whether or not to print progresses on the console.
Returns
-------
HH_G_param : numpy.ndarray
The HH parameters of each layer. It's a 2D array of shape
``(9, n_layer)``. For each layer (i.e., column), the values are in
this order:
gamma_t, a, gamma_ref, beta, s, Gmax, mu, Tmax, d
'''
phi = 30. # friction angle (choose 30 degrees, because there is no better info)
hlp.check_Vs_profile_format(vs_profile)
h = vs_profile[:-1, 0]
Vs = vs_profile[:-1, 1] # exclude the last layer (i.e., half space)
n_layer = len(Vs)
if Tmax is not None:
hlp.assert_array_length(Tmax, n_layer)
rho = _calc_rho(h, Vs)
Gmax = _calc_Gmax(Vs, rho)
sigma_v0 = _calc_vertical_stress(h, rho)
OCR = _calc_OCR(Vs, rho, sigma_v0)
K0 = _calc_K0(OCR, phi=phi)
PI = _calc_PI(Vs)
if Tmax is None:
Tmax = _calc_shear_strength(Vs, OCR, sigma_v0, K0=K0, phi=phi)
HH_G_param = produce_HH_G_param(Vs, Gmax, Tmax, OCR, sigma_v0, K0,
curves=None, PI=PI, phi=phi,
show_fig=show_fig, save_fig=save_fig,
fig_output_dir=fig_output_dir,
verbose=verbose)
if save_HH_G_file:
if HH_G_file_dir is None:
raise ValueError('Please specify `HH_G_file_dir`.')
if profile_name is None:
profile_name = hlp.get_current_time(for_filename=True)
np.savetxt(os.path.join(HH_G_file_dir, 'HH_G_%s.txt' % profile_name),
HH_G_param, delimiter='\t', fmt='%.6g')
return HH_G_param
#%%----------------------------------------------------------------------------
def hh_param_from_curves(vs_profile, curves, *, Tmax=None, show_fig=False,
save_fig=False, fig_output_dir=None, save_HH_G_file=False,
HH_G_file_dir=None, profile_name=None, verbose=True):
'''
Get HH parameters of each soil layer from the Vs profile and G/Gmax curves.
Parameters
----------
vs_profile : numpy.ndarray
Shear-wave velocity profile, as a 2D numpy array. It should have the
following columns:
+---------------+----------+---------+---------+-----------------+
| Thickness (m) | Vs (m/s) | Damping | Density | Material Number |
+===============+==========+=========+=========+=================+
| ... | ... | ... | ... | ... |
+---------------+----------+---------+---------+-----------------+
curves : numpy.ndarray
A 2D numpy array that represents G/Gmax and damping curves of each
layer, in the following format:
+------------+--------+------------+-------------+-------------+--------+-----+
| strain [%] | G/Gmax | strain [%] | damping [%] | strain [%] | G/Gmax | ... |
+============+========+============+=============+=============+========+=====+
| ... | ... | ... | ... | ... | ... | ... |
+------------+--------+------------+-------------+-------------+--------+-----+
The damping information is neglected in this function, so users can
supply some dummy values.
Tmax : numpy.ndarray or ``None``
Shear strength of each layer of soil. If ``None``, it will be
calculated using a combination of Ladd (1991) and Mohr-Coulomb criteria.
show_fig : bool
Whether or not to show figures G/Gmax and stress-strain curves of MKZ,
FKZ, and HH for each layer.
save_fig : bool
Whether or not to save the figures to the hard drive. Only effective
if ``show_fig`` is set to ``True``.
fig_output_dir : str
The output directory for the figures. Only effective if ``show_fig``
and ``save_fig`` are both ``True``.
save_HH_G_file : bool
Whether or not to save the HH parameters to the hard drive (as a
"HH_G" file).
HH_G_file_dir : str
The output directory for the "HH_G" file. Only effective if
``save_HH_G_file`` is ``True``.
profile_name : str or ``None``
The name of the Vs profile, such as "CE.12345". If ``None``, a string
of current date and time will be used as the profile name.
verbose : bool
Whether or not to print progresses on the console.
Returns
-------
HH_G_param : numpy.ndarray
The HH parameters of each layer. It's a 2D array of shape
``(9, n_layer)``. For each layer (i.e., column), the values are in
this order:
gamma_t, a, gamma_ref, beta, s, Gmax, mu, Tmax, d
'''
phi = 30.0
hlp.check_Vs_profile_format(vs_profile)
h = vs_profile[:-1, 0]
Vs = vs_profile[:-1, 1] # exclude the last layer (i.e., half space)
n_layer = len(Vs)
if vs_profile.shape[1] == 5: # there can only be 5 or 2 columns
mat = vs_profile[:-1, -1]
rho = vs_profile[:-1, 3]
else: # only 2 columns
mat = np.arange(1, n_layer+1)
rho = _calc_rho(h, Vs)
if Tmax is not None:
hlp.assert_array_length(Tmax, n_layer)
Gmax = _calc_Gmax(Vs, rho)
sigma_v0 = _calc_vertical_stress(h, rho)
OCR = _calc_OCR(Vs, rho, sigma_v0)
K0 = _calc_K0(OCR, phi=phi)
if Tmax is None:
Tmax = _calc_shear_strength(Vs, OCR, sigma_v0, K0=K0, phi=phi)
curves_old = curves.copy()
curves_expanded = None
for j in range(n_layer):
tmp = curves_old[:, int(mat[j]) * 4 - 4: int(mat[j]) * 4]
if curves_expanded is None:
curves_expanded = tmp
else:
curves_expanded = np.column_stack((curves_expanded, tmp))
curves = curves_expanded
HH_G_param = produce_HH_G_param(Vs, Gmax, Tmax, OCR, sigma_v0, K0,
curves=curves, PI=None, phi=None,
show_fig=show_fig, save_fig=save_fig,
fig_output_dir=fig_output_dir,
verbose=verbose)
if save_HH_G_file:
if HH_G_file_dir is None:
raise ValueError('Please specify `HH_G_file_dir`.')
if profile_name is None:
profile_name = hlp.get_current_time(for_filename=True)
np.savetxt(os.path.join(HH_G_file_dir, 'HH_G_%s.txt' % profile_name),
HH_G_param, delimiter='\t', fmt='%.6g')
return HH_G_param
#%%----------------------------------------------------------------------------
def produce_HH_G_param(Vs, Gmax, Tmax, OCR, sigma_v0, K0, curves=None,
PI=None, phi=None, show_fig=False, save_fig=False,
fig_output_dir=None, verbose=True):
'''
Produce HH_G parameters from profiles of Vs, Tmax, OCR, etc.
Parameters
----------
Vs : numpy.ndarray
Vs values of each layer. Shape: ``(n_layer, )``, where ``n_layer`` is
the length of ``Vs``. Unit: m/s.
Gmax : numpy.ndarray
Initial stiffness of each layer. Shape: ``(n_layer, )``. Unit: Pa.
Tmax : numpy.ndarray
The shear strength of each layer. Shape: ``(n_layer, )``. Unit: Pa.
OCR : numpy.ndarray
Over-consolidation ratio of each layer. Shape: ``(n_layer, )``.
sigma_v0 : numpy.ndarray
Vertical effective confining stress of each layer. Shape:
``(n_layer, )``. Unit: Pa.
K0 : numpy.ndarray or float
Lateral soil pressure coefficient. If an array, it must have shape
``(n_layer, )``. If a single value, it means that all layers share
this same value.
curves : numpy.ndarray or ``None``
A 2D numpy array that represents G/Gmax and damping curves of each
layer, in the following format:
+------------+--------+------------+-------------+-------------+--------+-----+
| strain [%] | G/Gmax | strain [%] | damping [%] | strain [%] | G/Gmax | ... |
+============+========+============+=============+=============+========+=====+
| ... | ... | ... | ... | ... | ... | ... |
+------------+--------+------------+-------------+-------------+--------+-----+
The damping information is neglected in this function, so users can
supply some dummy values. If ``None``, it means that the users do not
have G/Gmax curve information, so this function will calculate the
MKZ G/Gmax curves from the empirical formulas in Darendeli (2001).
PI : float or numpy.ndarray or ``None``
Plasticity index of the soils. It is not necessary (can be ``None``) if
``curves`` is provided (i.e., not ``None``). If an array, it must have
shape ``(n_layer, )``. If a single value, it means that all layers
share this same value.
phi : float or numpy.ndarray or ``None``
Effective internal frictional angle (in degrees). It is not necessary
(can be ``None``) if ``curve`` is provided (i.e., not ``None``). If
an array, it must have shape ``(n_layer, )``. If a single value, it
means that all layers share this same value.
show_fig : bool
Whether or not to show figures G/Gmax and stress-strain curves of MKZ,
FKZ, and HH for each layer.
save_fig : bool
Whether or not to save the figures to the hard drive. Only effective
if ``show_fig`` is set to ``True``.
fig_output_dir : str
The output directory for the figures. Only effective if ``show_fig``
and ``save_fig`` are both ``True``.
verbose : bool
Whether or not to print progresses on the console.
Returns
-------
parameters : numpy.ndarray
The HH parameters of each layer. It's a 2D array of shape
``(9, n_layer)``. For each layer (i.e., column), the values are in
this order:
gamma_t, a, gamma_ref, beta, s, Gmax, mu, Tmax, d
Notes
-----
This function is based on ``hybridParaKernel_FKZ.m``.
'''
hlp.assert_1D_numpy_array(Vs, '`Vs`')
n_layer = len(Vs)
hlp.assert_array_length(Gmax, n_layer, name='`Gmax`')
hlp.assert_array_length(Tmax, n_layer, name='`Tmax`')
hlp.assert_array_length(OCR, n_layer, name='`OCR`')
hlp.assert_array_length(sigma_v0, n_layer, name='`sigma_v0`')
K0 = hlp.check_length_or_extend_to_array(K0, n_layer, name='`K0`')
p0 = _calc_mean_confining_stress(sigma_v0, K0)
if verbose:
print('========== Start optimizing for HH_G parameters ===========')
# ============= MKZ fit ===================================================
if curves is None: # user does not provide curves
if verbose:
print('------ G/Gmax not provided; will generate MKZ curves using '
'Darendeli (2001): ------')
strain_ = np.geomspace(1e-4, 10, 400) # unit: percent
GGmax, _, gamma_ref = produce_Darendeli_curves(sigma_v0, PI, OCR=OCR,
K0=K0, phi=phi,
strain_in_pct=strain_)
strain = np.tile(strain_, (n_layer, 1)).T # strain matrix for all layers
beta = np.ones(n_layer)
s = 0.9190 * np.ones(n_layer)
else: # user provides own curves
if verbose:
print('------ G/Gmax provided; fitting MKZ curves to data: ------')
hlp.assert_2D_numpy_array(curves)
assert(curves.shape[1] == n_layer * 4)
# ----------- Extract G/Gmax information ------------------------------
strain = curves[:, 0::4] # unit: percent
GGmax = curves[:, 1::4]
# ----------- Fit MKZ parameters --------------------------------------
param_MKZ, _ = mkz.fit_MKZ(curves, show_fig=show_fig)
gamma_ref = param_MKZ[:, 0]
s = param_MKZ[:, 2]
beta = param_MKZ[:, 3]
# ----------- Show results on console ---------------------------------
if verbose:
print('****** MKZ parameters: ******')
for j in range(n_layer):
if verbose:
print('Layer %d: gamma_ref = %.3g, s = %.3g, beta = %.3g' \
% (j, gamma_ref[j], s[j], beta[j]))
# ========== Stress-strain curve implied by G/Gmax ========================
sigma = np.zeros_like(GGmax)
for j in range(n_layer):
sigma[0, j] = 0
for k in range(1, GGmax.shape[0]):
sigma[k, j] = GGmax[k, j] * Gmax[j] * strain[k, j] / 100.0
# END FOR
# END FOR
# ========== Estimate mu using empirical correlations =====================
p0 = p0 / 1000.0 # unit: Pa --> kPa
mu = np.zeros_like(OCR)
for j in range(n_layer):
if Vs[j] <= 760: # softer soil: use Vardanega & Bolton (2011) CGJ formula
mu[j] = 1.0 / (0.000872 * Gmax[j]/Tmax[j] * OCR[j]**0.47 * p0[j]**0.28)
if mu[j] <= 0.02: # mu too small --> too low tau_FKZ --> sharply decreasing tau_HH
mu[j] = mu[j] * 10.0 ** (0.236 * 3) # 0.236 is the standard error suggested in Vardanega & Bolton (2011)
elif mu[j] <= 0.03:
mu[j] = mu[j] * 10.0 ** (0.236 * 2)
elif mu[j] <= 0.04:
mu[j] = mu[j] * 10.0 ** (0.236 * 1)
# END IF
else: # stiffer soils: set mu to 1 for lack of better information
mu[j] = 1.0
# END IF
# END FOR
# ========== Start FKZ optimization =======================================
if verbose:
print('----------- FKZ optimization -----------------------------')
parameters = np.zeros((9, n_layer))
lw = 1.0
muted_blue = np.array([107, 174, 214]) / 255.
muted_green = np.array([120, 198, 121]) / 255.
muted_red = np.array([222, 45, 38]) / 255.
for j in range(n_layer):
strain_j = strain[:, j]
a, gamma_t, d = _optimization_kernel(strain_j / 100.0, gamma_ref[j],
beta[j], s[j], Gmax[j], Tmax[j],
mu[j])
if verbose:
print('%d/%d: mu = %.3f, a = %.1f, gamma_t = %.3f%%, d = %.3f' \
% (j + 1, n_layer, mu[j], a, gamma_t * 100, d))
T_FKZ = hh.tau_FKZ(strain_j / 100.0, Gmax=Gmax[j], mu=mu[j], d=d,
Tmax=Tmax[j])
T_HH = hh.tau_HH(strain_j / 100.0, gamma_t=gamma_t, a=a,
gamma_ref=gamma_ref[j], beta=beta[j], s=s[j],
Gmax=Gmax[j], mu=mu[j], Tmax=Tmax[j], d=d)
parameters[0, j] = gamma_t
parameters[1, j] = a
parameters[2, j] = gamma_ref[j]
parameters[3, j] = beta[j]
parameters[4, j] = s[j]
parameters[5, j] = Gmax[j]
parameters[6, j] = mu[j]
parameters[7, j] = Tmax[j]
parameters[8, j] = d
GGmax_HH = T_HH / (Gmax[j] * (strain_j / 100.0))
if show_fig:
fig = plt.figure(figsize=(4.2, 6.0))
plt.subplot(211)
if curves is None:
plt.semilogx(strain_j, sigma[:, j] / 1000., c=muted_blue,
lw=lw*2.5, label='MKZ') # Darendeli's curve
plt.semilogx(strain_j, T_FKZ / 1000., c=muted_green, lw=lw*1.75,
label='FKZ')
else:
plt.semilogx(strain_j, sigma[:, j] / 1000., c=muted_blue,
marker='o', ls='-', lw=lw*2.5,
label='Given $G/G_{\max}$')
plt.semilogx(strain_j, T_FKZ / 1000., c=muted_green, lw=lw*1.75,
label='FKZ')
plt.grid(ls=':', lw=0.5)
plt.plot([np.min(strain_j), np.max(strain_j)],
np.array([Tmax[j], Tmax[j]]) / 1000., lw=lw, c='gray',
ls='--')
plt.plot(strain_j, T_HH / 1000., c=muted_red, lw=lw, label='HH')
plt.plot([gamma_t * 100] * 2, plt.ylim(), ls='--', c='gray')
plt.ylabel('Stress [kPa]')
plt.xlim(np.min(strain_j), np.max(strain_j))
plt.legend(loc='upper left')
plt.title('$V_S$ = %.1f m/s, $G_{\max}$ = %.3f MPa,'
'\n$\\tau_{\mathrm{ff}}$ = %.3f kPa, '
'$\gamma_{\mathrm{ref}}$ = %.3f%%' \
% (Vs[j], Gmax[j]/1e6, Tmax[j]/1e3, gamma_ref[j]*100))
plt.subplot(212)
if curves is None:
plt.semilogx(strain_j, GGmax[:, j], c=muted_blue, lw=lw*2.5)
else:
plt.semilogx(strain_j, GGmax[:, j], c=muted_blue, ls='-',
marker='o', lw=lw*2.5)
plt.grid(ls=':', lw=0.5)
plt.plot(strain_j,
mu[j] / (1 + Gmax[j]/Tmax[j]*mu[j]*np.abs(strain_j/100.) ),
c=muted_green, lw=lw*1.75)
plt.plot(strain_j, GGmax_HH, c=muted_red, lw=lw)
plt.plot([gamma_t * 100] * 2, plt.ylim(), ls='--', c='gray')
plt.ylabel('$G/G_{\max}$')
plt.xlabel('Strain [%]')
plt.xlim(np.min(strain_j), np.max(strain_j))
plt.title("$\mu$ = %.3f, a = %.1f, $\gamma_{\mathrm{t}}$ = %.4f%%\n"
"d = %.4f, $p'_{\mathrm{m0}}$ = %.2f kPa" \
% (mu[j], a, gamma_t * 100, d, p0[j]))
fig.tight_layout(pad=0.5, h_pad=1.2, w_pad=0.3)
if save_fig:
if fig_output_dir is None:
raise ValueError('Please specify `fig_output_dir`.')
fig.savefig(os.path.join(fig_output_dir,
'Stress_GGmax_of_Layer_#%d.png' % (j+1)))
return parameters
#%%----------------------------------------------------------------------------
def _calc_shear_strength(Vs, OCR, sigma_v0, K0=None, phi=30.0):
'''
Calculate shear strength of soils.
Parameters
----------
Vs : numpy.ndarray
A 1D array of Vs values of each layer. Unit: m/s.
OCR : numpy.ndarray
A 1D array of OCR (over-consolidation ratio) of each layer. Unit: 1.
sigma_v0 : numpy.ndarray
A 1D array of vertical overburden pressure. Unit: Pa.
K0 : float, int, numpy.ndarray, or ``None``
Lateral soil pressure coefficient. If a single value is given, it is
assumed to be the value for all layers. If ``None``, it will be
determined from OCR via an empirical formula by Mayne & Kulhawy (1982).
phi : float, int, or numpy.ndarray
Effective internal friction angle of soils (in degrees).
Returns
-------
Tmax : numpy.ndarray
Shear strength of soils of each layer. Unit: Pa.
'''
dyna_coeff = 1.2 # assume a strain rate of 0.01 sec^(-1), from Vardanega & Bolton (2013)
phi = hlp.check_length_or_extend_to_array(phi, len(Vs), name='`phi`')
if K0 is None:
K0 = _calc_K0(OCR, phi=phi)
else:
K0 = hlp.check_length_or_extend_to_array(K0, len(Vs), name='`K0`')
Tmax = np.zeros(len(Vs))
for j in range(len(Vs)):
if Vs[j] <= 760: # for softer soils, calculate undrained shear strength
Tmax[j] = dyna_coeff * 0.28 * OCR[j]**0.8 * sigma_v0[j] # Ladd (1991)
else: # stiffer soils: Mohr-Coulomb criterion
sigma_h0 = K0[j] * sigma_v0[j] # horizontal stress
sigma_1 = np.max([sigma_v0[j], sigma_h0]) # largest principal stress
sigma_3 = np.min([sigma_v0[j], sigma_h0]) # smallest principal stress
# normal effective stress on the slip plane
sigma_n = (sigma_1 + sigma_3)/2.0 \
- (sigma_1 - sigma_3)/2.0 * np.sin(np.deg2rad(phi[j]))
Tmax[j] = dyna_coeff * sigma_n * np.tan(np.deg2rad(phi[j]))
return Tmax
#%%----------------------------------------------------------------------------
def _calc_Gmax(Vs, rho):
'''
Calculate initial stiffness of each soil layer.
Parameters
----------
Vs : numpy.ndarray
1D array of Vs of layers. Unit: m/s.
rho : numpy.ndarray
1D array of mass density of layers. Unit: kg/m^3.
Returns
-------
Gmax : numpy.ndarray
1D array of initial stiffness. Unit: Pa
'''
Gmax = rho * Vs**2
return Gmax
#%%----------------------------------------------------------------------------
def _calc_OCR(Vs, rho, sigma_v0, OCR_upper_limit=None):
'''
Calculate OCR (over-consolidation ratio) of each layer from the Vs profile.
Parameters
----------
Vs : numpy.ndarray
1D array of Vs of layers. Unit: m/s.
rho : numpy.ndarray
1D array of mass density of layers. Unit: kg/m^3.
sigma_v0 : numpy.ndarray
Vertical overburden stress at the mid-point of each layer. Unit: Pa.
OCR_upper_limit : float or ``None``
The maximum acceptable OCR value. If ``None``, there is no limit.
Returns
-------
OCR : numpy.ndarray
1D array of OCR value, for each soil layer. (Unitless.)
'''
    sigma_p0 = 0.106 * Vs**1.47 # Mayne, <NAME> (1998) "Clay stress history evaluated from seismic piezocone tests"
sigma_p0 = sigma_p0 * 1000 # kPa --> Pa
OCR = sigma_p0 / sigma_v0
OCR = np.minimum(OCR, np.inf if OCR_upper_limit is None else OCR_upper_limit)
return OCR
#%%----------------------------------------------------------------------------
def _calc_vertical_stress(h, rho):
'''
Calculate vertical overburden stress at the mid-point of each layer.
Parameters
----------
h : numpy.ndarray
1D array of layer thickness. Unit: m.
rho : numpy.ndarray
1D array of mass density of each layer. Unit: kg/m^3.
Returns
-------
stress : numpy.ndarray
Vertical overburden stress at the mid-point of each layer. Unit: Pa.
'''
g = 9.81 # unit: m/s/s
n = len(h)
stress = np.zeros_like(h)
if np.mean(rho) < 1000:
        print('Warning in _calc_vertical_stress(): It looks like the unit '
              'of mass density is g/cm^3. The correct unit should be kg/m^3.')
if h[-1] == 0: # zero thickness, i.e., half space
h[-1] = 1
stress[0] = rho[0] * g * h[0]/2 # divided by 2: middle of layer
for i in range(1, n):
stress[i] = stress[i-1] + rho[i-1] * g * h[i-1]/2 + rho[i] * g * h[i]/2
return stress
#%%----------------------------------------------------------------------------
def _calc_rho(h, Vs):
'''
Calculate mass density of soils from Vs values, using the empirical formula
by Mayne, Schneider & Martin (1999) and Burns & Mayne (1996).
Parameters
----------
h : numpy.ndarray
The thickness of each soil layer. Unit: m.
Vs : numpy.ndarray
The shear-wave velocity for each layer. It needs to be a 1D numpy array.
Unit: m/s.
Returns
-------
rho : numpy.ndarray
Mass density of each soil layer. Unit: kg/m^3.
References
----------
1. Mayne, Schneider & Martin (1999) "Small- and large-strain soil
properties from seismic flat dilatometer tests." Pre-failure
deformation characteristics of geomaterials, 1999 Balkema, Rotterdam.
2. Burns & Mayne (1996) "Small- and high-strain soil properties using the
seismic piezocone." Transportation Research Record 1548, National
Acad. Press, Washington DC, 81-88.
'''
z = sr.thk2dep(h, midpoint=False)
z[z == 0] = 0.0001 # avoid error of dividing by zero
lb = 1.65 # lower bound of density: 1.65 g/cm^3
rho = np.maximum(lb, 1 + 1. / (0.614 + 58.7 * (np.log(z) + 1.095) / Vs))
rho *= 1000 # unit: g/cm^3 --> kg/m^3
return rho
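# Rough numeric check (added for illustration; not part of the original module):
# for a layer at z = 10 m with Vs = 400 m/s, the correlation gives about
# 1.9 g/cm^3 (~1900 kg/m^3), which is above the 1.65 g/cm^3 lower bound, so the
# clipping in np.maximum() does not kick in for that case.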
#%%----------------------------------------------------------------------------
def _calc_PI(Vs):
'''
Calculate PI (plasticity index) from Vs values.
Parameters
----------
Vs : numpy.ndarray
The shear-wave velocity for each layer. It needs to be a 1D numpy array.
Unit: m/s.
Returns
-------
PI : numpy.ndarray
The plasticity index for each layer. Unit: %.
'''
PI = np.zeros_like(Vs)
for j in range(len(Vs)):
if Vs[j] <= 200:
PI[j] = 10
elif Vs[j] <= 360:
PI[j] = 5
else:
PI[j] = 0
return PI
#%%----------------------------------------------------------------------------
def _calc_K0(OCR, phi=30.):
'''
Calculate K0 (lateral earth pressure coefficient at rest) from OCR using
the empirical formula by Mayne & Kulhawy (1982).
Parameters
----------
OCR : float, int, or numpy.ndarray
Over-consolidation ratio of each layer of soils. If it is a float/int,
it means only one layer, or all the layers have the same OCR.
phi : float, int, or numpy.ndarray
Internal effective friction angle of soils. If it is a float/int, it
means only one soil layer, or all the layers have the same angle.
Unit: deg.
Returns
-------
K0 : float or numpy.ndarray
K0 value(s). If either ``OCR`` or ``phi`` is an array, ``K0`` will be
an array of the same length.
'''
K0 = (1 - np.sin(np.deg2rad(phi))) * OCR ** (np.sin(np.deg2rad(phi)))
return K0
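# Quick numeric check (added for illustration; not part of the original module):
# for a normally consolidated soil, OCR = 1 and phi = 30 deg give
# K0 = (1 - sin(30 deg)) * 1**sin(30 deg) = 0.5, i.e. Jaky's 1 - sin(phi) value.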
#%%----------------------------------------------------------------------------
def produce_Darendeli_curves(sigma_v0, PI=20., OCR=1., K0=0.5, phi=30.0,
strain_in_pct=None):
'''
Produce G/Gmax and damping curves using empirical correlations by
Darendeli (2001).
Parameters
----------
sigma_v0 : numpy.ndarray
Effective vertical confining stress of each layer. Unit: Pa.
PI : int, float, or numpy.ndarray
Plasticity index of each layer. Unit: %. If a single value is given,
it is assumed to be the PI for all layers.
OCR : int, float, or numpy.ndarray
Over-consolidation ratio of each layer. If a single value is given,
it is assumed to be the value for all layers.
K0 : int, float, numpy.ndarray, or ``None``
Lateral soil pressure coefficient. If a single value is given, it is
assumed to be the value for all layers. If ``None``, it will be
determined from OCR via an empirical formula by Mayne & Kulhawy (1982).
phi : int, float, or numpy.ndarray
Internal effective friction angle of soils. If it is a float/int, it
means all the layers have the same angle. Unit: deg.
strain_in_pct : numpy.ndarray or ``None``
The strain values at which to calculate G/Gmax and damping values. If
``None``, numpy.geomspace(1e-4, 10, 400) will be used. Unit: percent.
Returns
-------
GGmax : numpy.ndarray
G/Gmax curves for each layer. It is a 2D numpy array. Each column of it
represents the G/Gmax curve of a particular layer. Unit: 1
xi : numpy.ndarray
Damping curves for each layer. Same shape as ``GGmax``. Unit: 1.
gamma_r : numpy.ndarray
Reference strain for each layer. It is a 1D numpy array, corresponding
to each soil layer. Unit: 1.
'''
hlp.assert_1D_numpy_array(sigma_v0)
n_layer = len(sigma_v0)
phi = hlp.check_length_or_extend_to_array(phi, n_layer, name='`phi`')
PI = hlp.check_length_or_extend_to_array(PI, n_layer, name='`PI`')
OCR = hlp.check_length_or_extend_to_array(OCR, n_layer, name='`OCR`')
if K0 is None:
K0 = _calc_K0(OCR, phi=phi)
else:
K0 = hlp.check_length_or_extend_to_array(K0, n_layer, name='`K0`')
if strain_in_pct is None:
gamma = np.geomspace(1e-4, 10, 400)
else:
gamma = strain_in_pct.copy()
# Define all constants
nr_cycle = 10
frq = 1
N = nr_cycle
phi1 = 0.0352
phi2 = 0.0010
phi3 = 0.3246
phi4 = 0.3483
phi5 = 0.9190
phi6 = 0.8005
phi7 = 0.0129
phi8 = -0.1069
phi9 = -0.2889
phi10 = 0.2919
phi11 = 0.6329
phi12 = -0.0057
a = phi5
c1 = -1.1143*a**2 + 1.8618*a + 0.2523 # from Darendeli (2001), page 226
c2 = 0.0805*a**2 - 0.0710*a - 0.0095
c3 = -0.0005*a**2 + 0.0002*a + 0.0003
b = phi11 + phi12 * np.log(N) # Darendeli (2001) Eq 9.1d
    # Confining stress
sigma_0 = _calc_mean_confining_stress(sigma_v0, K0) # octahedral stress
sigma_0 = sigma_0 / 101325.0 # unit: Pa --> atm
    n_strain_pts = len(gamma)  # use `gamma` here, since `strain_in_pct` may be None
# Reference strain for each layer (Eq 9.1a). Unit: percent
gamma_r = (phi1 + phi2 * PI * OCR**phi3) * sigma_0**phi4
GGmax = np.zeros((n_strain_pts, n_layer))
xi = np.zeros_like(GGmax)
for i in range(n_layer):
GGmax[:, i] = 1. / (1 + (gamma / gamma_r[i])**a) # G of i-th layer (Eq 9.2a)
D_masing_1 = (100. / np.pi) \
* (4 * (gamma - gamma_r[i] * np.log((gamma + gamma_r[i]) / gamma_r[i])) \
/ (gamma**2 / (gamma + gamma_r[i])) - 2) # Unit: percent (page 226)
D_masing = c1 * D_masing_1 + c2 * D_masing_1**2 + c3 * D_masing_1**3 # Unit: percent (page 226)
D_min = (phi6 + phi7 * PI[i] * OCR[i]**phi8) * sigma_0[i]**phi9 * (1 + phi10 * np.log(frq)) # Eq 9.1c (page 221)
xi[:, i] = b * GGmax[:, i]**0.1 * D_masing + D_min # Eq 9.2b (page 224). Unit: percent
xi /= 100.0
gamma_r /= 100.0
return GGmax, xi, gamma_r
#%%----------------------------------------------------------------------------
def _calc_mean_confining_stress(sigma_v0, K0):
'''
Calculate mean (of three directions) confining stress.
Parameters
----------
sigma_v0 : numpy.ndarray
(Effective) vertical stress of each layer. Unit: Pa.
K0 : numpy.ndarray
Lateral stress coefficient of each layer. Unit: 1.
Returns
-------
sigma_m0 : numpy.ndarray
Mean effective confining stress (of three directions). Unit: Pa.
'''
sigma_m0 = (2 * K0 + 1)/3.0 * sigma_v0
return sigma_m0
#%%----------------------------------------------------------------------------
def _optimization_kernel(x, x_ref, beta, s, Gmax, tau_f, mu):
'''
Optimization process to find FKZ parameters.
Parameters
----------
x : numpy.ndarray
An 1D array of shear strain. Unit: 1.
x_ref : float
The "reference strain" parameter (in MKZ) of the soil. Unit: 1.
beta : float
A shape parameter of the FKZ model.
s : float
A shape parameter of the FKZ model.
Gmax : float
Initial shear modulus. Unit: Pa.
tau_f : float
The shear strength of the current soil layer. Unit: Pa.
mu : float
The "shape parameter" of the FKZ model.
Returns
-------
a : float
A parameter of the HH model that defines the "speed" of transition from
MKZ to FKZ
gamma_t : float
The shear strain at which the transition from MKZ to FKZ happens.
Unit: 1
d : float
The "shape power" parameter in the FKZ model.
Notes
-----
Based on optHybFKZ.m
'''
T_MKZ = mkz.tau_MKZ(x, gamma_ref=x_ref, beta=beta, s=s, Gmax=Gmax)
if mu <= 0.03: # when mu is too small, there may be some numerical issues
gamma_t_LB = 0.001 # therefore gamma_t lower bound is relaxed
else:
gamma_t_LB = 0.01
gamma_t_UB = 3.0 # unit: percent
index_gamma_t_LB, _ = hlp.find_closest_index(x, gamma_t_LB / 100.0)
if T_MKZ[index_gamma_t_LB] >= 0.85 * tau_f:
gamma_t_LB = 0.005 # for very deep layers, tau_MKZ may be larger than tau_f at gamma_t_LB
index_gamma_t_LB, _ = hlp.find_closest_index(x, gamma_t_LB / 100.0) # do it again
if T_MKZ[index_gamma_t_LB] >= 0.85 * tau_f:
gamma_t_LB = 0.001
range_d = np.linspace(0.67, 1.39, 200)
area = __calc_area(range_d, x, Gmax, mu, tau_f, gamma_t_LB, gamma_t_UB, T_MKZ)
if np.min(area) < np.inf: # it means that a proper d value is found
gamma_t, d = __find_x_t_and_d(area, range_d, x, Gmax, mu, tau_f, T_MKZ)
else: # cannot find a proper d value
range_d = np.linspace(0.67, 1.39, 400) # increase grid density to 400
area = __calc_area(range_d, x, Gmax, mu, tau_f, gamma_t_LB,
gamma_t_UB, T_MKZ)
if np.min(area) < np.inf:
gamma_t, d = __find_x_t_and_d(area, range_d, x, Gmax, mu, tau_f, T_MKZ)
else:
range_d = np.linspace(0.67, 1.39, 1000) # increase grid density
new_gamma_t_LB = 0.005 # further relax
area = __calc_area(range_d, x, Gmax, mu, tau_f, new_gamma_t_LB,
gamma_t_UB, T_MKZ)
if np.min(area) < np.inf:
gamma_t, d = __find_x_t_and_d(area, range_d, x, Gmax, mu, tau_f, T_MKZ)
else:
d = 1.03
                gamma_t = 1e-3 / 100.0 # further relax to 0.001%
# END IF
# END IF
#END IF
a = 100.0 # always use a fast transition
return a, gamma_t, d
#%%----------------------------------------------------------------------------
def __find_x_t_and_d(area, range_d, x, Gmax, mu, tau_f, T_MKZ):
'''
Find the ``x_t`` (transition strain) that minimizes the "area" between
the MKZ stress curve and the FKZ stress curve.
Parameters
----------
area : numpy.ndarray
The "area" between the MKZ stress curve and the FKZ stress curve. It
has the same shape as ``range_d``, because each element of ``area`` is
the area corresponding to a ``d`` value within ``range_d``.
range_d : numpy.ndarray
The range of ``d`` to search from. Must be a 1D numpy array.
x : numpy.ndarray
An 1D array of shear strain. Unit: 1.
Gmax : float
Initial shear modulus. Unit: Pa.
mu : float
The "shape parameter" of the FKZ model.
tau_f : float
The shear strength of the current soil layer. Unit: Pa.
T_MKZ : numpy.ndarray
The MKZ stress curve, which has the same shape as ``x``. Unit: Pa.
Returns
-------
x_t : float
The ``x_t`` value that minimizes the "area" between the MKZ stress
curve and the FKZ stress curve. Unit: 1.
d : float
The ``d`` value that minimizes the "area" between the MKZ stress curve
and the FKZ stress curve. (No unit.)
'''
j_ = np.argmin(area)
d = range_d[j_]
T_FKZ = hh.tau_FKZ(x, Gmax=Gmax, mu=mu, d=d, Tmax=tau_f)
copt, _ = hlp.find_closest_index(np.abs(T_MKZ - T_FKZ), 0)
x_t = x[copt]
return x_t, d
#%%----------------------------------------------------------------------------
def __calc_area(range_d, x, Gmax, mu, tau_f, gamma_t_LB, gamma_t_UB, T_MKZ):
'''
Calculate the "area" between the MKZ stress curve and the FKZ stress curve.
The MKZ stress curve is supplied as a parameter, and the FKZ stress curve
is determined by ``x``, ``Gmax``, ``mu``, ``d``, ``tau_f``, and ``gamma_t``.
Parameters
----------
range_d : numpy.ndarray
The range of ``d`` to search from. Must be a 1D numpy array.
x : numpy.ndarray
An 1D array of shear strain. Unit: 1.
Gmax : float
Initial shear modulus. Unit: Pa.
mu : float
The "shape parameter" of the FKZ model.
tau_f : float
The shear strength of the current soil layer. Unit: Pa.
gamma_t_LB : float
The lower bound of ``gamma_t`` (:math:`\gamma_t`), i.e., the transition
strain. Unit: %.
gamma_t_UB : float
The upper bound of ``gamma_t``. Unit: %.
T_MKZ : numpy.ndarray
The MKZ stress curve, which has the same shape as ``x``. Unit: Pa.
Returns
-------
area : numpy.ndarray
The "area" between the MKZ stress curve and the FKZ stress curve. It
has the same shape as ``range_d``, because each element of ``area`` is
the area corresponding to a ``d`` value within ``range_d``.
'''
area = np.zeros_like(range_d)
for j in range(len(range_d)):
d = range_d[j]
T_FKZ = hh.tau_FKZ(x, Gmax=Gmax, mu=mu, d=d, Tmax=tau_f)
range_gamma_t = np.geomspace(gamma_t_LB, gamma_t_UB, 200) / 100.0 # unit: 1
copt, _ = hlp.find_closest_index(np.abs(T_MKZ - T_FKZ), 0) # "copt" = cross-over point
gamma_t = x[copt]
if (gamma_t >= range_gamma_t[0]) and (gamma_t <= range_gamma_t[-1]):
diff_T = np.abs(T_MKZ[:copt+1] - T_FKZ[:copt+1])
area[j] = np.linalg.norm(diff_T) / (copt + 1.0)
else:
area[j] = np.inf
# END IF
# END FOR
return area
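#%%----------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). It only restates the input format documented in the docstring of
# `hh_param_from_profile`: one row per layer with columns
# [thickness (m), Vs (m/s), damping, density (kg/m^3), material number],
# where the last row is the half space. Kept as comments because this module
# relies on relative imports and is not meant to run standalone.
#
#   import numpy as np
#   vs_profile = np.array([[2.0, 150.0, 0.01, 1600.0, 1],
#                          [5.0, 300.0, 0.01, 1800.0, 2],
#                          [0.0, 760.0, 0.01, 2000.0, 3]])  # last row: half space
#   HH_G = hh_param_from_profile(vs_profile, show_fig=False, verbose=False)
#   print(HH_G.shape)  # (9, n_layer): one column of HH parameters per layer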
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.column_stack",
"numpy.array",
"numpy.linalg.norm",
"numpy.arange",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.zeros_like",
"numpy.max",
"numpy.linspace",
"numpy.min",
"numpy.argmin",
"matplotlib.pyplot.ylim",
"numpy.abs",
"numpy.tile",
"numpy.ones",
"numpy.geomspace",
"numpy.deg2rad",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.minimum",
"matplotlib.pyplot.semilogx",
"os.path.join",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot"
] |
[((15961, 15981), 'numpy.zeros_like', 'np.zeros_like', (['GGmax'], {}), '(GGmax)\n', (15974, 15981), True, 'import numpy as np\n'), ((16313, 16331), 'numpy.zeros_like', 'np.zeros_like', (['OCR'], {}), '(OCR)\n', (16326, 16331), True, 'import numpy as np\n'), ((17257, 17279), 'numpy.zeros', 'np.zeros', (['(9, n_layer)'], {}), '((9, n_layer))\n', (17265, 17279), True, 'import numpy as np\n'), ((24879, 24950), 'numpy.minimum', 'np.minimum', (['OCR', '(np.inf if OCR_upper_limit is None else OCR_upper_limit)'], {}), '(OCR, np.inf if OCR_upper_limit is None else OCR_upper_limit)\n', (24889, 24950), True, 'import numpy as np\n'), ((25541, 25557), 'numpy.zeros_like', 'np.zeros_like', (['h'], {}), '(h)\n', (25554, 25557), True, 'import numpy as np\n'), ((27763, 27780), 'numpy.zeros_like', 'np.zeros_like', (['Vs'], {}), '(Vs)\n', (27776, 27780), True, 'import numpy as np\n'), ((32186, 32219), 'numpy.zeros', 'np.zeros', (['(n_strain_pts, n_layer)'], {}), '((n_strain_pts, n_layer))\n', (32194, 32219), True, 'import numpy as np\n'), ((32229, 32249), 'numpy.zeros_like', 'np.zeros_like', (['GGmax'], {}), '(GGmax)\n', (32242, 32249), True, 'import numpy as np\n'), ((35336, 35364), 'numpy.linspace', 'np.linspace', (['(0.67)', '(1.39)', '(200)'], {}), '(0.67, 1.39, 200)\n', (35347, 35364), True, 'import numpy as np\n'), ((37928, 37943), 'numpy.argmin', 'np.argmin', (['area'], {}), '(area)\n', (37937, 37943), True, 'import numpy as np\n'), ((39523, 39545), 'numpy.zeros_like', 'np.zeros_like', (['range_d'], {}), '(range_d)\n', (39536, 39545), True, 'import numpy as np\n'), ((8656, 8681), 'numpy.arange', 'np.arange', (['(1)', '(n_layer + 1)'], {}), '(1, n_layer + 1)\n', (8665, 8681), True, 'import numpy as np\n'), ((14473, 14502), 'numpy.geomspace', 'np.geomspace', (['(0.0001)', '(10)', '(400)'], {}), '(0.0001, 10, 400)\n', (14485, 14502), True, 'import numpy as np\n'), ((14842, 14858), 'numpy.ones', 'np.ones', (['n_layer'], {}), '(n_layer)\n', (14849, 14858), True, 'import numpy as np\n'), ((17311, 17336), 'numpy.array', 'np.array', (['[107, 174, 214]'], {}), '([107, 174, 214])\n', (17319, 17336), True, 'import numpy as np\n'), ((17362, 17387), 'numpy.array', 'np.array', (['[120, 198, 121]'], {}), '([120, 198, 121])\n', (17370, 17387), True, 'import numpy as np\n'), ((17411, 17434), 'numpy.array', 'np.array', (['[222, 45, 38]'], {}), '([222, 45, 38])\n', (17419, 17434), True, 'import numpy as np\n'), ((25566, 25578), 'numpy.mean', 'np.mean', (['rho'], {}), '(rho)\n', (25573, 25578), True, 'import numpy as np\n'), ((31247, 31276), 'numpy.geomspace', 'np.geomspace', (['(0.0001)', '(10)', '(400)'], {}), '(0.0001, 10, 400)\n', (31259, 31276), True, 'import numpy as np\n'), ((35455, 35467), 'numpy.min', 'np.min', (['area'], {}), '(area)\n', (35461, 35467), True, 'import numpy as np\n'), ((35661, 35689), 'numpy.linspace', 'np.linspace', (['(0.67)', '(1.39)', '(400)'], {}), '(0.67, 1.39, 400)\n', (35672, 35689), True, 'import numpy as np\n'), ((38062, 38083), 'numpy.abs', 'np.abs', (['(T_MKZ - T_FKZ)'], {}), '(T_MKZ - T_FKZ)\n', (38068, 38083), True, 'import numpy as np\n'), ((5079, 5136), 'os.path.join', 'os.path.join', (['HH_G_file_dir', "('HH_G_%s.txt' % profile_name)"], {}), "(HH_G_file_dir, 'HH_G_%s.txt' % profile_name)\n", (5091, 5136), False, 'import os\n'), ((9293, 9332), 'numpy.column_stack', 'np.column_stack', (['(curves_expanded, tmp)'], {}), '((curves_expanded, tmp))\n', (9308, 9332), True, 'import numpy as np\n'), ((9939, 9996), 'os.path.join', 'os.path.join', (['HH_G_file_dir', "('HH_G_%s.txt' % 
profile_name)"], {}), "(HH_G_file_dir, 'HH_G_%s.txt' % profile_name)\n", (9951, 9996), False, 'import os\n'), ((14762, 14792), 'numpy.tile', 'np.tile', (['strain_', '(n_layer, 1)'], {}), '(strain_, (n_layer, 1))\n', (14769, 14792), True, 'import numpy as np\n'), ((14880, 14896), 'numpy.ones', 'np.ones', (['n_layer'], {}), '(n_layer)\n', (14887, 14896), True, 'import numpy as np\n'), ((18595, 18625), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.2, 6.0)'}), '(figsize=(4.2, 6.0))\n', (18605, 18625), True, 'import matplotlib.pyplot as plt\n'), ((18639, 18655), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (18650, 18655), True, 'import matplotlib.pyplot as plt\n'), ((19302, 19326), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'ls': '""":"""', 'lw': '(0.5)'}), "(ls=':', lw=0.5)\n", (19310, 19326), True, 'import matplotlib.pyplot as plt\n'), ((19504, 19569), 'matplotlib.pyplot.plot', 'plt.plot', (['strain_j', '(T_HH / 1000.0)'], {'c': 'muted_red', 'lw': 'lw', 'label': '"""HH"""'}), "(strain_j, T_HH / 1000.0, c=muted_red, lw=lw, label='HH')\n", (19512, 19569), True, 'import matplotlib.pyplot as plt\n'), ((19654, 19680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Stress [kPa]"""'], {}), "('Stress [kPa]')\n", (19664, 19680), True, 'import matplotlib.pyplot as plt\n'), ((19750, 19778), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (19760, 19778), True, 'import matplotlib.pyplot as plt\n'), ((19791, 19998), 'matplotlib.pyplot.title', 'plt.title', (['("""$V_S$ = %.1f m/s, $G_{\\\\max}$ = %.3f MPa,\n$\\\\tau_{\\\\mathrm{ff}}$ = %.3f kPa, $\\\\gamma_{\\\\mathrm{ref}}$ = %.3f%%"""\n % (Vs[j], Gmax[j] / 1000000.0, Tmax[j] / 1000.0, gamma_ref[j] * 100))'], {}), '(\n """$V_S$ = %.1f m/s, $G_{\\\\max}$ = %.3f MPa,\n$\\\\tau_{\\\\mathrm{ff}}$ = %.3f kPa, $\\\\gamma_{\\\\mathrm{ref}}$ = %.3f%%"""\n % (Vs[j], Gmax[j] / 1000000.0, Tmax[j] / 1000.0, gamma_ref[j] * 100))\n', (19800, 19998), True, 'import matplotlib.pyplot as plt\n'), ((20054, 20070), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (20065, 20070), True, 'import matplotlib.pyplot as plt\n'), ((20335, 20359), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'ls': '""":"""', 'lw': '(0.5)'}), "(ls=':', lw=0.5)\n", (20343, 20359), True, 'import matplotlib.pyplot as plt\n'), ((20532, 20580), 'matplotlib.pyplot.plot', 'plt.plot', (['strain_j', 'GGmax_HH'], {'c': 'muted_red', 'lw': 'lw'}), '(strain_j, GGmax_HH, c=muted_red, lw=lw)\n', (20540, 20580), True, 'import matplotlib.pyplot as plt\n'), ((20666, 20693), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$G/G_{\\\\max}$"""'], {}), "('$G/G_{\\\\max}$')\n", (20676, 20693), True, 'import matplotlib.pyplot as plt\n'), ((20705, 20729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Strain [%]"""'], {}), "('Strain [%]')\n", (20715, 20729), True, 'import matplotlib.pyplot as plt\n'), ((20799, 20962), 'matplotlib.pyplot.title', 'plt.title', (['("""$\\\\mu$ = %.3f, a = %.1f, $\\\\gamma_{\\\\mathrm{t}}$ = %.4f%%\nd = %.4f, $p\'_{\\\\mathrm{m0}}$ = %.2f kPa"""\n % (mu[j], a, gamma_t * 100, d, p0[j]))'], {}), '(\n """$\\\\mu$ = %.3f, a = %.1f, $\\\\gamma_{\\\\mathrm{t}}$ = %.4f%%\nd = %.4f, $p\'_{\\\\mathrm{m0}}$ = %.2f kPa"""\n % (mu[j], a, gamma_t * 100, d, p0[j]))\n', (20808, 20962), True, 'import matplotlib.pyplot as plt\n'), ((23026, 23057), 'numpy.max', 'np.max', (['[sigma_v0[j], sigma_h0]'], {}), '([sigma_v0[j], sigma_h0])\n', (23032, 23057), True, 'import numpy as np\n'), ((23108, 
23139), 'numpy.min', 'np.min', (['[sigma_v0[j], sigma_h0]'], {}), '([sigma_v0[j], sigma_h0])\n', (23114, 23139), True, 'import numpy as np\n'), ((31818, 31827), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (31824, 31827), True, 'import numpy as np\n'), ((35847, 35859), 'numpy.min', 'np.min', (['area'], {}), '(area)\n', (35853, 35859), True, 'import numpy as np\n'), ((35990, 36019), 'numpy.linspace', 'np.linspace', (['(0.67)', '(1.39)', '(1000)'], {}), '(0.67, 1.39, 1000)\n', (36001, 36019), True, 'import numpy as np\n'), ((39692, 39733), 'numpy.geomspace', 'np.geomspace', (['gamma_t_LB', 'gamma_t_UB', '(200)'], {}), '(gamma_t_LB, gamma_t_UB, 200)\n', (39704, 39733), True, 'import numpy as np\n'), ((39795, 39816), 'numpy.abs', 'np.abs', (['(T_MKZ - T_FKZ)'], {}), '(T_MKZ - T_FKZ)\n', (39801, 39816), True, 'import numpy as np\n'), ((39974, 40017), 'numpy.abs', 'np.abs', (['(T_MKZ[:copt + 1] - T_FKZ[:copt + 1])'], {}), '(T_MKZ[:copt + 1] - T_FKZ[:copt + 1])\n', (39980, 40017), True, 'import numpy as np\n'), ((18703, 18791), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['strain_j', '(sigma[:, j] / 1000.0)'], {'c': 'muted_blue', 'lw': '(lw * 2.5)', 'label': '"""MKZ"""'}), "(strain_j, sigma[:, j] / 1000.0, c=muted_blue, lw=lw * 2.5,\n label='MKZ')\n", (18715, 18791), True, 'import matplotlib.pyplot as plt\n'), ((18851, 18936), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['strain_j', '(T_FKZ / 1000.0)'], {'c': 'muted_green', 'lw': '(lw * 1.75)', 'label': '"""FKZ"""'}), "(strain_j, T_FKZ / 1000.0, c=muted_green, lw=lw * 1.75, label='FKZ'\n )\n", (18863, 18936), True, 'import matplotlib.pyplot as plt\n'), ((18992, 19117), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['strain_j', '(sigma[:, j] / 1000.0)'], {'c': 'muted_blue', 'marker': '"""o"""', 'ls': '"""-"""', 'lw': '(lw * 2.5)', 'label': '"""Given $G/G_{\\\\max}$"""'}), "(strain_j, sigma[:, j] / 1000.0, c=muted_blue, marker='o', ls=\n '-', lw=lw * 2.5, label='Given $G/G_{\\\\max}$')\n", (19004, 19117), True, 'import matplotlib.pyplot as plt\n'), ((19183, 19268), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['strain_j', '(T_FKZ / 1000.0)'], {'c': 'muted_green', 'lw': '(lw * 1.75)', 'label': '"""FKZ"""'}), "(strain_j, T_FKZ / 1000.0, c=muted_green, lw=lw * 1.75, label='FKZ'\n )\n", (19195, 19268), True, 'import matplotlib.pyplot as plt\n'), ((19611, 19621), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (19619, 19621), True, 'import matplotlib.pyplot as plt\n'), ((19702, 19718), 'numpy.min', 'np.min', (['strain_j'], {}), '(strain_j)\n', (19708, 19718), True, 'import numpy as np\n'), ((19720, 19736), 'numpy.max', 'np.max', (['strain_j'], {}), '(strain_j)\n', (19726, 19736), True, 'import numpy as np\n'), ((20118, 20180), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['strain_j', 'GGmax[:, j]'], {'c': 'muted_blue', 'lw': '(lw * 2.5)'}), '(strain_j, GGmax[:, j], c=muted_blue, lw=lw * 2.5)\n', (20130, 20180), True, 'import matplotlib.pyplot as plt\n'), ((20213, 20299), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['strain_j', 'GGmax[:, j]'], {'c': 'muted_blue', 'ls': '"""-"""', 'marker': '"""o"""', 'lw': '(lw * 2.5)'}), "(strain_j, GGmax[:, j], c=muted_blue, ls='-', marker='o', lw=lw *\n 2.5)\n", (20225, 20299), True, 'import matplotlib.pyplot as plt\n'), ((20623, 20633), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (20631, 20633), True, 'import matplotlib.pyplot as plt\n'), ((20751, 20767), 'numpy.min', 'np.min', (['strain_j'], {}), '(strain_j)\n', (20757, 20767), True, 'import numpy as np\n'), ((20769, 20785), 
'numpy.max', 'np.max', (['strain_j'], {}), '(strain_j)\n', (20775, 20785), True, 'import numpy as np\n'), ((28834, 28849), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (28844, 28849), True, 'import numpy as np\n'), ((28869, 28884), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (28879, 28884), True, 'import numpy as np\n'), ((36238, 36250), 'numpy.min', 'np.min', (['area'], {}), '(area)\n', (36244, 36250), True, 'import numpy as np\n'), ((40036, 40058), 'numpy.linalg.norm', 'np.linalg.norm', (['diff_T'], {}), '(diff_T)\n', (40050, 40058), True, 'import numpy as np\n'), ((19349, 19365), 'numpy.min', 'np.min', (['strain_j'], {}), '(strain_j)\n', (19355, 19365), True, 'import numpy as np\n'), ((19367, 19383), 'numpy.max', 'np.max', (['strain_j'], {}), '(strain_j)\n', (19373, 19383), True, 'import numpy as np\n'), ((19407, 19435), 'numpy.array', 'np.array', (['[Tmax[j], Tmax[j]]'], {}), '([Tmax[j], Tmax[j]])\n', (19415, 19435), True, 'import numpy as np\n'), ((21226, 21297), 'os.path.join', 'os.path.join', (['fig_output_dir', "('Stress_GGmax_of_Layer_#%d.png' % (j + 1))"], {}), "(fig_output_dir, 'Stress_GGmax_of_Layer_#%d.png' % (j + 1))\n", (21238, 21297), False, 'import os\n'), ((23402, 23420), 'numpy.deg2rad', 'np.deg2rad', (['phi[j]'], {}), '(phi[j])\n', (23412, 23420), True, 'import numpy as np\n'), ((32766, 32777), 'numpy.log', 'np.log', (['frq'], {}), '(frq)\n', (32772, 32777), True, 'import numpy as np\n'), ((23330, 23348), 'numpy.deg2rad', 'np.deg2rad', (['phi[j]'], {}), '(phi[j])\n', (23340, 23348), True, 'import numpy as np\n'), ((20447, 20471), 'numpy.abs', 'np.abs', (['(strain_j / 100.0)'], {}), '(strain_j / 100.0)\n', (20453, 20471), True, 'import numpy as np\n'), ((27247, 27256), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (27253, 27256), True, 'import numpy as np\n'), ((32444, 32485), 'numpy.log', 'np.log', (['((gamma + gamma_r[i]) / gamma_r[i])'], {}), '((gamma + gamma_r[i]) / gamma_r[i])\n', (32450, 32485), True, 'import numpy as np\n')]
|
import time
import functools
import numpy as np
def time_compute(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
start_time = time.time()
tmp = fun(*args, **kwargs)
end_time = time.time()
print('{} cost {} s.'.format(fun.__name__, end_time-start_time))
return tmp
return wrapper
def time_avg_compute(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
times = []
tmp = None
for _ in range(20):
start_time = time.time()
tmp = fun(*args, **kwargs)
end_time = time.time()
times.append(end_time-start_time)
times = np.array(times)
times = times[5:]
print('{} avg cost {} s.'.format(fun.__name__, times.mean()))
return tmp
return wrapper
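# Hypothetical usage sketch (added for illustration; `matmul_demo` is not part
# of the original snippet):
#
#   @time_avg_compute
#   def matmul_demo(n=256):
#       a = np.random.rand(n, n)
#       return a @ a
#
#   matmul_demo()  # prints "matmul_demo avg cost ... s.", averaging the last
#                  # 15 of 20 runs (the first 5 timings are discarded)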
|
[
"numpy.array",
"time.time",
"functools.wraps"
] |
[((78, 98), 'functools.wraps', 'functools.wraps', (['fun'], {}), '(fun)\n', (93, 98), False, 'import functools\n'), ((378, 398), 'functools.wraps', 'functools.wraps', (['fun'], {}), '(fun)\n', (393, 398), False, 'import functools\n'), ((154, 165), 'time.time', 'time.time', ([], {}), '()\n', (163, 165), False, 'import time\n'), ((220, 231), 'time.time', 'time.time', ([], {}), '()\n', (229, 231), False, 'import time\n'), ((673, 688), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (681, 688), True, 'import numpy as np\n'), ((524, 535), 'time.time', 'time.time', ([], {}), '()\n', (533, 535), False, 'import time\n'), ((598, 609), 'time.time', 'time.time', ([], {}), '()\n', (607, 609), False, 'import time\n')]
|
import numpy as np
arr1 = np.ones(2, dtype=float)
print("1D Array with ones ")
print(arr1)
#[1. 1.]
|
[
"numpy.ones"
] |
[((27, 50), 'numpy.ones', 'np.ones', (['(2)'], {'dtype': 'float'}), '(2, dtype=float)\n', (34, 50), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import operator
import os
import sys
import tensorflow as tf
# Map a 256x256 array onto the 0~1 interval
def Transform(input):
if not (operator.eq(input.shape, (256, 256))):
print("not matched")
sys.exit(1)
max = np.max(input)
min = np.min(input)
for i in range(256):
for j in range(256):
input[i][j] = (input[i][j] - min) / (max - min)
return input
## img: a 256x256 array; i must be of type str!!!!
def Visualize(img, i):
plt.matshow(img, cmap=plt.get_cmap('RdBu'), alpha=0.5)
exist = os.path.exists('./Uncertainty')
if not exist:
os.makedirs('./Uncertainty')
plt.savefig('./Uncertainty/test_'+i+'.jpg')
plt.show()
# Test
# test_input = np.random.rand(128, 256)
test_input = np.ones((256,256),dtype=float)
Visualize(test_input, '1')
# Visualize(Transform(test_input), '1')
print(test_input)
|
[
"os.path.exists",
"matplotlib.pyplot.savefig",
"numpy.ones",
"sys.exit",
"os.makedirs",
"numpy.max",
"numpy.min",
"operator.eq",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.show"
] |
[((774, 806), 'numpy.ones', 'np.ones', (['(256, 256)'], {'dtype': 'float'}), '((256, 256), dtype=float)\n', (781, 806), True, 'import numpy as np\n'), ((268, 281), 'numpy.max', 'np.max', (['input'], {}), '(input)\n', (274, 281), True, 'import numpy as np\n'), ((292, 305), 'numpy.min', 'np.min', (['input'], {}), '(input)\n', (298, 305), True, 'import numpy as np\n'), ((563, 594), 'os.path.exists', 'os.path.exists', (['"""./Uncertainty"""'], {}), "('./Uncertainty')\n", (577, 594), False, 'import os\n'), ((654, 701), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./Uncertainty/test_' + i + '.jpg')"], {}), "('./Uncertainty/test_' + i + '.jpg')\n", (665, 701), True, 'import matplotlib.pyplot as plt\n'), ((702, 712), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (710, 712), True, 'import matplotlib.pyplot as plt\n'), ((169, 205), 'operator.eq', 'operator.eq', (['input.shape', '(256, 256)'], {}), '(input.shape, (256, 256))\n', (180, 205), False, 'import operator\n'), ((245, 256), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (253, 256), False, 'import sys\n'), ((621, 649), 'os.makedirs', 'os.makedirs', (['"""./Uncertainty"""'], {}), "('./Uncertainty')\n", (632, 649), False, 'import os\n'), ((518, 538), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""RdBu"""'], {}), "('RdBu')\n", (530, 538), True, 'import matplotlib.pyplot as plt\n')]
|
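The Transform helper in the row above normalises the array element by element with two nested loops; a vectorised NumPy equivalent (a sketch, not part of the original row, assuming the input is not constant so that max > min) is:

import numpy as np

def transform_vectorized(img):
    # Min-max normalise a 2-D array to [0, 1] in one vectorised expression.
    img = np.asarray(img, dtype=float)
    lo, hi = img.min(), img.max()
    return (img - lo) / (hi - lo)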
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from keras.preprocessing import image
import zipfile
import os
import pickle
from keras.models import model_from_json
from django.conf import settings
def training():
zip_ref = zipfile.ZipFile("check.zip", 'r')
zip_ref.extractall("check/chest_xray")
zip_ref.close()
train_datagen = ImageDataGenerator(rescale = 1./255,shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)
training_set = train_datagen.flow_from_directory('check/chest_xray/train',target_size = (64, 64),batch_size = 32,class_mode = 'binary')
train_datagen = ImageDataGenerator(rescale = 1./255,shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)
test_set = train_datagen.flow_from_directory('check/chest_xray/test',target_size = (64, 64),batch_size = 32,class_mode = 'binary')
DESIRED_ACCURACY = 0.95
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
            if logs.get('accuracy', logs.get('acc', 0)) > DESIRED_ACCURACY:
                print("\nReached {:.0%} accuracy so cancelling training!".format(DESIRED_ACCURACY))
self.model.stop_training = True
callbacks = myCallback()
cnn = tf.keras.models.Sequential()
# Convolution
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3]))
# Pooling
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
### Adding a second convolutional layer
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'))
##Pooling
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
# Flattening
cnn.add(tf.keras.layers.Flatten())
### Step 4 - Full Connection
cnn.add(tf.keras.layers.Dense(units=128, activation='relu'))
# Output layer
cnn.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
cnn.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Training the CNN on the Training set and evaluating it on the Test set
    cnn.fit(x = training_set, validation_data = test_set, epochs = 1, callbacks = [callbacks])
# serialize model to JSON
model_json = cnn.to_json()
with open("datasets/model_check.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
cnn.save_weights("datasets/model_check.h5")
print("Saved model to disk")
def predImageBlock(ob):
name = ob.file.name
fullpath = os.path.abspath(name)
test_image = image.load_img(fullpath, target_size = (64, 64 ))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
# later...
# load json and create model
json_file = open('datasets/model_check.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("datasets/model_check.h5")
result = loaded_model.predict(test_image)
print("yes"*20, result)
return result
if __name__=="__main__":
training()
# pred1()
|
[
"keras.preprocessing.image.img_to_array",
"tensorflow.keras.layers.Conv2D",
"zipfile.ZipFile",
"keras.preprocessing.image.ImageDataGenerator",
"keras.models.model_from_json",
"tensorflow.keras.layers.Dense",
"numpy.expand_dims",
"os.path.abspath",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.models.Sequential",
"keras.preprocessing.image.load_img"
] |
[((316, 349), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""check.zip"""', '"""r"""'], {}), "('check.zip', 'r')\n", (331, 349), False, 'import zipfile\n'), ((434, 530), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)'}), '(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True)\n', (452, 530), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((690, 786), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)'}), '(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True)\n', (708, 786), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1290, 1318), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (1316, 1318), True, 'import tensorflow as tf\n'), ((2608, 2629), 'os.path.abspath', 'os.path.abspath', (['name'], {}), '(name)\n', (2623, 2629), False, 'import os\n'), ((2648, 2694), 'keras.preprocessing.image.load_img', 'image.load_img', (['fullpath'], {'target_size': '(64, 64)'}), '(fullpath, target_size=(64, 64))\n', (2662, 2694), False, 'from keras.preprocessing import image\n'), ((2719, 2749), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (2737, 2749), False, 'from keras.preprocessing import image\n'), ((2767, 2801), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (2781, 2801), True, 'import numpy as np\n'), ((2997, 3031), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (3012, 3031), False, 'from keras.models import model_from_json\n'), ((1351, 1448), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'input_shape': '[64, 64, 3]'}), "(filters=32, kernel_size=3, activation='relu',\n input_shape=[64, 64, 3])\n", (1373, 1448), True, 'import tensorflow as tf\n'), ((1474, 1523), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (1499, 1523), True, 'import tensorflow as tf\n'), ((1583, 1651), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=3, activation='relu')\n", (1605, 1651), True, 'import tensorflow as tf\n'), ((1681, 1730), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (1706, 1730), True, 'import tensorflow as tf\n'), ((1763, 1788), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (1786, 1788), True, 'import tensorflow as tf\n'), ((1837, 1888), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(128)', 'activation': '"""relu"""'}), "(units=128, activation='relu')\n", (1858, 1888), True, 'import tensorflow as tf\n'), ((1923, 1975), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'activation': '"""sigmoid"""'}), "(units=1, activation='sigmoid')\n", (1944, 1975), True, 'import tensorflow as tf\n')]
|
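loaded_model.predict in the row above returns a single sigmoid probability per image; a minimal sketch of how that output might be thresholded (the 0.5 cut-off and the helper name are illustrative assumptions; the integer label follows the generator's class_mode='binary' encoding):

def interpret(result, threshold=0.5):
    # result is the array returned by loaded_model.predict(test_image); shape (1, 1)
    prob = float(result[0][0])
    return (1 if prob >= threshold else 0), prob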
from pyHalo.Rendering.SpatialDistributions.uniform import LensConeUniform
import numpy as np
from copy import deepcopy
from pyHalo.Rendering.MassFunctions.power_law import GeneralPowerLaw
from pyHalo.Rendering.rendering_class_base import RenderingClassBase
class TwoHaloContribution(RenderingClassBase):
"""
This class adds correlated structure associated with the host dark matter halo. The amount of structure added is
proportional to b * corr, where b is the halo bias as computed by Sheth and Tormen (1999) and corr is the
matter-matter correlation function. Currently, this term is implemented as a rescaling of the background density by
b * corr, where the product is the average value computed over 2*dz, where dz is the spacing of the redshift planes
    adjacent to the redshift plane of the main deflector.
"""
def __init__(self, keywords_master, halo_mass_function, geometry, lens_cosmo, lens_plane_redshifts, delta_z_list):
self._rendering_kwargs = self.keyword_parse_render(keywords_master)
self.halo_mass_function = halo_mass_function
self.geometry = geometry
self.lens_cosmo = lens_cosmo
self.spatial_distribution_model = LensConeUniform(keywords_master['cone_opening_angle'], geometry)
self._lens_plane_redshifts = lens_plane_redshifts
self._delta_z_list = delta_z_list
super(TwoHaloContribution, self).__init__()
def render(self):
"""
Generates halo masses and positions for correlated structure around the main deflector
:return: mass (in Msun), x (arcsec), y (arcsec), r3d (kpc), redshift
"""
idx = np.argmin(abs(np.array(self._lens_plane_redshifts) - self.lens_cosmo.z_lens))
delta_z = self._delta_z_list[idx]
m = self.render_masses_at_z(self.lens_cosmo.z_lens, delta_z)
x, y = self.render_positions_at_z(self.lens_cosmo.z_lens, len(m))
subhalo_flag = [False] * len(m)
redshifts = [self.lens_cosmo.z_lens] * len(m)
r3d = np.array([None] * len(m))
return m, x, y, r3d, redshifts, subhalo_flag
def render_masses_at_z(self, z, delta_z):
"""
:param z: redshift at which to render masses
:param delta_z: thickness of the redshift slice
:return: halo masses at the desired redshift in units Msun
"""
norm, slope = self._norm_slope(z, delta_z)
args = deepcopy(self._rendering_kwargs)
log_mlow, log_mhigh = self._redshift_dependent_mass_range(z, args['log_mlow'], args['log_mhigh'])
mfunc = GeneralPowerLaw(log_mlow, log_mhigh, slope, args['draw_poisson'],
norm, args['log_mc'], args['a_wdm'], args['b_wdm'],
args['c_wdm'])
m = mfunc.draw()
return m
def render_positions_at_z(self, z, nhalos):
"""
:param z: redshift
:param nhalos: number of halos or objects to generate
:return: the x, y coordinate of objects in arcsec, and a 3 dimensional coordinate in kpc
        The 3d coordinate only has a clear physical interpretation for subhalos, and is used to compute truncation radii.
For line of sight halos it is set to None.
"""
x_kpc, y_kpc = self.spatial_distribution_model.draw(nhalos, z)
if len(x_kpc) > 0:
kpc_per_asec = self.geometry.kpc_per_arcsec(z)
x_arcsec = x_kpc * kpc_per_asec ** -1
y_arcsec = y_kpc * kpc_per_asec ** -1
return x_arcsec, y_arcsec
else:
return np.array([]), np.array([])
def _norm_slope(self, z, delta_z):
"""
This method computes the normalization of the mass function for correlated structure around the main deflector.
The normalization is defined as (boost - 1) * background, where background is the mean normalization of the
halo mass function computed with (for example) Sheth-Tormen, and boost is the average contribution of the
two-halo term integrated over a comoving distance corresponding to 2 * dz, where dz is the redshift plane
spacing.
        boost(z, r_min, r_max) = 2 / r_max * int_{r_min}^{r_max} xi(r, z, M_host) dr
        where xi(r, z, M_host) is the linear halo bias times the matter-matter correlation function,
        r_min is set to 0.5 Mpc, and r_max is the comoving distance corresponding to 2*dz, where dz is the redshift
        spacing. M_host is the mass in M_sun of the host dark matter halo
:param z: the redshift which to evaluate the matter-matter correlation function and halo bias
        :param delta_z: the redshift spacing of the lens planes adjacent to the main deflector
:return: the normalization of the two-halo term mass function. The form of the two-halo term mass function is
assumed to have the same shape as the background halo mass function
"""
if z != self.lens_cosmo.z_lens:
raise Exception('this class must be evaluated at the main deflector redshift')
volume_element_comoving = self.geometry.volume_element_comoving(z, delta_z)
plaw_index = self.halo_mass_function.plaw_index_z(z) + self._rendering_kwargs['delta_power_law_index']
norm_per_unit_volume = self.halo_mass_function.norm_at_z_density(z, plaw_index,
self._rendering_kwargs['m_pivot'])
norm_per_unit_volume *= self._rendering_kwargs['LOS_normalization']
reference_norm = norm_per_unit_volume * volume_element_comoving
rmax = self.lens_cosmo.cosmo.D_C_transverse(z + delta_z) - self.lens_cosmo.cosmo.D_C_transverse(z)
rmin = min(rmax, 0.5)
two_halo_boost = self.halo_mass_function.two_halo_boost(self._rendering_kwargs['host_m200'], z, rmax=rmax,
rmin=rmin)
slope = self.halo_mass_function.plaw_index_z(z) + self._rendering_kwargs['delta_power_law_index']
norm = (two_halo_boost - 1) * reference_norm
return norm, slope
def convergence_sheet_correction(self, *args, **kwargs):
return {}, [], []
@staticmethod
def keyword_parse_render(keywords_master):
kwargs = {}
required_keys = ['log_mlow', 'log_mhigh', 'host_m200', 'LOS_normalization',
'draw_poisson', 'delta_power_law_index', 'm_pivot', 'log_mc', 'a_wdm', 'b_wdm', 'c_wdm']
for key in required_keys:
if key not in keywords_master:
raise Exception('Required keyword argument ' + str(key) + ' not specified.')
else:
kwargs[key] = keywords_master[key]
return kwargs
def keys_convergence_sheets(self):
return {}
|
[
"pyHalo.Rendering.MassFunctions.power_law.GeneralPowerLaw",
"numpy.array",
"pyHalo.Rendering.SpatialDistributions.uniform.LensConeUniform",
"copy.deepcopy"
] |
[((1205, 1269), 'pyHalo.Rendering.SpatialDistributions.uniform.LensConeUniform', 'LensConeUniform', (["keywords_master['cone_opening_angle']", 'geometry'], {}), "(keywords_master['cone_opening_angle'], geometry)\n", (1220, 1269), False, 'from pyHalo.Rendering.SpatialDistributions.uniform import LensConeUniform\n'), ((2425, 2457), 'copy.deepcopy', 'deepcopy', (['self._rendering_kwargs'], {}), '(self._rendering_kwargs)\n', (2433, 2457), False, 'from copy import deepcopy\n'), ((2580, 2716), 'pyHalo.Rendering.MassFunctions.power_law.GeneralPowerLaw', 'GeneralPowerLaw', (['log_mlow', 'log_mhigh', 'slope', "args['draw_poisson']", 'norm', "args['log_mc']", "args['a_wdm']", "args['b_wdm']", "args['c_wdm']"], {}), "(log_mlow, log_mhigh, slope, args['draw_poisson'], norm,\n args['log_mc'], args['a_wdm'], args['b_wdm'], args['c_wdm'])\n", (2595, 2716), False, 'from pyHalo.Rendering.MassFunctions.power_law import GeneralPowerLaw\n'), ((3583, 3595), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3591, 3595), True, 'import numpy as np\n'), ((3597, 3609), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3605, 3609), True, 'import numpy as np\n'), ((1671, 1707), 'numpy.array', 'np.array', (['self._lens_plane_redshifts'], {}), '(self._lens_plane_redshifts)\n', (1679, 1707), True, 'import numpy as np\n')]
|
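Restating the normalisation described in the _norm_slope docstring above as compact LaTeX (the same relation the code applies, written out for readability):

\mathrm{boost}(z, r_{\min}, r_{\max}) = \frac{2}{r_{\max}} \int_{r_{\min}}^{r_{\max}} \xi\!\left(r, z, M_{\mathrm{host}}\right)\, dr,
\qquad
\mathrm{norm} = \left(\mathrm{boost} - 1\right) \times \mathrm{norm}_{\mathrm{background}}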
import vtreat.util
import pandas
import numpy
def test_range():
# https://github.com/WinVector/pyvtreat/blob/master/Examples/Bugs/asarray_issue.md
# https://github.com/WinVector/pyvtreat/issues/7
numpy.random.seed(2019)
arr = numpy.random.randint(2, size=10)
sparr = pandas.arrays.SparseArray(arr, fill_value=0)
assert vtreat.util.has_range(arr)
assert vtreat.util.has_range(sparr)
|
[
"pandas.arrays.SparseArray",
"numpy.random.randint",
"numpy.random.seed"
] |
[((210, 233), 'numpy.random.seed', 'numpy.random.seed', (['(2019)'], {}), '(2019)\n', (227, 233), False, 'import numpy\n'), ((244, 276), 'numpy.random.randint', 'numpy.random.randint', (['(2)'], {'size': '(10)'}), '(2, size=10)\n', (264, 276), False, 'import numpy\n'), ((289, 333), 'pandas.arrays.SparseArray', 'pandas.arrays.SparseArray', (['arr'], {'fill_value': '(0)'}), '(arr, fill_value=0)\n', (314, 333), False, 'import pandas\n')]
|
import numpy as np
def directional_coupler_lc(wavelength_nm, n_eff_1, n_eff_2):
'''
Calculates the coherence length (100% power transfer) of a
directional coupler.
Args:
wavelength_nm (float): The wavelength in [nm] the
directional coupler should operate at.
n_eff_1 (float): n_eff of the fundamental (even)
supermode of the directional coupler.
n_eff_2 (float): n_eff of the first-order (odd)
supermode of the directional coupler.
Returns:
float: The length [um] the directional coupler
needs to be to achieve 100% power transfer.
'''
wavelength_m = wavelength_nm * 1.e-9
dn_eff = (n_eff_1 - n_eff_2).real
lc_m = wavelength_m / (2. * dn_eff)
lc_um = lc_m * 1.e6
return lc_um
def grating_coupler_period(wavelength,
n_eff,
n_clad,
incidence_angle_deg,
diffration_order=1):
'''
Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
            in the same units as the wavelength argument.
'''
k0 = 2. * np.pi / wavelength
beta = n_eff.real * k0
n_inc = n_clad
grating_period = (2.*np.pi*diffration_order) \
/ (beta - k0*n_inc*np.sin(np.radians(incidence_angle_deg)))
return grating_period
def loss(n, wavelength):
kappa = n.imag
alpha = 4.34 * 4 * np.pi * np.abs(
kappa) / wavelength # 4.34 = 10*np.log10(np.e) -> [dB/m] = 4.34 [/m]
return alpha # [db/um] if working in [um]
def qpm_wavenumber(pmp_n,
pmp_l,
sig_n,
sig_l,
idl_n,
idl_l,
period_qpm,
type='forward'):
pi2 = np.pi * 2
k_pmp = pmp_n * pi2 / pmp_l
k_sig = sig_n * pi2 / sig_l
k_idl = idl_n * pi2 / idl_l
k_qpm = pi2 / period_qpm
if type == 'forward':
sgn_1 = 1
sgn_2 = 1
elif type == 'forward_backward':
sgn_1 = 1
sgn_2 = -1
elif type == 'backward':
sgn_1 = -1
sgn_2 = -1
k_mismatch = k_idl * sgn_1 + k_sig * sgn_2 + k_qpm - k_pmp
return k_mismatch
def qpm_period(pmp_n, pmp_l, sig_n, sig_l, idl_n, idl_l, type='forward'):
pi2 = np.pi * 2
k_pmp = pmp_n * pi2 / pmp_l
k_sig = sig_n * pi2 / sig_l
k_idl = idl_n * pi2 / idl_l
if type == 'forward':
sgn_1 = 1
sgn_2 = 1
elif type == 'forward_backward':
sgn_1 = 1
sgn_2 = -1
elif type == 'backward':
sgn_1 = -1
sgn_2 = -1
k_qpm = k_pmp - k_idl * sgn_1 - k_sig * sgn_2
l_qpm = pi2 / k_qpm
return l_qpm
|
[
"numpy.radians",
"numpy.abs"
] |
[((2038, 2051), 'numpy.abs', 'np.abs', (['kappa'], {}), '(kappa)\n', (2044, 2051), True, 'import numpy as np\n'), ((1900, 1931), 'numpy.radians', 'np.radians', (['incidence_angle_deg'], {}), '(incidence_angle_deg)\n', (1910, 1931), True, 'import numpy as np\n')]
|
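A hypothetical usage of the coupler helpers defined in the row above (the effective-index and angle values are illustrative only, not taken from the original snippet):

# Coherence length for a 1550 nm coupler whose supermodes differ by dn_eff = 0.10:
# lc = 1550e-9 / (2 * 0.10) = 7.75e-6 m = 7.75 um
lc_um = directional_coupler_lc(1550.0, 2.45, 2.35)

# First-order grating period for a 1.55 um design with n_eff = 2.85, n_clad = 1.44 and 8 deg incidence,
# returned in the same units as the wavelength argument (here um):
period_um = grating_coupler_period(1.55, 2.85, 1.44, 8.0)

print(lc_um, period_um)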
import os
from typing import IO
from PySDDP.newave.script.templates.confhd import ConfhdTemplate
from matplotlib import pyplot as plt
import numpy as np
from random import randint
from mpl_toolkits.mplot3d import Axes3D
class Confhd(ConfhdTemplate):
def __init__(self):
super().__init__()
self.lista_entrada = list()
self._conteudo_ = None
self.dir_base = None
self._numero_registros_ = None
def ler(self, file_name: str, hidr, vazoes, dger, modif, exph) -> None:
"""
Implementa o método para leitura do arquivo HIDR.DAT que contem os dados cadastrais das usinas
hidrelétricas que podem ser utilizadas para a execucao do NEWAVE
:param file_name: string com o caminho completo para o arquivo,
hidr: classe contendo o cadastro de todas as usinas hidreletrica,
vazoes: classe contendo o historico de vazoes completo
"""
self.dir_base = os.path.split(file_name)[0]
self.nome_arquivo = os.path.split(file_name)[1]
self._copiavazoes = vazoes.vaz_nat
self._numero_registros_ = 0
self.nuhe = 0
nanos = dger.num_anos['valor']
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
continua = True
contador = 1
while continua:
self.next_line(f)
linha = self.linha
if contador >= 3:
if len(linha) > 5:
self._codigo["valor"].append(int(linha[1:5]))
else:
break
self._nome["valor"].append(linha[6:18])
self._posto["valor"].append(int(linha[19:23]))
self._jusante["valor"].append(int(linha[25:29]))
self._ree["valor"].append(int(linha[30:34]))
self._vol_ini["valor"].append(float(linha[35:41]))
self._status["valor"].append(linha[44:46])
self._modif["valor"].append(int(linha[49:53]))
self._ano_i["valor"].append(int(linha[58:62]))
self._ano_f["valor"].append(int(linha[67:71]))
                        # Fill in the registration (cadastral) data
uhe = hidr.get(self._codigo["valor"][-1])
self._bdh['valor'].append(uhe['bdh'])
self._sist['valor'].append(uhe['sist'])
self._empr['valor'].append(uhe['empr'])
self._desvio['valor'].append(uhe['desvio'])
self._vol_min['valor'].append(uhe['vol_min'])
self._vol_max['valor'].append(uhe['vol_max'])
self._vol_vert['valor'].append(uhe['vol_vert'])
self._vol_min_desv['valor'].append(uhe['vol_min_desv'])
self._cota_min['valor'].append(uhe['cota_min'])
self._cota_max['valor'].append(uhe['cota_max'])
self._pol_cota_vol['valor'].append(uhe['pol_cota_vol'])
self._pol_cota_area['valor'].append(uhe['pol_cota_area'])
self._coef_evap['valor'].append(uhe['coef_evap'])
self._num_conj_maq['valor'].append(uhe['num_conj_maq'])
self._maq_por_conj['valor'].append(uhe['maq_por_conj'])
self._pef_por_conj['valor'].append(uhe['pef_por_conj'])
self._cf_hbqt['valor'].append(uhe['cf_hbqt'])
self._cf_hbqt['valor_2'].append(uhe['cf_hbqt_2'])
self._cf_hbqt['valor_3'].append(uhe['cf_hbqt_3'])
self._cf_hbqt['valor_4'].append(uhe['cf_hbqt_4'])
self._cf_hbqt['valor_5'].append(uhe['cf_hbqt_5'])
self._cf_hbqg['valor'].append(uhe['cf_hbqg'])
self._cf_hbqg['valor_2'].append(uhe['cf_hbqg_2'])
self._cf_hbqg['valor_3'].append(uhe['cf_hbqg_3'])
self._cf_hbqg['valor_4'].append(uhe['cf_hbqg_4'])
self._cf_hbqg['valor_5'].append(uhe['cf_hbqg_5'])
self._cf_hbpt['valor'].append(uhe['cf_hbpt'])
self._cf_hbpt['valor_2'].append(uhe['cf_hbpt_2'])
self._cf_hbpt['valor_3'].append(uhe['cf_hbpt_3'])
self._cf_hbpt['valor_4'].append(uhe['cf_hbpt_4'])
self._cf_hbpt['valor_5'].append(uhe['cf_hbpt_5'])
self._alt_efet_conj['valor'].append(uhe['alt_efet_conj'])
self._vaz_efet_conj['valor'].append(uhe['vaz_efet_conj'])
self._prod_esp['valor'].append(uhe['prod_esp'])
self._perda_hid['valor'].append(uhe['perda_hid'])
self._num_pol_vnj['valor'].append(uhe['num_pol_vnj'])
self._pol_vaz_niv_jus['valor'].append(uhe['pol_vaz_niv_jus'])
self._pol_vaz_niv_jus['valor_2'].append(uhe['pol_vaz_niv_jus_2'])
self._pol_vaz_niv_jus['valor_3'].append(uhe['pol_vaz_niv_jus_3'])
self._pol_vaz_niv_jus['valor_4'].append(uhe['pol_vaz_niv_jus_4'])
self._pol_vaz_niv_jus['valor_5'].append(uhe['pol_vaz_niv_jus_5'])
self._cota_ref_nivel_jus['valor'].append(uhe['cota_ref_nivel_jus'])
self._cfmed['valor'].append(uhe['cfmed'])
self._inf_canal_fuga['valor'].append(uhe['inf_canal_fuga'])
self._fator_carga_max['valor'].append(uhe['fator_carga_max'])
self._fator_carga_min['valor'].append(uhe['fator_carga_min'])
self._vaz_min['valor'].append(uhe['vaz_min'])
self._unid_base['valor'].append(uhe['unid_base'])
self._tipo_turb['valor'].append(uhe['tipo_turb'])
self._repres_conj['valor'].append(uhe['repres_conj'])
self._teifh['valor'].append(uhe['teifh'])
self._ip['valor'].append(uhe['ip'])
self._tipo_perda['valor'].append(uhe['tipo_perda'])
self._data['valor'].append(uhe['data'])
self._observ['valor'].append(uhe['observ'])
self._vol_ref['valor'].append(uhe['vol_ref'])
self._tipo_reg['valor'].append(uhe['tipo_reg'])
                        # Include the natural inflows
vaz_nat = vazoes.vaz_nat.transpose()
vaz_nat = vaz_nat[self._posto["valor"][-1]-1]
vaz_nat = vaz_nat.transpose()
self._vazoes['valor'].append(vaz_nat)
                        # If the plant status is 'NE' or 'EE' it must have no machines
if self._status['valor'][-1] == 'NE' or self._status['valor'][-1] == 'EE':
for iconj in range(5):
self._maq_por_conj['valor'][-1][iconj] = 0
                        # Time-dependent parameters controlled by MODIF.DAT
self._vol_mint['valor'].append(self._vol_min['valor'][-1]*np.ones((nanos, 12), 'f'))
self._vol_maxt['valor'].append(self._vol_max['valor'][-1]*np.ones((nanos, 12), 'f'))
self._vol_minp['valor'].append(self._vol_min['valor'][-1]*np.ones((nanos, 12), 'f'))
self._vaz_mint['valor'].append(self._vaz_min['valor'][-1]*np.ones((nanos, 12), 'f'))
self._cfugat['valor'].append(self._cfmed['valor'][-1]*np.ones((nanos, 12), 'f'))
self._cmont['valor'].append(self._cota_max['valor'][-1]*np.ones((nanos, 12), 'f'))
                        #
                        # Compute the useful (live) storage volume
                        #
if self._tipo_reg['valor'][-1] == 'M':
self._vol_util['valor'].append(self._vol_max['valor'][-1] - self._vol_min['valor'][-1])
else:
self._vol_util['valor'].append(float(0))
self._vol_min['valor'][-1] = self._vol_max['valor'][-1]
                        # Apply the modifications from MODIF.DAT
usinadf = modif.bloco_usina['df'][modif.bloco_usina['df']['codigo'] == self._codigo['valor'][-1]]
self._acerta_modif(usinadf, dger)
                        # Compute parameters
                        #
                        # Recompute the useful storage volume
                        #
if self._tipo_reg['valor'][-1] == 'M':
self._vol_util['valor'][-1] = self._vol_max['valor'][-1] - self._vol_min['valor'][-1]
else:
self._vol_min['valor'][-1] = self._vol_max['valor'][-1]
self._calc_pot_efetiva()
self._calc_vaz_efetiva()
self._calc_produtibs(nanos)
self._calc_engol_maximo()
                        # Time-dependent parameters computed from EXPH.DAT
if self._status['valor'][-1] == 'EX':
self._status_vol_morto['valor'].append(2 * np.ones((nanos, 12), 'i'))
self._status_motoriz['valor'].append(2 * np.ones((nanos, 12), 'i'))
self._vol_morto_tempo['valor'].append(np.zeros((nanos, 12), 'f'))
self._engol_tempo['valor'].append(self._engolimento['valor'][-1] * np.ones((nanos, 12), 'f'))
self._potencia_tempo['valor'].append(self._pot_efet['valor'][-1] * np.ones((nanos, 12), 'f'))
self._unidades_tempo['valor'].append(sum(self._maq_por_conj['valor'][-1]) * np.ones((nanos, 12), 'f'))
else:
self._status_vol_morto['valor'].append(np.zeros((nanos, 12), 'i'))
self._status_motoriz['valor'].append(np.zeros((nanos, 12), 'i'))
self._vol_morto_tempo['valor'].append(np.zeros((nanos, 12), 'f'))
if self._status['valor'][-1] == 'EE':
self._engol_tempo['valor'].append(self._engolimento['valor'][-1] * np.ones((nanos, 12), 'f'))
self._potencia_tempo['valor'].append(self._pot_efet['valor'][-1] * np.ones((nanos, 12), 'f'))
self._unidades_tempo['valor'].append(sum(self._maq_por_conj['valor'][-1]) * np.ones((nanos, 12), 'f'))
else:
self._engol_tempo['valor'].append(np.zeros((nanos, 12), 'f'))
self._potencia_tempo['valor'].append(np.zeros((nanos, 12), 'f'))
self._unidades_tempo['valor'].append(np.zeros((nanos, 12), 'i'))
                        #
                        # Insert nanos x 12 matrices for each type of accumulated productibility
                        #
self._ro_acum_a_ree['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_b_ree['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_c_ree['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_a_sist['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_b_sist['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_c_sist['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_65['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_max['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_med['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_min['valor'].append(np.zeros((nanos, 12), 'd'))
                        # Apply the modifications from EXPH.DAT
usinadf = exph.bloco_usina['df'][exph.bloco_usina['df']['codigo'] == self._codigo['valor'][-1]]
self._acerta_exph(usinadf, dger)
self.nuhe += 1
self._numero_registros_ += 1
contador += 1
except Exception as err:
if isinstance(err, StopIteration):
maior = np.array(self._codigo['valor'], dtype=int)
maior = np.max(maior)
self._mapa = -np.ones(maior+1, dtype=int)
for i, codigo in enumerate(self._codigo['valor']):
self._mapa[codigo]=int(i)
                # Fix up the accumulated productibilities
self._prod_acum()
print("OK! Leitura do", os.path.split(file_name)[1], "realizada com sucesso.")
else:
raise
def escrever(self, file_out: str) -> None:
"""
Implementa o método para escrita do arquivo HIDR.DAT que contem os dados cadastrais das usinas
hidrelétricas que podem ser utilizadas para a execucao do NEWAVE
:param file_out: string com o caminho completo para o arquivo
"""
self.dir_base = os.path.split(file_out)[0]
self.nome_arquivo = os.path.split(file_out)[1]
self._numero_registros_ = 0
formato = "{codigo: >5} {nome: <12} {posto: >4} {jusante: >5} {ree: >4} {vol_ini: >6} {status: >4} {modif: >6} {ano_i: >8} {ano_f: >8}\n"
if not os.path.isdir(os.path.split(file_out)[0]):
os.mkdir(os.path.split(file_out)[0])
try:
with open(file_out, 'w', encoding='latin-1') as f: # type: IO[str]
                # Write the header
f.write(" NUM NOME POSTO JUS REE V.INIC U.EXIS MODIF INIC.HIST FIM HIST\n")
f.write(" XXXX XXXXXXXXXXXX XXXX XXXX XXXX XXX.XX XXXX XXXX XXXX XXXX \n")
for iusi in range(self.nuhe):
linha = dict(
codigo=self._codigo['valor'][iusi],
nome=self._nome['valor'][iusi],
posto=self._posto['valor'][iusi],
jusante=self._jusante['valor'][iusi],
ree=self._ree['valor'][iusi],
vol_ini=self._vol_ini['valor'][iusi],
status=self._status['valor'][iusi],
modif=self._modif['valor'][iusi],
ano_i=self._ano_i['valor'][iusi],
ano_f=self._ano_f['valor'][iusi]
)
f.write(formato.format(**linha))
self._numero_registros_ += 1
except Exception as err:
raise
print("OK! Escrita do", os.path.split(file_out)[1], "realizada com sucesso.")
def get(self, entrada):
"""
Busca uma usina hidreletrica do arquivo CONFHD e retorna um dicionario de dados contendo todas as
informacoes desta usina
:param entrada: string com o nome da usina ou inteiro com o numero de referencia da usina
"""
if (type(entrada) == float) or (type(entrada) == int):
#for i, valor in enumerate(self._codigo["valor"]):
# if valor == int(entrada):
# posicao = i
# break
if type(entrada) == float:
entrada = int(entrada)
posicao = int(self._mapa[entrada])
if posicao == -1:
return None
if type(entrada) == str:
posicao = None
for i, valor in enumerate(self._nome["valor"]):
if (valor.upper()).strip() == (entrada.upper()).strip():
posicao = i
break
if posicao is None:
return None
uhe = {
'codigo': self._codigo['valor'][posicao],
'nome': self._nome['valor'][posicao],
'posto': self._posto['valor'][posicao],
'ree': self._ree["valor"][posicao],
'vol_ini': self._vol_ini["valor"][posicao],
'status': self._status["valor"][posicao],
'modif': self._modif["valor"][posicao],
'ano_i': self._ano_i["valor"][posicao],
'ano_f': self._ano_f["valor"][posicao],
'bdh': self._bdh['valor'][posicao],
'sist': self._sist['valor'][posicao],
'empr': self._empr['valor'][posicao],
'jusante': self._jusante['valor'][posicao],
'desvio': self._desvio['valor'][posicao],
'vol_min': self._vol_min['valor'][posicao],
'vol_max': self._vol_max['valor'][posicao],
'vol_vert': self._vol_vert['valor'][posicao],
'vol_min_desv': self._vol_min_desv['valor'][posicao],
'cota_min': self._cota_min['valor'][posicao],
'cota_max': self._cota_max['valor'][posicao],
'pol_cota_vol': self._pol_cota_vol['valor'][posicao],
'pol_cota_area': self._pol_cota_area['valor'][posicao],
'coef_evap': self._coef_evap['valor'][posicao],
'num_conj_maq': self._num_conj_maq['valor'][posicao],
'maq_por_conj': self._maq_por_conj['valor'][posicao],
'pef_por_conj': self._pef_por_conj['valor'][posicao],
'cf_hbqt': self._cf_hbqt['valor'][posicao],
'cf_hbqt_2': self._cf_hbqt['valor_2'][posicao],
'cf_hbqt_3': self._cf_hbqt['valor_3'][posicao],
'cf_hbqt_4': self._cf_hbqt['valor_4'][posicao],
'cf_hbqt_5': self._cf_hbqt['valor_5'][posicao],
'cf_hbqg': self._cf_hbqg['valor'][posicao],
'cf_hbqg_2': self._cf_hbqg['valor_2'][posicao],
'cf_hbqg_3': self._cf_hbqg['valor_3'][posicao],
'cf_hbqg_4': self._cf_hbqg['valor_4'][posicao],
'cf_hbqg_5': self._cf_hbqg['valor_5'][posicao],
'cf_hbpt': self._cf_hbpt['valor'][posicao],
'cf_hbpt_2': self._cf_hbpt['valor_2'][posicao],
'cf_hbpt_3': self._cf_hbpt['valor_3'][posicao],
'cf_hbpt_4': self._cf_hbpt['valor_4'][posicao],
'cf_hbpt_5': self._cf_hbpt['valor_5'][posicao],
'alt_efet_conj': self._alt_efet_conj['valor'][posicao],
'vaz_efet_conj': self._vaz_efet_conj['valor'][posicao],
'prod_esp': self._prod_esp['valor'][posicao],
'perda_hid': self._perda_hid['valor'][posicao],
'num_pol_vnj': self._num_pol_vnj['valor'][posicao],
'pol_vaz_niv_jus': self._pol_vaz_niv_jus['valor'][posicao],
'pol_vaz_niv_jus_2': self._pol_vaz_niv_jus['valor_2'][posicao],
'pol_vaz_niv_jus_3': self._pol_vaz_niv_jus['valor_3'][posicao],
'pol_vaz_niv_jus_4': self._pol_vaz_niv_jus['valor_4'][posicao],
'pol_vaz_niv_jus_5': self._pol_vaz_niv_jus['valor_5'][posicao],
'cota_ref_nivel_jus': self._cota_ref_nivel_jus['valor'][posicao],
'cfmed': self._cfmed['valor'][posicao],
'inf_canal_fuga': self._inf_canal_fuga['valor'][posicao],
'fator_carga_max': self._fator_carga_max['valor'][posicao],
'fator_carga_min': self._fator_carga_min['valor'][posicao],
'vaz_min': self._vaz_min['valor'][posicao],
'unid_base': self._unid_base['valor'][posicao],
'tipo_turb': self._tipo_turb['valor'][posicao],
'repres_conj': self._repres_conj['valor'][posicao],
'teifh': self._teifh['valor'][posicao],
'ip': self._ip['valor'][posicao],
'tipo_perda': self._tipo_perda['valor'][posicao],
'data': self._data['valor'][posicao],
'observ': self._observ['valor'][posicao],
'vol_ref': self._vol_ref['valor'][posicao],
'tipo_reg': self._tipo_reg['valor'][posicao],
'vazoes': self._vazoes['valor'][posicao],
'vol_mint': self._vol_mint['valor'][posicao],
'vol_maxt': self._vol_maxt['valor'][posicao],
'vol_minp': self._vol_minp['valor'][posicao],
'vaz_mint': self._vaz_mint['valor'][posicao],
'cmont': self._cmont['valor'][posicao],
'cfugat': self._cfugat['valor'][posicao],
'vol_util': self._vol_util['valor'][posicao],
'pot_efet': self._pot_efet['valor'][posicao],
'vaz_efet': self._vaz_efet['valor'][posicao],
'status_vol_morto': self._status_vol_morto['valor'][posicao],
'status_motoriz': self._status_motoriz['valor'][posicao],
'vol_morto_tempo': self._vol_morto_tempo['valor'][posicao],
'engol_tempo': self._engol_tempo['valor'][posicao],
'potencia_tempo': self._potencia_tempo['valor'][posicao],
'unidades_tempo': self._unidades_tempo['valor'][posicao],
'ro_65': self._ro_65['valor'][posicao],
'ro_50': self._ro_50['valor'][posicao],
'ro_equiv': self._ro_equiv['valor'][posicao],
'ro_equiv65': self._ro_equiv65['valor'][posicao],
'ro_min': self._ro_min['valor'][posicao],
'ro_max': self._ro_max['valor'][posicao],
'engolimento': self._engolimento['valor'][posicao],
'ro_acum_a_ree': self._ro_acum_a_ree['valor'][posicao],
'ro_acum_b_ree': self._ro_acum_b_ree['valor'][posicao],
'ro_acum_c_ree': self._ro_acum_c_ree['valor'][posicao],
'ro_acum_a_sist': self._ro_acum_a_sist['valor'][posicao],
'ro_acum_b_sist': self._ro_acum_b_sist['valor'][posicao],
'ro_acum_c_sist': self._ro_acum_c_sist['valor'][posicao],
'ro_acum': self._ro_acum['valor'][posicao],
'ro_acum_65': self._ro_acum_65['valor'][posicao],
'ro_acum_max': self._ro_acum_max['valor'][posicao],
'ro_acum_med': self._ro_acum_med['valor'][posicao],
            'ro_acum_min': self._ro_acum_min['valor'][posicao]
}
return uhe
def put(self, uhe):
"""
Atualiza os dados da usina com do CONFHD de acordo com o dicionario de dados fornecido na entrada.
As chaves do dicionario de dados de entrada devem ser as mesmas do dicionario obtido atraves do
comando get.
:param uhe: dicionario de dados contendo informacoes da usina a ser atualizada.
"""
posicao = None
for i, valor in enumerate(self._codigo["valor"]):
if valor == uhe['codigo']:
posicao = i
break
if posicao is None:
return None
self._codigo['valor'][posicao] = uhe['codigo']
self._nome['valor'][posicao] = uhe['nome']
self._posto['valor'][posicao] = uhe['posto']
self._bdh['valor'][posicao] = uhe['bdh']
self._sist['valor'][posicao] = uhe['sist']
self._empr['valor'][posicao] = uhe['empr']
self._jusante['valor'][posicao] = uhe['jusante']
self._desvio['valor'][posicao] = uhe['desvio']
self._vol_min['valor'][posicao] = uhe['vol_min']
self._vol_max['valor'][posicao] = uhe['vol_max']
self._vol_vert['valor'][posicao] = uhe['vol_vert']
self._vol_min_desv['valor'][posicao] = uhe['vol_min_desv']
self._cota_min['valor'][posicao] = uhe['cota_min']
self._cota_max['valor'][posicao] = uhe['cota_max']
self._pol_cota_vol['valor'][posicao] = uhe['pol_cota_vol']
self._pol_cota_area['valor'][posicao] = uhe['pol_cota_area']
self._coef_evap['valor'][posicao] = uhe['coef_evap']
self._num_conj_maq['valor'][posicao] = uhe['num_conj_maq']
self._maq_por_conj['valor'][posicao] = uhe['maq_por_conj']
self._pef_por_conj['valor'][posicao] = uhe['pef_por_conj']
self._cf_hbqt['valor'][posicao] = uhe['cf_hbqt']
self._cf_hbqt['valor_2'][posicao] = uhe['cf_hbqt_2']
self._cf_hbqt['valor_3'][posicao] = uhe['cf_hbqt_3']
self._cf_hbqt['valor_4'][posicao] = uhe['cf_hbqt_4']
self._cf_hbqt['valor_5'][posicao] = uhe['cf_hbqt_5']
self._cf_hbqg['valor'][posicao] = uhe['cf_hbqg']
self._cf_hbqg['valor_2'][posicao] = uhe['cf_hbqg_2']
self._cf_hbqg['valor_3'][posicao] = uhe['cf_hbqg_3']
self._cf_hbqg['valor_4'][posicao] = uhe['cf_hbqg_4']
self._cf_hbqg['valor_5'][posicao] = uhe['cf_hbqg_5']
self._cf_hbpt['valor'][posicao] = uhe['cf_hbpt']
self._cf_hbpt['valor_2'][posicao] = uhe['cf_hbpt_2']
self._cf_hbpt['valor_3'][posicao] = uhe['cf_hbpt_3']
self._cf_hbpt['valor_4'][posicao] = uhe['cf_hbpt_4']
self._cf_hbpt['valor_5'][posicao] = uhe['cf_hbpt_5']
self._alt_efet_conj['valor'][posicao] = uhe['alt_efet_conj']
self._vaz_efet_conj['valor'][posicao] = uhe['vaz_efet_conj']
self._prod_esp['valor'][posicao] = uhe['prod_esp']
self._perda_hid['valor'][posicao] = uhe['perda_hid']
self._num_pol_vnj['valor'][posicao] = uhe['num_pol_vnj']
self._pol_vaz_niv_jus['valor'] = uhe['pol_vaz_niv_jus']
self._pol_vaz_niv_jus['valor_2'][posicao] = uhe['pol_vaz_niv_jus_2']
self._pol_vaz_niv_jus['valor_3'][posicao] = uhe['pol_vaz_niv_jus_3']
self._pol_vaz_niv_jus['valor_4'][posicao] = uhe['pol_vaz_niv_jus_4']
self._pol_vaz_niv_jus['valor_5'][posicao] = uhe['pol_vaz_niv_jus_5']
self._cota_ref_nivel_jus['valor'][posicao] = uhe['cota_ref_nivel_jus']
self._cfmed['valor'][posicao] = uhe['cfmed']
self._inf_canal_fuga['valor'][posicao] = uhe['inf_canal_fuga']
self._fator_carga_max['valor'][posicao] = uhe['fator_carga_max']
self._fator_carga_min['valor'][posicao] = uhe['fator_carga_min']
self._vaz_min['valor'][posicao] = uhe['vaz_min']
self._unid_base['valor'][posicao] = uhe['unid_base']
self._tipo_turb['valor'][posicao] = uhe['tipo_turb']
self._repres_conj['valor'][posicao] = uhe['repres_conj']
self._teifh['valor'][posicao] = uhe['teifh']
self._ip['valor'][posicao] = uhe['ip']
self._tipo_perda['valor'][posicao] = uhe['tipo_perda']
self._data['valor'][posicao] = uhe['data']
self._observ['valor'][posicao] = uhe['observ']
self._vol_ref['valor'][posicao] = uhe['vol_ref']
self._tipo_reg['valor'][posicao] = uhe['tipo_reg']
self._vazoes['valor'][posicao] = uhe['vazoes']
self._vol_mint['valor'][posicao] = uhe['vol_mint']
self._vol_maxt['valor'][posicao] = uhe['vol_maxt']
self._vol_minp['valor'][posicao] = uhe['vol_minp']
self._vaz_mint['valor'][posicao] = uhe['vaz_mint']
self._cfugat['valor'][posicao] = uhe['cfugat']
self._vol_util['valor'][posicao] = uhe['vol_util']
self._pot_efet['valor'][posicao] = uhe['pot_efet']
self._vaz_efet['valor'][posicao] = uhe['vaz_efet']
self._status_vol_morto['valor'][posicao] = uhe['status_vol_morto']
self._status_motoriz['valor'][posicao] = uhe['status_motoriz']
self._vol_morto_tempo['valor'][posicao] = uhe['vol_morto_tempo']
self._engol_tempo['valor'][posicao] = uhe['engol_tempo']
self._potencia_tempo['valor'][posicao] = uhe['potencia_tempo']
self._unidades_tempo['valor'][posicao] = uhe['unidades_tempo']
self._ro_65['valor'][posicao] = uhe['ro_65']
self._ro_50['valor'][posicao] = uhe['ro_50']
self._ro_equiv['valor'][posicao] = uhe['ro_equiv']
self._ro_equiv65['valor'][posicao] = uhe['ro_equiv65']
self._ro_min['valor'][posicao] = uhe['ro_min']
self._ro_max['valor'][posicao] = uhe['ro_max']
self._engolimento['valor'][posicao] = uhe['engolimento']
print(np.shape(self._copiavazoes))
for iano in range(np.shape(self._copiavazoes)[0]):
for imes in range(12):
self._copiavazoes[iano][imes][self._posto['valor'][posicao]-1] = self._vazoes['valor'][posicao][iano][imes]
return 'sucesso'
def help(self, parametro):
"""
        Describes the kind of information stored under a key of the data dictionary obtained with the get command.
        :param parametro: string containing the data-dictionary key whose description is desired
"""
duvida = getattr(self, '_'+parametro)
return duvida['descricao']
    # Compute the incremental inflow
def vaz_inc(self, uhe, iano, imes):
def Montante(uhe, iano, imes):
for iusi in self.lista_uhes():
usina = self.get(iusi)
if usina['jusante'] == uhe['codigo']:
if usina['status_vol_morto'][iano][imes] == 2:
yield iusi
else:
yield from Montante(usina, iano, imes)
        # Start the plant's incremental inflow from its natural inflow, then subtract the upstream natural inflows
incremental = uhe['vazoes'][:,imes]
if uhe['status_vol_morto'][iano][imes] != 2:
print ('Erro: Tentativa de calculo de Incremental para usina (', uhe['nome'], ') fora de operacao no mes ', imes, ' e ano ', iano)
return 0
else:
for iusina in Montante(uhe, iano, imes):
usina = self.get(iusina)
incremental = incremental - usina['vazoes'][:,imes]
            # If any incremental value is negative, force it to zero
codigos = np.where(incremental<0)
incremental[codigos] = 0
return incremental
def vaz_inc_entre_res(self, codigo, ianoconf, imesconf):
uhe = self.get(codigo)
nanos_hist = len(uhe['vazoes'])
def Montante(codigo, iano, imes):
#for iusi in self.lista_uhes():
# usina = self.get(iusi)
for iusi, jusante in enumerate(self._jusante['valor']):
if jusante == codigo:
if self._status_vol_morto['valor'][iusi][iano][imes] == 2:
if self._vol_util['valor'][iusi] > 0:
yield iusi
else:
yield from Montante(self._codigo['valor'][iusi], iano, imes)
else:
yield from Montante(self._codigo['valor'][iusi], iano, imes)
if uhe['status_vol_morto'][ianoconf][imesconf] != 2:
print ('Erro: Tentativa de calculo de Incremental para usina (', uhe['nome'], ') fora de operacao no mes ', imesconf, ' e ano ', ianoconf)
return 0
else:
incremental = np.zeros(nanos_hist)
for ianoh in range(nanos_hist):
incremental[ianoh] = uhe['vazoes'][ianoh][imesconf]
for iusina in Montante(codigo, ianoconf, imesconf):
for ianoh in range(nanos_hist):
incremental[ianoh] = incremental[ianoh] - self._vazoes['valor'][iusina][ianoh][imesconf]
            # If any incremental value is negative, force it to zero
codigos = np.where(incremental<0)
incremental[codigos] = 0
return incremental
##########################################################################################################
    # Compute the plant parameters
##########################################################################################################
    #def _calc_vol_util(self):    # Compute the plant's useful storage volume
# if self._tipo_reg['valor'][-1] == 'M':
# self._vol_util['valor'].append(self._vol_max['valor'][-1] - self._vol_min['valor'][-1])
# else:
# self._vol_util['valor'].append(float(0))
# self._vol_min['valor'][-1] = self._vol_max['valor'][-1]
    def _calc_pot_efetiva(self):           # Compute the plant's effective power
a = np.array(self._maq_por_conj["valor"][-1])
b = np.array(self._pef_por_conj["valor"][-1])
self._pot_efet['valor'].append(np.vdot(a, b))
    def _calc_vaz_efetiva(self):           # Compute the plant's effective outflow
a = np.array(self._maq_por_conj["valor"][-1])
b = np.array(self._vaz_efet_conj["valor"][-1])
self._vaz_efet['valor'].append(np.vdot(a, b))
    def _calc_produtibs(self, nanos):      # Compute the productibilities associated with several storage volumes
self._ro_65['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_50['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_equiv['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_equiv65['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_min['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_max['valor'].append(np.zeros( (nanos,12), 'd' ))
a = self._pol_cota_vol["valor"][-1][0]
b = self._pol_cota_vol["valor"][-1][1]
c = self._pol_cota_vol["valor"][-1][2]
d = self._pol_cota_vol["valor"][-1][3]
e = self._pol_cota_vol["valor"][-1][4]
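        # Fourth-degree head-volume polynomial used below: cota = a + b*V + c*V**2 + d*V**3 + e*V**4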
        # Productibility at 65% of the useful storage volume
volume = self._vol_min['valor'][-1] + 0.65*self._vol_util['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_65['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_65['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
        # Productibility at 50% of the useful storage volume
volume = self._vol_min['valor'][-1] + 0.50*self._vol_util['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_50['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_50['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
        # Productibility at maximum storage volume
volume = self._vol_max['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_max['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_max['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
        # Productibility at minimum storage volume
volume = self._vol_min['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_min['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_min['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
        # Equivalent productibility
if ( self._vol_util['valor'][-1] > 0):
cota = 0
cota65 = 0
Vol65 = self._vol_min['valor'][-1] + 0.65*self._vol_util['valor'][-1]
for i in range(5):
cota = cota + self._pol_cota_vol["valor"][-1][i] * (self._vol_max['valor'][-1]**(i+1)) / (i+1)
cota = cota - self._pol_cota_vol["valor"][-1][i] * (self._vol_min['valor'][-1]**(i+1)) / (i+1)
cota65 = cota65 + self._pol_cota_vol["valor"][-1][i] * (Vol65**(i+1)) / (i+1)
cota65 = cota65 - self._pol_cota_vol["valor"][-1][i] * (self._vol_min['valor'][-1]**(i+1)) / (i+1)
cota = cota / self._vol_util['valor'][-1]
cota65 = cota65 / (Vol65 - self._vol_min['valor'][-1])
else:
cota65 = cota
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_equiv['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
self._ro_equiv65['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota65 - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_equiv['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
self._ro_equiv65['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota65 - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
return
def _prod_acum(self):
def cascata(confhd, codigo, iano,imes):
current = confhd.get(codigo)
if current['status_vol_morto'][iano][imes] == 2:
yield current['codigo']
while current['jusante'] != 0:
current = confhd.get(current['jusante'])
if current['status_vol_morto'][iano][imes] == 2:
yield current['codigo']
#
        # Walk over every plant in confhd to fill in the accumulated productibilities
#
for reg, codigo in enumerate(self._codigo['valor']):
nanos = len(self._status_vol_morto['valor'][reg])
#
            # The productibilities must be computed for every month/year of the history
#
for iano in range(nanos):
for imes in range(12):
trocouRee = 0
trocouSist = 0
FioRee = True
FioSist = True
for iusina in cascata(self, codigo, iano, imes):
uhe = self.get(iusina)
produtib = uhe['ro_equiv'][iano][imes]
produtib65 = uhe['ro_equiv65'][iano][imes]
produtibMax = uhe['ro_max'][iano][imes]
produtibMed = uhe['ro_65'][iano][imes]
produtibMin = uhe['ro_min'][iano][imes]
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum['valor'][reg][iano][imes] += produtib
self._ro_acum_65['valor'][reg][iano][imes] += produtib65
self._ro_acum_max['valor'][reg][iano][imes] += produtibMax
self._ro_acum_med['valor'][reg][iano][imes] += produtibMed
self._ro_acum_min['valor'][reg][iano][imes] += produtibMin
if uhe['sist'] != self._sist['valor'][reg]:
trocouSist = trocouSist + 1
if uhe['ree'] != self._ree['valor'][reg]:
trocouRee = trocouRee + 1
if trocouRee == 0:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_a_ree['valor'][reg][iano][imes] += produtib
else:
if uhe['vol_util'] > 0:
FioRee = False
if FioRee:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_b_ree['valor'][reg][iano][imes] += produtib
else:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_c_ree['valor'][reg][iano][imes] += produtib
if trocouSist == 0:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_a_sist['valor'][reg][iano][imes] += produtib
else:
if uhe['vol_util'] > 0:
FioSist = False
if FioSist:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_b_sist['valor'][reg][iano][imes] += produtib
else:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_c_sist['valor'][reg][iano][imes] += produtib
def _prod_acum_entre_res_ree(self, uhe, iano, imes):
if uhe['jusante'] == 0:
return 0
uhe_nova = self.get(uhe['jusante'])
if uhe_nova['vol_util'] != 0:
return 0.
elif uhe_nova['ree'] != uhe['ree']:
return 0.
elif uhe_nova['status_motoriz'][iano][imes] == 2:
return uhe_nova['ro_equiv'] + self._prod_acum_entre_res_ree(uhe_nova, iano, imes)
else:
return self._prod_acum_entre_res_ree(uhe_nova, iano, imes)
#
# def ProdAcumEntreResSist(self, iano, imes, usinas):
# if self.Jusante == 0:
# return 0
# for iusina in usinas:
# if iusina.Codigo == self.Jusante:
# if iusina.VolUtil != 0:
# return 0.
# elif self.Sist != iusina.Sist:
# return 0.
# elif iusina.StatusMotoriz[iano][imes] == 2:
# return iusina.RoEquiv + iusina.ProdAcumEntreResSist(iano, imes, usinas)
# else:
# return iusina.ProdAcumEntreResSist(iano, imes, usinas)
# break
def _calc_engol(self, ql):
engol = 0.
        for i in range(5):   # Sweep over the machine groups
if self._maq_por_conj['valor'][-1][i] > 0:
if ql < self._alt_efet_conj['valor'][-1][i]:
if self._tipo_turb == 1 or self._tipo_turb == 3:
alpha = 0.5
else:
alpha = 0.2
else:
alpha = -1
if self._alt_efet_conj['valor'][-1][i] != 0:
engol = engol + self._maq_por_conj['valor'][-1][i]*self._vaz_efet_conj['valor'][-1][i]*((ql/self._alt_efet_conj['valor'][-1][i])**alpha)
return engol
    def _calc_engol_maximo(self):   # Estimate the plant's maximum swallowing (turbine) capacity
a = self._pol_cota_vol['valor'][-1][0]
b = self._pol_cota_vol['valor'][-1][1]
c = self._pol_cota_vol['valor'][-1][2]
d = self._pol_cota_vol['valor'][-1][3]
e = self._pol_cota_vol['valor'][-1][4]
        # Swallowing capacity at 65% of the useful storage volume
volume = self._vol_min['valor'][-1] + 0.65*self._vol_util['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
queda65 = cota - self._cfmed['valor'][-1]
engol65 = self._calc_engol(queda65)
        # Swallowing capacity at 50% of the useful storage volume
volume = self._vol_min['valor'][-1] + 0.50*self._vol_util['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
queda50 = cota - self._cfmed['valor'][-1]
engol50 = self._calc_engol(queda50)
        # Swallowing capacity at maximum storage volume
volume = self._vol_max['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
quedaMax = cota - self._cfmed['valor'][-1]
engolMax = self._calc_engol(quedaMax)
        # Swallowing capacity at minimum storage volume
volume = self._vol_min['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
quedaMin = cota - self._cfmed['valor'][-1]
engolMin = self._calc_engol(quedaMin)
        # Swallowing capacity at the equivalent head
if ( self._vol_util['valor'][-1] > 0):
cota = 0
for i in range(5):
cota = cota + self._pol_cota_vol['valor'][-1][i] * (self._vol_max['valor'][-1]**(i+1)) / (i+1)
cota = cota - self._pol_cota_vol['valor'][-1][i] * (self._vol_min['valor'][-1]**(i+1)) / (i+1)
cota = cota / self._vol_util['valor'][-1]
quedaEquiv = cota - self._cfmed['valor'][-1]
engolEquiv = self._calc_engol(quedaEquiv)
self._engolimento['valor'].append((engol50+engol65+engolEquiv+engolMax+engolMin)/5)
return
def lista_uhes(self):
"""
        Builds a generator containing the reference codes of all plants present in CONFHD.
"""
for i in range(self.nuhe):
yield self._codigo["valor"][i]
def _acerta_modif(self, df, dger):
tamanho = df.shape
tamanho = tamanho[0]
for linha in range(tamanho):
registro = df.iloc[linha].values
#
            # Type-zero keywords - just update values
#
if registro[4].upper() == 'NUMCNJ':
self._num_conj_maq['valor'][-1] = registro[5]
if registro[4].upper() == 'PRODESP':
self._prod_esp['valor'][-1] = registro[5]
if registro[4].upper() == 'TEIF':
self._teifh['valor'][-1] = registro[5]
if registro[4].upper() == 'IP':
self._ip['valor'][-1] = registro[5]
if registro[4].upper() == 'PERDHID':
self._perda_hid['valor'][-1] = registro[5]
if registro[4].upper() == 'VAZMIN':
self._vaz_min['valor'][-1] = registro[5]
if registro[4].upper() == 'NUMBAS':
self._unid_base['valor'][-1] = registro[5]
#
            # Type-one keywords - two fields
#
if registro[4].upper() == 'NUMMAQ':
nr_conj = int(registro[6])
self._maq_por_conj['valor'][-1][nr_conj-1] = int(registro[5])
if registro[4].upper() == 'POTEFE':
nr_conj = int(registro[6])
self._pef_por_conj['valor'][-1][nr_conj-1] = registro[5]
if registro[4].upper() == 'COEFEVAP':
mes = int(registro[6])
self._coef_evap['valor'][-1][mes-1] = registro[5]
if registro[4].upper() == 'VOLMIN':
if registro[6].find("%") == 1:
self._vol_min['valor'][-1] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
if registro[6].find("h") == 1:
self._vol_min['valor'][-1] = registro[5]
if registro[4].upper() == 'VOLMAX':
if registro[6].find("%") == 1:
self._vol_max['valor'][-1] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
if registro[6].find("h") == 1:
self._vol_max['valor'][-1] = registro[5]
#
            # Type-two keywords - PCA and PCV coefficients
#
if registro[4].upper() == 'VOLCOTA':
self._pol_cota_vol['valor'][-1] = registro[5]
if registro[4].upper() == 'COTAREA':
self._pol_cota_area['valor'][-1] = registro[5]
#
            # Type-3 keywords - date and value
#
if registro[4].upper() == 'CFUGA':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
self._cfugat['valor'][-1][ano][mes] = registro[5]
mes += 1
mes = 0
ano += 1
if registro[4].upper() == 'VAZMINT':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
self._vaz_mint['valor'][-1][ano][mes] = registro[5]
mes += 1
mes = 0
ano += 1
if registro[4].upper() == 'CMONT':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
self._cmont['valor'][-1][ano][mes] = registro[5]
mes += 1
mes = 0
ano += 1
#
            # Type-4 keywords - date, value and ('h' or '%')
#
if registro[4].upper() == 'VMINP':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
if registro[6].find("h") == 1:
self._vol_minp['valor'][-1][ano][mes] = registro[5]
if registro[6].find("%") == 1:
self._vol_minp['valor'][-1][ano][mes] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
mes += 1
mes = 0
ano += 1
if registro[4].upper() == 'VMINT':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
if registro[6].find("h") == 1:
self._vol_mint['valor'][-1][ano][mes] = registro[5]
if registro[6].find("%") == 1:
self._vol_mint['valor'][-1][ano][mes] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
mes += 1
mes = 0
ano += 1
if registro[4].upper() == 'VMAXT':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
if registro[6].find("h") == 1:
self._vol_maxt['valor'][-1][ano][mes] = registro[5]
if registro[6].find("%") == 1:
self._vol_maxt['valor'][-1][ano][mes] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
mes += 1
mes = 0
ano += 1
return
def _acerta_exph(self, df, dger):
tamanho = df.shape
tamanho = tamanho[0]
#
        # Record layout
#
# registro[0] = 'codigo',
# registro[1] = 'nome',
# registro[2] = 'mesi_evm',
# registro[3] = 'anoi_evm',
# registro[4] = 'dura_evm',
# registro[5] = 'perc_evm',
# registro[6] = 'mesi_tur',
# registro[7] = 'anoi_tur',
# registro[8] = 'comentar',
# registro[9] = 'nume_tur',
# registro[10] = 'nume_cnj']
if tamanho > 0:
registro = df.iloc[0].values
#
            # Handle the dead-storage (volume morto) filling
#
if not np.isnan(registro[2]):
dur_vm = int(registro[4])
mesinicial = int(registro[2])
anoinicial = int(registro[3])
volume = self._vol_min['valor'][-1] * float(registro[5]) / 100
volume = (self._vol_min['valor'][-1] - volume) / dur_vm
vol_frac = volume
for iano in range(anoinicial - dger.ano_ini['valor'], dger.num_anos['valor']):
for imes in range(mesinicial - 1, 12):
if dur_vm > 0:
self._status_vol_morto['valor'][-1][iano][imes] = 1
self._vol_morto_tempo['valor'][-1][iano][imes] += volume
volume += vol_frac
dur_vm -= 1
else:
self._status_vol_morto['valor'][-1][iano][imes] = 2
self._vol_morto_tempo['valor'][-1][iano][imes] = 0.
mesinicial = 1
else:
self._status_vol_morto['valor'][-1] = 2 * np.ones((dger.num_anos['valor'], 12), 'i')
for linha in range(tamanho):
registro = df.iloc[linha].values
if not np.isnan(registro[6]):
#
                # Fill in the time evolution of (1) number of units; (2) maximum turbined flow (engolimento); (3) power
#
mes_ent = int(registro[6])
ano_ent = int(registro[7])
pot_ent = float(registro[8])
unidade = int(registro[9])
conjunto = int(registro[10])
if mes_ent > 0:
mesinicial = mes_ent
self._maq_por_conj['valor'][-1][conjunto - 1] = unidade
self._pef_por_conj['valor'][-1][conjunto - 1] = pot_ent
self._calc_pot_efetiva()
self._calc_engol_maximo()
for iano in range(ano_ent - dger.ano_ini['valor'], dger.num_anos['valor']):
for imes in range(mesinicial - 1, 12):
self._unidades_tempo['valor'][-1][iano][imes] += 1
self._engol_tempo['valor'][-1][iano][imes] = self._engolimento['valor'][-1]
self._potencia_tempo['valor'][-1][iano][imes] = self._pot_efet['valor'][-1]
mesinicial = 1
#
                # Update the motorization status
#
for iano in range(dger.num_anos['valor']):
for imes in range(12):
if self._unidades_tempo['valor'][-1][iano][imes] >= self._unid_base['valor'][-1]:
self._status_motoriz['valor'][-1][iano][imes] = 2
elif self._unidades_tempo['valor'][-1][iano][imes] > 0:
self._status_motoriz['valor'][-1][iano][imes] = 1
else:
if self._status_motoriz['valor'][-1][iano][imes] == 2:
self._status_motoriz['valor'][-1][iano][imes] = 1
else:
self._status_motoriz['valor'][-1][iano][imes] = 0
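    # Descriptive note (inferred from the method above, not part of the original docs):
    # _status_vol_morto uses 0 = dead-storage filling not yet started, 1 = filling in
    # progress, 2 = filled (existing plants are set straight to 2); _status_motoriz uses
    # 0 = not motorized, 1 = submotorized, 2 = fully motorized. plota_expansao() below
    # counts months per state relying on exactly this encoding.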
##########################################################################################################
    # Plot assorted charts
##########################################################################################################
def plota_volume(self, uhe):
nanos = len(uhe['vol_mint'])
fig = plt.figure()
ax = plt.subplot(111)
x_axis = np.arange(1,nanos*12+1)
ax.plot(x_axis,uhe['vol_mint'].reshape(nanos*12),'g-.',lw=2, label = 'Vol.Min.Operat.')
ax.plot(x_axis,uhe['vol_maxt'].reshape(nanos*12),'g-.',lw=2, label = 'Vol.Max.Operat.')
        ax.plot(x_axis,uhe['vol_max']*np.ones(nanos*12),'b-',lw=3, label = 'Vol.Maximo Real')
        ax.plot(x_axis,uhe['vol_min']*np.ones(nanos*12),'b-',lw=3, label = 'Vol.Minimo Real')
ax.plot(x_axis,uhe['vol_minp'].reshape(nanos*12),'b-.',lw=2, label = 'Vol.Min.com Pen.')
plt.fill_between(x_axis,uhe['vol_mint'].reshape(nanos*12), uhe['vol_maxt'].reshape(nanos*12), facecolor='g', alpha=0.1)
titulo = 'Evolucao dos Volumes da Usina \n' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes de Estudo', fontsize=16)
plt.ylabel('Volume em hm^3', fontsize=16)
box = ax.get_position()
ax.set_position([ box.x0, box.y0, box.width*0.7, box.height] )
ax.legend(loc='center left', shadow=True, fontsize=12, bbox_to_anchor=(1, 0.5))
plt.show()
def plota_vaz_min(self, uhe):
nanos = len(uhe['vaz_mint'])
fig = plt.figure()
ax = plt.subplot(111)
x_axis = np.arange(1,nanos*12+1)
ax.plot(x_axis,uhe['vaz_mint'].reshape(nanos*12),'g-.',lw=2, label='Vaz.Min.Operat.')
ax.plot(x_axis,uhe['vaz_min']*np.ones(nanos*12),'b-',lw=3, label='Vaz.Min.Cadastro')
titulo = 'Evolucao da Vazao Minima da Usina \n' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes de Estudo', fontsize=16)
plt.ylabel('Vazao Minima em m^3', fontsize=16)
box = ax.get_position()
ax.set_position([ box.x0, box.y0, box.width*0.7, box.height] )
ax.legend(loc='center left', shadow=True, fontsize=12, bbox_to_anchor=(1, 0.5))
plt.show()
def plota_volmorto(self, uhe):
if uhe['status'] == 'EX':
print('Grafico de Volume Morto nao impresso, pois ', uhe['nome'], 'e uma usina existente')
return
nanos = len(uhe['vol_morto_tempo'])
nmeses = np.count_nonzero(uhe['vol_morto_tempo'])
legenda = str(nmeses) + ' Meses'
ax = plt.subplot(111)
x_axis = np.arange(1,nanos*12+1)
p1 = ax.plot(x_axis,uhe['vol_morto_tempo'].reshape(nanos*12),'g-.',lw=2, label = legenda )
titulo = 'Enchimento do Volume Morto da Usina \n' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes de Estudo', fontsize=16)
plt.ylabel('Volume Morto em hm^3', fontsize=16)
plt.legend(fontsize=12)
np.count_nonzero(uhe['vol_morto_tempo'])
plt.show()
def plota_potencia(self, uhe):
nanos = len(uhe['potencia_tempo'])
ax = plt.subplot(111)
x_axis = np.arange(1, nanos * 12 + 1)
p1 = ax.plot(x_axis, uhe['potencia_tempo'].reshape(nanos * 12), 'g-.', lw=2)
titulo = 'Evolucao da Potencia Efetiva da Usina \n' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes de Estudo', fontsize=16)
plt.ylabel('Potencia Efetiva em MW', fontsize=16)
plt.show()
def plot_vaz(self, uhe):
"""
        Plot all annual historical inflow series of the plant whose data dictionary is given as input.
        Cyan: the individual annual series.
        Dark blue: the last annual series.
        Solid red: the monthly mean.
        Dotted red: the mean minus/plus one standard deviation.
        :param uhe: data dictionary with information about a hydroelectric plant
"""
vaz_nat = uhe['vazoes']
x_axis = np.arange(1, 13)
plt.plot(x_axis, vaz_nat.transpose(), 'c-')
media = np.mean(vaz_nat, axis=0)
plt.plot(x_axis, media, 'r-', lw=3)
desvio = np.nanstd(vaz_nat, axis=0)
plt.plot(x_axis, media + desvio, 'r-.', lw=2)
plt.plot(x_axis, media - desvio, 'r-.', lw=2)
ultimo = len(vaz_nat) - 1
plt.plot(x_axis, vaz_nat[:][ultimo], 'b-')
titulo = 'Historico de Vazoes da Usina ' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes do Ano', fontsize=16)
plt.ylabel('Vazao', fontsize=16)
plt.show()
return
    # Plot the stage-storage (cota-volume) polynomial
def plot_pcv(self, uhe):
"""
        Plot the stage-storage (cota-volume) polynomial of the given hydroelectric plant
        :param uhe: data dictionary with information about the hydroelectric plant
"""
if uhe["vol_min"] == 0:
return
a = uhe['pol_cota_vol'][0]
b = uhe['pol_cota_vol'][1]
c = uhe['pol_cota_vol'][2]
d = uhe['pol_cota_vol'][3]
e = uhe['pol_cota_vol'][4]
if (uhe["vol_min"] == uhe["vol_max"]):
volumes = np.linspace(uhe["vol_min"] - 1,uhe["vol_max"] + 1, 100)
cota = a + b*uhe["vol_min"] + c*uhe["vol_min"]**2 + d*uhe["vol_min"]**3 + e*uhe["vol_min"]**4
cota = cota*np.ones(100)
else:
volumes = np.linspace(uhe["vol_min"],uhe["vol_max"],100)
cota = a + b*volumes + c*volumes**2 + d*volumes**3 + e*volumes**4
cota.shape = volumes.shape
plt.plot(volumes, cota, 'b-', lw=3)
plt.xlabel('Volume do Reservatorio (hm^3)', fontsize=16)
titulo = 'Polinomio Cota-Volume da Usina ' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.ylabel('Cota em Metros', fontsize=16)
plt.xlim(volumes[0], volumes[99])
if ( cota[0] == cota[99]):
plt.ylim(cota[0]-1, cota[99]+1)
else:
plt.ylim(cota[0], cota[99])
plt.show()
    # Plot the stage-area (cota-area) polynomial
def plot_pca(self, uhe):
"""
        Plot the stage-area (cota-area) polynomial of the given hydroelectric plant
        :param uhe: data dictionary with information about the hydroelectric plant
"""
if uhe['vol_min'] == 0:
return
if (uhe['cota_min'] == uhe['cota_max']):
cotas = np.linspace(uhe['cota_min'] - 1,uhe['cota_max'] + 1, 100)
else:
cotas = np.linspace(uhe['cota_min'],uhe['cota_max'],100)
a = uhe['pol_cota_area'][0]
b = uhe['pol_cota_area'][1]
c = uhe['pol_cota_area'][2]
d = uhe['pol_cota_area'][3]
e = uhe['pol_cota_area'][4]
areas = a + b*cotas + c*cotas**2 + d*cotas**3 + e*cotas**4
areas.shape = cotas.shape
plt.plot(cotas, areas, 'b-', lw=3)
plt.xlabel('Cota do Reservatorio (em metros)', fontsize=16)
titulo = 'Polinomio Cota-Area da Usina ' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.ylabel('Area Superficia em km^2', fontsize=16)
plt.xlim(cotas[0], cotas[99])
if ( areas[0] == areas[99]):
plt.ylim(areas[0]-1, areas[99]+1)
else:
plt.ylim(areas[0], areas[99])
plt.show()
    # Plot the plant's constant productivities
def plota_produtibs(self, uhe, iano, imes):
"""
        Plot the constant productivities of the given hydroelectric plant for one study year (iano) and month (imes)
        :param uhe: data dictionary with information about the hydroelectric plant
"""
x_axis = np.arange(1,7)
y_axis = [ uhe['ro_equiv'][iano][imes], uhe['ro_equiv65'][iano][imes], uhe['ro_min'][iano][imes],
uhe['ro_50'][iano][imes], uhe['ro_65'][iano][imes], uhe['ro_max'][iano][imes] ]
fig, ax = plt.subplots()
a, b, c, d, e, f = plt.bar(x_axis, y_axis)
a.set_facecolor('r')
b.set_facecolor('g')
c.set_facecolor('b')
d.set_facecolor('y')
e.set_facecolor('m')
f.set_facecolor('c')
ax.set_xticks(x_axis)
ax.set_xticklabels(['Equiv', 'Equiv65', 'Min', '50%', '65%', 'Max'])
titulo = 'Produtibilidades da Usina ' + uhe['nome'] + ' - Ano: ' + str(iano+1) + ' - Mês:' + str(imes+1)
plt.title(titulo, fontsize=16)
plt.xlabel('Tipo de Produtibilidade', fontsize=16)
plt.ylabel('Produtibilidade', fontsize=16)
plt.show()
    # Plot the productivity variation
def plot_var_prod(self, uhe):
"""
        Plot the productivity variation of the given hydroelectric plant
        :param uhe: data dictionary with information about the hydroelectric plant
"""
if uhe['vol_min'] == 0:
return
a = uhe['pol_cota_vol'][0]
b = uhe['pol_cota_vol'][1]
c = uhe['pol_cota_vol'][2]
d = uhe['pol_cota_vol'][3]
e = uhe['pol_cota_vol'][4]
if (uhe["vol_min"] == uhe["vol_max"]):
volumes = np.linspace(uhe["vol_min"] - 1,uhe["vol_max"] + 1, 100)
cotamont = a + b*uhe["vol_min"] + c*uhe["vol_min"]**2 + d*uhe["vol_min"]**3 + e*uhe["vol_min"]**4
cotamont = cotamont*np.ones(100)
else:
volumes = np.linspace(uhe["vol_min"],uhe["vol_max"],100)
cotamont = a + b*volumes + c*volumes**2 + d*volumes**3 + e*volumes**4
cotamont.shape = volumes.shape
qdef = np.linspace(uhe['vaz_min'], 2*uhe['engolimento'], 100)
a = uhe['pol_vaz_niv_jus'][0]
b = uhe['pol_vaz_niv_jus'][1]
c = uhe['pol_vaz_niv_jus'][2]
d = uhe['pol_vaz_niv_jus'][3]
e = uhe['pol_vaz_niv_jus'][4]
cotajus = a + b*qdef + c*qdef**2 + d*qdef**3 + e*qdef**4
cotajus.shape = qdef.shape
xGrid, yGrid = np.meshgrid(cotamont, cotajus)
z = uhe['prod_esp'] * ( xGrid - yGrid )
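        # Productivity surface: specific productivity times the gross head
        # (upstream reservoir level minus tailrace level), evaluated on the
        # (defluent flow, storage) grid built above.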
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(qdef, volumes,z, rcount=100, ccount = 100, cmap=plt.cm.coolwarm,
linewidth=0, antialiased=False)
plt.xlabel('Vazão Defluente em m^3/s', fontsize=12)
titulo = 'Produtibilidade da Usina ' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.ylabel('Volume Armazenado em hm^3', fontsize=12)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
    # Plot non-existing plants and existing plants under expansion
def plota_expansao(self):
        # Count how many plants are under expansion (status 'EE' or 'NE')
cont = 0
nomes = []
for iusi, status in enumerate(self._status['valor']):
if status == 'EE' or status == 'NE':
cont += 1
nomes.append(self._nome['valor'][iusi])
motorizada = np.zeros(cont)
vazia = np.zeros(cont)
enchendo = np.zeros(cont)
submotorizada = np.zeros(cont)
ind = np.arange(cont)
cont = 0
nanos = len(self._status_vol_morto['valor'][0])
for iusi, status in enumerate(self._status['valor']):
if status == 'EE' or status == 'NE':
                # Months in which the plant is fully motorized
motorizada[cont] = nanos * 12 - np.count_nonzero(self._status_motoriz['valor'][iusi] - 2)
                # Months in which dead-storage filling has not yet started
vazia[cont] = nanos * 12 - np.count_nonzero(self._status_vol_morto['valor'][iusi])
                # Months in which the plant is filling its dead storage
enchendo[cont] = nanos * 12 - np.count_nonzero(self._status_vol_morto['valor'][iusi] - 1)
                # Months in which the plant is still being motorized (submotorized)
submotorizada[cont] = nanos * 12 - np.count_nonzero(self._status_motoriz['valor'][iusi] - 1)
cont += 1
width = 0.35 # the width of the bars: can also be len(x) sequence
ax = plt.axes()
p1 = plt.barh(ind, vazia, width, color='w')
p2 = plt.barh(ind, enchendo, width, color='lime', left=vazia)
p3 = plt.barh(ind, submotorizada, width, color='sienna', left=vazia + enchendo)
p4 = plt.barh(ind, motorizada, width, color='black', left=vazia + enchendo + submotorizada)
plt.ylabel('Usinas', fontsize=16)
plt.title('Usinas Hidreletricas em Expansao', fontsize=16)
plt.yticks(ind, nomes, fontsize=12)
plt.xticks(np.arange(0, nanos * 12 + 2, 12))
# plt.yticks(np.arange(0, 81, 10))
plt.legend((p1[0], p2[0], p3[0], p4[0]), ('Nao Entrou', 'Enchendo Vol. Morto', 'Submotorizada', 'Motorizada'),
fontsize=12)
plt.xlabel('Meses do Estudo', fontsize=16)
ax.xaxis.grid()
plt.show()
def parp(self, uhe, ord_max):
"""
        Compute the coefficients of the PAR(p) model.
        :param uhe: data dictionary with information about the hydroelectric plant,
               ord_max: maximum order of the PAR(p) model
        :returns ordem: AR model order for each month,
                 coef_parp: AR model coefficients for each month,
                 fac: autocorrelation function,
                 facp: partial autocorrelation function,
                 residuos: matrix of residuals
"""
vazoes = uhe['vazoes']
        nanos = len(vazoes)  # the last year of the historical record is usually incomplete (discard it)
        media = np.mean(vazoes[1:(nanos-1)], 0)  # the first historical year is used as a tendency (discard it)
        desvio = np.std(vazoes[1:(nanos-1)], 0)  # the first historical year is used as a tendency (discard it)
        # Compute normalized inflows (not needed)
#vaznorm = np.zeros((nanos,12),'d')
#for iano in range(nanos):
# for imes in range(12):
# vaznorm[iano][imes] = (self.Vazoes[iano][imes] - media[imes])/desvio[imes]
        # Compute the autocorrelation function (one per month)
fac = np.zeros( (12, ord_max+1), 'd')
for ilag in range(ord_max+1):
for imes in range(12):
for iano in np.arange(1,nanos-1):
ano_ant = iano
mes_ant = imes - ilag
if mes_ant < 0:
ano_ant -= 1
mes_ant += 12
fac[imes][ilag] += (vazoes[iano][imes] - media[imes]) * (vazoes[ano_ant][mes_ant] - media[mes_ant])
fac[imes][ilag] /= (nanos-2)
fac[imes][ilag] /= (desvio[imes]*desvio[mes_ant])
        # Compute the partial autocorrelation function (one per month)
facp = np.zeros((12, ord_max+1), 'd')
for ilag in np.arange(1,ord_max+1):
for imes in range(12):
A = np.eye(ilag)
B = np.zeros(ilag)
                # Fill the upper triangular part
for ilin in range(len(A)):
                    for icol in range( len(A) ):  # TODO: could be np.arange(ilin+1,len(A)); test later
if icol > ilin:
mes = imes - ilin - 1
if mes < 0:
mes = mes + 12
A[ilin][icol] = fac[mes][icol-ilin]
B[ilin] = fac[imes][ilin+1]
                # Fill the lower triangular part
for ilin in range(len(A)):
                    for icol in range( len(A) ):  # TODO: could be np.arange(0, ilin); test later
if icol < ilin:
A[ilin][icol] = A[icol][ilin]
phi = np.linalg.solve(A,B)
facp[imes][ilag] = phi[ len(phi)-1 ]
        # Order identification
IC = 1.96/np.sqrt(nanos-2)
ordem = np.zeros(12, 'i')
for imes in range(12):
ordem[imes] = 0
for ilag in range(ord_max+1):
if facp[imes][ilag] > IC or facp[imes][ilag] < -IC:
ordem[imes] = ilag
        # Compute the coefficients
coef_parp = np.zeros( (12,ord_max), 'd')
for imes in range(12):
ilag = ordem[imes]
A = np.eye(ilag)
B = np.zeros(ilag)
            # Fill the upper triangular part
for ilin in range(len(A)):
                for icol in range( len(A) ):  # TODO: could be np.arange(ilin+1,len(A)); test later
if icol > ilin:
mes = imes - ilin - 1
if mes < 0:
mes = mes + 12
A[ilin][icol] = fac[mes][icol-ilin]
B[ilin] = fac[imes][ilin+1]
            # Fill the lower triangular part
for ilin in range(len(A)):
                for icol in range( len(A) ):  # TODO: could be np.arange(0, ilin); test later
if icol < ilin:
A[ilin][icol] = A[icol][ilin]
phi = np.linalg.solve(A,B)
for iord in range ( len(phi) ):
coef_parp[imes][iord ] = phi[ iord ]
        # Compute the normalized residuals
residuos = np.zeros( (nanos-1, 12) )
for iano in np.arange(1,nanos-1):
for imes in range(12):
residuos[iano][imes]= ( vazoes[iano][imes]-media[imes] ) / desvio[imes]
for ilag in range(ord_max):
ano_ant = iano
mes_ant = imes - ilag - 1
if mes_ant < 0:
ano_ant -= 1
mes_ant += 12
residuos[iano][imes] -= coef_parp[imes][ilag]*\
(vazoes[ano_ant][mes_ant]-media[mes_ant])/desvio[mes_ant]
return ordem, coef_parp, fac, facp, residuos
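    # Hedged usage sketch for parp() (illustrative only; 'uhe' is a plant data
    # dictionary produced by this class):
    #   ordem, coef, fac, facp, res = self.parp(uhe, ord_max=6)
    # For a month with identified order p, the coefficients solve the
    # Yule-Walker-like system A.phi = B assembled from 'fac' above, e.g. with
    # hypothetical numbers:
    #   A = np.array([[1.0, 0.6], [0.6, 1.0]]); B = np.array([0.7, 0.5])
    #   phi = np.linalg.solve(A, B)  # PAR(2) coefficients for that month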
def plota_parp(self, uhe, mes, ordmax):
"""
        Plot the FAC and FACP charts for the plant whose data dictionary is given.
        :param uhe: data dictionary with information about the hydroelectric plant,
               mes: month from 0 to 11 (Jan to Dec) to be considered,
               ord_max: maximum order of the PAR(p) model
"""
ordem, coef_parp, fac, facp, residuos = self.parp(uhe, ordmax)
vazoes = uhe['vazoes']
nanos = len(vazoes) - 1
if mes == 0:
str_mes = 'January'
elif mes == 1:
str_mes = 'Fevereiro'
elif mes == 2:
str_mes = 'Marco'
elif mes == 3:
str_mes = 'Abril'
elif mes == 4:
str_mes = 'Maio'
elif mes == 5:
str_mes = 'Junho'
elif mes == 6:
str_mes = 'Julho'
elif mes == 7:
str_mes = 'Agosto'
elif mes == 8:
str_mes = 'Setembro'
elif mes == 9:
str_mes = 'Outubro'
elif mes == 10:
str_mes = 'Novembro'
else:
str_mes = 'Dezembro'
IC = 1.96/np.sqrt(nanos-1)
cores = []
limitesup = []
limiteinf = []
for elemento in facp[mes][1:ordmax+1]:
limitesup.append(IC)
limiteinf.append(-IC)
if elemento > IC or elemento < -IC:
cores.append('r')
else:
cores.append('b')
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
barWidth = 0.40
titulo = 'FAC e FACP of ' + str_mes + ' - UHE ' + uhe['nome']
f.canvas.set_window_title(titulo)
ax1.bar(np.arange(1,ordmax+1), fac[mes][1:ordmax+1], barWidth, align='center')
ax2.bar(np.arange(1,ordmax+1), facp[mes][1:ordmax+1], barWidth, align='center', color = cores)
ax2.plot(np.arange(1,ordmax+1), limitesup, 'm--', lw=1)
ax2.plot(np.arange(1,ordmax+1), limiteinf, 'm--', lw=1)
ax1.set_xticks(np.arange(1,ordmax+1))
ax2.set_xticks(np.arange(1,ordmax+1))
tituloFAC = 'FAC - Month: ' + str_mes + '\n of UHE ' + uhe['nome']
tituloFACP = 'FACP - Month ' + str_mes + '\n of UHE ' + uhe['nome']
ax1.set_title(tituloFAC, fontsize = 13)
ax2.set_title(tituloFACP, fontsize =13)
#ax1.xlabel('Lag')
#ax2.xlabel('Lag')
#ax1.ylabel('Autocorrelacao e Autocorrelacao Parcial')
plt.show()
def gera_cen_sinteticos(self, uhe, ord_max, nr_cen):
"""
        Generate synthetic natural inflow series for the plant whose data dictionary is given.
        :param uhe: data dictionary with information about the hydroelectric plant,
               ord_max: maximum order of the PAR(p) model,
               nr_cen: number of synthetic series to generate
        :returns sintetica_adit: array(nr_cen, nestagios) with the generated scenarios
"""
ordem, coef_parp, fac, facp, residuos = self.parp(uhe, ord_max)
#
        # Get basic parameters
#
nanos_estudo = len(uhe['status_vol_morto'])
nmeses_estudo = len(uhe['status_vol_morto'][0])
nestagios = nanos_estudo*nmeses_estudo
vazoes = uhe['vazoes']
nanos = len(vazoes) - 1
        media = np.mean(vazoes[1:(nanos-1)], 0)  # the first historical year is used as a tendency (discard it)
        desvio = np.std(vazoes[1:(nanos-1)], 0)  # the first historical year is used as a tendency (discard it)
        # Generate the synthetic series
sintetica_adit = np.zeros((nr_cen,nestagios),'d')
for iser in range(nr_cen):
contador = -1
for iano in range(nanos_estudo):
for imes in range(nmeses_estudo):
contador += 1
serie = randint(1,nanos-2)
valor = media[imes] + desvio[imes]*residuos[serie][imes]
for ilag in range(ord_max):
mes_ant = imes - ilag - 1
ano_ant = iano
if mes_ant < 0:
mes_ant += 12
ano_ant -= 1
if ano_ant < 0:
vazant = media[mes_ant]
else:
vazant = sintetica_adit[iser][contador-1-ilag]
valor += desvio[imes]*coef_parp[imes][ilag]*(vazant-media[mes_ant])/desvio[mes_ant]
sintetica_adit[iser][contador] = valor
x_axis = np.arange(1, nestagios+1)
plt.plot(x_axis, sintetica_adit.transpose(), 'c-')
plt.plot(x_axis, np.mean(sintetica_adit,0), 'r-', lw=3, label='Mean - Synthetic Series')
plt.plot(x_axis, np.mean(sintetica_adit,0) + np.nanstd(sintetica_adit, axis=0), 'r-.', lw=2, label='Std Synthetic Series')
plt.plot(x_axis, np.mean(sintetica_adit,0) - np.nanstd(sintetica_adit, axis=0), 'r-.', lw=2)
m = np.concatenate([ media, media, media, media, media])
d = np.concatenate([ desvio, desvio, desvio, desvio, desvio])
plt.plot(x_axis, m, 'mo', lw=3, label='Mean - Hystorical Series')
plt.plot(x_axis, m + d, 'bo', lw=2, label='Std - Hystorical Series')
plt.plot(x_axis, m - d, 'bo', lw=2)
titulo = uhe['nome'].strip() + "'s Synthetic Series of Natural \n" " Inflows - Aditive Noise "
plt.title(titulo, fontsize=16)
plt.xlabel('Month', fontsize=16)
plt.ylabel('Inflow (m^3/s', fontsize=16)
plt.legend(fontsize=12)
plt.show()
return sintetica_adit
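    # Hedged usage sketch for gera_cen_sinteticos() (illustrative only):
    #   series = self.gera_cen_sinteticos(uhe, ord_max=6, nr_cen=200)
    # returns a (nr_cen, nestagios) array of synthetic natural inflows built from
    # the PAR(p) fit plus resampled historical residuals (additive noise).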
|
[
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.count_nonzero",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.barh",
"os.path.split",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"numpy.concatenate",
"numpy.meshgrid",
"matplotlib.pyplot.ylim",
"random.randint",
"numpy.eye",
"numpy.nanstd",
"numpy.ones",
"numpy.vdot",
"matplotlib.pyplot.axes",
"numpy.isnan",
"numpy.std",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.shape",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.linalg.solve",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"numpy.zeros",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots"
] |
[((29927, 29952), 'numpy.where', 'np.where', (['(incremental < 0)'], {}), '(incremental < 0)\n', (29935, 29952), True, 'import numpy as np\n'), ((31508, 31533), 'numpy.where', 'np.where', (['(incremental < 0)'], {}), '(incremental < 0)\n', (31516, 31533), True, 'import numpy as np\n'), ((32292, 32333), 'numpy.array', 'np.array', (["self._maq_por_conj['valor'][-1]"], {}), "(self._maq_por_conj['valor'][-1])\n", (32300, 32333), True, 'import numpy as np\n'), ((32346, 32387), 'numpy.array', 'np.array', (["self._pef_por_conj['valor'][-1]"], {}), "(self._pef_por_conj['valor'][-1])\n", (32354, 32387), True, 'import numpy as np\n'), ((32526, 32567), 'numpy.array', 'np.array', (["self._maq_por_conj['valor'][-1]"], {}), "(self._maq_por_conj['valor'][-1])\n", (32534, 32567), True, 'import numpy as np\n'), ((32580, 32622), 'numpy.array', 'np.array', (["self._vaz_efet_conj['valor'][-1]"], {}), "(self._vaz_efet_conj['valor'][-1])\n", (32588, 32622), True, 'import numpy as np\n'), ((56108, 56120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (56118, 56120), True, 'from matplotlib import pyplot as plt\n'), ((56134, 56150), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (56145, 56150), True, 'from matplotlib import pyplot as plt\n'), ((56170, 56198), 'numpy.arange', 'np.arange', (['(1)', '(nanos * 12 + 1)'], {}), '(1, nanos * 12 + 1)\n', (56179, 56198), True, 'import numpy as np\n'), ((56879, 56909), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (56888, 56909), True, 'from matplotlib import pyplot as plt\n'), ((56918, 56958), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mes de Estudo"""'], {'fontsize': '(16)'}), "('Mes de Estudo', fontsize=16)\n", (56928, 56958), True, 'from matplotlib import pyplot as plt\n'), ((56967, 57008), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Volume em hm^3"""'], {'fontsize': '(16)'}), "('Volume em hm^3', fontsize=16)\n", (56977, 57008), True, 'from matplotlib import pyplot as plt\n'), ((57212, 57222), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (57220, 57222), True, 'from matplotlib import pyplot as plt\n'), ((57310, 57322), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (57320, 57322), True, 'from matplotlib import pyplot as plt\n'), ((57336, 57352), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (57347, 57352), True, 'from matplotlib import pyplot as plt\n'), ((57371, 57399), 'numpy.arange', 'np.arange', (['(1)', '(nanos * 12 + 1)'], {}), '(1, nanos * 12 + 1)\n', (57380, 57399), True, 'import numpy as np\n'), ((57663, 57693), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (57672, 57693), True, 'from matplotlib import pyplot as plt\n'), ((57702, 57742), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mes de Estudo"""'], {'fontsize': '(16)'}), "('Mes de Estudo', fontsize=16)\n", (57712, 57742), True, 'from matplotlib import pyplot as plt\n'), ((57751, 57797), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vazao Minima em m^3"""'], {'fontsize': '(16)'}), "('Vazao Minima em m^3', fontsize=16)\n", (57761, 57797), True, 'from matplotlib import pyplot as plt\n'), ((58001, 58011), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (58009, 58011), True, 'from matplotlib import pyplot as plt\n'), ((58268, 58308), 'numpy.count_nonzero', 'np.count_nonzero', (["uhe['vol_morto_tempo']"], {}), "(uhe['vol_morto_tempo'])\n", (58284, 58308), True, 'import numpy as np\n'), ((58365, 
58381), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (58376, 58381), True, 'from matplotlib import pyplot as plt\n'), ((58400, 58428), 'numpy.arange', 'np.arange', (['(1)', '(nanos * 12 + 1)'], {}), '(1, nanos * 12 + 1)\n', (58409, 58428), True, 'import numpy as np\n'), ((58604, 58634), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (58613, 58634), True, 'from matplotlib import pyplot as plt\n'), ((58643, 58683), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mes de Estudo"""'], {'fontsize': '(16)'}), "('Mes de Estudo', fontsize=16)\n", (58653, 58683), True, 'from matplotlib import pyplot as plt\n'), ((58692, 58739), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Volume Morto em hm^3"""'], {'fontsize': '(16)'}), "('Volume Morto em hm^3', fontsize=16)\n", (58702, 58739), True, 'from matplotlib import pyplot as plt\n'), ((58749, 58772), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (58759, 58772), True, 'from matplotlib import pyplot as plt\n'), ((58782, 58822), 'numpy.count_nonzero', 'np.count_nonzero', (["uhe['vol_morto_tempo']"], {}), "(uhe['vol_morto_tempo'])\n", (58798, 58822), True, 'import numpy as np\n'), ((58832, 58842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (58840, 58842), True, 'from matplotlib import pyplot as plt\n'), ((58937, 58953), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (58948, 58953), True, 'from matplotlib import pyplot as plt\n'), ((58972, 59000), 'numpy.arange', 'np.arange', (['(1)', '(nanos * 12 + 1)'], {}), '(1, nanos * 12 + 1)\n', (58981, 59000), True, 'import numpy as np\n'), ((59169, 59199), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (59178, 59199), True, 'from matplotlib import pyplot as plt\n'), ((59208, 59248), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mes de Estudo"""'], {'fontsize': '(16)'}), "('Mes de Estudo', fontsize=16)\n", (59218, 59248), True, 'from matplotlib import pyplot as plt\n'), ((59257, 59306), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Potencia Efetiva em MW"""'], {'fontsize': '(16)'}), "('Potencia Efetiva em MW', fontsize=16)\n", (59267, 59306), True, 'from matplotlib import pyplot as plt\n'), ((59316, 59326), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (59324, 59326), True, 'from matplotlib import pyplot as plt\n'), ((59854, 59870), 'numpy.arange', 'np.arange', (['(1)', '(13)'], {}), '(1, 13)\n', (59863, 59870), True, 'import numpy as np\n'), ((59939, 59963), 'numpy.mean', 'np.mean', (['vaz_nat'], {'axis': '(0)'}), '(vaz_nat, axis=0)\n', (59946, 59963), True, 'import numpy as np\n'), ((59972, 60007), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'media', '"""r-"""'], {'lw': '(3)'}), "(x_axis, media, 'r-', lw=3)\n", (59980, 60007), True, 'from matplotlib import pyplot as plt\n'), ((60025, 60051), 'numpy.nanstd', 'np.nanstd', (['vaz_nat'], {'axis': '(0)'}), '(vaz_nat, axis=0)\n', (60034, 60051), True, 'import numpy as np\n'), ((60060, 60105), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', '(media + desvio)', '"""r-."""'], {'lw': '(2)'}), "(x_axis, media + desvio, 'r-.', lw=2)\n", (60068, 60105), True, 'from matplotlib import pyplot as plt\n'), ((60114, 60159), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', '(media - desvio)', '"""r-."""'], {'lw': '(2)'}), "(x_axis, media - desvio, 'r-.', lw=2)\n", (60122, 60159), True, 'from matplotlib import pyplot as plt\n'), ((60202, 60244), 
'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'vaz_nat[:][ultimo]', '"""b-"""'], {}), "(x_axis, vaz_nat[:][ultimo], 'b-')\n", (60210, 60244), True, 'from matplotlib import pyplot as plt\n'), ((60316, 60346), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (60325, 60346), True, 'from matplotlib import pyplot as plt\n'), ((60355, 60392), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mes do Ano"""'], {'fontsize': '(16)'}), "('Mes do Ano', fontsize=16)\n", (60365, 60392), True, 'from matplotlib import pyplot as plt\n'), ((60401, 60433), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Vazao"""'], {'fontsize': '(16)'}), "('Vazao', fontsize=16)\n", (60411, 60433), True, 'from matplotlib import pyplot as plt\n'), ((60442, 60452), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (60450, 60452), True, 'from matplotlib import pyplot as plt\n'), ((61429, 61464), 'matplotlib.pyplot.plot', 'plt.plot', (['volumes', 'cota', '"""b-"""'], {'lw': '(3)'}), "(volumes, cota, 'b-', lw=3)\n", (61437, 61464), True, 'from matplotlib import pyplot as plt\n'), ((61474, 61530), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Volume do Reservatorio (hm^3)"""'], {'fontsize': '(16)'}), "('Volume do Reservatorio (hm^3)', fontsize=16)\n", (61484, 61530), True, 'from matplotlib import pyplot as plt\n'), ((61604, 61634), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (61613, 61634), True, 'from matplotlib import pyplot as plt\n'), ((61643, 61684), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cota em Metros"""'], {'fontsize': '(16)'}), "('Cota em Metros', fontsize=16)\n", (61653, 61684), True, 'from matplotlib import pyplot as plt\n'), ((61693, 61726), 'matplotlib.pyplot.xlim', 'plt.xlim', (['volumes[0]', 'volumes[99]'], {}), '(volumes[0], volumes[99])\n', (61701, 61726), True, 'from matplotlib import pyplot as plt\n'), ((61868, 61878), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (61876, 61878), True, 'from matplotlib import pyplot as plt\n'), ((62681, 62715), 'matplotlib.pyplot.plot', 'plt.plot', (['cotas', 'areas', '"""b-"""'], {'lw': '(3)'}), "(cotas, areas, 'b-', lw=3)\n", (62689, 62715), True, 'from matplotlib import pyplot as plt\n'), ((62725, 62784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Cota do Reservatorio (em metros)"""'], {'fontsize': '(16)'}), "('Cota do Reservatorio (em metros)', fontsize=16)\n", (62735, 62784), True, 'from matplotlib import pyplot as plt\n'), ((62856, 62886), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (62865, 62886), True, 'from matplotlib import pyplot as plt\n'), ((62895, 62945), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Area Superficia em km^2"""'], {'fontsize': '(16)'}), "('Area Superficia em km^2', fontsize=16)\n", (62905, 62945), True, 'from matplotlib import pyplot as plt\n'), ((62954, 62983), 'matplotlib.pyplot.xlim', 'plt.xlim', (['cotas[0]', 'cotas[99]'], {}), '(cotas[0], cotas[99])\n', (62962, 62983), True, 'from matplotlib import pyplot as plt\n'), ((63131, 63141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (63139, 63141), True, 'from matplotlib import pyplot as plt\n'), ((63446, 63461), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (63455, 63461), True, 'import numpy as np\n'), ((63684, 63698), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (63696, 63698), True, 'from matplotlib import pyplot as plt\n'), ((63726, 63749), 
'matplotlib.pyplot.bar', 'plt.bar', (['x_axis', 'y_axis'], {}), '(x_axis, y_axis)\n', (63733, 63749), True, 'from matplotlib import pyplot as plt\n'), ((64152, 64182), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (64161, 64182), True, 'from matplotlib import pyplot as plt\n'), ((64191, 64241), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tipo de Produtibilidade"""'], {'fontsize': '(16)'}), "('Tipo de Produtibilidade', fontsize=16)\n", (64201, 64241), True, 'from matplotlib import pyplot as plt\n'), ((64250, 64292), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Produtibilidade"""'], {'fontsize': '(16)'}), "('Produtibilidade', fontsize=16)\n", (64260, 64292), True, 'from matplotlib import pyplot as plt\n'), ((64301, 64311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (64309, 64311), True, 'from matplotlib import pyplot as plt\n'), ((65316, 65372), 'numpy.linspace', 'np.linspace', (["uhe['vaz_min']", "(2 * uhe['engolimento'])", '(100)'], {}), "(uhe['vaz_min'], 2 * uhe['engolimento'], 100)\n", (65327, 65372), True, 'import numpy as np\n'), ((65687, 65717), 'numpy.meshgrid', 'np.meshgrid', (['cotamont', 'cotajus'], {}), '(cotamont, cotajus)\n', (65698, 65717), True, 'import numpy as np\n'), ((65782, 65794), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (65792, 65794), True, 'from matplotlib import pyplot as plt\n'), ((65994, 66045), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Vazão Defluente em m^3/s"""'], {'fontsize': '(12)'}), "('Vazão Defluente em m^3/s', fontsize=12)\n", (66004, 66045), True, 'from matplotlib import pyplot as plt\n'), ((66113, 66143), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (66122, 66143), True, 'from matplotlib import pyplot as plt\n'), ((66152, 66204), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Volume Armazenado em hm^3"""'], {'fontsize': '(12)'}), "('Volume Armazenado em hm^3', fontsize=12)\n", (66162, 66204), True, 'from matplotlib import pyplot as plt\n'), ((66263, 66273), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (66271, 66273), True, 'from matplotlib import pyplot as plt\n'), ((66654, 66668), 'numpy.zeros', 'np.zeros', (['cont'], {}), '(cont)\n', (66662, 66668), True, 'import numpy as np\n'), ((66685, 66699), 'numpy.zeros', 'np.zeros', (['cont'], {}), '(cont)\n', (66693, 66699), True, 'import numpy as np\n'), ((66719, 66733), 'numpy.zeros', 'np.zeros', (['cont'], {}), '(cont)\n', (66727, 66733), True, 'import numpy as np\n'), ((66758, 66772), 'numpy.zeros', 'np.zeros', (['cont'], {}), '(cont)\n', (66766, 66772), True, 'import numpy as np\n'), ((66788, 66803), 'numpy.arange', 'np.arange', (['cont'], {}), '(cont)\n', (66797, 66803), True, 'import numpy as np\n'), ((67798, 67808), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (67806, 67808), True, 'from matplotlib import pyplot as plt\n'), ((67822, 67860), 'matplotlib.pyplot.barh', 'plt.barh', (['ind', 'vazia', 'width'], {'color': '"""w"""'}), "(ind, vazia, width, color='w')\n", (67830, 67860), True, 'from matplotlib import pyplot as plt\n'), ((67874, 67930), 'matplotlib.pyplot.barh', 'plt.barh', (['ind', 'enchendo', 'width'], {'color': '"""lime"""', 'left': 'vazia'}), "(ind, enchendo, width, color='lime', left=vazia)\n", (67882, 67930), True, 'from matplotlib import pyplot as plt\n'), ((67944, 68018), 'matplotlib.pyplot.barh', 'plt.barh', (['ind', 'submotorizada', 'width'], {'color': '"""sienna"""', 'left': '(vazia + enchendo)'}), "(ind, 
submotorizada, width, color='sienna', left=vazia + enchendo)\n", (67952, 68018), True, 'from matplotlib import pyplot as plt\n'), ((68032, 68122), 'matplotlib.pyplot.barh', 'plt.barh', (['ind', 'motorizada', 'width'], {'color': '"""black"""', 'left': '(vazia + enchendo + submotorizada)'}), "(ind, motorizada, width, color='black', left=vazia + enchendo +\n submotorizada)\n", (68040, 68122), True, 'from matplotlib import pyplot as plt\n'), ((68128, 68161), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Usinas"""'], {'fontsize': '(16)'}), "('Usinas', fontsize=16)\n", (68138, 68161), True, 'from matplotlib import pyplot as plt\n'), ((68170, 68228), 'matplotlib.pyplot.title', 'plt.title', (['"""Usinas Hidreletricas em Expansao"""'], {'fontsize': '(16)'}), "('Usinas Hidreletricas em Expansao', fontsize=16)\n", (68179, 68228), True, 'from matplotlib import pyplot as plt\n'), ((68237, 68272), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ind', 'nomes'], {'fontsize': '(12)'}), '(ind, nomes, fontsize=12)\n', (68247, 68272), True, 'from matplotlib import pyplot as plt\n'), ((68377, 68504), 'matplotlib.pyplot.legend', 'plt.legend', (['(p1[0], p2[0], p3[0], p4[0])', "('Nao Entrou', 'Enchendo Vol. Morto', 'Submotorizada', 'Motorizada')"], {'fontsize': '(12)'}), "((p1[0], p2[0], p3[0], p4[0]), ('Nao Entrou',\n 'Enchendo Vol. Morto', 'Submotorizada', 'Motorizada'), fontsize=12)\n", (68387, 68504), True, 'from matplotlib import pyplot as plt\n'), ((68528, 68570), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Meses do Estudo"""'], {'fontsize': '(16)'}), "('Meses do Estudo', fontsize=16)\n", (68538, 68570), True, 'from matplotlib import pyplot as plt\n'), ((68604, 68614), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (68612, 68614), True, 'from matplotlib import pyplot as plt\n'), ((69318, 69349), 'numpy.mean', 'np.mean', (['vazoes[1:nanos - 1]', '(0)'], {}), '(vazoes[1:nanos - 1], 0)\n', (69325, 69349), True, 'import numpy as np\n'), ((69440, 69470), 'numpy.std', 'np.std', (['vazoes[1:nanos - 1]', '(0)'], {}), '(vazoes[1:nanos - 1], 0)\n', (69446, 69470), True, 'import numpy as np\n'), ((69881, 69913), 'numpy.zeros', 'np.zeros', (['(12, ord_max + 1)', '"""d"""'], {}), "((12, ord_max + 1), 'd')\n", (69889, 69913), True, 'import numpy as np\n'), ((70549, 70581), 'numpy.zeros', 'np.zeros', (['(12, ord_max + 1)', '"""d"""'], {}), "((12, ord_max + 1), 'd')\n", (70557, 70581), True, 'import numpy as np\n'), ((70600, 70625), 'numpy.arange', 'np.arange', (['(1)', '(ord_max + 1)'], {}), '(1, ord_max + 1)\n', (70609, 70625), True, 'import numpy as np\n'), ((71729, 71746), 'numpy.zeros', 'np.zeros', (['(12)', '"""i"""'], {}), "(12, 'i')\n", (71737, 71746), True, 'import numpy as np\n'), ((72011, 72039), 'numpy.zeros', 'np.zeros', (['(12, ord_max)', '"""d"""'], {}), "((12, ord_max), 'd')\n", (72019, 72039), True, 'import numpy as np\n'), ((75388, 75419), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(True)'}), '(1, 2, sharey=True)\n', (75400, 75419), True, 'from matplotlib import pyplot as plt\n'), ((76345, 76355), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (76353, 76355), True, 'from matplotlib import pyplot as plt\n'), ((77207, 77238), 'numpy.mean', 'np.mean', (['vazoes[1:nanos - 1]', '(0)'], {}), '(vazoes[1:nanos - 1], 0)\n', (77214, 77238), True, 'import numpy as np\n'), ((77329, 77359), 'numpy.std', 'np.std', (['vazoes[1:nanos - 1]', '(0)'], {}), '(vazoes[1:nanos - 1], 0)\n', (77335, 77359), True, 'import numpy as np\n'), ((77492, 77526), 'numpy.zeros', 
'np.zeros', (['(nr_cen, nestagios)', '"""d"""'], {}), "((nr_cen, nestagios), 'd')\n", (77500, 77526), True, 'import numpy as np\n'), ((78481, 78508), 'numpy.arange', 'np.arange', (['(1)', '(nestagios + 1)'], {}), '(1, nestagios + 1)\n', (78490, 78508), True, 'import numpy as np\n'), ((78907, 78958), 'numpy.concatenate', 'np.concatenate', (['[media, media, media, media, media]'], {}), '([media, media, media, media, media])\n', (78921, 78958), True, 'import numpy as np\n'), ((78972, 79028), 'numpy.concatenate', 'np.concatenate', (['[desvio, desvio, desvio, desvio, desvio]'], {}), '([desvio, desvio, desvio, desvio, desvio])\n', (78986, 79028), True, 'import numpy as np\n'), ((79038, 79103), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'm', '"""mo"""'], {'lw': '(3)', 'label': '"""Mean - Hystorical Series"""'}), "(x_axis, m, 'mo', lw=3, label='Mean - Hystorical Series')\n", (79046, 79103), True, 'from matplotlib import pyplot as plt\n'), ((79112, 79180), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', '(m + d)', '"""bo"""'], {'lw': '(2)', 'label': '"""Std - Hystorical Series"""'}), "(x_axis, m + d, 'bo', lw=2, label='Std - Hystorical Series')\n", (79120, 79180), True, 'from matplotlib import pyplot as plt\n'), ((79189, 79224), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', '(m - d)', '"""bo"""'], {'lw': '(2)'}), "(x_axis, m - d, 'bo', lw=2)\n", (79197, 79224), True, 'from matplotlib import pyplot as plt\n'), ((79336, 79366), 'matplotlib.pyplot.title', 'plt.title', (['titulo'], {'fontsize': '(16)'}), '(titulo, fontsize=16)\n', (79345, 79366), True, 'from matplotlib import pyplot as plt\n'), ((79375, 79407), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Month"""'], {'fontsize': '(16)'}), "('Month', fontsize=16)\n", (79385, 79407), True, 'from matplotlib import pyplot as plt\n'), ((79416, 79456), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Inflow (m^3/s"""'], {'fontsize': '(16)'}), "('Inflow (m^3/s', fontsize=16)\n", (79426, 79456), True, 'from matplotlib import pyplot as plt\n'), ((79465, 79488), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (79475, 79488), True, 'from matplotlib import pyplot as plt\n'), ((79497, 79507), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (79505, 79507), True, 'from matplotlib import pyplot as plt\n'), ((968, 992), 'os.path.split', 'os.path.split', (['file_name'], {}), '(file_name)\n', (981, 992), False, 'import os\n'), ((1024, 1048), 'os.path.split', 'os.path.split', (['file_name'], {}), '(file_name)\n', (1037, 1048), False, 'import os\n'), ((13687, 13710), 'os.path.split', 'os.path.split', (['file_out'], {}), '(file_out)\n', (13700, 13710), False, 'import os\n'), ((13742, 13765), 'os.path.split', 'os.path.split', (['file_out'], {}), '(file_out)\n', (13755, 13765), False, 'import os\n'), ((28232, 28259), 'numpy.shape', 'np.shape', (['self._copiavazoes'], {}), '(self._copiavazoes)\n', (28240, 28259), True, 'import numpy as np\n'), ((31064, 31084), 'numpy.zeros', 'np.zeros', (['nanos_hist'], {}), '(nanos_hist)\n', (31072, 31084), True, 'import numpy as np\n'), ((32427, 32440), 'numpy.vdot', 'np.vdot', (['a', 'b'], {}), '(a, b)\n', (32434, 32440), True, 'import numpy as np\n'), ((32662, 32675), 'numpy.vdot', 'np.vdot', (['a', 'b'], {}), '(a, b)\n', (32669, 32675), True, 'import numpy as np\n'), ((32816, 32842), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (32824, 32842), True, 'import numpy as np\n'), ((32881, 32907), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', 
'"""d"""'], {}), "((nanos, 12), 'd')\n", (32889, 32907), True, 'import numpy as np\n'), ((32949, 32975), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (32957, 32975), True, 'import numpy as np\n'), ((33019, 33045), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (33027, 33045), True, 'import numpy as np\n'), ((33085, 33111), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (33093, 33111), True, 'import numpy as np\n'), ((33151, 33177), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (33159, 33177), True, 'import numpy as np\n'), ((61021, 61077), 'numpy.linspace', 'np.linspace', (["(uhe['vol_min'] - 1)", "(uhe['vol_max'] + 1)", '(100)'], {}), "(uhe['vol_min'] - 1, uhe['vol_max'] + 1, 100)\n", (61032, 61077), True, 'import numpy as np\n'), ((61256, 61304), 'numpy.linspace', 'np.linspace', (["uhe['vol_min']", "uhe['vol_max']", '(100)'], {}), "(uhe['vol_min'], uhe['vol_max'], 100)\n", (61267, 61304), True, 'import numpy as np\n'), ((61774, 61809), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(cota[0] - 1)', '(cota[99] + 1)'], {}), '(cota[0] - 1, cota[99] + 1)\n', (61782, 61809), True, 'from matplotlib import pyplot as plt\n'), ((61832, 61859), 'matplotlib.pyplot.ylim', 'plt.ylim', (['cota[0]', 'cota[99]'], {}), '(cota[0], cota[99])\n', (61840, 61859), True, 'from matplotlib import pyplot as plt\n'), ((62251, 62309), 'numpy.linspace', 'np.linspace', (["(uhe['cota_min'] - 1)", "(uhe['cota_max'] + 1)", '(100)'], {}), "(uhe['cota_min'] - 1, uhe['cota_max'] + 1, 100)\n", (62262, 62309), True, 'import numpy as np\n'), ((62343, 62393), 'numpy.linspace', 'np.linspace', (["uhe['cota_min']", "uhe['cota_max']", '(100)'], {}), "(uhe['cota_min'], uhe['cota_max'], 100)\n", (62354, 62393), True, 'import numpy as np\n'), ((63033, 63070), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(areas[0] - 1)', '(areas[99] + 1)'], {}), '(areas[0] - 1, areas[99] + 1)\n', (63041, 63070), True, 'from matplotlib import pyplot as plt\n'), ((63093, 63122), 'matplotlib.pyplot.ylim', 'plt.ylim', (['areas[0]', 'areas[99]'], {}), '(areas[0], areas[99])\n', (63101, 63122), True, 'from matplotlib import pyplot as plt\n'), ((64881, 64937), 'numpy.linspace', 'np.linspace', (["(uhe['vol_min'] - 1)", "(uhe['vol_max'] + 1)", '(100)'], {}), "(uhe['vol_min'] - 1, uhe['vol_max'] + 1, 100)\n", (64892, 64937), True, 'import numpy as np\n'), ((65128, 65176), 'numpy.linspace', 'np.linspace', (["uhe['vol_min']", "uhe['vol_max']", '(100)'], {}), "(uhe['vol_min'], uhe['vol_max'], 100)\n", (65139, 65176), True, 'import numpy as np\n'), ((68292, 68324), 'numpy.arange', 'np.arange', (['(0)', '(nanos * 12 + 2)', '(12)'], {}), '(0, nanos * 12 + 2, 12)\n', (68301, 68324), True, 'import numpy as np\n'), ((71696, 71714), 'numpy.sqrt', 'np.sqrt', (['(nanos - 2)'], {}), '(nanos - 2)\n', (71703, 71714), True, 'import numpy as np\n'), ((72118, 72130), 'numpy.eye', 'np.eye', (['ilag'], {}), '(ilag)\n', (72124, 72130), True, 'import numpy as np\n'), ((72147, 72161), 'numpy.zeros', 'np.zeros', (['ilag'], {}), '(ilag)\n', (72155, 72161), True, 'import numpy as np\n'), ((72950, 72971), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (72965, 72971), True, 'import numpy as np\n'), ((73140, 73165), 'numpy.zeros', 'np.zeros', (['(nanos - 1, 12)'], {}), '((nanos - 1, 12))\n', (73148, 73165), True, 'import numpy as np\n'), ((73190, 73213), 'numpy.arange', 'np.arange', (['(1)', '(nanos - 1)'], {}), '(1, nanos - 
1)\n', (73199, 73213), True, 'import numpy as np\n'), ((75032, 75050), 'numpy.sqrt', 'np.sqrt', (['(nanos - 1)'], {}), '(nanos - 1)\n', (75039, 75050), True, 'import numpy as np\n'), ((75574, 75598), 'numpy.arange', 'np.arange', (['(1)', '(ordmax + 1)'], {}), '(1, ordmax + 1)\n', (75583, 75598), True, 'import numpy as np\n'), ((75661, 75685), 'numpy.arange', 'np.arange', (['(1)', '(ordmax + 1)'], {}), '(1, ordmax + 1)\n', (75670, 75685), True, 'import numpy as np\n'), ((75765, 75789), 'numpy.arange', 'np.arange', (['(1)', '(ordmax + 1)'], {}), '(1, ordmax + 1)\n', (75774, 75789), True, 'import numpy as np\n'), ((75829, 75853), 'numpy.arange', 'np.arange', (['(1)', '(ordmax + 1)'], {}), '(1, ordmax + 1)\n', (75838, 75853), True, 'import numpy as np\n'), ((75900, 75924), 'numpy.arange', 'np.arange', (['(1)', '(ordmax + 1)'], {}), '(1, ordmax + 1)\n', (75909, 75924), True, 'import numpy as np\n'), ((75946, 75970), 'numpy.arange', 'np.arange', (['(1)', '(ordmax + 1)'], {}), '(1, ordmax + 1)\n', (75955, 75970), True, 'import numpy as np\n'), ((78591, 78617), 'numpy.mean', 'np.mean', (['sintetica_adit', '(0)'], {}), '(sintetica_adit, 0)\n', (78598, 78617), True, 'import numpy as np\n'), ((15275, 15298), 'os.path.split', 'os.path.split', (['file_out'], {}), '(file_out)\n', (15288, 15298), False, 'import os\n'), ((28287, 28314), 'numpy.shape', 'np.shape', (['self._copiavazoes'], {}), '(self._copiavazoes)\n', (28295, 28314), True, 'import numpy as np\n'), ((52625, 52646), 'numpy.isnan', 'np.isnan', (['registro[2]'], {}), '(registro[2])\n', (52633, 52646), True, 'import numpy as np\n'), ((53859, 53880), 'numpy.isnan', 'np.isnan', (['registro[6]'], {}), '(registro[6])\n', (53867, 53880), True, 'import numpy as np\n'), ((56424, 56443), 'numpy.ones', 'np.ones', (['(nanos * 12)'], {}), '(nanos * 12)\n', (56431, 56443), True, 'import numpy as np\n'), ((56520, 56539), 'numpy.ones', 'np.ones', (['(nanos * 12)'], {}), '(nanos * 12)\n', (56527, 56539), True, 'import numpy as np\n'), ((57527, 57546), 'numpy.ones', 'np.ones', (['(nanos * 12)'], {}), '(nanos * 12)\n', (57534, 57546), True, 'import numpy as np\n'), ((61207, 61219), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (61214, 61219), True, 'import numpy as np\n'), ((65079, 65091), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (65086, 65091), True, 'import numpy as np\n'), ((70014, 70037), 'numpy.arange', 'np.arange', (['(1)', '(nanos - 1)'], {}), '(1, nanos - 1)\n', (70023, 70037), True, 'import numpy as np\n'), ((70679, 70691), 'numpy.eye', 'np.eye', (['ilag'], {}), '(ilag)\n', (70685, 70691), True, 'import numpy as np\n'), ((70712, 70726), 'numpy.zeros', 'np.zeros', (['ilag'], {}), '(ilag)\n', (70720, 70726), True, 'import numpy as np\n'), ((71570, 71591), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (71585, 71591), True, 'import numpy as np\n'), ((78688, 78714), 'numpy.mean', 'np.mean', (['sintetica_adit', '(0)'], {}), '(sintetica_adit, 0)\n', (78695, 78714), True, 'import numpy as np\n'), ((78716, 78749), 'numpy.nanstd', 'np.nanstd', (['sintetica_adit'], {'axis': '(0)'}), '(sintetica_adit, axis=0)\n', (78725, 78749), True, 'import numpy as np\n'), ((78819, 78845), 'numpy.mean', 'np.mean', (['sintetica_adit', '(0)'], {}), '(sintetica_adit, 0)\n', (78826, 78845), True, 'import numpy as np\n'), ((78847, 78880), 'numpy.nanstd', 'np.nanstd', (['sintetica_adit'], {'axis': '(0)'}), '(sintetica_adit, axis=0)\n', (78856, 78880), True, 'import numpy as np\n'), ((12867, 12909), 'numpy.array', 'np.array', 
(["self._codigo['valor']"], {'dtype': 'int'}), "(self._codigo['valor'], dtype=int)\n", (12875, 12909), True, 'import numpy as np\n'), ((12934, 12947), 'numpy.max', 'np.max', (['maior'], {}), '(maior)\n', (12940, 12947), True, 'import numpy as np\n'), ((13983, 14006), 'os.path.split', 'os.path.split', (['file_out'], {}), '(file_out)\n', (13996, 14006), False, 'import os\n'), ((14033, 14056), 'os.path.split', 'os.path.split', (['file_out'], {}), '(file_out)\n', (14046, 14056), False, 'import os\n'), ((53713, 53755), 'numpy.ones', 'np.ones', (["(dger.num_anos['valor'], 12)", '"""i"""'], {}), "((dger.num_anos['valor'], 12), 'i')\n", (53720, 53755), True, 'import numpy as np\n'), ((67091, 67148), 'numpy.count_nonzero', 'np.count_nonzero', (["(self._status_motoriz['valor'][iusi] - 2)"], {}), "(self._status_motoriz['valor'][iusi] - 2)\n", (67107, 67148), True, 'import numpy as np\n'), ((67276, 67331), 'numpy.count_nonzero', 'np.count_nonzero', (["self._status_vol_morto['valor'][iusi]"], {}), "(self._status_vol_morto['valor'][iusi])\n", (67292, 67331), True, 'import numpy as np\n'), ((67451, 67510), 'numpy.count_nonzero', 'np.count_nonzero', (["(self._status_vol_morto['valor'][iusi] - 1)"], {}), "(self._status_vol_morto['valor'][iusi] - 1)\n", (67467, 67510), True, 'import numpy as np\n'), ((67623, 67680), 'numpy.count_nonzero', 'np.count_nonzero', (["(self._status_motoriz['valor'][iusi] - 1)"], {}), "(self._status_motoriz['valor'][iusi] - 1)\n", (67639, 67680), True, 'import numpy as np\n'), ((77743, 77764), 'random.randint', 'randint', (['(1)', '(nanos - 2)'], {}), '(1, nanos - 2)\n', (77750, 77764), False, 'from random import randint\n'), ((12978, 13007), 'numpy.ones', 'np.ones', (['(maior + 1)'], {'dtype': 'int'}), '(maior + 1, dtype=int)\n', (12985, 13007), True, 'import numpy as np\n'), ((11499, 11525), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (11507, 11525), True, 'import numpy as np\n'), ((11587, 11613), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (11595, 11613), True, 'import numpy as np\n'), ((11675, 11701), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (11683, 11701), True, 'import numpy as np\n'), ((11765, 11791), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (11773, 11791), True, 'import numpy as np\n'), ((11854, 11880), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (11862, 11880), True, 'import numpy as np\n'), ((11943, 11969), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (11951, 11969), True, 'import numpy as np\n'), ((12026, 12052), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (12034, 12052), True, 'import numpy as np\n'), ((12111, 12137), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (12119, 12137), True, 'import numpy as np\n'), ((12197, 12223), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (12205, 12223), True, 'import numpy as np\n'), ((12283, 12309), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (12291, 12309), True, 'import numpy as np\n'), ((12369, 12395), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""d"""'], {}), "((nanos, 12), 'd')\n", (12377, 12395), True, 'import numpy as np\n'), ((13246, 13270), 'os.path.split', 'os.path.split', (['file_name'], {}), '(file_name)\n', (13259, 
13270), False, 'import os\n'), ((7489, 7514), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (7496, 7514), True, 'import numpy as np\n'), ((7598, 7623), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (7605, 7623), True, 'import numpy as np\n'), ((7707, 7732), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (7714, 7732), True, 'import numpy as np\n'), ((7816, 7841), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (7823, 7841), True, 'import numpy as np\n'), ((7921, 7946), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (7928, 7946), True, 'import numpy as np\n'), ((8028, 8053), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (8035, 8053), True, 'import numpy as np\n'), ((9794, 9820), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (9802, 9820), True, 'import numpy as np\n'), ((10294, 10320), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""i"""'], {}), "((nanos, 12), 'i')\n", (10302, 10320), True, 'import numpy as np\n'), ((10387, 10413), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""i"""'], {}), "((nanos, 12), 'i')\n", (10395, 10413), True, 'import numpy as np\n'), ((10481, 10507), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (10489, 10507), True, 'import numpy as np\n'), ((9605, 9630), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""i"""'], {}), "((nanos, 12), 'i')\n", (9612, 9630), True, 'import numpy as np\n'), ((9701, 9726), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""i"""'], {}), "((nanos, 12), 'i')\n", (9708, 9726), True, 'import numpy as np\n'), ((9917, 9942), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (9924, 9942), True, 'import numpy as np\n'), ((10039, 10064), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (10046, 10064), True, 'import numpy as np\n'), ((10170, 10195), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (10177, 10195), True, 'import numpy as np\n'), ((11062, 11088), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (11070, 11088), True, 'import numpy as np\n'), ((11159, 11185), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (11167, 11185), True, 'import numpy as np\n'), ((11256, 11282), 'numpy.zeros', 'np.zeros', (['(nanos, 12)', '"""i"""'], {}), "((nanos, 12), 'i')\n", (11264, 11282), True, 'import numpy as np\n'), ((10674, 10699), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (10681, 10699), True, 'import numpy as np\n'), ((10800, 10825), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (10807, 10825), True, 'import numpy as np\n'), ((10935, 10960), 'numpy.ones', 'np.ones', (['(nanos, 12)', '"""f"""'], {}), "((nanos, 12), 'f')\n", (10942, 10960), True, 'import numpy as np\n')]
|
import glob
import os
import subprocess
import time
import matplotlib.pyplot as plt
import numpy
import torch
def viz(
batch: torch.Tensor,
episodes=1000,
video=True,
folder='output',
) -> None:
## Visualize GoodAI Breakout Dataset
fig = plt.figure(1)
ax = fig.add_subplot(111)
ax.set_title("Breakout")
im = ax.imshow(numpy.zeros((84, 84, 4))) # Blank starting image
fig.show()
im.axes.figure.canvas.draw()
tstart = time.time()
rewards = 0
for episode in range(episodes):
image = batch.states[episode].permute(1, 2, 0)
rewards += batch.rewards[episode].detach().cpu().numpy()
ax.set_title(str(f"episode: {episode} | reward: {rewards}"))
im.set_data(image)
im.axes.figure.canvas.draw()
ax.figure.savefig(folder + "/img%02d.png" % episode)
if video:
subprocess.call([
'ffmpeg', '-framerate', '8', '-i', f'{folder}/img%02d.png', '-r', '30',
'-pix_fmt', 'yuv420p', f'{folder}/video_name.mp4'
])
for file_name in glob.glob(f"{folder}/*.png"):
os.remove(file_name)
    print('FPS:', episodes / (time.time() - tstart))
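# Hedged usage sketch (illustrative; assumes 'batch' exposes .states as an
# (N, C, H, W) tensor and .rewards as a length-N tensor, that the 'output'
# folder exists, and that ffmpeg is available on PATH):
#   viz(batch, episodes=100, video=True, folder='output')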
|
[
"matplotlib.pyplot.figure",
"numpy.zeros",
"subprocess.call",
"time.time",
"glob.glob",
"os.remove"
] |
[((261, 274), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (271, 274), True, 'import matplotlib.pyplot as plt\n'), ((454, 465), 'time.time', 'time.time', ([], {}), '()\n', (463, 465), False, 'import time\n'), ((348, 372), 'numpy.zeros', 'numpy.zeros', (['(84, 84, 4)'], {}), '((84, 84, 4))\n', (359, 372), False, 'import numpy\n'), ((821, 965), 'subprocess.call', 'subprocess.call', (["['ffmpeg', '-framerate', '8', '-i', f'{folder}/img%02d.png', '-r', '30',\n '-pix_fmt', 'yuv420p', f'{folder}/video_name.mp4']"], {}), "(['ffmpeg', '-framerate', '8', '-i', f'{folder}/img%02d.png',\n '-r', '30', '-pix_fmt', 'yuv420p', f'{folder}/video_name.mp4'])\n", (836, 965), False, 'import subprocess\n'), ((1005, 1033), 'glob.glob', 'glob.glob', (['f"""{folder}/*.png"""'], {}), "(f'{folder}/*.png')\n", (1014, 1033), False, 'import glob\n'), ((1041, 1061), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (1050, 1061), False, 'import os\n'), ((1086, 1097), 'time.time', 'time.time', ([], {}), '()\n', (1095, 1097), False, 'import time\n')]
|
import torch
import numpy as np
import cv2
def tonumpyimg(img):
"""
Convert a normalized tensor image to unnormalized uint8 numpy image
For single channel image, no unnormalization is done.
:param img: torch, normalized, (3, H, W), (H, W)
:return: numpy: (H, W, 3), (H, W). uint8
"""
return touint8(tonumpy(unnormalize_torch(img)))
def tonumpy(img):
"""
Convert torch image map to numpy image map
Note the range is not change
:param img: tensor, shape (C, H, W), (H, W)
:return: numpy, shape (H, W, C), (H, W)
"""
if len(img.size()) == 2:
return img.cpu().detach().numpy()
return img.permute(1, 2, 0).cpu().detach().numpy()
def touint8(img):
"""
Convert float numpy image to uint8 image
:param img: numpy image, float, (0, 1)
:return: uint8 image
"""
img = img * 255
return img.astype(np.uint8)
def normalize_torch(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
"""
Normalize a torch image.
:param img: (3, H, W), in range (0, 1)
"""
img = img.clone()
img -= torch.tensor(mean).view(3, 1, 1)
img /= torch.tensor(std).view(3, 1, 1)
return img
def unnormalize_torch(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
"""
Convert a normalized Tensor image to unnormalized form
    For single channel image, no unnormalization is done.
:param img: (C, H, W), (H, W)
"""
if img.size()[0] == 3:
img = img.clone()
img *= torch.Tensor(std).view(3, 1, 1)
img += torch.Tensor(mean).view(3, 1, 1)
return img
def gray2RGB(img_raw):
"""
Convert a gray image to RGB
:param img_raw: (H, W, 3) or (H, W), uint8, numpy
:return: (H, W, 3)
"""
if len(img_raw.shape) == 2:
img_raw = np.repeat(img_raw[:, :, None], 3, axis=2)
if img_raw.shape[2] > 3:
img_raw = img_raw[:, :, :3]
return img_raw
def color_scale(attention):
"""
    Visualize an attention map
    :param attention: (C, H, W), attention map, softmaxed
:return: (3, H, W), colored version
"""
colors = torch.Tensor([
[1, 0, 0], # red
[0, 1, 0], # green
[0, 0, 1], # blue
[0, 0, 0], # black
]).float()
# (H, W)
attention = torch.argmax(attention, dim=0)
# (H, W, C)
color_map = colors[attention]
color_map = color_map.permute(2, 0, 1)
return color_map
def warp_torch(map, H):
"""
Warp a torch image.
:param map: either (C, H, W) or (H, W)
:param H: (3, 3)
    :return: warped image, (C, H, W) or (H, W)
"""
map = tonumpy(map)
h, w = map.shape[-2:]
map = cv2.warpPerspective(map, H, dsize=(w, h))
return totensor(map)
def torange(array, low, high):
"""
Render an array to value range (low, high)
:param array: any array
:param low, high: the range
:return: new array
"""
min, max = array.min(), array.max()
# normalized to [0, 1]
array = array - min
array = array / (max - min)
# to (low, high)
array = array * (high - low) + low
return array
def tofloat(img):
"""
Convert a uint8 image to float image
:param img: numpy image, uint8
:return: float image
"""
return img.astype(np.float) / 255
def tonumpy_batch(imgs):
"""
Convert a batch of torch images to numpy image map
:param imgs: (B, C, H, W)
:return: (B, H, W, C)
"""
return imgs.permute(0, 2, 3, 1).cpu().detach().numpy()
def totensor(img, device=torch.device('cpu')):
"""
Do the reverse of tonumpy
"""
if len(img.shape) == 2:
return torch.from_numpy(img).to(device).float()
return torch.from_numpy(img).permute(2, 0, 1).to(device).float()
def totensor_batch(imgs, device=torch.device('cpu')):
"""
Do the reverse of tonumpy_batch
"""
return torch.from_numpy(imgs).permute(0, 3, 1, 2).to(device).float()
def RGB2BGR(*imgs):
return [cv2.cvtColor(x, cv2.COLOR_RGB2BGR) for x in imgs]
def unnormalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
"""
Convert a normalized tensor image to unnormalized form
:param img: (B, C, H, W)
"""
img = img.detach().cpu()
img *= torch.tensor(std).view(3, 1, 1)
img += torch.tensor(mean).view(3, 1, 1)
return img
def toUint8RGB(img):
return (tonumpy(unnormalize(img)) * 255.).astype(np.uint8)
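# Hedged round-trip check (not part of the original module): normalize_torch followed by
# unnormalize_torch should reproduce the input up to float error, and tonumpyimg should
# produce an (H, W, 3) uint8 array from the normalized tensor.
def _normalization_demo() -> None:
    img = torch.rand(3, 32, 32)  # fake RGB image in (0, 1)
    restored = unnormalize_torch(normalize_torch(img))
    assert torch.allclose(img, restored, atol=1e-5)
    out = tonumpyimg(normalize_torch(img))
    assert out.shape == (32, 32, 3) and out.dtype == np.uint8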
|
[
"numpy.repeat",
"torch.Tensor",
"torch.argmax",
"torch.from_numpy",
"torch.tensor",
"cv2.warpPerspective",
"cv2.cvtColor",
"torch.device"
] |
[((2321, 2351), 'torch.argmax', 'torch.argmax', (['attention'], {'dim': '(0)'}), '(attention, dim=0)\n', (2333, 2351), False, 'import torch\n'), ((2707, 2748), 'cv2.warpPerspective', 'cv2.warpPerspective', (['map', 'H'], {'dsize': '(w, h)'}), '(map, H, dsize=(w, h))\n', (2726, 2748), False, 'import cv2\n'), ((3586, 3605), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3598, 3605), False, 'import torch\n'), ((3841, 3860), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3853, 3860), False, 'import torch\n'), ((1829, 1870), 'numpy.repeat', 'np.repeat', (['img_raw[:, :, None]', '(3)'], {'axis': '(2)'}), '(img_raw[:, :, None], 3, axis=2)\n', (1838, 1870), True, 'import numpy as np\n'), ((4022, 4056), 'cv2.cvtColor', 'cv2.cvtColor', (['x', 'cv2.COLOR_RGB2BGR'], {}), '(x, cv2.COLOR_RGB2BGR)\n', (4034, 4056), False, 'import cv2\n'), ((1117, 1135), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (1129, 1135), False, 'import torch\n'), ((1161, 1178), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (1173, 1178), False, 'import torch\n'), ((2148, 2206), 'torch.Tensor', 'torch.Tensor', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])\n', (2160, 2206), False, 'import torch\n'), ((4295, 4312), 'torch.tensor', 'torch.tensor', (['std'], {}), '(std)\n', (4307, 4312), False, 'import torch\n'), ((4338, 4356), 'torch.tensor', 'torch.tensor', (['mean'], {}), '(mean)\n', (4350, 4356), False, 'import torch\n'), ((1530, 1547), 'torch.Tensor', 'torch.Tensor', (['std'], {}), '(std)\n', (1542, 1547), False, 'import torch\n'), ((1577, 1595), 'torch.Tensor', 'torch.Tensor', (['mean'], {}), '(mean)\n', (1589, 1595), False, 'import torch\n'), ((3697, 3718), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (3713, 3718), False, 'import torch\n'), ((3749, 3770), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (3765, 3770), False, 'import torch\n'), ((3926, 3948), 'torch.from_numpy', 'torch.from_numpy', (['imgs'], {}), '(imgs)\n', (3942, 3948), False, 'import torch\n')]
|
# MIT License
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ==============================================================================
import os
import numpy as np
import pytest
from astropy.io import fits
import morpheus_core.helpers.fits_helper as fh
import morpheus_core.tests.helpers as helper
@pytest.mark.unit
def test_open_file():
"""Tests morpheus_core.helpers.fits_helper.open_file"""
helper.setup()
sample_location = helper.make_sample_file()
expected_array = np.arange(100).reshape([10, 10])
hdul, actual_array = fh.open_file(sample_location)
np.testing.assert_array_equal(expected_array, actual_array)
helper.tear_down()
@pytest.mark.unit
def test_open_files():
"""Tests morpheus_core.helpers.fits_helper.open_file"""
helper.setup()
sample_location = helper.make_sample_file()
sample2_location = helper.make_sample_file2()
expected_array = np.arange(100).reshape([10, 10])
_, actual_arrays = fh.open_files([sample_location, sample2_location])
np.testing.assert_array_equal(expected_array, actual_arrays[0])
np.testing.assert_array_equal(expected_array, actual_arrays[1])
helper.tear_down()
@pytest.mark.unit
def test_dtype_to_bytes_per_value():
"""Tests morpheus_core.helpers.fits_helper.dtype_to_bytes_per_value"""
types = [np.uint8, np.int16, np.int32, np.float32, np.float64]
expected_bytes_per_value = [1, 2, 4, 4, 8]
actual_bytes_per_value = list(map(fh.dtype_to_bytes_per_value, types))
assert actual_bytes_per_value == expected_bytes_per_value
@pytest.mark.unit
def test_dtype_to_bytes_per_value_fails():
"""Tests morpheus_core.helpers.fits_helper.dtype_to_bytes_per_value"""
with pytest.raises(ValueError):
fh.dtype_to_bytes_per_value(np.bool)
@pytest.mark.unit
@pytest.mark.filterwarnings("ignore::UserWarning") # Ignore astropy warning
def test_create_file():
"""Tests morpheus_core.helpers.fits_helper.create_file"""
helper.setup()
shape = (100, 100)
tmp_out = os.path.join(helper.TMP_DIR, "test.fits")
fh.create_file(tmp_out, shape, np.float32)
actual = fits.getdata(tmp_out)
assert actual.shape == shape
helper.tear_down()
|
[
"morpheus_core.helpers.fits_helper.open_file",
"pytest.mark.filterwarnings",
"numpy.arange",
"morpheus_core.tests.helpers.make_sample_file2",
"morpheus_core.helpers.fits_helper.dtype_to_bytes_per_value",
"os.path.join",
"morpheus_core.tests.helpers.make_sample_file",
"astropy.io.fits.getdata",
"pytest.raises",
"morpheus_core.helpers.fits_helper.open_files",
"morpheus_core.tests.helpers.setup",
"morpheus_core.tests.helpers.tear_down",
"morpheus_core.helpers.fits_helper.create_file",
"numpy.testing.assert_array_equal"
] |
[((2846, 2895), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::UserWarning"""'], {}), "('ignore::UserWarning')\n", (2872, 2895), False, 'import pytest\n'), ((1447, 1461), 'morpheus_core.tests.helpers.setup', 'helper.setup', ([], {}), '()\n', (1459, 1461), True, 'import morpheus_core.tests.helpers as helper\n'), ((1484, 1509), 'morpheus_core.tests.helpers.make_sample_file', 'helper.make_sample_file', ([], {}), '()\n', (1507, 1509), True, 'import morpheus_core.tests.helpers as helper\n'), ((1591, 1620), 'morpheus_core.helpers.fits_helper.open_file', 'fh.open_file', (['sample_location'], {}), '(sample_location)\n', (1603, 1620), True, 'import morpheus_core.helpers.fits_helper as fh\n'), ((1626, 1685), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_array', 'actual_array'], {}), '(expected_array, actual_array)\n', (1655, 1685), True, 'import numpy as np\n'), ((1691, 1709), 'morpheus_core.tests.helpers.tear_down', 'helper.tear_down', ([], {}), '()\n', (1707, 1709), True, 'import morpheus_core.tests.helpers as helper\n'), ((1817, 1831), 'morpheus_core.tests.helpers.setup', 'helper.setup', ([], {}), '()\n', (1829, 1831), True, 'import morpheus_core.tests.helpers as helper\n'), ((1854, 1879), 'morpheus_core.tests.helpers.make_sample_file', 'helper.make_sample_file', ([], {}), '()\n', (1877, 1879), True, 'import morpheus_core.tests.helpers as helper\n'), ((1903, 1929), 'morpheus_core.tests.helpers.make_sample_file2', 'helper.make_sample_file2', ([], {}), '()\n', (1927, 1929), True, 'import morpheus_core.tests.helpers as helper\n'), ((2009, 2059), 'morpheus_core.helpers.fits_helper.open_files', 'fh.open_files', (['[sample_location, sample2_location]'], {}), '([sample_location, sample2_location])\n', (2022, 2059), True, 'import morpheus_core.helpers.fits_helper as fh\n'), ((2065, 2128), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_array', 'actual_arrays[0]'], {}), '(expected_array, actual_arrays[0])\n', (2094, 2128), True, 'import numpy as np\n'), ((2133, 2196), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_array', 'actual_arrays[1]'], {}), '(expected_array, actual_arrays[1])\n', (2162, 2196), True, 'import numpy as np\n'), ((2202, 2220), 'morpheus_core.tests.helpers.tear_down', 'helper.tear_down', ([], {}), '()\n', (2218, 2220), True, 'import morpheus_core.tests.helpers as helper\n'), ((3012, 3026), 'morpheus_core.tests.helpers.setup', 'helper.setup', ([], {}), '()\n', (3024, 3026), True, 'import morpheus_core.tests.helpers as helper\n'), ((3066, 3107), 'os.path.join', 'os.path.join', (['helper.TMP_DIR', '"""test.fits"""'], {}), "(helper.TMP_DIR, 'test.fits')\n", (3078, 3107), False, 'import os\n'), ((3112, 3154), 'morpheus_core.helpers.fits_helper.create_file', 'fh.create_file', (['tmp_out', 'shape', 'np.float32'], {}), '(tmp_out, shape, np.float32)\n', (3126, 3154), True, 'import morpheus_core.helpers.fits_helper as fh\n'), ((3169, 3190), 'astropy.io.fits.getdata', 'fits.getdata', (['tmp_out'], {}), '(tmp_out)\n', (3181, 3190), False, 'from astropy.io import fits\n'), ((3229, 3247), 'morpheus_core.tests.helpers.tear_down', 'helper.tear_down', ([], {}), '()\n', (3245, 3247), True, 'import morpheus_core.tests.helpers as helper\n'), ((2753, 2778), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2766, 2778), False, 'import pytest\n'), ((2788, 2824), 'morpheus_core.helpers.fits_helper.dtype_to_bytes_per_value', 'fh.dtype_to_bytes_per_value', 
(['np.bool'], {}), '(np.bool)\n', (2815, 2824), True, 'import morpheus_core.helpers.fits_helper as fh\n'), ((1532, 1546), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1541, 1546), True, 'import numpy as np\n'), ((1952, 1966), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1961, 1966), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Created by "Thieu" at 00:16, 15/03/2022 ----------%
# Email: <EMAIL> %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
from mealpy.bio_based import BBO
from mealpy.optimizer import Optimizer
import numpy as np
import pytest
@pytest.fixture(scope="module") # scope: Call only 1 time at the beginning
def problem():
def fitness_function(solution):
return np.sum(solution ** 2)
problem = {
"fit_func": fitness_function,
"lb": [-10, -15, -4, -2, -8],
"ub": [10, 15, 12, 8, 20],
"minmax": "min",
"log_to": None,
}
return problem
def test_OriginalBBO_results(problem):
epoch = 10
pop_size = 50
p_m = 0.01
elites = 2
model = BBO.OriginalBBO(problem, epoch, pop_size, p_m, elites)
best_position, best_fitness = model.solve()
assert isinstance(model, Optimizer)
assert isinstance(best_position, np.ndarray)
assert len(best_position) == len(problem["lb"])
def test_BaseBBO_results(problem):
epoch = 10
pop_size = 50
p_m = 0.01
elites = 2
model = BBO.BaseBBO(problem, epoch, pop_size, p_m, elites)
best_position, best_fitness = model.solve()
assert isinstance(model, Optimizer)
assert isinstance(best_position, np.ndarray)
assert len(best_position) == len(problem["lb"])
@pytest.mark.parametrize("problem, epoch, system_code",
[
(problem, None, 0),
(problem, "hello", 0),
(problem, -10, 0),
(problem, [10], 0),
(problem, (0, 9), 0),
(problem, 0, 0),
(problem, float("inf"), 0),
])
def test_epoch_BBO(problem, epoch, system_code):
pop_size = 50
p_m = 0.01
elites = 2
algorithms = [BBO.OriginalBBO, BBO.BaseBBO]
for algorithm in algorithms:
with pytest.raises(SystemExit) as e:
model = algorithm(problem, epoch, pop_size, p_m, elites)
assert e.type == SystemExit
assert e.value.code == system_code
@pytest.mark.parametrize("problem, pop_size, system_code",
[
(problem, None, 0),
(problem, "hello", 0),
(problem, -10, 0),
(problem, [10], 0),
(problem, (0, 9), 0),
(problem, 0, 0),
(problem, float("inf"), 0),
])
def test_pop_size_BBO(problem, pop_size, system_code):
epoch = 10
p_m = 0.01
elites = 2
algorithms = [BBO.OriginalBBO, BBO.BaseBBO]
for algorithm in algorithms:
with pytest.raises(SystemExit) as e:
model = algorithm(problem, epoch, pop_size, p_m, elites)
assert e.type == SystemExit
assert e.value.code == system_code
@pytest.mark.parametrize("problem, p_m, system_code",
[
(problem, None, 0),
(problem, "hello", 0),
(problem, -1.0, 0),
(problem, [10], 0),
(problem, (0, 9), 0),
(problem, 0, 0),
(problem, 1, 0),
(problem, 1.1, 0),
(problem, -0.01, 0),
])
def test_p_m_BBO(problem, p_m, system_code):
epoch = 10
pop_size = 50
elites = 2
algorithms = [BBO.OriginalBBO, BBO.BaseBBO]
for algorithm in algorithms:
with pytest.raises(SystemExit) as e:
model = algorithm(problem, epoch, pop_size, p_m, elites)
assert e.type == SystemExit
assert e.value.code == system_code
@pytest.mark.parametrize("problem, elites, system_code",
[
(problem, None, 0),
(problem, "hello", 0),
(problem, -1.0, 0),
(problem, [10], 0),
(problem, (0, 9), 0),
(problem, 1, 0),
(problem, 50, 0),
(problem, 100, 0),
(problem, 1.6, 0),
])
def test_elites_BBO(problem, elites, system_code):
epoch = 10
pop_size = 50
p_m = 0.01
algorithms = [BBO.OriginalBBO, BBO.BaseBBO]
for algorithm in algorithms:
with pytest.raises(SystemExit) as e:
model = algorithm(problem, epoch, pop_size, p_m, elites)
assert e.type == SystemExit
assert e.value.code == system_code
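# Hedged usage sketch (not part of the original tests): the same `problem` dictionary
# shape used by the fixture above, run directly against BBO outside of pytest.
def _bbo_demo():
    demo_problem = {
        "fit_func": lambda solution: np.sum(solution ** 2),
        "lb": [-10, -15, -4, -2, -8],
        "ub": [10, 15, 12, 8, 20],
        "minmax": "min",
        "log_to": None,
    }
    model = BBO.OriginalBBO(demo_problem, 10, 50, 0.01, 2)
    best_position, best_fitness = model.solve()
    return best_position, best_fitness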
|
[
"mealpy.bio_based.BBO.OriginalBBO",
"pytest.mark.parametrize",
"numpy.sum",
"pytest.raises",
"pytest.fixture",
"mealpy.bio_based.BBO.BaseBBO"
] |
[((485, 515), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (499, 515), False, 'import pytest\n'), ((3242, 3489), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""problem, p_m, system_code"""', "[(problem, None, 0), (problem, 'hello', 0), (problem, -1.0, 0), (problem, [\n 10], 0), (problem, (0, 9), 0), (problem, 0, 0), (problem, 1, 0), (\n problem, 1.1, 0), (problem, -0.01, 0)]"], {}), "('problem, p_m, system_code', [(problem, None, 0), (\n problem, 'hello', 0), (problem, -1.0, 0), (problem, [10], 0), (problem,\n (0, 9), 0), (problem, 0, 0), (problem, 1, 0), (problem, 1.1, 0), (\n problem, -0.01, 0)])\n", (3265, 3489), False, 'import pytest\n'), ((4160, 4408), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""problem, elites, system_code"""', "[(problem, None, 0), (problem, 'hello', 0), (problem, -1.0, 0), (problem, [\n 10], 0), (problem, (0, 9), 0), (problem, 1, 0), (problem, 50, 0), (\n problem, 100, 0), (problem, 1.6, 0)]"], {}), "('problem, elites, system_code', [(problem, None, 0),\n (problem, 'hello', 0), (problem, -1.0, 0), (problem, [10], 0), (problem,\n (0, 9), 0), (problem, 1, 0), (problem, 50, 0), (problem, 100, 0), (\n problem, 1.6, 0)])\n", (4183, 4408), False, 'import pytest\n'), ((965, 1019), 'mealpy.bio_based.BBO.OriginalBBO', 'BBO.OriginalBBO', (['problem', 'epoch', 'pop_size', 'p_m', 'elites'], {}), '(problem, epoch, pop_size, p_m, elites)\n', (980, 1019), False, 'from mealpy.bio_based import BBO\n'), ((1321, 1371), 'mealpy.bio_based.BBO.BaseBBO', 'BBO.BaseBBO', (['problem', 'epoch', 'pop_size', 'p_m', 'elites'], {}), '(problem, epoch, pop_size, p_m, elites)\n', (1332, 1371), False, 'from mealpy.bio_based import BBO\n'), ((626, 647), 'numpy.sum', 'np.sum', (['(solution ** 2)'], {}), '(solution ** 2)\n', (632, 647), True, 'import numpy as np\n'), ((2217, 2242), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (2230, 2242), False, 'import pytest\n'), ((3059, 3084), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (3072, 3084), False, 'import pytest\n'), ((3977, 4002), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (3990, 4002), False, 'import pytest\n'), ((4903, 4928), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (4916, 4928), False, 'import pytest\n')]
|
"""
Mask R-CNN
Train on the toy Balloon dataset and implement color splash effect.
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
------------------------------------------------------------
"""
import os
import sys
import json
import numpy as np
import skimage.draw
import skimage.io
import argparse
import matplotlib.pyplot as plt
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib
from mrcnn import visualize
# from mrcnn.model import MaskRCNN, log
from mrcnn import utils
# Weights path for unfollow_weights.h5; the raw-string "r" prefix is only needed for Windows-style backslash paths
UNFOLLOW_WEIGHTS_PATH = r"mask\logs\unfollow\unfollow_weights.h5"
dataset_dir = r'button'
# Image needs to be updated...
IMAGE = r'button\val\24.png'
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class ModelConfiguration(Config):
"""Base configuration class. CHANGE NUMBER OF CLASSES (NUM_CLASSES)
    You can leave most of the values at their defaults, but accuracy and
    runtime can be tuned by tweaking the values below.
"""
# give the configuration a recognizable name
NAME = "unfollow_model"
# number of classes ( add +1 for the background (BG))
NUM_CLASSES = 2 #NUMBER OF CLASSES!!!!
# gpu count
GPU_COUNT = 1
IMAGES_PER_GPU = 2
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
# Anchor stride
# If 1 then anchors are created for each cell in the backbone feature map.
# If 2, then anchors are created for every other cell, and so on.
RPN_ANCHOR_STRIDE = 1
# ROIs kept after non-maximum suppression (training and inference)
POST_NMS_ROIS_INFERENCE = 1000
# Input image resizing
# Generally, use the "square" resizing mode for predicting
# and it should work well in most cases. In this mode, images are scaled
# up such that the small side is = IMAGE_MIN_DIM, but ensuring that the
# scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is
# padded with zeros to make it a square so multiple images can be put
# in one batch.
# Available resizing modes:
# none, square, pad64, crop
IMAGE_RESIZE_MODE = "square"
IMAGE_MIN_DIM = 800
IMAGE_MAX_DIM = 1024
# Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4
# Changing this requires other changes in the code. See the WIKI for more
# details: https://github.com/matterport/Mask_RCNN/wiki
IMAGE_CHANNEL_COUNT = 3
    # Minimum probability value to accept a detected instance
    # ROIs below this threshold are skipped
    # NOTE: this overrides the 0.9 value assigned to DETECTION_MIN_CONFIDENCE above
    DETECTION_MIN_CONFIDENCE = 0.7
config = ModelConfiguration()
config.display()
############################################################
# Dataset
############################################################
class Model_Dataset(utils.Dataset):
# load the dataset definitions
def load_dataset(self, dataset_dir, subset):
# Add classes. .add_class(model name, class id number, name of class)
# MUST FILL IN ###### AS CLASS NAME
self.add_class("unfollow_model", 1, 'unfollow')
# Train or validation dataset?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
# dictionary of x and y coordinates of each region and region class name
annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
annotations = list(annotations.values())
        # keep only the annotations that actually contain regions
annotations = [a for a in annotations if a['regions']]
# Add images
for a in annotations:
            # dependent on the VIA version
if type(a['regions']) is dict:
# polygons are bboxes and objects are the class name
polygons = [r['shape_attributes'] for r in a['regions'].values()]
objects = [r['region_attributes'] for r in a['regions'].values()]
else:
polygons = [r['shape_attributes'] for r in a['regions']]
objects = [r['region_attributes'] for r in a['regions']]
            # objects holds the per-region class attributes exported from VIA
            num_ids = [list(n.values()) for n in objects]
            # single 'unfollow' class: assign class id 1 to every region
            # (change this mapping if the dataset uses more or fewer classes)
            num_ids = [1] * len(polygons)
# load_mask() needs the image size to convert polygons to masks.
# Not provided in annotation json
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
# loading the dataset with image information to be used in load_mask()
self.add_image(
"unfollow_model",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
num_ids=num_ids,
width=width, height=height,
polygons=polygons)
def load_mask(self, image_id):
# obtains info for each image in dataset
info = self.image_info[image_id]
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
mask = np.zeros([info["height"],
info["width"],
len(info["polygons"])],
dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
# one makes the transparent mask
mask[rr, cc, i] = 1
# Map class names to class IDs.
num_ids = info['num_ids']
num_ids = np.array(num_ids, dtype=np.int32)
return mask.astype(np.bool), num_ids
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "unfollow_model":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
############################################################
# Command line
############################################################
if __name__ == '__main__':
# # Parse command line arguments
# parser = argparse.ArgumentParser(
# description='Train Mask R-CNN to detect unfollow button on twitter.')
# parser.add_argument('--dataset', required=False,
# metavar= dataset_dir,
# help='Only val dataset available')
# parser.add_argument('--weights', required = False,
# metavar = UNFOLLOW_WEIGHTS_PATH ,
# help="Path to weights .h5 file, only weights_unfollow.h5 available")
# parser.add_argument('--logs', required=False,
# default = DEFAULT_LOGS_DIR,
# metavar="/path/to/logs/",
# help='Logs and checkpoints directory (default=logs/)')
# # IMAGE may be required change to True
# parser.add_argument('--image', required=False,
# metavar="path or URL to image",
# help='Image to apply the color splash effect on')
# args = parser.parse_args()
#
# print("Weights: ", args.weights)
# print("Dataset: ", args.dataset)
# print("Logs: ", args.logs)
# Configurations
class InferenceConfig(ModelConfiguration):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
# can be removed to not show configuration
config.display()
# Create model
model = modellib.MaskRCNN(mode="inference",
config=config,
model_dir=DEFAULT_LOGS_DIR)
# Load weights
print("Loading weights ", UNFOLLOW_WEIGHTS_PATH)
model.load_weights(UNFOLLOW_WEIGHTS_PATH, by_name=True)
def get_ax(rows=1, cols=1, size=16):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Adjust the size attribute to control how big to render images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
# Load dataset
dataset = Model_Dataset()
dataset.load_dataset(dataset_dir, subset = 'val')
# Must call before using the dataset
dataset.prepare()
# run detection
image = skimage.io.imread(IMAGE)
# Remove alpha channel, if it has one
if image.shape[-1] == 4:
image = image[..., :3]
# Run object detection
results = model.detect([image], verbose=0)
# Display results
ax = get_ax(1)
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'], ax=ax,
title="Predictions")
#Extract the first bbox
print (r['rois'][0])
# has the format of [y1, x1, y2, x2]
#############################################################################
# Evaluation/ Inference
#############################################################################
# Load dataset
dataset = Model_Dataset()
dataset.load_dataset(dataset_dir, subset = 'val')
# Must call before using the dataset
dataset.prepare()
class InferenceConfig(ModelConfiguration):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=UNFOLLOW_WEIGHTS_PATH)
def get_ax(rows=1, cols=1, size=16):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Adjust the size attribute to control how big to render images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
print("Loading weights from ", UNFOLLOW_WEIGHTS_PATH)
model.load_weights(UNFOLLOW_WEIGHTS_PATH, by_name=True)
# run detection
image = skimage.io.imread(IMAGE)
# Remove alpha channel, if it has one
if image.shape[-1] == 4:
image = image[..., :3]
# Run object detection
results = model.detect([image], verbose=0)
# Display results
ax = get_ax(1)
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'], ax=ax,
title="Predictions")
#Extract the first bbox
print (r['rois'][0])
# has the format of [y1, x1, y2, x2]
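    # Hedged illustration (not part of the original script): turning the detected
    # [y1, x1, y2, x2] box into pixel width/height and a crop of the input image.
    if len(r['rois']) > 0:
        y1, x1, y2, x2 = r['rois'][0]
        print(f"first box: {x2 - x1}x{y2 - y1} px at (x={x1}, y={y1})")
        button_crop = image[y1:y2, x1:x2]  # numpy crop of the detected region
        print("crop shape:", button_crop.shape)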
|
[
"mrcnn.model.MaskRCNN",
"os.path.join",
"numpy.array",
"mrcnn.visualize.display_instances",
"os.path.abspath",
"sys.path.append",
"matplotlib.pyplot.subplots"
] |
[((541, 566), 'os.path.abspath', 'os.path.abspath', (['"""../../"""'], {}), "('../../')\n", (556, 566), False, 'import os\n'), ((590, 615), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (605, 615), False, 'import sys\n'), ((1185, 1215), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (1197, 1215), False, 'import os\n'), ((10704, 10802), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'inference_config', 'model_dir': 'UNFOLLOW_WEIGHTS_PATH'}), "(mode='inference', config=inference_config, model_dir=\n UNFOLLOW_WEIGHTS_PATH)\n", (10721, 10802), True, 'from mrcnn import model as modellib\n'), ((11591, 11730), 'mrcnn.visualize.display_instances', 'visualize.display_instances', (['image', "r['rois']", "r['masks']", "r['class_ids']", 'dataset.class_names', "r['scores']"], {'ax': 'ax', 'title': '"""Predictions"""'}), "(image, r['rois'], r['masks'], r['class_ids'],\n dataset.class_names, r['scores'], ax=ax, title='Predictions')\n", (11618, 11730), False, 'from mrcnn import visualize\n'), ((8669, 8747), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'config', 'model_dir': 'DEFAULT_LOGS_DIR'}), "(mode='inference', config=config, model_dir=DEFAULT_LOGS_DIR)\n", (8686, 8747), True, 'from mrcnn import model as modellib\n'), ((9867, 10006), 'mrcnn.visualize.display_instances', 'visualize.display_instances', (['image', "r['rois']", "r['masks']", "r['class_ids']", 'dataset.class_names', "r['scores']"], {'ax': 'ax', 'title': '"""Predictions"""'}), "(image, r['rois'], r['masks'], r['class_ids'],\n dataset.class_names, r['scores'], ax=ax, title='Predictions')\n", (9894, 10006), False, 'from mrcnn import visualize\n'), ((11135, 11195), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(size * cols, size * rows)'}), '(rows, cols, figsize=(size * cols, size * rows))\n', (11147, 11195), True, 'import matplotlib.pyplot as plt\n'), ((3874, 3907), 'os.path.join', 'os.path.join', (['dataset_dir', 'subset'], {}), '(dataset_dir, subset)\n', (3886, 3907), False, 'import os\n'), ((6541, 6574), 'numpy.array', 'np.array', (['num_ids'], {'dtype': 'np.int32'}), '(num_ids, dtype=np.int32)\n', (6549, 6574), True, 'import numpy as np\n'), ((9283, 9343), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(size * cols, size * rows)'}), '(rows, cols, figsize=(size * cols, size * rows))\n', (9295, 9343), True, 'import matplotlib.pyplot as plt\n'), ((5195, 5235), 'os.path.join', 'os.path.join', (['dataset_dir', "a['filename']"], {}), "(dataset_dir, a['filename'])\n", (5207, 5235), False, 'import os\n'), ((4030, 4079), 'os.path.join', 'os.path.join', (['dataset_dir', '"""via_region_data.json"""'], {}), "(dataset_dir, 'via_region_data.json')\n", (4042, 4079), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 22:12:12 2020
@author: vxr131730
"""
import glob
import os
import sys
import random
import time
import numpy as np
import cv2
from test import *
from casadi import *
from numpy import random as npr
from casadi.tools import *
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
IM_WIDTH = 640
IM_HEIGHT = 480
actor_list = []
try:
client = carla.Client("localhost",2000)
client.set_timeout(10.0)
world = client.get_world()
blueprint_library = world.get_blueprint_library()
vehicle_bp = blueprint_library.filter("model3")[0]
startpoint = world.get_map().get_spawn_points()[195] #128 195
world.debug.draw_string(startpoint.location, 'O', draw_shadow=False,
color=carla.Color(r=255, g=0, b=0), life_time=50,
persistent_lines=True)
vehicle = world.spawn_actor(vehicle_bp, startpoint)
# ---------------------Trajectory-----------------
wplist = world.get_map().get_topology()
wps = wplist[270][0].next_until_lane_end(5.0) # 22 (195:270)
for w in wps:
world.debug.draw_string(w.transform.location, 'O', draw_shadow=False,
color=carla.Color(r=0, g=255, b=0), life_time=20.0,
persistent_lines=True)
endpoint = wps[0].transform
actor_list.append(vehicle)
# -------------------MPC--------------------
# SAMPLING TIME
T = 0.08 #0.08 # (s)
# PREDICTION HORIZON
N = 50 # 12 5
# STATES
x = SX.sym('x') # x coordinate
y = SX.sym('y') # y coordinate
theta = SX.sym('theta') # vehicle orientation
    v = SX.sym('v') # longitudinal velocity
states = vertcat(x,y,theta,v)
n_states = 4 # no. of states
# CONTROL
thr = SX.sym('thr') # Throttle
strang = SX.sym('strang') # steering angle
controls = vertcat(thr,strang)
n_controls = 2
# CONTROL BOUNDS
minthr = 0.0 # minimum throttle
maxthr = 0.5 # maximum throttle
minstrang = -1 # minimum steering angle
maxstrang = 1 # maximum steering angle
    # VEHICLE MODEL PARAMETERS
    l_r = 1.415 # distance from center of gravity to rear wheels
    l_f = 1.6 # distance from center of gravity to front wheels
    # SYMBOLIC REPRESENTATION OF THE DERIVATIVE OF THE STATES BASED ON THE BICYCLE MODEL
rhs = vertcat((v*cos(theta+(atan((l_r*tan(strang*1.22))/(l_f + l_r))))),
v*sin(theta+(atan((l_r*tan(strang*1.22))/(l_f + l_r)))),
((v/l_r)*(sin(atan((l_r*tan(strang*1.22))/(l_f + l_r))))),
thr*16)
# STATE PREDICTION - SYMBOLIC FUNCTION OF CURRENT STATE AND CONTROL INPUTS AT EACH TIME STEP OF HORIZON PERIOD
f = Function('f', [states, controls], [rhs])
U = SX.sym('U', n_controls, N)
P = SX.sym('P', n_states + n_states)
X = SX.sym('X', n_states, (N+1))
X[:,0] = P[0:4]
for k in range(N):
st = X[:,k]
con = U[:,k]
f_value = f(st, con)
st_next = st + (T*f_value)
X[:,k+1] = st_next
ff = Function('ff', [U,P], [X])
    # SYMBOLIC REPRESENTATION OF THE OBJECTIVE FUNCTION
obj = 0
g = SX.sym('g',4,(N+1))
Q = diag(SX([3600,3600,1900,2])) #195 [3600,3600,1900,2] [3100,3100,1900,2] [2700,2700,2000,2]
R = diag(SX([0,8000])) #195 [0,7000]
for k in range(N):
st = X[:,k]
con = U[:,k]
obj = obj + mtimes(mtimes((st - P[4:8]).T,Q), (st - P[4:8])) + mtimes(mtimes(con.T, R), con)
# STATES BOUNDS/CONSTRAINTS
for k in range(0,N+1):
g[0,k] = X[0,k]
g[1,k] = X[1,k]
g[2,k] = X[2,k]
g[3,k] = X[3,k]
g = reshape(g, 4*(N+1), 1)
# CREATING A OPTIMIZATION SOLVER IN CASADI
OPT_variables = reshape(U, 2*N, 1)
nlp_prob = {'f':obj, 'x':OPT_variables, 'g':g, 'p':P}
opts = {'ipopt.max_iter':100,
'ipopt.print_level':0,
'print_time':0,
'ipopt.acceptable_tol':1e-8,
'ipopt.acceptable_obj_change_tol':1e-6}
solver = nlpsol('solver','ipopt', nlp_prob, opts) # solver
# IMPLEMENTING CONTROL BOUNDS
lbx = []
ubx = []
for i in range(2*N):
if i%2==0:
lbx.append(minthr)
ubx.append(maxthr)
else:
lbx.append(minstrang)
ubx.append(maxstrang)
lbx = np.transpose(lbx)
ubx = np.transpose(ubx)
# IMPLEMENTING STATE BOUNDS
lbgv = []
ubgv = []
for i in range(0,4*(N+1),4):
lbgv.append(-300)
lbgv.append(-300)
lbgv.append(0)
lbgv.append(0)
ubgv.append(300)
ubgv.append(300)
ubgv.append(405)
ubgv.append(15)
u0 = (DM.zeros(2*N,1))
u_cl = []
def contheta(thet):
if thet < 0:
thet = 360 - abs(thet)
return thet
x0 = np.transpose([startpoint.location.x, startpoint.location.y, contheta(startpoint.rotation.yaw), 0])
xs = np.transpose([endpoint.location.x, endpoint.location.y, contheta(startpoint.rotation.yaw), 3]) #-90.156235*pi/180
c = 0
p = np.transpose([startpoint.location.x,
startpoint.location.y,
contheta(startpoint.rotation.yaw),
0,
endpoint.location.x,
endpoint.location.y,
contheta(startpoint.rotation.yaw),
3])
while c < len(wps):
if (norm_2(x0[0:2]-p[4:6]))<3:
c += 1
endpoint = wps[c].transform
world.debug.draw_string(endpoint.location, 'O', draw_shadow=False,
color=carla.Color(r=0, g=0, b=255), life_time=3,
persistent_lines=True)
print(x0,"---",p[4:8])
u0 = reshape(u0, 2*N,1)
p[0:4] = x0
p[4:8] = [endpoint.location.x, endpoint.location.y, contheta(endpoint.rotation.yaw), 3]#6
sol = solver(x0=u0, lbx=lbx, ubx=ubx, lbg=lbgv, ubg=ubgv, p=p)
u = reshape(sol['x'].T, 2, N).T
ff_value = ff(u.T, p)
for k in range(N):
world.debug.draw_string(carla.Location(x=float(ff_value[0,k]),
y=float(ff_value[1,k]),
z=0.0),
'O', draw_shadow=False,
color=carla.Color(r=255, g=0, b=0), life_time=0.01,
persistent_lines=True)
u_cl.append(u[0,:])
vehicle.apply_control(carla.VehicleControl(throttle =float(u[0,0]) , steer = float(u[0,1])))
u_theta = vehicle.get_transform().rotation.yaw
x0 = np.transpose([vehicle.get_transform().location.x,
vehicle.get_transform().location.y,
contheta(u_theta),
norm_2([vehicle.get_velocity().x,
vehicle.get_velocity().y])])
u0 = reshape(u0, N, 2)
u0[0:N-1,:] = u[1:N,:]
u0[N-1,:]=u[N-1,:]
time.sleep(10)
finally:
for actor in actor_list:
actor.destroy()
print("All cleaned up!")
|
[
"time.sleep",
"carla.Client",
"carla.Color",
"numpy.transpose",
"glob.glob"
] |
[((633, 664), 'carla.Client', 'carla.Client', (['"""localhost"""', '(2000)'], {}), "('localhost', 2000)\n", (645, 664), False, 'import carla\n'), ((4756, 4773), 'numpy.transpose', 'np.transpose', (['lbx'], {}), '(lbx)\n', (4768, 4773), True, 'import numpy as np\n'), ((4785, 4802), 'numpy.transpose', 'np.transpose', (['ubx'], {}), '(ubx)\n', (4797, 4802), True, 'import numpy as np\n'), ((7665, 7679), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (7675, 7679), False, 'import time\n'), ((323, 481), 'glob.glob', 'glob.glob', (["('../carla/dist/carla-*%d.%d-%s.egg' % (sys.version_info.major, sys.\n version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))"], {}), "('../carla/dist/carla-*%d.%d-%s.egg' % (sys.version_info.major,\n sys.version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64')\n )\n", (332, 481), False, 'import glob\n'), ((1025, 1053), 'carla.Color', 'carla.Color', ([], {'r': '(255)', 'g': '(0)', 'b': '(0)'}), '(r=255, g=0, b=0)\n', (1036, 1053), False, 'import carla\n'), ((1500, 1528), 'carla.Color', 'carla.Color', ([], {'r': '(0)', 'g': '(255)', 'b': '(0)'}), '(r=0, g=255, b=0)\n', (1511, 1528), False, 'import carla\n'), ((6136, 6164), 'carla.Color', 'carla.Color', ([], {'r': '(0)', 'g': '(0)', 'b': '(255)'}), '(r=0, g=0, b=255)\n', (6147, 6164), False, 'import carla\n'), ((6943, 6971), 'carla.Color', 'carla.Color', ([], {'r': '(255)', 'g': '(0)', 'b': '(0)'}), '(r=255, g=0, b=0)\n', (6954, 6971), False, 'import carla\n')]
|
"""
Collection of functions to calculate lag correlations
and significance following Ebisuzaki 97 JCLIM
"""
def phaseran(recblk, nsurr,ax):
""" Phaseran by <NAME>: http://www.mathworks.nl/matlabcentral/fileexchange/32621-phase-randomization/content/phaseran.m
Args:
recblk (2D array): Row: time sample. Column: recording.
An odd number of time samples (height) is expected.
If that is not the case, recblock is reduced by 1 sample before the surrogate data is created.
The class must be double and it must be nonsparse.
nsurr (int): is the number of image block surrogates that you want to generate.
Returns:
surrblk: 3D multidimensional array image block with the surrogate datasets along the third dimension
Reference:
<NAME>., <NAME>. Generating Surrogate Data for Time Series with Several Simultaneously Measured Variables (1994)
Physical Review Letters, Vol 73, Number 7
NOTE: Extended to xy data and converted to python by <NAME>
"""
import numpy as np
from ds21grl.misc import AxRoll
# make sure time dimension is axis=0
recblk = AxRoll(recblk,ax)
# Get time length
nfrms = recblk.shape[0]
# force data to have odd time length
if nfrms % 2 == 0:
nfrms = nfrms-1
recblk = recblk[0:nfrms]
# define fft frequency intervals
len_ser = int((nfrms-1)/2)
interv1 = np.arange(1, len_ser+1)
interv2 = np.arange(len_ser+1, nfrms)
# Fourier transform of the original dataset
fft_recblk = np.fft.fft(recblk,axis=0)
# Create nsurr timeseries of random numbers (0,1)
# Also tile fft array for later
if np.ndim(recblk) == 1:
ph_rnd = np.random.rand(len_ser,nsurr)
fft_recblk_surr = np.tile(fft_recblk[None,:],(nsurr,1))
elif np.ndim(recblk) == 2:
ph_rnd = np.random.rand(len_ser,recblk.shape[1],nsurr)
fft_recblk_surr = np.tile(fft_recblk[None,:],(nsurr,1,1))
elif np.ndim(recblk) == 3:
ph_rnd = np.random.rand(len_ser,recblk.shape[1],recblk.shape[2],nsurr)
fft_recblk_surr = np.tile(fft_recblk[None,:],(nsurr,1,1,1))
fft_recblk_surr = np.moveaxis(fft_recblk_surr,0,-1)
# Create the random phases for all the time series
ph_interv1 = np.exp(2*np.pi*1j*ph_rnd)
ph_interv2 = np.conj(np.flipud(ph_interv1))
# Randomize all the time series simultaneously
fft_recblk_surr[interv1,:] = fft_recblk_surr[interv1,:] * ph_interv1
fft_recblk_surr[interv2,:] = fft_recblk_surr[interv2,:] * ph_interv2
# Inverse transform
surrblk = np.real(np.fft.ifft(fft_recblk_surr,axis=0))
return surrblk
def remove_mean(data,ax):
"""
    Removes the mean, computed along the given axis,
    from the entire data array
"""
import numpy as np
from ds21grl.misc import AxRoll
if np.ndim(data) == 1:
data = data - np.mean(data)
else:
data = AxRoll(data,ax)
mean = np.mean(data,axis=ax)
for i in range(0,data.shape[ax]):
data[i,:] = data[i,:] - mean[:]
data = AxRoll(data,ax,invert=True)
return data
def cross_correlate_ndim(x,y,maxlag,ax):
"""
Calculates lag cross-correlation
between two n dim arrays along a specified axis.
Truncates to +-maxlag
NOTE: x and y arrays must be same dimensions
"""
import numpy as np
from scipy import signal
from ds21grl.misc import AxRoll
# put lag correlation axis on axis=0
x = AxRoll(x,ax)
y = AxRoll(y,ax)
# center time series
x = remove_mean(x,0)
y = remove_mean(y,0)
# calc cross correlation
corr = signal.fftconvolve(x, np.flip(y,axis=0), mode='full', axes=0)
corr = corr/x.shape[0]/np.std(x,axis=0)/np.std(y,axis=0)
# extract desired lags
temp1 = np.arange(-(x.shape[0]-1),0,1)
temp2 = np.arange(0,x.shape[0],1)
lag = np.concatenate((temp1, temp2), axis=0)
index = (lag >= -1*maxlag) & (lag <= maxlag)
lag = lag[index]
if np.ndim(x) > 1:
corr = corr[index,:]
else:
corr = corr[index]
return corr,lag
def cross_correlate_ndim_sig(x1,x2,maxlag,nbs,sigthresh,ax):
"""
Wrapper for cross_correlate_ndim. Also calculates
significance following randomized phase procedure from Ebisuzaki 97.
Significant = 1 and not significant = 0.
NOTE: x and y arrays must be same dimensions
"""
import numpy as np
from ds21grl.misc import AxRoll
# make time dimension axis=0
x1 = AxRoll(x1,ax)
x2 = AxRoll(x2,ax)
# force timeseries to be odd
# (because of phaseran fxn)
if x1.shape[ax] % 2 == 0:
x1 = x1[0:-1]
x2 = x2[0:-1]
# calculate lag correlation
[corr,lag] = cross_correlate_ndim(x1,x2,maxlag,ax)
# calculate boostrapped time series with
# randomized phases
x2 = phaseran(x2,nbs,ax)
if np.ndim(x1) == 3:
x1 = np.tile(x1[None,:],(nbs,1,1,1))
elif np.ndim(x1) == 2:
x1 = np.tile(x1[None,:],(nbs,1,1))
elif np.ndim(x1) == 1:
x1 = np.tile(x1[None,:],(nbs,1))
x1 = np.moveaxis(x1,0,-1) # x1 must have same shape as x2
[corr_bs,lag] = cross_correlate_ndim(x1,x2,maxlag,ax)
# calculate significant correlations (two sided test)
# using PDF of bootstrapped correlations
sig = np.zeros((corr.shape))
ptile1 = np.percentile(corr_bs,(100-sigthresh)/2,axis=-1)
ptile2 = np.percentile(corr_bs,sigthresh+(100-sigthresh)/2,axis=-1)
index = (corr > ptile1) & (corr < ptile2)
sig[index] = 1
return corr,sig,lag
def write_yt_daily(corr,sig,lag,filename,dir_out,dim,write2file):
"""
Writes yt lag correlation data to file
"""
import numpy as np
import xarray as xr
if write2file == 1:
output = xr.Dataset(data_vars={'corr': (('lag','lat'), corr.astype(np.float32)),
'sig': (('lag','lat'), sig.astype(np.float32))},
coords={'lag': lag,'lat': dim.lat})
output.corr.attrs['units'] = 'unitless'
output.sig.attrs['units'] = 'unitless'
output.to_netcdf(dir_out + filename)
return
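# Hedged usage sketch (not part of the original module): lag correlation between two
# noisy sinusoids shifted by a known lag, with Ebisuzaki-style phase-randomized
# significance. The shift of 5 samples should show up as the peak-correlation lag.
def _lag_correlation_demo():
    import numpy as np
    n = 501  # odd length suits phaseran
    t = np.arange(n)
    x = np.sin(2 * np.pi * t / 50) + 0.5 * np.random.randn(n)
    y = np.roll(x, 5) + 0.5 * np.random.randn(n)  # y lags x by 5 samples
    corr, sig, lag = cross_correlate_ndim_sig(x, y, maxlag=20, nbs=200, sigthresh=95, ax=0)
    return lag[np.argmax(corr)], sig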
|
[
"numpy.tile",
"numpy.mean",
"numpy.flip",
"numpy.random.rand",
"numpy.flipud",
"numpy.fft.fft",
"numpy.ndim",
"numpy.exp",
"numpy.zeros",
"ds21grl.misc.AxRoll",
"numpy.concatenate",
"numpy.std",
"numpy.moveaxis",
"numpy.percentile",
"numpy.fft.ifft",
"numpy.arange"
] |
[((1166, 1184), 'ds21grl.misc.AxRoll', 'AxRoll', (['recblk', 'ax'], {}), '(recblk, ax)\n', (1172, 1184), False, 'from ds21grl.misc import AxRoll\n'), ((1453, 1478), 'numpy.arange', 'np.arange', (['(1)', '(len_ser + 1)'], {}), '(1, len_ser + 1)\n', (1462, 1478), True, 'import numpy as np\n'), ((1491, 1520), 'numpy.arange', 'np.arange', (['(len_ser + 1)', 'nfrms'], {}), '(len_ser + 1, nfrms)\n', (1500, 1520), True, 'import numpy as np\n'), ((1585, 1611), 'numpy.fft.fft', 'np.fft.fft', (['recblk'], {'axis': '(0)'}), '(recblk, axis=0)\n', (1595, 1611), True, 'import numpy as np\n'), ((2233, 2268), 'numpy.moveaxis', 'np.moveaxis', (['fft_recblk_surr', '(0)', '(-1)'], {}), '(fft_recblk_surr, 0, -1)\n', (2244, 2268), True, 'import numpy as np\n'), ((2340, 2373), 'numpy.exp', 'np.exp', (['(2 * np.pi * 1.0j * ph_rnd)'], {}), '(2 * np.pi * 1.0j * ph_rnd)\n', (2346, 2373), True, 'import numpy as np\n'), ((3596, 3609), 'ds21grl.misc.AxRoll', 'AxRoll', (['x', 'ax'], {}), '(x, ax)\n', (3602, 3609), False, 'from ds21grl.misc import AxRoll\n'), ((3617, 3630), 'ds21grl.misc.AxRoll', 'AxRoll', (['y', 'ax'], {}), '(y, ax)\n', (3623, 3630), False, 'from ds21grl.misc import AxRoll\n'), ((3923, 3957), 'numpy.arange', 'np.arange', (['(-(x.shape[0] - 1))', '(0)', '(1)'], {}), '(-(x.shape[0] - 1), 0, 1)\n', (3932, 3957), True, 'import numpy as np\n'), ((3967, 3994), 'numpy.arange', 'np.arange', (['(0)', 'x.shape[0]', '(1)'], {}), '(0, x.shape[0], 1)\n', (3976, 3994), True, 'import numpy as np\n'), ((4006, 4044), 'numpy.concatenate', 'np.concatenate', (['(temp1, temp2)'], {'axis': '(0)'}), '((temp1, temp2), axis=0)\n', (4020, 4044), True, 'import numpy as np\n'), ((4756, 4770), 'ds21grl.misc.AxRoll', 'AxRoll', (['x1', 'ax'], {}), '(x1, ax)\n', (4762, 4770), False, 'from ds21grl.misc import AxRoll\n'), ((4779, 4793), 'ds21grl.misc.AxRoll', 'AxRoll', (['x2', 'ax'], {}), '(x2, ax)\n', (4785, 4793), False, 'from ds21grl.misc import AxRoll\n'), ((5354, 5376), 'numpy.moveaxis', 'np.moveaxis', (['x1', '(0)', '(-1)'], {}), '(x1, 0, -1)\n', (5365, 5376), True, 'import numpy as np\n'), ((5587, 5607), 'numpy.zeros', 'np.zeros', (['corr.shape'], {}), '(corr.shape)\n', (5595, 5607), True, 'import numpy as np\n'), ((5628, 5682), 'numpy.percentile', 'np.percentile', (['corr_bs', '((100 - sigthresh) / 2)'], {'axis': '(-1)'}), '(corr_bs, (100 - sigthresh) / 2, axis=-1)\n', (5641, 5682), True, 'import numpy as np\n'), ((5695, 5761), 'numpy.percentile', 'np.percentile', (['corr_bs', '(sigthresh + (100 - sigthresh) / 2)'], {'axis': '(-1)'}), '(corr_bs, sigthresh + (100 - sigthresh) / 2, axis=-1)\n', (5708, 5761), True, 'import numpy as np\n'), ((1709, 1724), 'numpy.ndim', 'np.ndim', (['recblk'], {}), '(recblk)\n', (1716, 1724), True, 'import numpy as np\n'), ((1757, 1787), 'numpy.random.rand', 'np.random.rand', (['len_ser', 'nsurr'], {}), '(len_ser, nsurr)\n', (1771, 1787), True, 'import numpy as np\n'), ((1813, 1853), 'numpy.tile', 'np.tile', (['fft_recblk[None, :]', '(nsurr, 1)'], {}), '(fft_recblk[None, :], (nsurr, 1))\n', (1820, 1853), True, 'import numpy as np\n'), ((2391, 2412), 'numpy.flipud', 'np.flipud', (['ph_interv1'], {}), '(ph_interv1)\n', (2400, 2412), True, 'import numpy as np\n'), ((2660, 2696), 'numpy.fft.ifft', 'np.fft.ifft', (['fft_recblk_surr'], {'axis': '(0)'}), '(fft_recblk_surr, axis=0)\n', (2671, 2696), True, 'import numpy as np\n'), ((2930, 2943), 'numpy.ndim', 'np.ndim', (['data'], {}), '(data)\n', (2937, 2943), True, 'import numpy as np\n'), ((3011, 3027), 'ds21grl.misc.AxRoll', 'AxRoll', (['data', 'ax'], 
{}), '(data, ax)\n', (3017, 3027), False, 'from ds21grl.misc import AxRoll\n'), ((3042, 3064), 'numpy.mean', 'np.mean', (['data'], {'axis': 'ax'}), '(data, axis=ax)\n', (3049, 3064), True, 'import numpy as np\n'), ((3165, 3194), 'ds21grl.misc.AxRoll', 'AxRoll', (['data', 'ax'], {'invert': '(True)'}), '(data, ax, invert=True)\n', (3171, 3194), False, 'from ds21grl.misc import AxRoll\n'), ((3780, 3798), 'numpy.flip', 'np.flip', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (3787, 3798), True, 'import numpy as np\n'), ((3865, 3882), 'numpy.std', 'np.std', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (3871, 3882), True, 'import numpy as np\n'), ((4126, 4136), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (4133, 4136), True, 'import numpy as np\n'), ((5135, 5146), 'numpy.ndim', 'np.ndim', (['x1'], {}), '(x1)\n', (5142, 5146), True, 'import numpy as np\n'), ((5166, 5202), 'numpy.tile', 'np.tile', (['x1[None, :]', '(nbs, 1, 1, 1)'], {}), '(x1[None, :], (nbs, 1, 1, 1))\n', (5173, 5202), True, 'import numpy as np\n'), ((1860, 1875), 'numpy.ndim', 'np.ndim', (['recblk'], {}), '(recblk)\n', (1867, 1875), True, 'import numpy as np\n'), ((1912, 1959), 'numpy.random.rand', 'np.random.rand', (['len_ser', 'recblk.shape[1]', 'nsurr'], {}), '(len_ser, recblk.shape[1], nsurr)\n', (1926, 1959), True, 'import numpy as np\n'), ((1984, 2027), 'numpy.tile', 'np.tile', (['fft_recblk[None, :]', '(nsurr, 1, 1)'], {}), '(fft_recblk[None, :], (nsurr, 1, 1))\n', (1991, 2027), True, 'import numpy as np\n'), ((2972, 2985), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2979, 2985), True, 'import numpy as np\n'), ((3848, 3865), 'numpy.std', 'np.std', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (3854, 3865), True, 'import numpy as np\n'), ((5207, 5218), 'numpy.ndim', 'np.ndim', (['x1'], {}), '(x1)\n', (5214, 5218), True, 'import numpy as np\n'), ((5238, 5271), 'numpy.tile', 'np.tile', (['x1[None, :]', '(nbs, 1, 1)'], {}), '(x1[None, :], (nbs, 1, 1))\n', (5245, 5271), True, 'import numpy as np\n'), ((2033, 2048), 'numpy.ndim', 'np.ndim', (['recblk'], {}), '(recblk)\n', (2040, 2048), True, 'import numpy as np\n'), ((2081, 2145), 'numpy.random.rand', 'np.random.rand', (['len_ser', 'recblk.shape[1]', 'recblk.shape[2]', 'nsurr'], {}), '(len_ser, recblk.shape[1], recblk.shape[2], nsurr)\n', (2095, 2145), True, 'import numpy as np\n'), ((2169, 2215), 'numpy.tile', 'np.tile', (['fft_recblk[None, :]', '(nsurr, 1, 1, 1)'], {}), '(fft_recblk[None, :], (nsurr, 1, 1, 1))\n', (2176, 2215), True, 'import numpy as np\n'), ((5277, 5288), 'numpy.ndim', 'np.ndim', (['x1'], {}), '(x1)\n', (5284, 5288), True, 'import numpy as np\n'), ((5308, 5338), 'numpy.tile', 'np.tile', (['x1[None, :]', '(nbs, 1)'], {}), '(x1[None, :], (nbs, 1))\n', (5315, 5338), True, 'import numpy as np\n')]
|
import argparse
import os
import mlflow
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from mlflow import log_metric, log_param, get_artifact_uri
from skimage.io import imsave
from sklearn.model_selection import ParameterGrid
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataset import TomoDetectionDataset as Dataset
from dense_yolo import DenseYOLO
from loss import objectness_module, LocalizationLoss
from sampler import TomoBatchSampler
from transform import transforms
def main(args):
torch.backends.cudnn.benchmark = True
device = torch.device("cpu" if not torch.cuda.is_available() else args.device)
loader_train, loader_valid = data_loaders(args)
loaders = {"train": loader_train, "valid": loader_valid}
hparams_dict = {
"block_config": [(1, 3, 2, 6, 4), (2, 6, 4, 12, 8)],
"num_init_features": [8, 16],
"growth_rate": [8, 16],
"bn_size": [2, 4],
}
hparams = list(ParameterGrid(hparams_dict)) # 16 configs
loss_params_dict = [
{"loss": ["CE", "weighted-CE"], "alpha": [0.25, 0.5, 1.0]}, # 6 configs
{"loss": ["focal"], "alpha": [0.25, 0.5, 1.0], "gamma": [0.5, 1.0, 2.0]}, # 9 configs
{
"loss": ["reduced-focal"],
"alpha": [0.25, 0.5, 1.0],
"gamma": [0.5, 1.0, 2.0],
"reduce_th": [0.5],
} # 9 configs
] # 24 configs
loss_params = list(ParameterGrid(loss_params_dict))
loss_params = loss_params * 2 # 48 configs
try:
mlflow.set_tracking_uri(args.mlruns_path)
experiment_id = (
args.experiment_id
if args.experiment_id
else mlflow.create_experiment(name=args.experiment_name)
)
except Exception as _:
print("experiment-id must be unique")
return
for i, loss_param in tqdm(enumerate(loss_params)):
for j, hparam in enumerate(hparams):
with mlflow.start_run(experiment_id=experiment_id):
mlflow_log_params(loss_param, hparam)
try:
yolo = DenseYOLO(img_channels=1, out_channels=Dataset.out_channels, **hparam)
yolo.to(device)
objectness_loss = objectness_module(
name=loss_param["loss"], args=argparse.Namespace(**loss_param)
)
localization_loss = LocalizationLoss(weight=args.loc_weight)
optimizer = optim.Adam(yolo.parameters(), lr=args.lr)
early_stop = args.patience
run_tpr2 = 0.0
run_tpr1 = 0.0
run_auc = 0.0
for _ in range(args.epochs):
if early_stop == 0:
break
for phase in ["train", "valid"]:
if phase == "train":
yolo.train()
early_stop -= 1
else:
yolo.eval()
df_validation_pred = pd.DataFrame()
valid_target_nb = 0
for data in loaders[phase]:
x, y_true = data
x, y_true = x.to(device), y_true.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == "train"):
y_pred = yolo(x)
obj = objectness_loss(y_pred, y_true)
loc = localization_loss(y_pred, y_true)
total_loss = obj + loc
if phase == "train":
total_loss.backward()
clip_grad_norm_(yolo.parameters(), 0.5)
optimizer.step()
else:
y_true_np = y_true.detach().cpu().numpy()
valid_target_nb += np.sum(y_true_np[:, 0])
df_batch_pred = evaluate_batch(y_pred, y_true)
df_validation_pred = df_validation_pred.append(
df_batch_pred, ignore_index=True, sort=False
)
if phase == "valid":
tpr, fps = froc(df_validation_pred, valid_target_nb)
epoch_tpr2 = np.interp(2.0, fps, tpr)
epoch_tpr1 = np.interp(1.0, fps, tpr)
if epoch_tpr2 > run_tpr2:
early_stop = args.patience
run_tpr2 = epoch_tpr2
run_tpr1 = epoch_tpr1
run_auc = np.trapz(tpr, fps)
torch.save(
yolo.state_dict(),
os.path.join(get_artifact_uri(), "yolo.pt"),
)
imsave(
os.path.join(get_artifact_uri(), "froc.png"),
plot_froc(fps, tpr),
)
log_metric("TPR2", run_tpr2)
log_metric("TPR1", run_tpr1)
log_metric("AUC", run_auc)
except Exception as e:
print(
"{:0>2d}/{} | {} {}".format(
j + 1, len(hparams), hparams[j], type(e).__name__
)
)
def mlflow_log_params(loss_param, hparam):
for key in loss_param:
log_param(key, loss_param[key])
log_param("loss_fun", str(loss_param))
for key in hparam:
log_param(key, hparam[key])
log_param("network", str(hparam))
def data_loaders(args):
dataset_train, dataset_valid = datasets(args)
sampler_train = TomoBatchSampler(
batch_size=args.batch_size, data_frame=dataset_train.data_frame
)
def worker_init(worker_id):
np.random.seed(42 + worker_id)
loader_train = DataLoader(
dataset_train,
batch_sampler=sampler_train,
num_workers=args.workers,
worker_init_fn=worker_init,
)
loader_valid = DataLoader(
dataset_valid,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
worker_init_fn=worker_init,
)
return loader_train, loader_valid
def datasets(args):
train = Dataset(
csv_views=args.data_views,
csv_bboxes=args.data_boxes,
root_dir=args.images,
subset="train",
random=True,
only_biopsied=args.only_biopsied,
transform=transforms(train=True),
skip_preprocessing=True,
downscale=args.downscale,
max_slice_offset=args.slice_offset,
seed=args.seed,
)
valid = Dataset(
csv_views=args.data_views,
csv_bboxes=args.data_boxes,
root_dir=args.images,
subset="validation",
random=False,
transform=transforms(train=False),
skip_preprocessing=True,
downscale=args.downscale,
max_slice_offset=args.slice_offset,
seed=args.seed,
)
return train, valid
def froc(df, targets_nb):
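    # Free-response ROC: sweep detection score thresholds and record sensitivity (TPR)
    # against the mean number of false positives per slice, capped at max_fps.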
total_slices = len(df.drop_duplicates(subset=["PID"]))
total_tps = targets_nb
tpr = [0.0]
fps = [0.0]
max_fps = 4.0
thresholds = sorted(df[df["TP"] == 1]["Score"], reverse=True)
for th in thresholds:
df_th = df[df["Score"] >= th]
df_th_unique_tp = df_th.drop_duplicates(subset=["PID", "TP", "GTID"])
num_tps_th = float(sum(df_th_unique_tp["TP"]))
tpr_th = num_tps_th / total_tps
num_fps_th = float(len(df_th[df_th["TP"] == 0]))
fps_th = num_fps_th / total_slices
if fps_th > max_fps:
tpr.append(tpr[-1])
fps.append(max_fps)
break
tpr.append(tpr_th)
fps.append(fps_th)
if np.max(fps) < max_fps:
tpr.append(tpr[-1])
fps.append(max_fps)
return tpr, fps
def plot_froc(fps, tpr, color="darkorange", linestyle="-"):
fig = plt.figure(figsize=(10, 8))
canvas = FigureCanvasAgg(fig)
plt.plot(fps, tpr, color=color, linestyle=linestyle, lw=2)
plt.xlim([0.0, 4.0])
plt.xticks(np.arange(0.0, 4.5, 0.5))
plt.ylim([0.0, 1.0])
plt.yticks(np.arange(0.0, 1.1, 0.1))
plt.tick_params(axis="both", which="major", labelsize=16)
plt.xlabel("Mean FPs per slice", fontsize=24)
plt.ylabel("Sensitivity", fontsize=24)
plt.grid(color="silver", alpha=0.3, linestyle="--", linewidth=1)
plt.tight_layout()
canvas.draw()
plt.close()
s, (width, height) = canvas.print_to_buffer()
return np.fromstring(s, np.uint8).reshape((height, width, 4))
def is_tp(pred_box, true_box, min_dist=50):
# box: center point + dimensions
pred_y, pred_x = pred_box["Y"], pred_box["X"]
gt_y, gt_x = true_box["Y"], true_box["X"]
# distance between GT and predicted center points
dist = np.sqrt((pred_x - gt_x) ** 2 + (pred_y - gt_y) ** 2)
# TP radius based on GT box size
dist_threshold = np.sqrt(true_box["Width"] ** 2 + true_box["Height"] ** 2) / 2.
dist_threshold = max(dist_threshold, min_dist)
# TP if predicted center within GT radius
return dist <= dist_threshold
def evaluate_batch(y_pred, y_true):
y_pred = y_pred.detach().cpu().numpy()
y_true = y_true.detach().cpu().numpy()
df_eval = pd.DataFrame()
for i in range(y_pred.shape[0]):
df_gt_boxes = pred2boxes(y_true[i], threshold=1.0)
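        # give each ground-truth box a (pseudo-)unique ID so matched predictions can reference it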
df_gt_boxes["GTID"] = np.random.randint(10e10) * (1 + df_gt_boxes["X"])
df_pred_boxes = pred2boxes(y_pred[i])
df_pred_boxes["PID"] = np.random.randint(10e12)
df_pred_boxes["TP"] = 0
df_pred_boxes["GTID"] = np.random.choice(
list(set(df_gt_boxes["GTID"])), df_pred_boxes.shape[0]
)
for index, pred_box in df_pred_boxes.iterrows():
tp_list = [
(j, is_tp(pred_box, x_box)) for j, x_box in df_gt_boxes.iterrows()
]
if any([tp[1] for tp in tp_list]):
tp_index = [tp[0] for tp in tp_list if tp[1]][0]
df_pred_boxes.at[index, "TP"] = 1
df_pred_boxes.at[index, "GTID"] = df_gt_boxes.at[tp_index, "GTID"]
df_eval = df_eval.append(df_pred_boxes, ignore_index=True, sort=False)
return df_eval
def pred2boxes(pred, threshold=None):
# box: center point + dimensions
anchor = Dataset.anchor
cell_size = Dataset.cell_size
np.nan_to_num(pred, copy=False)
obj_th = pred[0]
if threshold is None:
threshold = min(0.001, np.max(obj_th) * 0.5)
obj_th[obj_th < threshold] = 0
yy, xx = np.nonzero(obj_th)
scores = []
xs = []
ys = []
ws = []
hs = []
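    # Decode every remaining (nonzero-score) grid cell: channel 0 is the objectness score,
    # channels 3-4 rescale the anchor height/width, channels 1-2 are the center offsets within a cell.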
for i in range(len(yy)):
scores.append(pred[0, yy[i], xx[i]])
h = int(anchor[0] * pred[3, yy[i], xx[i]] ** 2)
hs.append(h)
w = int(anchor[1] * pred[4, yy[i], xx[i]] ** 2)
ws.append(w)
y_offset = pred[1, yy[i], xx[i]]
y_mid = yy[i] * cell_size + (cell_size / 2) + (cell_size / 2) * y_offset
ys.append(int(y_mid))
x_offset = pred[2, yy[i], xx[i]]
x_mid = xx[i] * cell_size + (cell_size / 2) + (cell_size / 2) * x_offset
xs.append(int(x_mid))
df_dict = {"Score": scores, "X": xs, "Y": ys, "Width": ws, "Height": hs}
df_boxes = pd.DataFrame(df_dict)
df_boxes.sort_values(by="Score", ascending=False, inplace=True)
return df_boxes
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Hyper-parameters grid search for YOLO model for cancer detection in Duke DBT volumes"
)
parser.add_argument(
"--batch-size",
type=int,
default=16,
help="input batch size for training (default: 16)",
)
parser.add_argument(
"--epochs",
type=int,
default=100,
help="number of epochs to train (default: 100)",
)
parser.add_argument(
"--patience",
type=int,
default=25,
help="early stopping: number of epochs to wait for improvement (default: 25)",
)
parser.add_argument(
"--lr", type=float, default=0.001, help="initial learning rate (default: 0.001)"
)
parser.add_argument(
"--loc-weight",
type=float,
default=0.5,
help="weight of localization loss (default: 0.5)",
)
parser.add_argument(
"--device",
type=str,
default="cuda:1",
help="device for training (default: cuda:1)",
)
parser.add_argument(
"--workers",
type=int,
default=4,
help="number of workers for data loading (default: 4)",
)
parser.add_argument(
"--data-views",
type=str,
default="/data/data_train_v2.csv",
help="csv file listing training views together with category label",
)
parser.add_argument(
"--data-boxes",
type=str,
default="/data/bboxes_v2.csv",
help="csv file defining ground truth bounding boxes",
)
parser.add_argument(
"--images",
type=str,
default="/data/TomoImagesPP/",
help="root folder with preprocessed images",
)
parser.add_argument(
"--seed",
type=int,
default=42,
help="random seed for validation split (default: 42)",
)
parser.add_argument(
"--downscale",
type=int,
default=2,
help="input image downscale factor (default 2)",
)
parser.add_argument(
"--experiment-name",
type=str,
default="0",
help="experiment name for new mlflow (default: 0)",
)
parser.add_argument(
"--experiment-id",
type=str,
default=None,
help="experiment id to restore in-progress mlflow experiment (default: None)",
)
parser.add_argument(
"--mlruns-path",
type=str,
default="/data/mlruns",
help="path for mlflow results (default: /data/mlruns)",
)
parser.add_argument(
"--slice-offset",
type=int,
default=0,
help="maximum offset from central slice to consider as GT bounding box (default: 0)",
)
parser.add_argument(
"--only-biopsied",
default=True, # set to true by default for convenience
action="store_true",
help="flag to use only biopsied cases",
)
args = parser.parse_args()
main(args)
|
[
"mlflow.create_experiment",
"matplotlib.pyplot.grid",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"mlflow.log_param",
"torch.cuda.is_available",
"argparse.Namespace",
"numpy.arange",
"sklearn.model_selection.ParameterGrid",
"transform.transforms",
"torch.set_grad_enabled",
"argparse.ArgumentParser",
"mlflow.set_tracking_uri",
"mlflow.log_metric",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.random.seed",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"mlflow.start_run",
"numpy.fromstring",
"numpy.trapz",
"matplotlib.pyplot.tick_params",
"numpy.nonzero",
"numpy.interp",
"matplotlib.pyplot.xlim",
"mlflow.get_artifact_uri",
"dense_yolo.DenseYOLO",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"loss.LocalizationLoss",
"numpy.sum",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"torch.utils.data.DataLoader",
"sampler.TomoBatchSampler",
"matplotlib.pyplot.tight_layout",
"numpy.nan_to_num"
] |
[((6499, 6585), 'sampler.TomoBatchSampler', 'TomoBatchSampler', ([], {'batch_size': 'args.batch_size', 'data_frame': 'dataset_train.data_frame'}), '(batch_size=args.batch_size, data_frame=dataset_train.\n data_frame)\n', (6515, 6585), False, 'from sampler import TomoBatchSampler\n'), ((6687, 6800), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'batch_sampler': 'sampler_train', 'num_workers': 'args.workers', 'worker_init_fn': 'worker_init'}), '(dataset_train, batch_sampler=sampler_train, num_workers=args.\n workers, worker_init_fn=worker_init)\n', (6697, 6800), False, 'from torch.utils.data import DataLoader\n'), ((6854, 6982), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_valid'], {'batch_size': 'args.batch_size', 'drop_last': '(False)', 'num_workers': 'args.workers', 'worker_init_fn': 'worker_init'}), '(dataset_valid, batch_size=args.batch_size, drop_last=False,\n num_workers=args.workers, worker_init_fn=worker_init)\n', (6864, 6982), False, 'from torch.utils.data import DataLoader\n'), ((8770, 8797), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (8780, 8797), True, 'from matplotlib import pyplot as plt\n'), ((8811, 8831), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvasAgg', (['fig'], {}), '(fig)\n', (8826, 8831), False, 'from matplotlib.backends.backend_agg import FigureCanvasAgg\n'), ((8836, 8894), 'matplotlib.pyplot.plot', 'plt.plot', (['fps', 'tpr'], {'color': 'color', 'linestyle': 'linestyle', 'lw': '(2)'}), '(fps, tpr, color=color, linestyle=linestyle, lw=2)\n', (8844, 8894), True, 'from matplotlib import pyplot as plt\n'), ((8899, 8919), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 4.0]'], {}), '([0.0, 4.0])\n', (8907, 8919), True, 'from matplotlib import pyplot as plt\n'), ((8965, 8985), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (8973, 8985), True, 'from matplotlib import pyplot as plt\n'), ((9031, 9088), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(16)'}), "(axis='both', which='major', labelsize=16)\n", (9046, 9088), True, 'from matplotlib import pyplot as plt\n'), ((9093, 9138), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Mean FPs per slice"""'], {'fontsize': '(24)'}), "('Mean FPs per slice', fontsize=24)\n", (9103, 9138), True, 'from matplotlib import pyplot as plt\n'), ((9143, 9181), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sensitivity"""'], {'fontsize': '(24)'}), "('Sensitivity', fontsize=24)\n", (9153, 9181), True, 'from matplotlib import pyplot as plt\n'), ((9186, 9250), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""silver"""', 'alpha': '(0.3)', 'linestyle': '"""--"""', 'linewidth': '(1)'}), "(color='silver', alpha=0.3, linestyle='--', linewidth=1)\n", (9194, 9250), True, 'from matplotlib import pyplot as plt\n'), ((9255, 9273), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9271, 9273), True, 'from matplotlib import pyplot as plt\n'), ((9296, 9307), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9305, 9307), True, 'from matplotlib import pyplot as plt\n'), ((9668, 9720), 'numpy.sqrt', 'np.sqrt', (['((pred_x - gt_x) ** 2 + (pred_y - gt_y) ** 2)'], {}), '((pred_x - gt_x) ** 2 + (pred_y - gt_y) ** 2)\n', (9675, 9720), True, 'import numpy as np\n'), ((10111, 10125), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10123, 10125), True, 'import pandas as pd\n'), ((11227, 11258), 'numpy.nan_to_num', 
'np.nan_to_num', (['pred'], {'copy': '(False)'}), '(pred, copy=False)\n', (11240, 11258), True, 'import numpy as np\n'), ((11407, 11425), 'numpy.nonzero', 'np.nonzero', (['obj_th'], {}), '(obj_th)\n', (11417, 11425), True, 'import numpy as np\n'), ((12115, 12136), 'pandas.DataFrame', 'pd.DataFrame', (['df_dict'], {}), '(df_dict)\n', (12127, 12136), True, 'import pandas as pd\n'), ((12267, 12400), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hyper-parameters grid search for YOLO model for cancer detection in Duke DBT volumes"""'}), "(description=\n 'Hyper-parameters grid search for YOLO model for cancer detection in Duke DBT volumes'\n )\n", (12290, 12400), False, 'import argparse\n'), ((1132, 1159), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['hparams_dict'], {}), '(hparams_dict)\n', (1145, 1159), False, 'from sklearn.model_selection import ParameterGrid\n'), ((1601, 1632), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['loss_params_dict'], {}), '(loss_params_dict)\n', (1614, 1632), False, 'from sklearn.model_selection import ParameterGrid\n'), ((1701, 1742), 'mlflow.set_tracking_uri', 'mlflow.set_tracking_uri', (['args.mlruns_path'], {}), '(args.mlruns_path)\n', (1724, 1742), False, 'import mlflow\n'), ((6231, 6262), 'mlflow.log_param', 'log_param', (['key', 'loss_param[key]'], {}), '(key, loss_param[key])\n', (6240, 6262), False, 'from mlflow import log_metric, log_param, get_artifact_uri\n'), ((6337, 6364), 'mlflow.log_param', 'log_param', (['key', 'hparam[key]'], {}), '(key, hparam[key])\n', (6346, 6364), False, 'from mlflow import log_metric, log_param, get_artifact_uri\n'), ((6636, 6666), 'numpy.random.seed', 'np.random.seed', (['(42 + worker_id)'], {}), '(42 + worker_id)\n', (6650, 6666), True, 'import numpy as np\n'), ((8599, 8610), 'numpy.max', 'np.max', (['fps'], {}), '(fps)\n', (8605, 8610), True, 'import numpy as np\n'), ((8935, 8959), 'numpy.arange', 'np.arange', (['(0.0)', '(4.5)', '(0.5)'], {}), '(0.0, 4.5, 0.5)\n', (8944, 8959), True, 'import numpy as np\n'), ((9001, 9025), 'numpy.arange', 'np.arange', (['(0.0)', '(1.1)', '(0.1)'], {}), '(0.0, 1.1, 0.1)\n', (9010, 9025), True, 'import numpy as np\n'), ((9779, 9836), 'numpy.sqrt', 'np.sqrt', (["(true_box['Width'] ** 2 + true_box['Height'] ** 2)"], {}), "(true_box['Width'] ** 2 + true_box['Height'] ** 2)\n", (9786, 9836), True, 'import numpy as np\n'), ((10379, 10414), 'numpy.random.randint', 'np.random.randint', (['(10000000000000.0)'], {}), '(10000000000000.0)\n', (10396, 10414), True, 'import numpy as np\n'), ((1851, 1902), 'mlflow.create_experiment', 'mlflow.create_experiment', ([], {'name': 'args.experiment_name'}), '(name=args.experiment_name)\n', (1875, 1902), False, 'import mlflow\n'), ((7314, 7336), 'transform.transforms', 'transforms', ([], {'train': '(True)'}), '(train=True)\n', (7324, 7336), False, 'from transform import transforms\n'), ((7670, 7693), 'transform.transforms', 'transforms', ([], {'train': '(False)'}), '(train=False)\n', (7680, 7693), False, 'from transform import transforms\n'), ((9369, 9395), 'numpy.fromstring', 'np.fromstring', (['s', 'np.uint8'], {}), '(s, np.uint8)\n', (9382, 9395), True, 'import numpy as np\n'), ((10252, 10285), 'numpy.random.randint', 'np.random.randint', (['(100000000000.0)'], {}), '(100000000000.0)\n', (10269, 10285), True, 'import numpy as np\n'), ((769, 794), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (792, 794), False, 'import torch\n'), ((2121, 2166), 'mlflow.start_run', 
'mlflow.start_run', ([], {'experiment_id': 'experiment_id'}), '(experiment_id=experiment_id)\n', (2137, 2166), False, 'import mlflow\n'), ((11337, 11351), 'numpy.max', 'np.max', (['obj_th'], {}), '(obj_th)\n', (11343, 11351), True, 'import numpy as np\n'), ((2271, 2341), 'dense_yolo.DenseYOLO', 'DenseYOLO', ([], {'img_channels': '(1)', 'out_channels': 'Dataset.out_channels'}), '(img_channels=1, out_channels=Dataset.out_channels, **hparam)\n', (2280, 2341), False, 'from dense_yolo import DenseYOLO\n'), ((2585, 2625), 'loss.LocalizationLoss', 'LocalizationLoss', ([], {'weight': 'args.loc_weight'}), '(weight=args.loc_weight)\n', (2601, 2625), False, 'from loss import objectness_module, LocalizationLoss\n'), ((5780, 5808), 'mlflow.log_metric', 'log_metric', (['"""TPR2"""', 'run_tpr2'], {}), "('TPR2', run_tpr2)\n", (5790, 5808), False, 'from mlflow import log_metric, log_param, get_artifact_uri\n'), ((5829, 5857), 'mlflow.log_metric', 'log_metric', (['"""TPR1"""', 'run_tpr1'], {}), "('TPR1', run_tpr1)\n", (5839, 5857), False, 'from mlflow import log_metric, log_param, get_artifact_uri\n'), ((5878, 5904), 'mlflow.log_metric', 'log_metric', (['"""AUC"""', 'run_auc'], {}), "('AUC', run_auc)\n", (5888, 5904), False, 'from mlflow import log_metric, log_param, get_artifact_uri\n'), ((2490, 2522), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '(**loss_param)\n', (2508, 2522), False, 'import argparse\n'), ((3310, 3324), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3322, 3324), True, 'import pandas as pd\n'), ((4903, 4927), 'numpy.interp', 'np.interp', (['(2.0)', 'fps', 'tpr'], {}), '(2.0, fps, tpr)\n', (4912, 4927), True, 'import numpy as np\n'), ((4973, 4997), 'numpy.interp', 'np.interp', (['(1.0)', 'fps', 'tpr'], {}), '(1.0, fps, tpr)\n', (4982, 4997), True, 'import numpy as np\n'), ((3648, 3688), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (3670, 3688), False, 'import torch\n'), ((5281, 5299), 'numpy.trapz', 'np.trapz', (['tpr', 'fps'], {}), '(tpr, fps)\n', (5289, 5299), True, 'import numpy as np\n'), ((4393, 4416), 'numpy.sum', 'np.sum', (['y_true_np[:, 0]'], {}), '(y_true_np[:, 0])\n', (4399, 4416), True, 'import numpy as np\n'), ((5460, 5478), 'mlflow.get_artifact_uri', 'get_artifact_uri', ([], {}), '()\n', (5476, 5478), False, 'from mlflow import log_metric, log_param, get_artifact_uri\n'), ((5627, 5645), 'mlflow.get_artifact_uri', 'get_artifact_uri', ([], {}), '()\n', (5643, 5645), False, 'from mlflow import log_metric, log_param, get_artifact_uri\n')]
|
import numpy as np
def point_to_seg(x1, x2) -> np.ndarray:
'''
Method:
-------
    Transform 2 points into a parametrized segment (c, r, phi). Implicitly, phi is in
    [-pi/2; pi/2]; it is the oriented angle the segment makes with the
    horizontal line passing through its middle c.
'''
c = (x1[:2] + x2[:2])/2
# TODO: funny could define different topologies to explore.
if np.sum((x2-x1)**2) == 0:
print('x2 is equal to x1?')
r = np.sqrt(np.sum((x2-x1)**2))
    # TODO: check that the angle is well oriented
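    # the sign of phi encodes whether the segment rises or falls when read from left to right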
sign = np.sign(x2[0] - x1[0]) * np.sign(x2[1] - x1[1])
phi = sign * np.arccos(np.abs(x2[0]-x1[0])/r)
if phi < - np.pi/2 or phi > np.pi/2:
raise ValueError('the value of phi is not in [-pi/2, pi/2] but it {}'.format(phi))
res = np.hstack([c, r, phi])
return res
def seg_to_point(seg) -> (np.ndarray, np.ndarray):
'''transforms seg (c,r,phi) into a tuple of two 2-d points'''
phi = seg[3]
r = seg[2]
c = seg[:2]
dx = np.abs(np.cos(phi)*r/2)
dy = np.abs(np.sin(phi)*r/2)
x1 = c - np.array([dx, np.sign(phi)*dy])
x2 = c + np.array([dx, np.sign(phi)*dy])
return(x1, x2)
|
[
"numpy.abs",
"numpy.hstack",
"numpy.sum",
"numpy.cos",
"numpy.sign",
"numpy.sin"
] |
[((793, 815), 'numpy.hstack', 'np.hstack', (['[c, r, phi]'], {}), '([c, r, phi])\n', (802, 815), True, 'import numpy as np\n'), ((395, 417), 'numpy.sum', 'np.sum', (['((x2 - x1) ** 2)'], {}), '((x2 - x1) ** 2)\n', (401, 417), True, 'import numpy as np\n'), ((472, 494), 'numpy.sum', 'np.sum', (['((x2 - x1) ** 2)'], {}), '((x2 - x1) ** 2)\n', (478, 494), True, 'import numpy as np\n'), ((553, 575), 'numpy.sign', 'np.sign', (['(x2[0] - x1[0])'], {}), '(x2[0] - x1[0])\n', (560, 575), True, 'import numpy as np\n'), ((578, 600), 'numpy.sign', 'np.sign', (['(x2[1] - x1[1])'], {}), '(x2[1] - x1[1])\n', (585, 600), True, 'import numpy as np\n'), ((628, 649), 'numpy.abs', 'np.abs', (['(x2[0] - x1[0])'], {}), '(x2[0] - x1[0])\n', (634, 649), True, 'import numpy as np\n'), ((1014, 1025), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1020, 1025), True, 'import numpy as np\n'), ((1047, 1058), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1053, 1058), True, 'import numpy as np\n'), ((1091, 1103), 'numpy.sign', 'np.sign', (['phi'], {}), '(phi)\n', (1098, 1103), True, 'import numpy as np\n'), ((1136, 1148), 'numpy.sign', 'np.sign', (['phi'], {}), '(phi)\n', (1143, 1148), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(<EMAIL>), <NAME>(<EMAIL>)
@description: Graph classify
"""
import numpy
from sklearn.metrics import f1_score, accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
class TopKRanker(OneVsRestClassifier):
def predict(self, X, top_k_list):
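        # keep only the k most probable labels per sample, where k is given by top_k_list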
probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))
all_labels = []
for i, k in enumerate(top_k_list):
probs_ = probs[i, :]
labels = self.classes_[probs_.argsort()[-k:]].tolist()
probs_[:] = 0
probs_[labels] = 1
all_labels.append(probs_)
return numpy.asarray(all_labels)
class Classifier(object):
def __init__(self, embeddings, clf):
self.embeddings = embeddings
self.clf = TopKRanker(clf)
self.binarizer = MultiLabelBinarizer(sparse_output=True)
def train(self, X, Y, Y_all):
self.binarizer.fit(Y_all)
X_train = [self.embeddings[x] for x in X]
Y = self.binarizer.transform(Y)
self.clf.fit(X_train, Y)
def evaluate(self, X, Y):
top_k_list = [len(l) for l in Y]
Y_ = self.predict(X, top_k_list)
Y = self.binarizer.transform(Y)
results = {}
results['f1'] = f1_score(Y, Y_, average="weighted")
results['acc'] = accuracy_score(Y, Y_)
print(results)
return results
def predict(self, X, top_k_list):
X_ = numpy.asarray([self.embeddings[x] for x in X])
Y = self.clf.predict(X_, top_k_list=top_k_list)
return Y
    def split_train_evaluate(self, X, Y, train_percent, seed=0):
        training_size = int(train_percent * len(X))
numpy.random.seed(seed)
shuffle_indices = numpy.random.permutation(numpy.arange(len(X)))
X_train = [X[shuffle_indices[i]] for i in range(training_size)]
Y_train = [Y[shuffle_indices[i]] for i in range(training_size)]
X_test = [X[shuffle_indices[i]] for i in range(training_size, len(X))]
Y_test = [Y[shuffle_indices[i]] for i in range(training_size, len(X))]
self.train(X_train, Y_train, Y)
return self.evaluate(X_test, Y_test)
def read_node_label(filename, skip_head=False):
with open(filename, 'r', encoding='utf-8') as f:
X = []
Y = []
count = 0
for line in f:
line = line.strip()
count += 1
if skip_head and count == 1:
continue
parts = line.split(' ')
if len(parts) > 1:
X.append(parts[0])
Y.append(parts[1:])
return X, Y
|
[
"sklearn.metrics.f1_score",
"numpy.asarray",
"numpy.random.seed",
"sklearn.preprocessing.MultiLabelBinarizer",
"sklearn.metrics.accuracy_score"
] |
[((702, 727), 'numpy.asarray', 'numpy.asarray', (['all_labels'], {}), '(all_labels)\n', (715, 727), False, 'import numpy\n'), ((894, 933), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {'sparse_output': '(True)'}), '(sparse_output=True)\n', (913, 933), False, 'from sklearn.preprocessing import MultiLabelBinarizer\n'), ((1324, 1359), 'sklearn.metrics.f1_score', 'f1_score', (['Y', 'Y_'], {'average': '"""weighted"""'}), "(Y, Y_, average='weighted')\n", (1332, 1359), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((1385, 1406), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y', 'Y_'], {}), '(Y, Y_)\n', (1399, 1406), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((1505, 1551), 'numpy.asarray', 'numpy.asarray', (['[self.embeddings[x] for x in X]'], {}), '([self.embeddings[x] for x in X])\n', (1518, 1551), False, 'import numpy\n'), ((1752, 1775), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (1769, 1775), False, 'import numpy\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position,invalid-name
"""
Test the cascader in the compilation flow.
"""
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.codegen import _create_cascader
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from tvm.contrib.ethosu.cascader import MemoryRegion, EthosuDeviceConfig
from .. import infra as test_infra
from . import infra as cascader_test_infra
def _ethos_u55_cascader():
sram = MemoryRegion(
name="SRAM",
size=10**6,
read_bandwidth=16,
write_bandwidth=16,
read_latency=0,
write_latency=0,
burst_length=1,
)
flash = MemoryRegion(name="FLASH", size=10**7, read_bandwidth=4, write_bandwidth=4)
device_config = EthosuDeviceConfig("ethos-u55-256")
cascader_options = cascader_test_infra.make_options(
cascade_region=sram,
max_proposals=64,
stripe_factors=4,
max_plan_size=10,
max_open_plans=8,
max_closed_plans=32,
always_copy_size=1024,
disable_pareto_plans=False,
disable_pareto_proposals=False,
enable_striping=False,
)
return _create_cascader(
options=cascader_options,
io_region=sram,
constant_region=flash,
working_regions=[sram],
device_config=device_config,
)
def _compile_model(relay_function):
mod = tvm.IRModule()
mod["main"] = relay_function
mod = relay.transform.InferType()(mod)
tir_mod = _lower_to_tir(mod["main"], _ethos_u55_cascader())[0]
return tir_mod["main"]
def _create_single_conv2d():
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
conv1 = test_infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
func = relay.Function(relay.analysis.free_vars(conv1), conv1)
return func
def _create_double_conv2d():
ifm = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
conv1 = test_infra.make_ethosu_conv2d(ifm, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
conv2 = test_infra.make_ethosu_conv2d(conv1, 4, 4, (1, 3), (1, 1), (1, 1), (1, 1))
func = relay.Function(relay.analysis.free_vars(conv2), conv2)
return func
def _create_scalar_add():
ifm = relay.var("x", shape=(1, 5, 4, 3), dtype="int8")
ifm2 = relay.const(np.ones((1, 1, 1, 1)), dtype="int8")
add = test_infra.make_ethosu_binary_elementwise(
ifm, ifm2, ifm_channels=3, ifm2_channels=1, operator_type="ADD", ofm_dtype="int8"
)
func = relay.Function(relay.analysis.free_vars(add), add)
return func
def test_single_conv_compute_cycles_hint():
"""
    Check that the "compute_cycles_hint" annotation remains in the lowering flow
for single convolution.
"""
primfunc = _compile_model(_create_single_conv2d())
ops = primfunc.body.body.body.seq
compute_cycles_hints = [2304, 640, 320]
for op, compute_cycle_hint in zip(ops, compute_cycles_hints):
assert op.attr_key == "pragma_compute_cycles_hint"
assert op.value == compute_cycle_hint
def test_double_conv_compute_cycles_hint():
"""
    Check that the "compute_cycles_hint" annotation remains in the lowering flow
for double convolution.
"""
primfunc = _compile_model(_create_double_conv2d())
ops = primfunc.body.body.body.body.body.body.seq
compute_cycles_hints = [2304, 640, 768, 640, 320, 240]
for op, compute_cycle_hint in zip(ops, compute_cycles_hints):
assert op.attr_key == "pragma_compute_cycles_hint"
assert op.value == compute_cycle_hint
def test_scalar_add_compute_cycles_hint():
"""
    Check that the "compute_cycles_hint" annotation remains in the lowering flow
for add with scalar values.
"""
primfunc = _compile_model(_create_scalar_add())
ops = primfunc.body.body.seq
compute_cycles_hints = [16, 24]
for op, compute_cycle_hint in zip(ops, compute_cycles_hints):
assert op.attr_key == "pragma_compute_cycles_hint"
assert op.value == compute_cycle_hint
|
[
"numpy.ones",
"tvm.relay.backend.contrib.ethosu.codegen._create_cascader",
"tvm.contrib.ethosu.cascader.EthosuDeviceConfig",
"pytest.importorskip",
"tvm.contrib.ethosu.cascader.MemoryRegion",
"tvm.IRModule",
"tvm.relay.transform.InferType",
"tvm.relay.analysis.free_vars",
"tvm.relay.var"
] |
[((906, 940), 'pytest.importorskip', 'pytest.importorskip', (['"""ethosu.vela"""'], {}), "('ethosu.vela')\n", (925, 940), False, 'import pytest\n'), ((1329, 1461), 'tvm.contrib.ethosu.cascader.MemoryRegion', 'MemoryRegion', ([], {'name': '"""SRAM"""', 'size': '(10 ** 6)', 'read_bandwidth': '(16)', 'write_bandwidth': '(16)', 'read_latency': '(0)', 'write_latency': '(0)', 'burst_length': '(1)'}), "(name='SRAM', size=10 ** 6, read_bandwidth=16, write_bandwidth=\n 16, read_latency=0, write_latency=0, burst_length=1)\n", (1341, 1461), False, 'from tvm.contrib.ethosu.cascader import MemoryRegion, EthosuDeviceConfig\n'), ((1530, 1607), 'tvm.contrib.ethosu.cascader.MemoryRegion', 'MemoryRegion', ([], {'name': '"""FLASH"""', 'size': '(10 ** 7)', 'read_bandwidth': '(4)', 'write_bandwidth': '(4)'}), "(name='FLASH', size=10 ** 7, read_bandwidth=4, write_bandwidth=4)\n", (1542, 1607), False, 'from tvm.contrib.ethosu.cascader import MemoryRegion, EthosuDeviceConfig\n'), ((1627, 1662), 'tvm.contrib.ethosu.cascader.EthosuDeviceConfig', 'EthosuDeviceConfig', (['"""ethos-u55-256"""'], {}), "('ethos-u55-256')\n", (1645, 1662), False, 'from tvm.contrib.ethosu.cascader import MemoryRegion, EthosuDeviceConfig\n'), ((2037, 2176), 'tvm.relay.backend.contrib.ethosu.codegen._create_cascader', '_create_cascader', ([], {'options': 'cascader_options', 'io_region': 'sram', 'constant_region': 'flash', 'working_regions': '[sram]', 'device_config': 'device_config'}), '(options=cascader_options, io_region=sram, constant_region=\n flash, working_regions=[sram], device_config=device_config)\n', (2053, 2176), False, 'from tvm.relay.backend.contrib.ethosu.codegen import _create_cascader\n'), ((2267, 2281), 'tvm.IRModule', 'tvm.IRModule', ([], {}), '()\n', (2279, 2281), False, 'import tvm\n'), ((2493, 2541), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(1, 8, 8, 4)', 'dtype': '"""int8"""'}), "('x', shape=(1, 8, 8, 4), dtype='int8')\n", (2502, 2541), False, 'from tvm import relay\n'), ((2750, 2798), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(1, 8, 8, 4)', 'dtype': '"""int8"""'}), "('x', shape=(1, 8, 8, 4), dtype='int8')\n", (2759, 2798), False, 'from tvm import relay\n'), ((3091, 3139), 'tvm.relay.var', 'relay.var', (['"""x"""'], {'shape': '(1, 5, 4, 3)', 'dtype': '"""int8"""'}), "('x', shape=(1, 5, 4, 3), dtype='int8')\n", (3100, 3139), False, 'from tvm import relay\n'), ((2325, 2352), 'tvm.relay.transform.InferType', 'relay.transform.InferType', ([], {}), '()\n', (2350, 2352), False, 'from tvm import relay\n'), ((2653, 2684), 'tvm.relay.analysis.free_vars', 'relay.analysis.free_vars', (['conv1'], {}), '(conv1)\n', (2677, 2684), False, 'from tvm import relay\n'), ((2997, 3028), 'tvm.relay.analysis.free_vars', 'relay.analysis.free_vars', (['conv2'], {}), '(conv2)\n', (3021, 3028), False, 'from tvm import relay\n'), ((3163, 3184), 'numpy.ones', 'np.ones', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (3170, 3184), True, 'import numpy as np\n'), ((3375, 3404), 'tvm.relay.analysis.free_vars', 'relay.analysis.free_vars', (['add'], {}), '(add)\n', (3399, 3404), False, 'from tvm import relay\n')]
|
# encoding: utf-8
'''
Created on Dec 18, 2018
@author: <NAME>
'''
import time
from array import *
from ctypes import *
from sys import exit
from multiprocessing import Process
from multiprocessing import Queue
import numpy as np
class EmotivDeviceReader(object):
'''
classdocs
This class is used to read EEG data from emotiv
Attributes:
queue: the queue save EEG data
'''
def __init__(self):
'''
Constructor
'''
self.queue = Queue(maxsize=-1)
        # num_EDR = 0 # record how many EmotivDeviceReader instances have been created
        self.num_start = 0 # record how many reader threads have been started via start()
def test(self):
print("real_time_detection.GUI.EmotivDeviceReader.py now test.")
print("test test test test test")
# check_status(self)
def check_status(self):
print("EmotivDeviceReader.py.check_status(self).start...")
'''
        check if the device is connected correctly; if not, exit this process
'''
if self.libEDK.IEE_EngineConnect(create_string_buffer(b"Emotiv Systems-5")) != 0:
print("Failed to start up Emotiv Engine.")
exit()
else:
print("Successfully start up Emotiv Engine.")
print("EmotivDeviceReader.py.check_status(self).end...")
# check_status(self)
# loop(self)
def loop(self):
print("EmotivDeviceReader.py..loop(self).start...")
'''
        the loop is used to continuously read data from the device
'''
try:
self.libEDK = cdll.LoadLibrary("win64/edk.dll")
except Exception as e:
print('Error: cannot load EDK lib:', e)
exit()
print("EmotivDeviceReader.py...successfully connect")
self.IEE_EmoEngineEventCreate = self.libEDK.IEE_EmoEngineEventCreate
self.IEE_EmoEngineEventCreate.restype = c_void_p
self.eEvent = self.IEE_EmoEngineEventCreate()
# print("self.eEvent = self.IEE_EmoEngineEventCreate()")
self.IEE_EmoEngineEventGetEmoState = self.libEDK.IEE_EmoEngineEventGetEmoState
self.IEE_EmoEngineEventGetEmoState.argtypes = [c_void_p, c_void_p]
self.IEE_EmoEngineEventGetEmoState.restype = c_int
# print("self.IEE_EmoEngineEventGetEmoState.restype = c_int")
self.IEE_EmoStateCreate = self.libEDK.IEE_EmoStateCreate
self.IEE_EmoStateCreate.restype = c_void_p
self.eState = self.IEE_EmoStateCreate()
# print("self.eState = self.IEE_EmoStateCreate()")
self.IEE_EngineGetNextEvent = self.libEDK.IEE_EngineGetNextEvent
self.IEE_EngineGetNextEvent.restype = c_int
self.IEE_EngineGetNextEvent.argtypes = [c_void_p]
# print("self.IEE_EngineGetNextEvent.argtypes = [c_void_p]")
self.IEE_EmoEngineEventGetUserId = self.libEDK.IEE_EmoEngineEventGetUserId
self.IEE_EmoEngineEventGetUserId.restype = c_int
self.IEE_EmoEngineEventGetUserId.argtypes = [c_void_p , c_void_p]
# print("self.IEE_EmoEngineEventGetUserId.argtypes = [c_void_p , c_void_p]")
self.IEE_EmoEngineEventGetType = self.libEDK.IEE_EmoEngineEventGetType
self.IEE_EmoEngineEventGetType.restype = c_int
self.IEE_EmoEngineEventGetType.argtypes = [c_void_p]
# print("self.IEE_EmoEngineEventGetType.argtypes = [c_void_p]")
self.IEE_EmoEngineEventCreate = self.libEDK.IEE_EmoEngineEventCreate
self.IEE_EmoEngineEventCreate.restype = c_void_p
# print("self.IEE_EmoEngineEventCreate.restype = c_void_p")
self.IEE_EmoEngineEventGetEmoState = self.libEDK.IEE_EmoEngineEventGetEmoState
self.IEE_EmoEngineEventGetEmoState.argtypes = [c_void_p, c_void_p]
self.IEE_EmoEngineEventGetEmoState.restype = c_int
# print("self.IEE_EmoEngineEventGetEmoState.restype = c_int")
self.IEE_EmoStateCreate = self.libEDK.IEE_EmoStateCreate
self.IEE_EmoStateCreate.argtype = c_void_p
self.IEE_EmoStateCreate.restype = c_void_p
# print("self.IEE_EmoStateCreate.restype = c_void_p")
self.IEE_FFTSetWindowingType = self.libEDK.IEE_FFTSetWindowingType
self.IEE_FFTSetWindowingType.restype = c_int
self.IEE_FFTSetWindowingType.argtypes = [c_uint, c_void_p]
# print("self.IEE_FFTSetWindowingType.argtypes = [c_uint, c_void_p]")
self.IEE_GetAverageBandPowers = self.libEDK.IEE_GetAverageBandPowers
self.IEE_GetAverageBandPowers.restype = c_int
self.IEE_GetAverageBandPowers.argtypes = [c_uint, c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p]
# print("self.IEE_GetAverageBandPowers.argtypes = [c_uint, c_int, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p]")
self.IEE_EngineDisconnect = self.libEDK.IEE_EngineDisconnect
self.IEE_EngineDisconnect.restype = c_int
self.IEE_EngineDisconnect.argtype = c_void_p
# print("self.IEE_EngineDisconnect.argtype = c_void_p")
self.IEE_EmoStateFree = self.libEDK.IEE_EmoStateFree
self.IEE_EmoStateFree.restype = c_int
self.IEE_EmoStateFree.argtypes = [c_void_p]
# print("self.IEE_EmoStateFree.argtypes = [c_void_p]")
self.IEE_EmoEngineEventFree = self.libEDK.IEE_EmoEngineEventFree
self.IEE_EmoEngineEventFree.restype = c_int
self.IEE_EmoEngineEventFree.argtypes = [c_void_p]
# print("self.IEE_EmoEngineEventFree.argtypes = [c_void_p]")
self.check_status()
print("EmotivDeviceReader.py...self.check_status()...")
userID = c_uint(0)
user = pointer(userID)
ready = 0
state = c_int(0)
alphaValue = c_double(0)
low_betaValue = c_double(0)
high_betaValue = c_double(0)
gammaValue = c_double(0)
thetaValue = c_double(0)
alpha = pointer(alphaValue)
low_beta = pointer(low_betaValue)
high_beta = pointer(high_betaValue)
gamma = pointer(gammaValue)
theta = pointer(thetaValue)
channelList = array('I', [3, 7, 9, 12, 16]) # IED_AF3, IED_AF4, IED_T7, IED_T8, IED_Pz
loop_times = 0 # count how many times did while(1) run
# while(1)
while(1):
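            # poll the engine for events; once a user is added, read the average band powers per channel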
loop_times += 1
state = self.IEE_EngineGetNextEvent(self.eEvent)
data = []
if state == 0:
eventType = self.IEE_EmoEngineEventGetType(self.eEvent)
self.IEE_EmoEngineEventGetUserId(self.eEvent, user)
if eventType == 16: # libEDK.IEE_Event_enum.IEE_UserAdded
ready = 1
self.IEE_FFTSetWindowingType(userID, 1); # 1: libEDK.IEE_WindowingTypes_enum.IEE_HAMMING
print("User added")
if ready == 1:
for i in channelList:
result = c_int(0)
result = self.IEE_GetAverageBandPowers(userID, i, theta, alpha, low_beta, high_beta, gamma)
if result == 0: # EDK_OK
print("theta: %.6f, alpha: %.6f, low beta: %.6f, high beta: %.6f, gamma: %.6f \n" %
(thetaValue.value, alphaValue.value, low_betaValue.value,
high_betaValue.value, gammaValue.value))
one_read_data = [thetaValue.value, alphaValue.value,
low_betaValue.value, high_betaValue.value, gammaValue.value]
if len(one_read_data) > 0:
data += one_read_data
elif state != 0x0600:
print("Internal error in Emotiv Engine ! ")
if len(data) > 0:
self.queue.put(np.array(data))
# --------------- #
# sleep_time = 0.5
# print("sleep(%f)" % sleep_time)
# print("loop_times(%d)" % loop_times)
# time.sleep(sleep_time)
# if loop_times >= 50:
# break
# while(1)
print("EmotivDeviceReader.py..loop(self).end...")
return 0
# loop(self)
def start(self):
'''
start a sub-process
'''
print("sub_process")
self.num_start += 1
print("num_start: %d " % self.num_start)
sub_process = Process(target=self.loop) # self.loop is the loop(self) function above
print("sub_process.start().start")
sub_process.start()
print("sub_process.start().end")
    # error when running __main__ in tool.py
'''
line 204, in start
sub_process.start()
'''
def get_data(self):
'''
        read PSD (power spectral density) data collected in the queue
Returns:
theta, alpha, low_beta, high_beta, gamma in order
IED_AF3, IED_AF4, IED_T7, IED_T8, IED_Pz in order
'''
print("EmotivDeviceReader.get_data().start...")
data_list = []
while self.queue.qsize() > 0:
ele = self.queue.get()
data_list.append(ele)
print("data_list[0]")
print(data_list[0])
print("data_list[1]")
print(data_list[1])
# print(data_list[2])
print("EmotivDeviceReader.get_data().end...")
return data_list
# __main__
if __name__ == '__main__':
print("EmotivDeviceReader.py..__main__.start...")
device_reader = EmotivDeviceReader()
print("device_reader.start()")
device_reader.start()
print("device_reader.start()")
time.sleep(5)
print("for 5 loop: data")
for i in range(5):
print("i:%d" % i)
data = device_reader.get_data()
data = np.array(data)
print(data)
time.sleep(1)
print("EmotivDeviceReader.py..__main__.end...")
# __main__
|
[
"multiprocessing.Process",
"time.sleep",
"numpy.array",
"sys.exit",
"multiprocessing.Queue"
] |
[((9695, 9708), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (9705, 9708), False, 'import time\n'), ((494, 511), 'multiprocessing.Queue', 'Queue', ([], {'maxsize': '(-1)'}), '(maxsize=-1)\n', (499, 511), False, 'from multiprocessing import Queue\n'), ((8523, 8548), 'multiprocessing.Process', 'Process', ([], {'target': 'self.loop'}), '(target=self.loop)\n', (8530, 8548), False, 'from multiprocessing import Process\n'), ((9843, 9857), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (9851, 9857), True, 'import numpy as np\n'), ((9886, 9899), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9896, 9899), False, 'import time\n'), ((1124, 1130), 'sys.exit', 'exit', ([], {}), '()\n', (1128, 1130), False, 'from sys import exit\n'), ((1646, 1652), 'sys.exit', 'exit', ([], {}), '()\n', (1650, 1652), False, 'from sys import exit\n'), ((7922, 7936), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (7930, 7936), True, 'import numpy as np\n')]
|
import scipy
import matplotlib.pyplot as plt
import numpy as np
x = [
0.001, 0.019, 0.039, 0.058, 0.080, 0.098, 0.119, 0.139,
0.159, 0.180, 0.198, 0.249, 0.298, 0.349, 0.398, 0.419,
0.439, 0.460, 0.479, 0.499, 0.519, 0.540, 0.558, 0.578,
0.598, 0.649, 0.698, 0.749, 0.798, 0.819, 0.839, 0.859,
0.879, 0.900, 0.920, 0.939, 0.958, 0.980, 0.998
]
y = [
0.056, 0.077, 0.076, 0.078, 0.088, 0.078, 0.105, 0.101,
0.107, 0.111, 0.119, 0.120, 0.155, 0.195, 0.223, 0.276,
0.293, 0.304, 0.325, 0.349, 0.370, 0.387, 0.390, 0.386,
0.408, 0.458, 0.449, 0.467, 0.456, 0.447, 0.436, 0.443,
0.444, 0.423, 0.429, 0.428, 0.445, 0.416, 0.400
]
x_axis = np.arange(min(x), max(x) + 0.1, 0.1)
fig, ax = plt.subplots()
ax.scatter(x, y,)
for degree in range(1,5):
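    # fit polynomials of increasing degree and compare their fit residuals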
poly_coefficient, residual, _, _, _ = np.polyfit(x, y, degree, full=True)
poly_function = np.poly1d(poly_coefficient)
ax.plot(x_axis, poly_function(x_axis), label=f'deg: {degree}, res: {residual}')
print(residual)
ax.grid(ls='-')
plt.show()
|
[
"numpy.polyfit",
"numpy.poly1d",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((724, 738), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (736, 738), True, 'import matplotlib.pyplot as plt\n'), ((1037, 1047), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1045, 1047), True, 'import matplotlib.pyplot as plt\n'), ((827, 862), 'numpy.polyfit', 'np.polyfit', (['x', 'y', 'degree'], {'full': '(True)'}), '(x, y, degree, full=True)\n', (837, 862), True, 'import numpy as np\n'), ((884, 911), 'numpy.poly1d', 'np.poly1d', (['poly_coefficient'], {}), '(poly_coefficient)\n', (893, 911), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
import torch
from torch import optim
from spn.structure.Base import Product, Sum
from spn.structure.Base import assign_ids, rebuild_scopes_bottom_up
from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical
from spn.gpu.TensorFlow import spn_to_tf_graph, optimize_tf_graph
from spn.gpu.TensorFlow import eval_tf
from spn.algorithms.Inference import log_likelihood
from torch import nn
from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch
DELTA = 1e-10
class IdentityLeaf(nn.Module):
"""Identity leaf node (in log space) for testing purposes."""
def __init__(self, scope):
super(IdentityLeaf, self).__init__()
self.scope = scope
def forward(self, x):
return torch.log(x[:, self.scope])
class TestPytorchNodes(unittest.TestCase):
"""Test cases for different node types."""
def test_sum_node(self):
"""Test SumNode implementation."""
# Init product node
id0 = IdentityLeaf(scope=0)
id1 = IdentityLeaf(scope=1)
id2 = IdentityLeaf(scope=2)
children = [id0, id1, id2]
weights = np.array([0.1, 0.4, 0.5])
sumnode = SumNode(children=children, weights=weights)
# Define input: Two samples with three features
sample1 = np.array([1, 2, 3])
sample2 = np.array([10, 20, 30])
x = torch.Tensor([sample1, sample2])
# Get sum node result
result = sumnode(x)
# Expected results
expected_result = np.log([np.sum(weights * sample1), np.sum(weights * sample2)]).tolist()
# Assertions
self.assertEqual(len(result.tolist()), 2)
self.assertTrue(np.isclose(result.tolist(), expected_result, atol=DELTA).all())
def test_product_node(self):
"""Test product node implementation"""
# Init product node
id0 = IdentityLeaf(scope=0)
id1 = IdentityLeaf(scope=1)
id2 = IdentityLeaf(scope=2)
prod = ProductNode(children=[id0, id1, id2])
# Define input: Two samples with three features
sample1 = np.array([1, 2, 3])
sample2 = np.array([10, 20, 30])
x = torch.Tensor([sample1, sample2])
# Get product node result
result = prod(x)
# Product in logspace is sum
expected_result = [np.sum(np.log(sample1)), np.sum(np.log(sample2))]
# Assertions
self.assertEqual(len(result.tolist()), 2)
self.assertTrue(np.isclose(result.tolist(), expected_result, atol=DELTA).all())
def test_gaussian_node(self):
"""Test the GaussianNode implementation"""
means = [0.0, 0.5, 1.0]
stds = [1.0, 2.0, 3.0]
gauss0 = GaussianNode(mean=means[0], std=stds[0], scope=0)
gauss1 = GaussianNode(mean=means[1], std=stds[1], scope=1)
gauss2 = GaussianNode(mean=means[2], std=stds[2], scope=2)
sample1 = np.array([1, 2, 3])
sample2 = np.array([10, 20, 30])
x = torch.Tensor([sample1, sample2])
# Get results
res_gauss0 = gauss0(x)
res_gauss1 = gauss1(x)
res_gauss2 = gauss2(x)
# Expect results from normal distributions
normal0 = torch.distributions.Normal(loc=means[0], scale=stds[0])
normal1 = torch.distributions.Normal(loc=means[1], scale=stds[1])
normal2 = torch.distributions.Normal(loc=means[2], scale=stds[2])
exp_gauss0 = normal0.log_prob(torch.Tensor([1, 10]))
exp_gauss1 = normal1.log_prob(torch.Tensor([2, 20]))
exp_gauss2 = normal2.log_prob(torch.Tensor([3, 30]))
# Assertions
self.assertEqual(len(res_gauss0.tolist()), 2)
self.assertEqual(len(res_gauss1.tolist()), 2)
self.assertEqual(len(res_gauss2.tolist()), 2)
# Assert that results are numerically equal
self.assertTrue(np.isclose(res_gauss0.tolist(), exp_gauss0, atol=DELTA).all())
self.assertTrue(np.isclose(res_gauss1.tolist(), exp_gauss1, atol=DELTA).all())
self.assertTrue(np.isclose(res_gauss2.tolist(), exp_gauss2, atol=DELTA).all())
def test_equal_to_tf(self):
# SPFLow implementation
g00 = Gaussian(mean=0.0, stdev=1.0, scope=0)
g10 = Gaussian(mean=1.0, stdev=2.0, scope=1)
g01 = Gaussian(mean=3.0, stdev=2.0, scope=0)
g11 = Gaussian(mean=5.0, stdev=1.0, scope=1)
p0 = Product(children=[g00, g10])
p1 = Product(children=[g01, g11])
s = Sum(weights=[0.2, 0.8], children=[p0, p1])
assign_ids(s)
rebuild_scopes_bottom_up(s)
# Test for 100 random samples
data = np.random.randn(100, 2)
# LL from SPN
ll = log_likelihood(s, data)
# PyTorch implementation
g00 = GaussianNode(mean=0.0, std=1.0, scope=0)
g10 = GaussianNode(mean=1.0, std=2.0, scope=1)
g01 = GaussianNode(mean=3.0, std=2.0, scope=0)
g11 = GaussianNode(mean=5.0, std=1.0, scope=1)
p0 = ProductNode(children=[g00, g10])
p1 = ProductNode(children=[g01, g11])
rootnode = SumNode(weights=[0.2, 0.8], children=[p0, p1])
datatensor = torch.Tensor(data)
# LL from pytorch
ll_torch = rootnode(datatensor)
# Assert equality
self.assertTrue(np.isclose(np.array(ll).squeeze(), ll_torch.detach().numpy(), atol=DELTA).all())
def test_spn_to_torch(self):
# SPFLow implementation
n0 = Gaussian(mean=0.0, stdev=1.0, scope=0)
n1 = Categorical(p=[0.1, 0.3, 0.6])
n2 = Sum(weights=[0.1, 0.2, 0.3, 0.4], children=[n0, n1])
n3 = Product(children=[n0, n1])
torch_n0 = GaussianNode.from_spn(n0)
torch_n1 = CategoricalNode.from_spn(n1)
torch_n2 = SumNode.from_spn(n2)
torch_n3 = ProductNode.from_spn(n3)
self.assertEqual(torch_n0.mean, n0.mean)
self.assertEqual(torch_n0.std, n0.stdev)
self.assertTrue(np.isclose(torch_n1.p.detach().numpy(), n1.p, atol=DELTA).all())
self.assertTrue(np.isclose(torch_n2.weights.detach().numpy(), n2.weights, atol=DELTA).all())
def test_torch_vs_tf_time(self):
# Create sample data
from sklearn.datasets.samples_generator import make_blobs
import tensorflow as tf
from time import time
X, y = make_blobs(n_samples=10, centers=3, n_features=2, random_state=0)
X = X.astype(np.float32)
# SPFLow implementation
g00 = Gaussian(mean=0.0, stdev=1.0, scope=0)
g10 = Gaussian(mean=1.0, stdev=2.0, scope=1)
g01 = Gaussian(mean=3.0, stdev=2.0, scope=0)
g11 = Gaussian(mean=5.0, stdev=1.0, scope=1)
p0 = Product(children=[g00, g10])
p1 = Product(children=[g01, g11])
s = Sum(weights=[0.2, 0.8], children=[p0, p1])
assign_ids(s)
rebuild_scopes_bottom_up(s)
# Convert
tf_spn, data_placeholder, variable_dict = spn_to_tf_graph(s, data=X)
torch_spn = SumNode.from_spn(s)
# Optimizer
lr = 0.001
tf_optim = tf.train.AdamOptimizer(lr)
torch_optim = optim.Adam(torch_spn.parameters(), lr)
t0 = time()
epochs = 10
optimize_tf_graph(tf_spn, variable_dict, data_placeholder, X, epochs=epochs, optimizer=tf_optim)
t1 = time()
optimize_torch(torch_spn, X, epochs=epochs, optimizer=torch_optim)
t2 = time()
print("Tensorflow took: ", t1 - t0)
print("PyTorch took: ", t2 - t1)
if __name__ == "__main__":
unittest.main()
|
[
"pytorch.SumNode.from_spn",
"spn.structure.Base.Sum",
"numpy.log",
"spn.algorithms.Inference.log_likelihood",
"spn.structure.leaves.parametric.Parametric.Gaussian",
"numpy.array",
"unittest.main",
"pytorch.GaussianNode.from_spn",
"spn.structure.leaves.parametric.Parametric.Categorical",
"tensorflow.train.AdamOptimizer",
"pytorch.SumNode",
"torch.distributions.Normal",
"spn.structure.Base.assign_ids",
"spn.structure.Base.rebuild_scopes_bottom_up",
"torch.Tensor",
"pytorch.GaussianNode",
"pytorch.CategoricalNode.from_spn",
"numpy.random.randn",
"time.time",
"pytorch.ProductNode.from_spn",
"torch.log",
"spn.structure.Base.Product",
"spn.gpu.TensorFlow.optimize_tf_graph",
"pytorch.ProductNode",
"pytorch.optimize_torch",
"numpy.sum",
"sklearn.datasets.samples_generator.make_blobs",
"spn.gpu.TensorFlow.spn_to_tf_graph"
] |
[((7532, 7547), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7545, 7547), False, 'import unittest\n'), ((790, 817), 'torch.log', 'torch.log', (['x[:, self.scope]'], {}), '(x[:, self.scope])\n', (799, 817), False, 'import torch\n'), ((1172, 1197), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.5]'], {}), '([0.1, 0.4, 0.5])\n', (1180, 1197), True, 'import numpy as np\n'), ((1216, 1259), 'pytorch.SumNode', 'SumNode', ([], {'children': 'children', 'weights': 'weights'}), '(children=children, weights=weights)\n', (1223, 1259), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((1335, 1354), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1343, 1354), True, 'import numpy as np\n'), ((1373, 1395), 'numpy.array', 'np.array', (['[10, 20, 30]'], {}), '([10, 20, 30])\n', (1381, 1395), True, 'import numpy as np\n'), ((1408, 1440), 'torch.Tensor', 'torch.Tensor', (['[sample1, sample2]'], {}), '([sample1, sample2])\n', (1420, 1440), False, 'import torch\n'), ((2018, 2055), 'pytorch.ProductNode', 'ProductNode', ([], {'children': '[id0, id1, id2]'}), '(children=[id0, id1, id2])\n', (2029, 2055), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((2131, 2150), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2139, 2150), True, 'import numpy as np\n'), ((2169, 2191), 'numpy.array', 'np.array', (['[10, 20, 30]'], {}), '([10, 20, 30])\n', (2177, 2191), True, 'import numpy as np\n'), ((2204, 2236), 'torch.Tensor', 'torch.Tensor', (['[sample1, sample2]'], {}), '([sample1, sample2])\n', (2216, 2236), False, 'import torch\n'), ((2738, 2787), 'pytorch.GaussianNode', 'GaussianNode', ([], {'mean': 'means[0]', 'std': 'stds[0]', 'scope': '(0)'}), '(mean=means[0], std=stds[0], scope=0)\n', (2750, 2787), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((2805, 2854), 'pytorch.GaussianNode', 'GaussianNode', ([], {'mean': 'means[1]', 'std': 'stds[1]', 'scope': '(1)'}), '(mean=means[1], std=stds[1], scope=1)\n', (2817, 2854), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((2872, 2921), 'pytorch.GaussianNode', 'GaussianNode', ([], {'mean': 'means[2]', 'std': 'stds[2]', 'scope': '(2)'}), '(mean=means[2], std=stds[2], scope=2)\n', (2884, 2921), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((2940, 2959), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2948, 2959), True, 'import numpy as np\n'), ((2978, 3000), 'numpy.array', 'np.array', (['[10, 20, 30]'], {}), '([10, 20, 30])\n', (2986, 3000), True, 'import numpy as np\n'), ((3013, 3045), 'torch.Tensor', 'torch.Tensor', (['[sample1, sample2]'], {}), '([sample1, sample2])\n', (3025, 3045), False, 'import torch\n'), ((3232, 3287), 'torch.distributions.Normal', 'torch.distributions.Normal', ([], {'loc': 'means[0]', 'scale': 'stds[0]'}), '(loc=means[0], scale=stds[0])\n', (3258, 3287), False, 'import torch\n'), ((3306, 3361), 'torch.distributions.Normal', 'torch.distributions.Normal', ([], {'loc': 'means[1]', 'scale': 'stds[1]'}), '(loc=means[1], scale=stds[1])\n', (3332, 3361), False, 'import torch\n'), ((3380, 3435), 'torch.distributions.Normal', 'torch.distributions.Normal', ([], {'loc': 'means[2]', 'scale': 'stds[2]'}), '(loc=means[2], scale=stds[2])\n', (3406, 3435), False, 'import torch\n'), ((4197, 4235), 'spn.structure.leaves.parametric.Parametric.Gaussian', 
'Gaussian', ([], {'mean': '(0.0)', 'stdev': '(1.0)', 'scope': '(0)'}), '(mean=0.0, stdev=1.0, scope=0)\n', (4205, 4235), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((4250, 4288), 'spn.structure.leaves.parametric.Parametric.Gaussian', 'Gaussian', ([], {'mean': '(1.0)', 'stdev': '(2.0)', 'scope': '(1)'}), '(mean=1.0, stdev=2.0, scope=1)\n', (4258, 4288), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((4303, 4341), 'spn.structure.leaves.parametric.Parametric.Gaussian', 'Gaussian', ([], {'mean': '(3.0)', 'stdev': '(2.0)', 'scope': '(0)'}), '(mean=3.0, stdev=2.0, scope=0)\n', (4311, 4341), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((4356, 4394), 'spn.structure.leaves.parametric.Parametric.Gaussian', 'Gaussian', ([], {'mean': '(5.0)', 'stdev': '(1.0)', 'scope': '(1)'}), '(mean=5.0, stdev=1.0, scope=1)\n', (4364, 4394), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((4408, 4436), 'spn.structure.Base.Product', 'Product', ([], {'children': '[g00, g10]'}), '(children=[g00, g10])\n', (4415, 4436), False, 'from spn.structure.Base import Product, Sum\n'), ((4450, 4478), 'spn.structure.Base.Product', 'Product', ([], {'children': '[g01, g11]'}), '(children=[g01, g11])\n', (4457, 4478), False, 'from spn.structure.Base import Product, Sum\n'), ((4491, 4533), 'spn.structure.Base.Sum', 'Sum', ([], {'weights': '[0.2, 0.8]', 'children': '[p0, p1]'}), '(weights=[0.2, 0.8], children=[p0, p1])\n', (4494, 4533), False, 'from spn.structure.Base import Product, Sum\n'), ((4543, 4556), 'spn.structure.Base.assign_ids', 'assign_ids', (['s'], {}), '(s)\n', (4553, 4556), False, 'from spn.structure.Base import assign_ids, rebuild_scopes_bottom_up\n'), ((4565, 4592), 'spn.structure.Base.rebuild_scopes_bottom_up', 'rebuild_scopes_bottom_up', (['s'], {}), '(s)\n', (4589, 4592), False, 'from spn.structure.Base import assign_ids, rebuild_scopes_bottom_up\n'), ((4647, 4670), 'numpy.random.randn', 'np.random.randn', (['(100)', '(2)'], {}), '(100, 2)\n', (4662, 4670), True, 'import numpy as np\n'), ((4707, 4730), 'spn.algorithms.Inference.log_likelihood', 'log_likelihood', (['s', 'data'], {}), '(s, data)\n', (4721, 4730), False, 'from spn.algorithms.Inference import log_likelihood\n'), ((4779, 4819), 'pytorch.GaussianNode', 'GaussianNode', ([], {'mean': '(0.0)', 'std': '(1.0)', 'scope': '(0)'}), '(mean=0.0, std=1.0, scope=0)\n', (4791, 4819), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((4834, 4874), 'pytorch.GaussianNode', 'GaussianNode', ([], {'mean': '(1.0)', 'std': '(2.0)', 'scope': '(1)'}), '(mean=1.0, std=2.0, scope=1)\n', (4846, 4874), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((4889, 4929), 'pytorch.GaussianNode', 'GaussianNode', ([], {'mean': '(3.0)', 'std': '(2.0)', 'scope': '(0)'}), '(mean=3.0, std=2.0, scope=0)\n', (4901, 4929), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((4944, 4984), 'pytorch.GaussianNode', 'GaussianNode', ([], {'mean': '(5.0)', 'std': '(1.0)', 'scope': '(1)'}), '(mean=5.0, std=1.0, scope=1)\n', (4956, 4984), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((4998, 5030), 'pytorch.ProductNode', 'ProductNode', ([], {'children': '[g00, g10]'}), '(children=[g00, g10])\n', (5009, 5030), False, 'from 
pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((5044, 5076), 'pytorch.ProductNode', 'ProductNode', ([], {'children': '[g01, g11]'}), '(children=[g01, g11])\n', (5055, 5076), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((5096, 5142), 'pytorch.SumNode', 'SumNode', ([], {'weights': '[0.2, 0.8]', 'children': '[p0, p1]'}), '(weights=[0.2, 0.8], children=[p0, p1])\n', (5103, 5142), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((5165, 5183), 'torch.Tensor', 'torch.Tensor', (['data'], {}), '(data)\n', (5177, 5183), False, 'import torch\n'), ((5461, 5499), 'spn.structure.leaves.parametric.Parametric.Gaussian', 'Gaussian', ([], {'mean': '(0.0)', 'stdev': '(1.0)', 'scope': '(0)'}), '(mean=0.0, stdev=1.0, scope=0)\n', (5469, 5499), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((5513, 5543), 'spn.structure.leaves.parametric.Parametric.Categorical', 'Categorical', ([], {'p': '[0.1, 0.3, 0.6]'}), '(p=[0.1, 0.3, 0.6])\n', (5524, 5543), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((5557, 5609), 'spn.structure.Base.Sum', 'Sum', ([], {'weights': '[0.1, 0.2, 0.3, 0.4]', 'children': '[n0, n1]'}), '(weights=[0.1, 0.2, 0.3, 0.4], children=[n0, n1])\n', (5560, 5609), False, 'from spn.structure.Base import Product, Sum\n'), ((5623, 5649), 'spn.structure.Base.Product', 'Product', ([], {'children': '[n0, n1]'}), '(children=[n0, n1])\n', (5630, 5649), False, 'from spn.structure.Base import Product, Sum\n'), ((5670, 5695), 'pytorch.GaussianNode.from_spn', 'GaussianNode.from_spn', (['n0'], {}), '(n0)\n', (5691, 5695), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((5715, 5743), 'pytorch.CategoricalNode.from_spn', 'CategoricalNode.from_spn', (['n1'], {}), '(n1)\n', (5739, 5743), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((5763, 5783), 'pytorch.SumNode.from_spn', 'SumNode.from_spn', (['n2'], {}), '(n2)\n', (5779, 5783), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((5803, 5827), 'pytorch.ProductNode.from_spn', 'ProductNode.from_spn', (['n3'], {}), '(n3)\n', (5823, 5827), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((6328, 6393), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'n_samples': '(10)', 'centers': '(3)', 'n_features': '(2)', 'random_state': '(0)'}), '(n_samples=10, centers=3, n_features=2, random_state=0)\n', (6338, 6393), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((6474, 6512), 'spn.structure.leaves.parametric.Parametric.Gaussian', 'Gaussian', ([], {'mean': '(0.0)', 'stdev': '(1.0)', 'scope': '(0)'}), '(mean=0.0, stdev=1.0, scope=0)\n', (6482, 6512), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((6527, 6565), 'spn.structure.leaves.parametric.Parametric.Gaussian', 'Gaussian', ([], {'mean': '(1.0)', 'stdev': '(2.0)', 'scope': '(1)'}), '(mean=1.0, stdev=2.0, scope=1)\n', (6535, 6565), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((6580, 6618), 'spn.structure.leaves.parametric.Parametric.Gaussian', 'Gaussian', ([], {'mean': '(3.0)', 'stdev': '(2.0)', 'scope': '(0)'}), '(mean=3.0, stdev=2.0, scope=0)\n', (6588, 
6618), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((6633, 6671), 'spn.structure.leaves.parametric.Parametric.Gaussian', 'Gaussian', ([], {'mean': '(5.0)', 'stdev': '(1.0)', 'scope': '(1)'}), '(mean=5.0, stdev=1.0, scope=1)\n', (6641, 6671), False, 'from spn.structure.leaves.parametric.Parametric import Gaussian, Categorical\n'), ((6685, 6713), 'spn.structure.Base.Product', 'Product', ([], {'children': '[g00, g10]'}), '(children=[g00, g10])\n', (6692, 6713), False, 'from spn.structure.Base import Product, Sum\n'), ((6727, 6755), 'spn.structure.Base.Product', 'Product', ([], {'children': '[g01, g11]'}), '(children=[g01, g11])\n', (6734, 6755), False, 'from spn.structure.Base import Product, Sum\n'), ((6768, 6810), 'spn.structure.Base.Sum', 'Sum', ([], {'weights': '[0.2, 0.8]', 'children': '[p0, p1]'}), '(weights=[0.2, 0.8], children=[p0, p1])\n', (6771, 6810), False, 'from spn.structure.Base import Product, Sum\n'), ((6819, 6832), 'spn.structure.Base.assign_ids', 'assign_ids', (['s'], {}), '(s)\n', (6829, 6832), False, 'from spn.structure.Base import assign_ids, rebuild_scopes_bottom_up\n'), ((6841, 6868), 'spn.structure.Base.rebuild_scopes_bottom_up', 'rebuild_scopes_bottom_up', (['s'], {}), '(s)\n', (6865, 6868), False, 'from spn.structure.Base import assign_ids, rebuild_scopes_bottom_up\n'), ((6938, 6964), 'spn.gpu.TensorFlow.spn_to_tf_graph', 'spn_to_tf_graph', (['s'], {'data': 'X'}), '(s, data=X)\n', (6953, 6964), False, 'from spn.gpu.TensorFlow import spn_to_tf_graph, optimize_tf_graph\n'), ((6985, 7004), 'pytorch.SumNode.from_spn', 'SumNode.from_spn', (['s'], {}), '(s)\n', (7001, 7004), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((7064, 7090), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr'], {}), '(lr)\n', (7086, 7090), True, 'import tensorflow as tf\n'), ((7166, 7172), 'time.time', 'time', ([], {}), '()\n', (7170, 7172), False, 'from time import time\n'), ((7201, 7301), 'spn.gpu.TensorFlow.optimize_tf_graph', 'optimize_tf_graph', (['tf_spn', 'variable_dict', 'data_placeholder', 'X'], {'epochs': 'epochs', 'optimizer': 'tf_optim'}), '(tf_spn, variable_dict, data_placeholder, X, epochs=epochs,\n optimizer=tf_optim)\n', (7218, 7301), False, 'from spn.gpu.TensorFlow import spn_to_tf_graph, optimize_tf_graph\n'), ((7311, 7317), 'time.time', 'time', ([], {}), '()\n', (7315, 7317), False, 'from time import time\n'), ((7326, 7392), 'pytorch.optimize_torch', 'optimize_torch', (['torch_spn', 'X'], {'epochs': 'epochs', 'optimizer': 'torch_optim'}), '(torch_spn, X, epochs=epochs, optimizer=torch_optim)\n', (7340, 7392), False, 'from pytorch import CategoricalNode, GaussianNode, ProductNode, SumNode, optimize_torch\n'), ((7406, 7412), 'time.time', 'time', ([], {}), '()\n', (7410, 7412), False, 'from time import time\n'), ((3475, 3496), 'torch.Tensor', 'torch.Tensor', (['[1, 10]'], {}), '([1, 10])\n', (3487, 3496), False, 'import torch\n'), ((3536, 3557), 'torch.Tensor', 'torch.Tensor', (['[2, 20]'], {}), '([2, 20])\n', (3548, 3557), False, 'import torch\n'), ((3597, 3618), 'torch.Tensor', 'torch.Tensor', (['[3, 30]'], {}), '([3, 30])\n', (3609, 3618), False, 'import torch\n'), ((2369, 2384), 'numpy.log', 'np.log', (['sample1'], {}), '(sample1)\n', (2375, 2384), True, 'import numpy as np\n'), ((2394, 2409), 'numpy.log', 'np.log', (['sample2'], {}), '(sample2)\n', (2400, 2409), True, 'import numpy as np\n'), ((1562, 1587), 'numpy.sum', 'np.sum', (['(weights * sample1)'], {}), 
'(weights * sample1)\n', (1568, 1587), True, 'import numpy as np\n'), ((1589, 1614), 'numpy.sum', 'np.sum', (['(weights * sample2)'], {}), '(weights * sample2)\n', (1595, 1614), True, 'import numpy as np\n'), ((5312, 5324), 'numpy.array', 'np.array', (['ll'], {}), '(ll)\n', (5320, 5324), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from welib.tools.clean_exceptions import *
from welib.FEM.graph import Node as GraphNode
from welib.FEM.graph import Element as GraphElement
from welib.FEM.graph import NodeProperty
from welib.FEM.graph import GraphModel
class MaterialProperty(NodeProperty):
def __init__(self):
        NodeProperty.__init__(self)
class FEMNode(GraphNode):
def __init__(self, ID, x, y, z=0, Type=None, DOFs=[]):
GraphNode.__init__(self, ID, x, y, z)
self.DOFs = DOFs
def __repr__(self):
s='<Node{:4d}> x:{:7.2f} y:{:7.2f} z:{:7.2f}, DOFs: {}'.format(self.ID, self.x, self.y, self.z, self.DOFs)
return s
class FEMElement(GraphElement):
def __init__(self, ID, nodeIDs, nodes=None, properties=None):
GraphElement.__init__(self, ID, nodeIDs, nodes, properties)
self.Ce=[]
self.Ke=[]
self.Me=[]
def __repr__(self):
s='<Elem{:4d}> NodeIDs: {}'.format(self.ID, self.nodeIDs)
if self.nodes is not None:
s+=' l={:.2f}'.format(self.length)
return s
class BeamElement(FEMElement):
def __init__(self, ID, nodeIDs, nodes, properties=None):
super(BeamElement,self).__init__(ID, nodeIDs, nodes=nodes, properties=properties)
class FEMModel(GraphModel):
def __init__(self):
GraphModel.__init__(self)
self.MM = None
self.KK = None
self.DD = None
self.nDOF = None
def setFullMatrices(self,MM,KK,DD=None):
self.MM=MM
self.KK=KK
if DD is not None:
self.DD=DD
def CraigBampton(self, Ileader, Ifollow=None, Ifixed=None):
""" """
from welib.FEM.reduction import CraigBampton
if Ifixed is not None:
M,K = self.applyFixBC()
else:
M,K = self.MM, self.KK
return CraigBampton(M, K, Ileader, Ifollow=Ifollow)
def DOF2Nodes(self):
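        # Build an (nDOF x 4) lookup table with columns:
        # [global DOF index, node index, number of DOFs on that node, 1-based local DOF index]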
DOF2Nodes=np.zeros((self.nDOF,4),int)
for iN,node in enumerate(self.Nodes):
for iiDOF,iDOF in enumerate(node.DOFs):
DOF2Nodes[iDOF,0] = iDOF
DOF2Nodes[iDOF,1] = iN
DOF2Nodes[iDOF,2] = len(node.DOFs)
DOF2Nodes[iDOF,3] = iiDOF+1
return DOF2Nodes
if __name__=='__main__':
np.set_printoptions(linewidth=500)
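    # demo usage: SubDynModel (not defined in this snippet) loads a SubDyn summary file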
mdl=SubDynModel()
mdl.fromSummaryFile('../../data/Monopile/Pendulum.SD.sum.yaml')
|
[
"welib.FEM.graph.Element.__init__",
"welib.FEM.reduction.CraigBampton",
"numpy.zeros",
"welib.FEM.graph.Node.__init__",
"welib.FEM.graph.GraphModel.__init__",
"numpy.set_printoptions"
] |
[((2329, 2363), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(500)'}), '(linewidth=500)\n', (2348, 2363), True, 'import numpy as np\n'), ((464, 501), 'welib.FEM.graph.Node.__init__', 'GraphNode.__init__', (['self', 'ID', 'x', 'y', 'z'], {}), '(self, ID, x, y, z)\n', (482, 501), True, 'from welib.FEM.graph import Node as GraphNode\n'), ((791, 850), 'welib.FEM.graph.Element.__init__', 'GraphElement.__init__', (['self', 'ID', 'nodeIDs', 'nodes', 'properties'], {}), '(self, ID, nodeIDs, nodes, properties)\n', (812, 850), True, 'from welib.FEM.graph import Element as GraphElement\n'), ((1342, 1367), 'welib.FEM.graph.GraphModel.__init__', 'GraphModel.__init__', (['self'], {}), '(self)\n', (1361, 1367), False, 'from welib.FEM.graph import GraphModel\n'), ((1884, 1928), 'welib.FEM.reduction.CraigBampton', 'CraigBampton', (['M', 'K', 'Ileader'], {'Ifollow': 'Ifollow'}), '(M, K, Ileader, Ifollow=Ifollow)\n', (1896, 1928), False, 'from welib.FEM.reduction import CraigBampton\n'), ((1973, 2002), 'numpy.zeros', 'np.zeros', (['(self.nDOF, 4)', 'int'], {}), '((self.nDOF, 4), int)\n', (1981, 2002), True, 'import numpy as np\n')]
|
from __future__ import print_function
from astrometry.util.fits import *
import pylab as plt
import numpy as np
from glob import glob
from astrometry.util.plotutils import *
from astrometry.libkd.spherematch import *
from astrometry.util.resample import *
from astrometry.util.util import *
ps = PlotSequence('cosmos')
baseA = 'cosmos-dr5-60/'
baseB = 'cosmos-dr5-67/'
Atxt = '60'
Btxt = '67'
TA = merge_tables([fits_table(fn) for fn in glob(baseA + 'tractor/*/tractor-*.fits')])
print('Total of', len(TA), 'sources in 60')
TA.cut(TA.brick_primary)
print(len(TA), 'brick primary')
TB = merge_tables([fits_table(fn) for fn in glob(baseB + 'tractor/*/tractor-*.fits')])
print('Total of', len(TB), 'sources in 67')
TB.cut(TB.brick_primary)
print(len(TB), 'brick primary')
ramin = min(TA.ra.min(), TB.ra.min())
ramax = max(TA.ra.max(), TB.ra.max())
decmin = min(TA.dec.min(), TB.dec.min())
decmax = max(TA.dec.max(), TB.dec.max())
# Create low-res depth maps
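# coarse pixel scale for the depth maps: 10 x 0.262 arcsec, converted to degrees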
pixsc = 10. * 0.262/3600.
rc,dc = (ramin+ramax)/2., (decmin+decmax)/2.
w = int((ramax - ramin) * np.cos(np.deg2rad(dc)) / pixsc)
h = int((decmax - decmin) / pixsc)
wcs = Tan(rc, dc, w/2., h/2., -pixsc, 0., 0., pixsc, float(w), float(h))
#print('WCS:', wcs)
#for band in ['g','r','z']:
for band in ['g']:
psfdepthA = np.zeros(wcs.shape, np.float32)
psfdepthB = np.zeros(wcs.shape, np.float32)
for fn in glob(baseA + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band):
print('Reading', fn)
iwcs = Tan(fn, 1)
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
dmap = fitsio.read(fn)
#I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
#print(len(I), 'finite & positive values')
psfdepthA[Yo,Xo] = dmap[Yi,Xi]
for fn in glob(baseB + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band):
print('Reading', fn)
iwcs = Tan(fn, 1)
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
dmap = fitsio.read(fn)
#I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
#print(len(I), 'finite & positive values')
psfdepthB[Yo,Xo] = dmap[Yi,Xi]
galdepthA = np.zeros(wcs.shape, np.float32)
galdepthB = np.zeros(wcs.shape, np.float32)
for fn in glob(baseA + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band):
print('Reading', fn)
iwcs = Tan(fn, 1)
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
dmap = fitsio.read(fn)
#I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
#print(len(I), 'finite & positive values')
galdepthA[Yo,Xo] = dmap[Yi,Xi]
for fn in glob(baseB + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band):
print('Reading', fn)
iwcs = Tan(fn, 1)
Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
dmap = fitsio.read(fn)
#I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
#print(len(I), 'finite & positive values')
galdepthB[Yo,Xo] = dmap[Yi,Xi]
print('PsfdepthA (iv)', psfdepthA.min(), psfdepthA.max())
print('PsfdepthB (iv)', psfdepthB.min(), psfdepthB.max())
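    # convert inverse-variance depth maps to 5-sigma depths in AB mag (nanomaggie zeropoint 22.5): m = -2.5*(log10(5/sqrt(iv)) - 9)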
psfdepthA = -2.5 * (np.log10(5./np.sqrt(psfdepthA)) - 9)
psfdepthB = -2.5 * (np.log10(5./np.sqrt(psfdepthB)) - 9)
print('PsfdepthA', psfdepthA.min(), psfdepthA.max())
print('PsfdepthB', psfdepthB.min(), psfdepthB.max())
galdepthA = -2.5 * (np.log10(5./np.sqrt(galdepthA)) - 9)
galdepthB = -2.5 * (np.log10(5./np.sqrt(galdepthB)) - 9)
print('GaldepthA', galdepthA.min(), galdepthA.max())
print('GaldepthB', galdepthB.min(), galdepthB.max())
ima = dict(interpolation='nearest', origin='lower',
extent=[ramax,ramin,decmin,decmax], vmin=20.0, vmax=24.5)
plt.clf()
plt.subplot(1,2,1)
plt.imshow(psfdepthA, **ima)
plt.title(Atxt)
plt.subplot(1,2,2)
plt.imshow(psfdepthB, **ima)
plt.title(Btxt)
plt.suptitle('PSF Depth maps (%s)' % band)
ps.savefig()
plt.clf()
plt.subplot(1,2,1)
plt.imshow(galdepthA, **ima)
plt.title(Atxt)
plt.subplot(1,2,2)
plt.imshow(galdepthB, **ima)
plt.title(Btxt)
plt.suptitle('Galaxy Depth maps (%s)' % band)
ps.savefig()
# dd = np.append(galdepthA.ravel(), galdepthB.ravel())
# dd = dd[np.isfinite(dd)]
# thresh = np.percentile(dd, 10)
# print('Depth threshold:', thresh)
thresh = 24.0
hh,ww = wcs.shape
ok,xx,yy = wcs.radec2pixelxy(TA.ra, TA.dec)
xx = np.clip((np.round(xx) - 1), 0, ww-1).astype(int)
yy = np.clip((np.round(yy) - 1), 0, hh-1).astype(int)
I = np.flatnonzero((galdepthA[yy,xx] > thresh) * (galdepthB[yy,xx] > thresh))
print(len(I), 'of', len(TA), 'sources in A are in good-depth regions')
TA.cut(I)
ok,xx,yy = wcs.radec2pixelxy(TB.ra, TB.dec)
xx = np.clip((np.round(xx) - 1), 0, ww-1).astype(int)
yy = np.clip((np.round(yy) - 1), 0, hh-1).astype(int)
I = np.flatnonzero((galdepthA[yy,xx] > thresh) * (galdepthB[yy,xx] > thresh))
print(len(I), 'of', len(TB), 'sources in B are in good-depth regions')
TB.cut(I)
ha = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
hb = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
plt.clf()
plt.hist(np.maximum(psfdepthA.ravel(), 18), color='b', label=Atxt, **ha)
plt.hist(np.maximum(psfdepthB.ravel(), 18), color='r', label=Btxt, **hb)
plt.xlim(18,27)
plt.legend()
plt.title('PSF depth map values (g mag)')
ps.savefig()
plt.clf()
plt.hist(np.maximum(galdepthA.ravel(), 18), color='b', label=Atxt, **ha)
plt.hist(np.maximum(galdepthB.ravel(), 18), color='r', label=Btxt, **hb)
plt.xlim(18,27)
plt.legend()
plt.title('Galaxy depth map values (g mag)')
ps.savefig()
TA.mag_g = -2.5 * (np.log10(TA.flux_g) - 9)
TB.mag_g = -2.5 * (np.log10(TB.flux_g) - 9)
TA.psfdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TA.psfdepth_g)) - 9)
TB.psfdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TB.psfdepth_g)) - 9)
TA.galdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TA.galdepth_g)) - 9)
TB.galdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TB.galdepth_g)) - 9)
ha = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
hb = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
ha2 = dict(range=(18,27), bins=50, histtype='step', alpha=0.5)
hb2 = dict(range=(18,27), bins=50, histtype='step', alpha=0.5)
plt.clf()
plt.hist(TA.mag_g, color='b', label=Atxt, **ha)
plt.hist(TA.mag_g, color='b', **ha2)
plt.hist(TB.mag_g, color='r', label=Btxt, **hb)
plt.hist(TB.mag_g, color='r', **hb2)
plt.xlim(18,27)
plt.legend()
plt.xlabel('All sources: g mag')
ps.savefig()
ha = dict(range=(23,25), bins=50, histtype='stepfilled', alpha=0.1)
hb = dict(range=(23,25), bins=50, histtype='stepfilled', alpha=0.1)
plt.clf()
plt.hist(TA.psfdepth_mag_g, color='b', label=Atxt, **ha)
plt.hist(TB.psfdepth_mag_g, color='r', label=Btxt, **hb)
plt.xlim(23,25)
plt.legend()
plt.title('PSF depth for sources (g mag)')
ps.savefig()
plt.clf()
plt.hist(TA.galdepth_mag_g, color='b', label=Atxt, **ha)
plt.hist(TB.galdepth_mag_g, color='r', label=Btxt, **hb)
plt.xlim(23,25)
plt.legend()
plt.title('Gal depth for sources (g mag)')
ps.savefig()
ha = dict(range=((ramin,ramax),(decmin,decmax)), doclf=False,
docolorbar=False, imshowargs=dict(vmin=0, vmax=14))
plt.clf()
plt.subplot(1,2,1)
plothist(TA.ra, TA.dec, 200, **ha)
plt.title(Atxt)
plt.subplot(1,2,2)
plothist(TB.ra, TB.dec, 200, **ha)
plt.title(Btxt)
plt.suptitle('All sources')
ps.savefig()
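    # cross-match the two catalogs within 1 arcsec (1./3600. deg)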
I,J,d = match_radec(TA.ra, TA.dec, TB.ra, TB.dec, 1./3600.)
unmatchedA = np.ones(len(TA), bool)
unmatchedB = np.ones(len(TB), bool)
unmatchedA[I] = False
unmatchedB[J] = False
ha = dict(range=((ramin,ramax),(decmin,decmax)), doclf=False,
docolorbar=False, imshowargs=dict(vmin=0, vmax=5))
plt.clf()
plt.subplot(1,2,1)
plothist(TA.ra[unmatchedA], TA.dec[unmatchedA], 200, **ha)
plt.title(Atxt)
plt.subplot(1,2,2)
plothist(TB.ra[unmatchedB], TB.dec[unmatchedB], 200, **ha)
plt.title(Btxt)
plt.suptitle('Un-matched sources')
ps.savefig()
|
[
"pylab.title",
"numpy.log10",
"pylab.hist",
"numpy.sqrt",
"pylab.subplot",
"numpy.round",
"numpy.flatnonzero",
"pylab.xlabel",
"pylab.legend",
"numpy.zeros",
"numpy.deg2rad",
"glob.glob",
"pylab.xlim",
"pylab.clf",
"pylab.suptitle",
"pylab.imshow"
] |
[((4483, 4558), 'numpy.flatnonzero', 'np.flatnonzero', (['((galdepthA[yy, xx] > thresh) * (galdepthB[yy, xx] > thresh))'], {}), '((galdepthA[yy, xx] > thresh) * (galdepthB[yy, xx] > thresh))\n', (4497, 4558), True, 'import numpy as np\n'), ((4795, 4870), 'numpy.flatnonzero', 'np.flatnonzero', (['((galdepthA[yy, xx] > thresh) * (galdepthB[yy, xx] > thresh))'], {}), '((galdepthA[yy, xx] > thresh) * (galdepthB[yy, xx] > thresh))\n', (4809, 4870), True, 'import numpy as np\n'), ((5089, 5098), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (5096, 5098), True, 'import pylab as plt\n'), ((5245, 5261), 'pylab.xlim', 'plt.xlim', (['(18)', '(27)'], {}), '(18, 27)\n', (5253, 5261), True, 'import pylab as plt\n'), ((5261, 5273), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (5271, 5273), True, 'import pylab as plt\n'), ((5274, 5315), 'pylab.title', 'plt.title', (['"""PSF depth map values (g mag)"""'], {}), "('PSF depth map values (g mag)')\n", (5283, 5315), True, 'import pylab as plt\n'), ((5330, 5339), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (5337, 5339), True, 'import pylab as plt\n'), ((5486, 5502), 'pylab.xlim', 'plt.xlim', (['(18)', '(27)'], {}), '(18, 27)\n', (5494, 5502), True, 'import pylab as plt\n'), ((5502, 5514), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (5512, 5514), True, 'import pylab as plt\n'), ((5515, 5559), 'pylab.title', 'plt.title', (['"""Galaxy depth map values (g mag)"""'], {}), "('Galaxy depth map values (g mag)')\n", (5524, 5559), True, 'import pylab as plt\n'), ((6204, 6213), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (6211, 6213), True, 'import pylab as plt\n'), ((6214, 6261), 'pylab.hist', 'plt.hist', (['TA.mag_g'], {'color': '"""b"""', 'label': 'Atxt'}), "(TA.mag_g, color='b', label=Atxt, **ha)\n", (6222, 6261), True, 'import pylab as plt\n'), ((6262, 6298), 'pylab.hist', 'plt.hist', (['TA.mag_g'], {'color': '"""b"""'}), "(TA.mag_g, color='b', **ha2)\n", (6270, 6298), True, 'import pylab as plt\n'), ((6299, 6346), 'pylab.hist', 'plt.hist', (['TB.mag_g'], {'color': '"""r"""', 'label': 'Btxt'}), "(TB.mag_g, color='r', label=Btxt, **hb)\n", (6307, 6346), True, 'import pylab as plt\n'), ((6347, 6383), 'pylab.hist', 'plt.hist', (['TB.mag_g'], {'color': '"""r"""'}), "(TB.mag_g, color='r', **hb2)\n", (6355, 6383), True, 'import pylab as plt\n'), ((6384, 6400), 'pylab.xlim', 'plt.xlim', (['(18)', '(27)'], {}), '(18, 27)\n', (6392, 6400), True, 'import pylab as plt\n'), ((6400, 6412), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (6410, 6412), True, 'import pylab as plt\n'), ((6413, 6445), 'pylab.xlabel', 'plt.xlabel', (['"""All sources: g mag"""'], {}), "('All sources: g mag')\n", (6423, 6445), True, 'import pylab as plt\n'), ((6597, 6606), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (6604, 6606), True, 'import pylab as plt\n'), ((6607, 6663), 'pylab.hist', 'plt.hist', (['TA.psfdepth_mag_g'], {'color': '"""b"""', 'label': 'Atxt'}), "(TA.psfdepth_mag_g, color='b', label=Atxt, **ha)\n", (6615, 6663), True, 'import pylab as plt\n'), ((6664, 6720), 'pylab.hist', 'plt.hist', (['TB.psfdepth_mag_g'], {'color': '"""r"""', 'label': 'Btxt'}), "(TB.psfdepth_mag_g, color='r', label=Btxt, **hb)\n", (6672, 6720), True, 'import pylab as plt\n'), ((6721, 6737), 'pylab.xlim', 'plt.xlim', (['(23)', '(25)'], {}), '(23, 25)\n', (6729, 6737), True, 'import pylab as plt\n'), ((6737, 6749), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (6747, 6749), True, 'import pylab as plt\n'), ((6750, 6792), 'pylab.title', 'plt.title', (['"""PSF depth for sources (g mag)"""'], {}), "('PSF depth for sources (g 
mag)')\n", (6759, 6792), True, 'import pylab as plt\n'), ((6807, 6816), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (6814, 6816), True, 'import pylab as plt\n'), ((6817, 6873), 'pylab.hist', 'plt.hist', (['TA.galdepth_mag_g'], {'color': '"""b"""', 'label': 'Atxt'}), "(TA.galdepth_mag_g, color='b', label=Atxt, **ha)\n", (6825, 6873), True, 'import pylab as plt\n'), ((6874, 6930), 'pylab.hist', 'plt.hist', (['TB.galdepth_mag_g'], {'color': '"""r"""', 'label': 'Btxt'}), "(TB.galdepth_mag_g, color='r', label=Btxt, **hb)\n", (6882, 6930), True, 'import pylab as plt\n'), ((6931, 6947), 'pylab.xlim', 'plt.xlim', (['(23)', '(25)'], {}), '(23, 25)\n', (6939, 6947), True, 'import pylab as plt\n'), ((6947, 6959), 'pylab.legend', 'plt.legend', ([], {}), '()\n', (6957, 6959), True, 'import pylab as plt\n'), ((6960, 7002), 'pylab.title', 'plt.title', (['"""Gal depth for sources (g mag)"""'], {}), "('Gal depth for sources (g mag)')\n", (6969, 7002), True, 'import pylab as plt\n'), ((7142, 7151), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (7149, 7151), True, 'import pylab as plt\n'), ((7152, 7172), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (7163, 7172), True, 'import pylab as plt\n'), ((7206, 7221), 'pylab.title', 'plt.title', (['Atxt'], {}), '(Atxt)\n', (7215, 7221), True, 'import pylab as plt\n'), ((7222, 7242), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (7233, 7242), True, 'import pylab as plt\n'), ((7276, 7291), 'pylab.title', 'plt.title', (['Btxt'], {}), '(Btxt)\n', (7285, 7291), True, 'import pylab as plt\n'), ((7292, 7319), 'pylab.suptitle', 'plt.suptitle', (['"""All sources"""'], {}), "('All sources')\n", (7304, 7319), True, 'import pylab as plt\n'), ((7636, 7645), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (7643, 7645), True, 'import pylab as plt\n'), ((7646, 7666), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (7657, 7666), True, 'import pylab as plt\n'), ((7724, 7739), 'pylab.title', 'plt.title', (['Atxt'], {}), '(Atxt)\n', (7733, 7739), True, 'import pylab as plt\n'), ((7740, 7760), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (7751, 7760), True, 'import pylab as plt\n'), ((7818, 7833), 'pylab.title', 'plt.title', (['Btxt'], {}), '(Btxt)\n', (7827, 7833), True, 'import pylab as plt\n'), ((7834, 7868), 'pylab.suptitle', 'plt.suptitle', (['"""Un-matched sources"""'], {}), "('Un-matched sources')\n", (7846, 7868), True, 'import pylab as plt\n'), ((1287, 1318), 'numpy.zeros', 'np.zeros', (['wcs.shape', 'np.float32'], {}), '(wcs.shape, np.float32)\n', (1295, 1318), True, 'import numpy as np\n'), ((1335, 1366), 'numpy.zeros', 'np.zeros', (['wcs.shape', 'np.float32'], {}), '(wcs.shape, np.float32)\n', (1343, 1366), True, 'import numpy as np\n'), ((1381, 1443), 'glob.glob', 'glob', (["(baseA + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band)"], {}), "(baseA + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band)\n", (1385, 1443), False, 'from glob import glob\n'), ((1750, 1812), 'glob.glob', 'glob', (["(baseB + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band)"], {}), "(baseB + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band)\n", (1754, 1812), False, 'from glob import glob\n'), ((2122, 2153), 'numpy.zeros', 'np.zeros', (['wcs.shape', 'np.float32'], {}), '(wcs.shape, np.float32)\n', (2130, 2153), True, 'import numpy as np\n'), ((2170, 2201), 'numpy.zeros', 'np.zeros', (['wcs.shape', 'np.float32'], {}), '(wcs.shape, np.float32)\n', (2178, 2201), True, 'import numpy as np\n'), ((2216, 
2281), 'glob.glob', 'glob', (["(baseA + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band)"], {}), "(baseA + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band)\n", (2220, 2281), False, 'from glob import glob\n'), ((2588, 2653), 'glob.glob', 'glob', (["(baseB + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band)"], {}), "(baseB + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band)\n", (2592, 2653), False, 'from glob import glob\n'), ((3681, 3690), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (3688, 3690), True, 'import pylab as plt\n'), ((3695, 3715), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3706, 3715), True, 'import pylab as plt\n'), ((3718, 3746), 'pylab.imshow', 'plt.imshow', (['psfdepthA'], {}), '(psfdepthA, **ima)\n', (3728, 3746), True, 'import pylab as plt\n'), ((3751, 3766), 'pylab.title', 'plt.title', (['Atxt'], {}), '(Atxt)\n', (3760, 3766), True, 'import pylab as plt\n'), ((3771, 3791), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3782, 3791), True, 'import pylab as plt\n'), ((3794, 3822), 'pylab.imshow', 'plt.imshow', (['psfdepthB'], {}), '(psfdepthB, **ima)\n', (3804, 3822), True, 'import pylab as plt\n'), ((3827, 3842), 'pylab.title', 'plt.title', (['Btxt'], {}), '(Btxt)\n', (3836, 3842), True, 'import pylab as plt\n'), ((3847, 3889), 'pylab.suptitle', 'plt.suptitle', (["('PSF Depth maps (%s)' % band)"], {}), "('PSF Depth maps (%s)' % band)\n", (3859, 3889), True, 'import pylab as plt\n'), ((3912, 3921), 'pylab.clf', 'plt.clf', ([], {}), '()\n', (3919, 3921), True, 'import pylab as plt\n'), ((3926, 3946), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3937, 3946), True, 'import pylab as plt\n'), ((3949, 3977), 'pylab.imshow', 'plt.imshow', (['galdepthA'], {}), '(galdepthA, **ima)\n', (3959, 3977), True, 'import pylab as plt\n'), ((3982, 3997), 'pylab.title', 'plt.title', (['Atxt'], {}), '(Atxt)\n', (3991, 3997), True, 'import pylab as plt\n'), ((4002, 4022), 'pylab.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4013, 4022), True, 'import pylab as plt\n'), ((4025, 4053), 'pylab.imshow', 'plt.imshow', (['galdepthB'], {}), '(galdepthB, **ima)\n', (4035, 4053), True, 'import pylab as plt\n'), ((4058, 4073), 'pylab.title', 'plt.title', (['Btxt'], {}), '(Btxt)\n', (4067, 4073), True, 'import pylab as plt\n'), ((4078, 4123), 'pylab.suptitle', 'plt.suptitle', (["('Galaxy Depth maps (%s)' % band)"], {}), "('Galaxy Depth maps (%s)' % band)\n", (4090, 4123), True, 'import pylab as plt\n'), ((5594, 5613), 'numpy.log10', 'np.log10', (['TA.flux_g'], {}), '(TA.flux_g)\n', (5602, 5613), True, 'import numpy as np\n'), ((5638, 5657), 'numpy.log10', 'np.log10', (['TB.flux_g'], {}), '(TB.flux_g)\n', (5646, 5657), True, 'import numpy as np\n'), ((441, 481), 'glob.glob', 'glob', (["(baseA + 'tractor/*/tractor-*.fits')"], {}), "(baseA + 'tractor/*/tractor-*.fits')\n", (445, 481), False, 'from glob import glob\n'), ((630, 670), 'glob.glob', 'glob', (["(baseB + 'tractor/*/tractor-*.fits')"], {}), "(baseB + 'tractor/*/tractor-*.fits')\n", (634, 670), False, 'from glob import glob\n'), ((1070, 1084), 'numpy.deg2rad', 'np.deg2rad', (['dc'], {}), '(dc)\n', (1080, 1084), True, 'import numpy as np\n'), ((4385, 4397), 'numpy.round', 'np.round', (['xx'], {}), '(xx)\n', (4393, 4397), True, 'import numpy as np\n'), ((4439, 4451), 'numpy.round', 'np.round', (['yy'], {}), '(yy)\n', (4447, 4451), True, 'import numpy as np\n'), ((4697, 4709), 'numpy.round', 'np.round', (['xx'], {}), '(xx)\n', 
(4705, 4709), True, 'import numpy as np\n'), ((4751, 4763), 'numpy.round', 'np.round', (['yy'], {}), '(yy)\n', (4759, 4763), True, 'import numpy as np\n'), ((5704, 5726), 'numpy.sqrt', 'np.sqrt', (['TA.psfdepth_g'], {}), '(TA.psfdepth_g)\n', (5711, 5726), True, 'import numpy as np\n'), ((5773, 5795), 'numpy.sqrt', 'np.sqrt', (['TB.psfdepth_g'], {}), '(TB.psfdepth_g)\n', (5780, 5795), True, 'import numpy as np\n'), ((5842, 5864), 'numpy.sqrt', 'np.sqrt', (['TA.galdepth_g'], {}), '(TA.galdepth_g)\n', (5849, 5864), True, 'import numpy as np\n'), ((5911, 5933), 'numpy.sqrt', 'np.sqrt', (['TB.galdepth_g'], {}), '(TB.galdepth_g)\n', (5918, 5933), True, 'import numpy as np\n'), ((3111, 3129), 'numpy.sqrt', 'np.sqrt', (['psfdepthA'], {}), '(psfdepthA)\n', (3118, 3129), True, 'import numpy as np\n'), ((3172, 3190), 'numpy.sqrt', 'np.sqrt', (['psfdepthB'], {}), '(psfdepthB)\n', (3179, 3190), True, 'import numpy as np\n'), ((3347, 3365), 'numpy.sqrt', 'np.sqrt', (['galdepthA'], {}), '(galdepthA)\n', (3354, 3365), True, 'import numpy as np\n'), ((3408, 3426), 'numpy.sqrt', 'np.sqrt', (['galdepthB'], {}), '(galdepthB)\n', (3415, 3426), True, 'import numpy as np\n')]
|
import gym
import numpy as np
import matplotlib.pyplot as plt
def policy(state, theta):
""" TODO: return probabilities for actions under softmax action selection """
h = state @ theta
return np.exp(h)/np.sum(np.exp(h))
def generate_episode(env, theta, display=False):
""" enerates one episode and returns the list of states, the list of rewards and the list of actions of that episode """
state = env.reset()
states = [state]
actions = []
rewards = []
for t in range(500):
if display:
env.render()
p = policy(state, theta)
action = np.random.choice(len(p), p=p)
state, reward, done, info = env.step(action)
rewards.append(reward)
actions.append(action)
if done:
break
states.append(state)
return states, rewards, actions
def REINFORCE(env, gamma=0.99, alpha=0.05):
theta = np.random.rand(4, 2) # policy parameters
ep_len_list = []
mean_ep_len = []
for e in range(1000):
if e % 300 == 0:
            states, rewards, actions = generate_episode(env, theta, False)  # set display=True here to render the policy every 300 episodes
else:
states, rewards, actions = generate_episode(env, theta, False)
# TODO: keep track of previous 100 episode lengths and compute mean
if len(ep_len_list) >= 100:
            ep_len_list.pop(0)  # drop the oldest episode length
ep_len_list.append(len(states))
mean = sum(ep_len_list) / len(ep_len_list)
mean_ep_len.append(mean)
print("episode:\t" + str(e) + " length:\t" + str(len(states)) + " mean len:\t" + str(mean))
# TODO: implement the reinforce algorithm to improve the policy weights
nr_steps = len(states)
G = np.zeros([nr_steps])
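        # Monte-Carlo returns: G[t] = sum over k>t of gamma^(k-t-1) * r_k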
for t in range(nr_steps):
for k in range(t+1,nr_steps+1):
G[t] += (gamma**(k-t-1)) * rewards[k-1]
action = actions[t]
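            # policy-gradient step for the linear-softmax policy:
            # d(log pi(a|s))/d(theta[:,a]) = s * (1 - pi(a|s)); only the chosen action's column is updated here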
theta[:,action] = theta[:,action] + alpha * (gamma**t) * G[t] * (states[t] * (1 - policy(states[t], theta)[action]))
return mean_ep_len
def main():
env = gym.make('CartPole-v1')
mean_ep_len = REINFORCE(env)
plt.plot(mean_ep_len)
plt.title("Mean Ep length over time")
plt.xlabel("Episodes")
plt.ylabel("Mean Episode Length")
plt.legend()
plt.savefig('ex09' + '.png')
plt.show()
env.close()
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.savefig",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.zeros",
"matplotlib.pyplot.title",
"gym.make",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((913, 933), 'numpy.random.rand', 'np.random.rand', (['(4)', '(2)'], {}), '(4, 2)\n', (927, 933), True, 'import numpy as np\n'), ((2139, 2162), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (2147, 2162), False, 'import gym\n'), ((2200, 2221), 'matplotlib.pyplot.plot', 'plt.plot', (['mean_ep_len'], {}), '(mean_ep_len)\n', (2208, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2263), 'matplotlib.pyplot.title', 'plt.title', (['"""Mean Ep length over time"""'], {}), "('Mean Ep length over time')\n", (2235, 2263), True, 'import matplotlib.pyplot as plt\n'), ((2268, 2290), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episodes"""'], {}), "('Episodes')\n", (2278, 2290), True, 'import matplotlib.pyplot as plt\n'), ((2295, 2328), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Episode Length"""'], {}), "('Mean Episode Length')\n", (2305, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2345), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2343, 2345), True, 'import matplotlib.pyplot as plt\n'), ((2350, 2378), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('ex09' + '.png')"], {}), "('ex09' + '.png')\n", (2361, 2378), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2391, 2393), True, 'import matplotlib.pyplot as plt\n'), ((205, 214), 'numpy.exp', 'np.exp', (['h'], {}), '(h)\n', (211, 214), True, 'import numpy as np\n'), ((1774, 1794), 'numpy.zeros', 'np.zeros', (['[nr_steps]'], {}), '([nr_steps])\n', (1782, 1794), True, 'import numpy as np\n'), ((222, 231), 'numpy.exp', 'np.exp', (['h'], {}), '(h)\n', (228, 231), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import lightgbm as lgb
import xgboost as xgb
# read dataset
df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')
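# NOTE: test_id, X, y and test are used below but were not defined in this snippet.
# A minimal sketch of the usual prep is given here; the 'id'/'target' column names are an assumption.
test_id = df_test['id'].values
y = df_train['target'].values
X = df_train.drop(['id', 'target'], axis=1).values
test = df_test.drop(['id'], axis=1)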
# gini function
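# (for binary targets the normalized Gini below is equivalent to 2*AUC - 1)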
def gini(actual, pred, cmpcol = 0, sortcol = 1):
assert( len(actual) == len(pred) )
    all = np.asarray(np.c_[ actual, pred, np.arange(len(actual)) ], dtype=float)
all = all[ np.lexsort((all[:,2], -1*all[:,1])) ]
totalLosses = all[:,0].sum()
giniSum = all[:,0].cumsum().sum() / totalLosses
giniSum -= (len(actual) + 1) / 2.
return giniSum / len(actual)
def gini_normalized(a, p):
return gini(a, p) / gini(a, a)
def gini_xgb(preds, dtrain):
labels = dtrain.get_label()
gini_score = gini_normalized(labels, preds)
return 'gini', gini_score
def gini_lgb(preds, dtrain):
labels = dtrain.get_label()
gini_score = gini_normalized(labels, preds)
return 'gini', gini_score, True
# define fold number
kfold = 5
skf = StratifiedKFold(n_splits=kfold, random_state=42)
sub = pd.DataFrame()
sub['id'] = test_id
sub['target'] = np.zeros_like(test_id)
params_xgd = {
'min_child_weight': 10.0,
'objective': 'binary:logistic',
'max_depth': 7,
'max_delta_step': 1.8,
'colsample_bytree': 0.4,
'subsample': 0.8,
'eta': 0.005,
'gamma': 0.65,
'num_boost_round' : 700
}
params_lgb = {
'max_depth': 7,
'learning_rate': 0.005,
'objective': 'binary'
}
for i, (train_index, test_index) in enumerate(skf.split(X, y)):
print('[Fold %d/%d]' % (i + 1, kfold))
X_train, X_valid = X[train_index], X[test_index]
y_train, y_valid = y[train_index], y[test_index]
d_train = lgb.Dataset(X_train, y_train)
d_valid = lgb.Dataset(X_valid, y_valid)
watchlist = [d_train, d_valid]
model_lgb = lgb.train(params_lgb, d_train, 1600, watchlist, early_stopping_rounds = 70, feval = gini_lgb, verbose_eval = 100)
d_train = xgb.DMatrix(X_train, y_train)
d_valid = xgb.DMatrix(X_valid, y_valid)
d_test = xgb.DMatrix(test.values)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
model_xgb = xgb.train(params_xgd, d_train, 1600, watchlist, early_stopping_rounds = 70, feval = gini_xgb, maximize = True, verbose_eval = 100)
    print('[Fold %d/%d Prediction:]' % (i + 1, kfold))
    pred_xgb = model_xgb.predict(d_test, ntree_limit=model_xgb.best_ntree_limit)
pred_lgb = model_lgb.predict(test.values)
# 0.7 from xgb, 0.3 from lgb. You can play around here
sub['target'] += (pred_xgb * 0.7 + pred_lgb * 0.3) / kfold
|
[
"pandas.read_csv",
"xgboost.train",
"lightgbm.train",
"sklearn.model_selection.StratifiedKFold",
"numpy.lexsort",
"lightgbm.Dataset",
"pandas.DataFrame",
"xgboost.DMatrix",
"numpy.zeros_like"
] |
[((163, 187), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (174, 187), True, 'import pandas as pd\n'), ((198, 221), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (209, 221), True, 'import pandas as pd\n'), ((1012, 1060), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'kfold', 'random_state': '(42)'}), '(n_splits=kfold, random_state=42)\n', (1027, 1060), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1067, 1081), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1079, 1081), True, 'import pandas as pd\n'), ((1118, 1140), 'numpy.zeros_like', 'np.zeros_like', (['test_id'], {}), '(test_id)\n', (1131, 1140), True, 'import numpy as np\n'), ((1715, 1744), 'lightgbm.Dataset', 'lgb.Dataset', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (1726, 1744), True, 'import lightgbm as lgb\n'), ((1759, 1788), 'lightgbm.Dataset', 'lgb.Dataset', (['X_valid', 'y_valid'], {}), '(X_valid, y_valid)\n', (1770, 1788), True, 'import lightgbm as lgb\n'), ((1841, 1952), 'lightgbm.train', 'lgb.train', (['params_lgb', 'd_train', '(1600)', 'watchlist'], {'early_stopping_rounds': '(70)', 'feval': 'gini_lgb', 'verbose_eval': '(100)'}), '(params_lgb, d_train, 1600, watchlist, early_stopping_rounds=70,\n feval=gini_lgb, verbose_eval=100)\n', (1850, 1952), True, 'import lightgbm as lgb\n'), ((1970, 1999), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (1981, 1999), True, 'import xgboost as xgb\n'), ((2014, 2043), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_valid', 'y_valid'], {}), '(X_valid, y_valid)\n', (2025, 2043), True, 'import xgboost as xgb\n'), ((2057, 2081), 'xgboost.DMatrix', 'xgb.DMatrix', (['test.values'], {}), '(test.values)\n', (2068, 2081), True, 'import xgboost as xgb\n'), ((2156, 2282), 'xgboost.train', 'xgb.train', (['params_xgd', 'd_train', '(1600)', 'watchlist'], {'early_stopping_rounds': '(70)', 'feval': 'gini_xgb', 'maximize': '(True)', 'verbose_eval': '(100)'}), '(params_xgd, d_train, 1600, watchlist, early_stopping_rounds=70,\n feval=gini_xgb, maximize=True, verbose_eval=100)\n', (2165, 2282), True, 'import xgboost as xgb\n'), ((426, 465), 'numpy.lexsort', 'np.lexsort', (['(all[:, 2], -1 * all[:, 1])'], {}), '((all[:, 2], -1 * all[:, 1]))\n', (436, 465), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models
import torchvision.datasets.folder
import torchvision.transforms as transforms
import torchvision.transforms.functional as Ft
from pytorch_transformers import BertTokenizer
import os
import db
from PIL import Image
import cv2
import numpy
import time
import copy
import math
import sys
sys.path.insert(0, './bottom-up-attention/')
sys.path.insert(0, './bottom-up-attention/caffe/python/')
sys.path.insert(0, './bottom-up-attention/lib/')
sys.path.insert(0, './bottom-up-attention/tools/')
sys.path.append('./errorcam')
import caffe
caffe.set_mode_gpu()
from fast_rcnn.config import cfg, cfg_from_file
from fast_rcnn.test import im_detect,_get_blobs
from fast_rcnn.nms_wrapper import nms
import cv2
cfg_from_file('bottom-up-attention/experiments/cfgs/faster_rcnn_end2end_resnet.yml')
weights = 'bottom-up-attention/data/faster_rcnn_models/resnet101_faster_rcnn_final.caffemodel'
prototxt = 'bottom-up-attention/models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt'
self_fast_rcnn = caffe.Net(prototxt, caffe.TEST, weights=weights);
import errorcam.models.attention_refine.atten_refine_network as att_refine
from errorcam.scripts.pytorchgradcam.gradcam import GradCam
from scipy.stats import spearmanr as correlation_func_atten
from statsmodels.stats.weightstats import ztest
import numpy as np
import json
#t0=time.time();
#im_file = 'val/n01532829_2439.JPEG'
# Similar to get_detections_from_im
#import requests
#response=requests.get('http://diva-1:5001/val/n01532829_2439.JPEG');
#image=Image.open(BytesIO(response.content));
#image=image.copy();
#im=F.to_tensor(image);
#im=(im*255).permute(1,2,0);
#im=torch.stack((im[:,:,2],im[:,:,1],im[:,:,0]),dim=2);
#im=im.cpu();
#im=im.numpy();
#im = cv2.imread(im_file)
#scores, boxes, attr_scores, rel_scores = im_detect(net, im)
#print('Loaded %f'%(time.time()-t0));
#a=0/0;
#QA classifier
import qa_classifier as qa_classifier
qa_classifier=qa_classifier.qa_classifier;
qtypes=['object', 'color', 'action', 'count', 'time', 'weather']
import model_7x7 as base_model
import lru_cache
import time
lru_mask_rcnn=lru_cache.new(100);
class xvqa:
def __init__(self,args_models):
self.in_use=0;
#Prepare ResNet152 for feature extraction
with torch.no_grad():
resnet152=torchvision.models.resnet152(pretrained=True)
resnet152=nn.Sequential(*list(resnet152.children())[:-2]).cuda();
resnet152=nn.DataParallel(resnet152).cuda()
resnet152.eval();
self.resnet152=resnet152;
#Prepare BERT tokenizer for question
self.tokenizer=BertTokenizer.from_pretrained('bert-base-uncased');
self.tokenizer.max_qlength=30;
#Prepare several BERT-VQA models for QA
print('Loading model')
models=[];
qfvs=[];
for m in args_models:
args_m=torch.load(os.path.join(m['root'],'args.pt'));
model=base_model.simple_vqa_model(args_m).cuda();
model=nn.DataParallel(model).cuda()
checkpoint=torch.load(os.path.join(m['root'],'model_checkpoint.pt'));
model.load_state_dict(checkpoint['model_state'])
model.eval()
model.answer_dictionary=torch.load(os.path.join(m['root'],'answer_dictionary.pt'));
model.args=args_m;
models.append(model);
qfv=torch.load(os.path.join(m['root'],'qfv.pt'))
qfvs.append(qfv);
self.models=models;
self.qfvs=qfvs;
self.qfvs_imkey=torch.load('res/models/qfv_imkey.pt');
#Prepare fast-rcnn detector
#cfg_from_file('bottom-up-attention/experiments/cfgs/faster_rcnn_end2end_resnet.yml')
#weights = 'bottom-up-attention/data/faster_rcnn_models/resnet101_faster_rcnn_final.caffemodel'
#prototxt = 'bottom-up-attention/models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt'
#self.fast_rcnn = caffe.Net(prototxt, caffe.TEST, weights=weights);
def loadGloveModel(gloveFile):
print("Loading Glove Model")
f = open(gloveFile,'r', encoding='utf8')
model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
print("Done.",len(model)," words loaded!")
return model
#Get w2v
self.w2v = loadGloveModel("errorcam/glove.6B.300d.txt");
atten_dim = (4,12,115,115)
model_init_args = {"im_feat_dim": (7,7,2048), "hidden_feat_size": 96, "atten_dim": np.prod(atten_dim), "ans_dim":3129, "ques_cam":False}
self.attention_refine_model = att_refine.uncertainatt_refinedatt_net_cam_bigger(**model_init_args).cuda()
model_suffix = "model_3_5501.pt"
exp_name = "exp4_fullmodel_corrpred_refinedattn_uncertainCAM_bigger"
self.attention_refine_model.load_state_dict(torch.load("errorcam/checkpoints/"+exp_name+"/"+model_suffix))
self.gradcam = GradCam(self.attention_refine_model)
return;
def get_lock(self):
while self.in_use>0:
time.sleep(0.2);
print('locked');
self.in_use=1;
return;
def release_lock(self):
self.in_use=0;
return;
def parse_question(self,qtext):
if isinstance(qtext,list):
qtokens=[];
question=[];
for qi in qtext:
qtokens_i,question_i=self.parse_question(qi);
qtokens.append(qtokens_i);
question.append(question_i);
with torch.no_grad():
question=torch.stack(question,dim=0);
return qtokens,question;
else:
qtokens=self.tokenizer.tokenize(qtext);
if len(qtokens)>self.tokenizer.max_qlength-2:
qtokens=qtokens[:self.tokenizer.max_qlength-2];
qtokens=['[CLS]']+qtokens+['[SEP]'];
question=self.tokenizer.convert_tokens_to_ids(qtokens);
question=question+[0]*(self.tokenizer.max_qlength-len(question));
question=torch.LongTensor(question);
return qtokens,question;
def get_7x7_features(self,Is):
#Resize & Normalize
with torch.no_grad():
It=[]
for I in Is:
I=F.adaptive_avg_pool2d(I,(224,224));
I=Ft.normalize(I,mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]);
It.append(I);
It=torch.stack(It,dim=0);
#Extract features
fvs=[];
batch=8;
for i in range(0,len(It),batch):
r=min(i+batch,len(It));
fv=self.resnet152(It[i:r]);
fvs.append(fv);
fvs=torch.cat(fvs,dim=0);
return fvs;
def get_maskrcnn_features(self,Is):
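        # Run the bottom-up-attention Faster R-CNN (caffe) to extract 36 region features (2048-d each)
        # plus box coordinates normalized by image size; results are memoized in an LRU cache keyed by the image bytes.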
try:
self.get_lock();
caffe.set_mode_gpu()
conf_thresh=0.2
min_boxes=36
max_boxes=36
net=self_fast_rcnn;
fv=[];
boxes_=[];
for iid in range(len(Is)):
I=Is[iid]
k=I.numpy().tostring();
if k in lru_mask_rcnn:
fv_i=lru_mask_rcnn[k]['fv'].clone();
boxes_i=lru_mask_rcnn[k]['boxes'].clone();
fv.append(fv_i);
boxes_.append(boxes_i);
else:
t0=time.time();
I=I.cuda();
im=(I*255).permute(1,2,0);
im=torch.stack((im[:,:,2],im[:,:,1],im[:,:,0]),dim=2);
im=im.cpu();
print(im.shape,im.max(),im.min())
im=im.numpy();
print('chpt1 %f'%float(time.time()-t0));
scores, boxes, attr_scores, rel_scores = im_detect(net, im)
print('chpt2 %f'%float(time.time()-t0));
# Keep the original boxes, don't worry about the regression bbox outputs
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
blobs, im_scales = _get_blobs(im, None)
print('chpt3 %f'%float(time.time()-t0));
cls_boxes = rois[:, 1:5] / im_scales[0]
cls_prob = net.blobs['cls_prob'].data
attr_prob = net.blobs['attr_prob'].data
pool5 = net.blobs['pool5_flat'].data
# Keep only the best detections
max_conf = numpy.zeros((rois.shape[0]))
for cls_ind in range(1,cls_prob.shape[1]):
cls_scores = scores[:, cls_ind]
try:
dets = numpy.hstack((cls_boxes, cls_scores[:, numpy.newaxis])).astype(numpy.float32)
except:
print(cls_boxes.shape);
print(cls_scores.shape);
dets = numpy.hstack((cls_boxes, cls_scores[:, numpy.newaxis])).astype(numpy.float32)
keep = numpy.array(nms(dets, cfg.TEST.NMS))
max_conf[keep] = numpy.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = numpy.where(max_conf >= conf_thresh)[0]
if len(keep_boxes) < min_boxes:
keep_boxes = numpy.argsort(max_conf)[::-1][:min_boxes]
elif len(keep_boxes) > max_boxes:
keep_boxes = numpy.argsort(max_conf)[::-1][:max_boxes]
print('chpt4 %f'%float(time.time()-t0));
imh=I.shape[1];
imw=I.shape[2];
boxes_i=torch.from_numpy(cls_boxes[keep_boxes]).view(1,36,4);
boxes_i=boxes_i/torch.Tensor([imw,imh,imw,imh]).view(1,1,4);
fv_i=torch.from_numpy(pool5[keep_boxes]).view(1,36,2048);
print(fv_i.shape,boxes_i.shape);
lru_mask_rcnn[k]={'fv':fv_i.clone().cpu(),'boxes':boxes_i.clone().cpu()};
print('chpt5 %f'%float(time.time()-t0));
fv.append(fv_i);
boxes_.append(boxes_i);
fv=torch.cat(fv,dim=0);
boxes_=torch.cat(boxes_,dim=0);
self.release_lock();
except:
self.release_lock();
a=0/0;
return fv,boxes_;
def vqa(self,Is,Qs,use_model=''):
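        # Tokenize the questions (BERT), extract 7x7 grid and 36-region features,
        # run the selected VQA model, and package inputs/outputs into a db.Table row.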
qtokens,q=self.parse_question(Qs);
print(qtokens)
fv7x7=self.get_7x7_features(Is);
fv36,boxes=self.get_maskrcnn_features(Is);
with torch.no_grad():
print(fv7x7.shape,fv36.shape,q.shape);
scores,attn=self.models[use_model](fv36,fv7x7.permute(0,2,3,1),q);
scores=scores.data.cpu();
attn=torch.stack(attn,dim=1).data.cpu();
top1_conf,pred=scores.max(dim=1);
As=[self.models[use_model].answer_dictionary[i] for i in pred.tolist()];
return db.Table({'I':Is,'Q':Qs,'A':As,'scores':scores,'attention':attn,'qtoken':qtokens,'qtensor':q,'features_7x7':fv7x7,'features_fv36':fv36,'bbox':boxes,'model':[use_model for q in Qs]});
#attn: 7x7 matrix
#imurl: image url
#output_fname: fname wrt root
def write_spatial_attention(self,I,attn,output_fname):
eps=1e-4
I=Ft.to_pil_image(I);
I=I.resize((224, 224))
I=numpy.asarray(I).astype(numpy.float32)
attn=attn.view(7,7).numpy()
attn=cv2.resize(attn, (224, 224))
attn=(attn-numpy.min(attn)+eps)/(numpy.max(attn)-numpy.min(attn)+eps)
att_heatmap=cv2.applyColorMap(numpy.uint8(255*attn), cv2.COLORMAP_JET)
alpha = 0.5
output_image=(1-alpha)*att_heatmap+alpha*I;
cv2.imwrite(output_fname,output_image)
return;
def write_object_attention(self,I,attn_rpn,bbox,attn_fname,token_ind=-1):
def apply_mask(image, mask, color, alpha=0.7):
for c in range(3):
image[:, :, c] = numpy.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def apply_obj_mask(masked_image, mask, actual_image, weight):
mask = numpy.repeat(mask[:,:,numpy.newaxis], 3, axis=2)
obj_image = numpy.ones(actual_image.shape)*255
numpy.copyto(obj_image, actual_image, where=(mask==1))
white_image = numpy.ones(actual_image.shape)*255
if weight< 0.3:
weight=weight+0.15
obj_img_weighted = weight*obj_image + (1-weight)*white_image
numpy.copyto(masked_image, obj_img_weighted, where=(mask==1))
return masked_image
def computeIOU(box1, box2):
#boxes should be in (y1, x1, y2, x2)
box1 = numpy.asarray(box1).astype(numpy.float32)
box2 = numpy.asarray(box2).astype(numpy.float32)
iou_box_x1 = max(box1[1], box2[1])
iou_box_y1 = max(box1[0], box2[0])
iou_box_x2 = min(box1[3], box2[3])
iou_box_y2 = min(box1[2], box2[2])
iou_h = max(0, iou_box_y2-iou_box_y1)
iou_w = max(0, iou_box_x2 - iou_box_x1)
roi_area = (iou_h * iou_w)
box1_area = numpy.absolute((box1[3] - box1[1]) * (box1[2] - box1[0]))
box2_area = numpy.absolute((box2[3] - box2[1]) * (box2[2] - box2[0]))
iou = roi_area/float(box1_area + box2_area - roi_area)
return iou
def compute_box_distance(box1, box2):
#boxes in (y1, x1, y2, x2)
box1 = numpy.asarray(box1).astype(numpy.float32)
box2 = numpy.asarray(box2).astype(numpy.float32)
cntr_box1_x = int((box1[1] + box1[3])/2)
cntr_box1_y = int((box1[0] + box1[2])/2)
cntr_box2_x = int((box2[1] + box2[3])/2)
cntr_box2_y = int((box2[0] + box2[2])/2)
dist = numpy.sqrt((cntr_box1_x - cntr_box2_x)**2 + (cntr_box1_y - cntr_box2_y)**2)
return dist
def computeWeights(mrcnn_boxes, rpn_boxes, box_weights):
epsilon = 1e-5
rcnn_box_weights = []
for ind, rcnn_box in enumerate(mrcnn_boxes):
max_area = 0
all_iou = []
all_weights = []
for rpn_ind, rpn_box in enumerate(rpn_boxes):
iou_area = computeIOU(rcnn_box, rpn_box)
all_iou.append(iou_area)
all_weights.append(box_weights[rpn_ind])
if len(all_iou) >= 1 and numpy.sum(all_iou)>0:
final_weight = numpy.exp(numpy.log(numpy.sum(numpy.exp(numpy.log(numpy.asarray(all_iou)) + numpy.log(numpy.asarray(all_weights))))) -(numpy.log(float(numpy.sum(all_iou)+ epsilon))))
rcnn_box_weights.append(final_weight)
else:
rcnn_box_weights.append(0)
return rcnn_box_weights
def make_rpn_attention_im(actual_image,attention_rpn,bboxes,attn_fname,token_ind=-1):
im_boxes=(bboxes.numpy()*256).astype(numpy.int32)
final_obj_weights = attention_rpn.numpy()
actual_image = Ft.to_pil_image(actual_image).resize((256, 256))
if len(final_obj_weights) != 0:
if numpy.max(final_obj_weights) > 0:
final_obj_weights = numpy.exp(numpy.log(final_obj_weights) - numpy.log(numpy.max(final_obj_weights)))
img_arr = numpy.asarray(actual_image).astype(numpy.float32)
masked_image = numpy.ones(img_arr.shape) * 255
masked_image = img_arr * 0.1 + masked_image * 0.9
if len(final_obj_weights) != 0:
obj_atten_inds = numpy.argsort(final_obj_weights)
else:
obj_atten_inds = []
obj_atten_inds = obj_atten_inds[::-1]
top_N = 5 # int(N * float(3) / 4)
for i in obj_atten_inds[:top_N][::-1]:
if final_obj_weights[i] > 0:
mask = numpy.zeros((256,256))
x0, y0, x1, y1 = im_boxes[i]
mask[y0:y1, x0:x1]=1
masked_image=apply_obj_mask(masked_image,mask,img_arr,float(final_obj_weights[i]))
## draw origin box (clicked box and draw arrows from that box to attended boxes)
## will only work for cases where we have such box to box attention, think about generalizing this later
if token_ind>29 and token_ind<66:
origin_box = im_boxes[token_ind-30]
ox0, oy0, ox1, oy1 = origin_box
cv2.rectangle(masked_image,(origin_box[0],origin_box[1]),(origin_box[2],origin_box[3]),(100,100,100),5)
for i in obj_atten_inds[:top_N]:
x0, y0, x1, y1 = im_boxes[i]
cv2.rectangle(masked_image, (x0, y0), (x1, y1), (50, 50, 50), 1)
pt1, pt2 = compute_closest_corner(origin_box, im_boxes[i])
cv2.arrowedLine(masked_image, pt1, pt2, (100,100,100), 2,8,0,0.05)
#masked_im = Image.fromarray(masked_image.astype(numpy.float32))
cv2.imwrite(attn_fname,masked_image[:,:,::-1])
return;
def compute_closest_corner(box1, box2):
ax0, ay0, ax1, ay1 = box1
bx0, by0, bx1, by1 = box2
min_d = float("inf")
for ax in [ax0, ax1]:
for bx in [bx0, bx1]:
d = abs(ax-bx)
if d<min_d:
ax_c = ax
bx_c = bx
min_d = d
min_d = float("inf")
for ay in [ay0, ay1]:
for by in [by0, by1]:
d = abs(ay-by)
if d<min_d:
ay_c = ay
by_c = by
min_d = d
return (ax_c, ay_c), (bx_c, by_c)
make_rpn_attention_im(I,attn_rpn,bbox,attn_fname,token_ind);
return;
def explain_errormap(self,table_vqa):
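        # Predict the failure probability with the attention-refinement network, take its Grad-CAM map,
        # weight the map by that probability and overlay it on the image as an "error map" heatmap.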
key=table_vqa['id'][0];
I=table_vqa['I'][0]
Q=table_vqa['Q'][0]
fv7x7=table_vqa['features_7x7'][0:1].clone()#.permute(0,2,3,1).view(1,49,2048);
attn=table_vqa['attention'][0:1];
answer_prob=F.softmax(table_vqa['scores'][0:1],dim=1);
def get_avg_w2v(question, w2v):
q_w = question.lower().split("?")[0].split(" ")
avg_feats = []
for w in q_w:
if w in w2v:
avg_feats.append(w2v[w])
return np.average(avg_feats, axis=0)
def get_err_weight(p):
weight = (p/0.175)**4 # empirically defined by what looks good on the matplotlib colormap.
if weight>1:
weight=1.0
return weight
#get question features
ques_feats = torch.from_numpy(get_avg_w2v(Q,self.w2v))
ques_feats = ques_feats.cuda().float().unsqueeze(0)
#get failure prediction probability. Using this to weigh the error maps results in better visualization.
model_out = self.attention_refine_model(attn.cuda().view(1,-1), fv7x7.cuda(), ques_feats, answer_prob.cuda());
fail_pred = model_out['wrong_pred']
fail_pred = float(fail_pred.squeeze().detach().cpu())
weight = get_err_weight(fail_pred)
print(attn.shape,fv7x7.shape,ques_feats.shape,answer_prob.shape)
att_map, _ = self.gradcam([attn.cuda().view(1,-1), fv7x7.cuda(), ques_feats, answer_prob.cuda()])
actual_image = Ft.to_pil_image(I).resize((224,224))
actual_image=numpy.asarray(actual_image).astype(numpy.float32)
processed_img = cv2.resize(actual_image, (224,224))
att_map = att_map.reshape((7,7))
att_map = cv2.resize(att_map, (224,224))
epsilon = 1e-3
att_heatmap = cv2.applyColorMap(np.uint8(255 * att_map), cv2.COLORMAP_JET)
alpha = 0.5
output_image = (1 - alpha) * att_heatmap *weight + alpha * processed_img
errmap_im_file_name='./attn/%s_errormap.jpg'%key;
cv2.imwrite(errmap_im_file_name, output_image)
return errmap_im_file_name;
def explain_attention_map_average(self,table_vqa):
key=table_vqa['id'][0];
attn=table_vqa['attention'][0];
qtoken=table_vqa['qtoken'][0];
L=len(qtoken);
attn_sp=attn[-1,:,:L, 66:].mean(0).mean(0).view(7,7);
attn_fname='./attn/%s_spatial_average.jpg'%key;
self.write_spatial_attention(table_vqa['I'][0],attn_sp,attn_fname);
return attn_fname;
def explain_attention_map_all(self,table_vqa):
key=table_vqa['id'][0];
attn=table_vqa['attention'][0];
qtoken=table_vqa['qtoken'][0];
L=len(qtoken);
attn_fname=[];
for i in range(L):
attn_sp=attn[-1,:,i, 66:].mean(0).view(7,7);
attn_fname_i='./attn/%s_spatial_w%d.jpg'%(key,i);
self.write_spatial_attention(table_vqa['I'][0],attn_sp,attn_fname_i);
attn_fname.append(attn_fname_i);
return attn_fname;
def explain_object_attention_average(self,table_vqa):
key=table_vqa['id'][0];
attn=table_vqa['attention'][0];
bbox=table_vqa['bbox'][0];
qtoken=table_vqa['qtoken'][0];
L=len(qtoken);
attn_rpn=attn[-1,-1,:L,30:66].mean(0);
attn_fname='./attn/%s_object_average.jpg'%key;
self.write_object_attention(table_vqa['I'][0],attn_rpn,bbox,attn_fname)
return attn_fname;
def explain_object_attention_all(self,table_vqa):
key=table_vqa['id'][0];
attn=table_vqa['attention'][0];
bbox=table_vqa['bbox'][0];
qtoken=table_vqa['qtoken'][0];
L=len(qtoken);
attn_fname=[];
for i in range(L):
attn_rpn=attn[-1,-1,i,30:66];
attn_fname_i='./attn/%s_object_w%d.jpg'%(key,i);
self.write_object_attention(table_vqa['I'][0],attn_rpn,bbox,attn_fname_i)
attn_fname.append(attn_fname_i);
return attn_fname;
#def explain_attention_map_pairs(self,table_vqa):
def explain_top_answers(self,table_vqa,k=5):
n=len(table_vqa);
topk_answers=[];
topk_confidence=[];
for i in range(n):
use_model=table_vqa['model'][i];
s=table_vqa['scores'][i];
p=F.softmax(s,dim=0);
p,ind=p.sort(dim=0,descending=True);
p=p[:k].tolist();
ind=ind[:k].tolist();
a=[self.models[use_model].answer_dictionary[j] for j in ind];
topk_answers_i=[];
for j in range(len(a)):
topk_answers_i.append({'answer':a[j],'confidence':p[j]});
topk_answers.append(topk_answers_i);
return topk_answers;
def explain_related_qas(self,table_vqa,k=5):
n=len(table_vqa);
topk_qas=[];
for i in range(n):
#Compute vector for question
use_model=table_vqa['model'][i];
I=table_vqa['I'][i];
qtext=table_vqa['Q'][i]
q=self.question_vector_v0(qtext,batch=50,model=use_model);
#Query related question
precomputed_qfv=self.qfvs[use_model]['qfv'];
precomputed_q=self.qfvs[use_model]['q'];
s=torch.mm(precomputed_qfv,q.view(-1,1)).view(-1);
s,ind=s.sort(dim=0,descending=True);
ind=ind.tolist();
s=s.tolist();
#Read questions and call VQA
topk_qas_i=[];
for j in range(k):
topk_qas_i.append({'question':precomputed_q[ind[j]],'r':s[j]});
result=self.vqa([I]*k,[x['question'] for x in topk_qas_i],use_model=use_model);
for j in range(k):
topk_qas_i[j]['answer']=result['A'][j];
topk_qas.append(topk_qas_i);
#Call VQA in batch mode
return topk_qas;
#Question type as perceived by the model
def explain_qtype(self,table_vqa):
qac=qa_classifier();
qtype=[];
n=len(table_vqa);
for i in range(n):
question=table_vqa['Q'][i];
answer=table_vqa['A'][i];
qtype.append(qac.classify_qa(question=question,answer=answer))
return qtype;
def question_vector_v0(self,qtext,T=15,std=1e-3,batch=4,model=0):
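        # Monte-Carlo question embedding: run the model T times in train mode (dropout active, fixed seeds)
        # over a stored set of image features, then combine the log-softmax score samples into a single vector.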
def logmeanexp(inputs,dim=None,keepdim=False):
return (inputs-F.log_softmax(inputs,dim=dim).data).mean(dim,keepdim=keepdim)-math.log(inputs.size(dim));
seeds=[t*1000 for t in range(T)]; #Fix seeds across runs
#Preprocess question
_,q=self.parse_question(qtext);
q=q.view(1,-1);
feature=self.qfvs_imkey['fv36'].cuda();
feature_7x7=self.qfvs_imkey['fv49'].cuda();
model2=copy.deepcopy(self.models[model]);
model2.train();
s=[];
for t in range(T):
st=[];
rng_state=torch.random.get_rng_state();
torch.random.manual_seed(seeds[t]);
#Run the model, pairing the q with each images
with torch.no_grad():
for j in range(0,feature.shape[0],batch):
r=min(j+batch,feature.shape[0]);
scores,_=model2(feature[j:r],feature_7x7[j:r],q.repeat(r-j,1));
scores=F.log_softmax(scores,dim=1).data;
st.append(scores);
torch.random.set_rng_state(rng_state);
st=torch.cat(st,dim=0);
s.append(st.data);
s=torch.stack(s,dim=0); #TxKx3129
savg=logmeanexp(s,dim=0,keepdim=True);
sdiff=s-savg;
s=s.permute(1,0,2);
sdiff=sdiff.permute(1,2,0);
v=torch.bmm(torch.exp(s),torch.exp(sdiff))/T;
return v.view(-1).cpu();
|
[
"numpy.uint8",
"numpy.prod",
"numpy.copyto",
"sys.path.insert",
"numpy.sqrt",
"qa_classifier",
"cv2.rectangle",
"fast_rcnn.test.im_detect",
"torchvision.transforms.functional.to_pil_image",
"torch.LongTensor",
"numpy.hstack",
"numpy.log",
"time.sleep",
"torch.exp",
"numpy.argsort",
"torch.from_numpy",
"copy.deepcopy",
"sys.path.append",
"torch.nn.functional.softmax",
"torchvision.transforms.functional.normalize",
"torch.random.manual_seed",
"errorcam.scripts.pytorchgradcam.gradcam.GradCam",
"torch.nn.functional.adaptive_avg_pool2d",
"numpy.repeat",
"numpy.where",
"db.Table",
"numpy.asarray",
"numpy.max",
"numpy.min",
"model_7x7.simple_vqa_model",
"cv2.arrowedLine",
"torch.random.get_rng_state",
"fast_rcnn.nms_wrapper.nms",
"fast_rcnn.test._get_blobs",
"numpy.ones",
"numpy.average",
"torch.Tensor",
"pytorch_transformers.BertTokenizer.from_pretrained",
"errorcam.models.attention_refine.atten_refine_network.uncertainatt_refinedatt_net_cam_bigger",
"torch.random.set_rng_state",
"torch.nn.functional.log_softmax",
"cv2.resize",
"time.time",
"torch.cat",
"fast_rcnn.config.cfg_from_file",
"cv2.imwrite",
"torch.load",
"torch.stack",
"caffe.set_mode_gpu",
"numpy.absolute",
"os.path.join",
"torch.nn.DataParallel",
"numpy.sum",
"numpy.zeros",
"lru_cache.new",
"caffe.Net",
"torch.no_grad"
] |
[((380, 424), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./bottom-up-attention/"""'], {}), "(0, './bottom-up-attention/')\n", (395, 424), False, 'import sys\n'), ((425, 482), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./bottom-up-attention/caffe/python/"""'], {}), "(0, './bottom-up-attention/caffe/python/')\n", (440, 482), False, 'import sys\n'), ((483, 531), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./bottom-up-attention/lib/"""'], {}), "(0, './bottom-up-attention/lib/')\n", (498, 531), False, 'import sys\n'), ((532, 582), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./bottom-up-attention/tools/"""'], {}), "(0, './bottom-up-attention/tools/')\n", (547, 582), False, 'import sys\n'), ((583, 612), 'sys.path.append', 'sys.path.append', (['"""./errorcam"""'], {}), "('./errorcam')\n", (598, 612), False, 'import sys\n'), ((627, 647), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (645, 647), False, 'import caffe\n'), ((796, 885), 'fast_rcnn.config.cfg_from_file', 'cfg_from_file', (['"""bottom-up-attention/experiments/cfgs/faster_rcnn_end2end_resnet.yml"""'], {}), "(\n 'bottom-up-attention/experiments/cfgs/faster_rcnn_end2end_resnet.yml')\n", (809, 885), False, 'from fast_rcnn.config import cfg, cfg_from_file\n'), ((1087, 1135), 'caffe.Net', 'caffe.Net', (['prototxt', 'caffe.TEST'], {'weights': 'weights'}), '(prototxt, caffe.TEST, weights=weights)\n', (1096, 1135), False, 'import caffe\n'), ((2169, 2187), 'lru_cache.new', 'lru_cache.new', (['(100)'], {}), '(100)\n', (2182, 2187), False, 'import lru_cache\n'), ((2688, 2738), 'pytorch_transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (2717, 2738), False, 'from pytorch_transformers import BertTokenizer\n'), ((3636, 3673), 'torch.load', 'torch.load', (['"""res/models/qfv_imkey.pt"""'], {}), "('res/models/qfv_imkey.pt')\n", (3646, 3673), False, 'import torch\n'), ((5221, 5257), 'errorcam.scripts.pytorchgradcam.gradcam.GradCam', 'GradCam', (['self.attention_refine_model'], {}), '(self.attention_refine_model)\n', (5228, 5257), False, 'from errorcam.scripts.pytorchgradcam.gradcam import GradCam\n'), ((11654, 11864), 'db.Table', 'db.Table', (["{'I': Is, 'Q': Qs, 'A': As, 'scores': scores, 'attention': attn, 'qtoken':\n qtokens, 'qtensor': q, 'features_7x7': fv7x7, 'features_fv36': fv36,\n 'bbox': boxes, 'model': [use_model for q in Qs]}"], {}), "({'I': Is, 'Q': Qs, 'A': As, 'scores': scores, 'attention': attn,\n 'qtoken': qtokens, 'qtensor': q, 'features_7x7': fv7x7, 'features_fv36':\n fv36, 'bbox': boxes, 'model': [use_model for q in Qs]})\n", (11662, 11864), False, 'import db\n'), ((12006, 12024), 'torchvision.transforms.functional.to_pil_image', 'Ft.to_pil_image', (['I'], {}), '(I)\n', (12021, 12024), True, 'import torchvision.transforms.functional as Ft\n'), ((12155, 12183), 'cv2.resize', 'cv2.resize', (['attn', '(224, 224)'], {}), '(attn, (224, 224))\n', (12165, 12183), False, 'import cv2\n'), ((12421, 12460), 'cv2.imwrite', 'cv2.imwrite', (['output_fname', 'output_image'], {}), '(output_fname, output_image)\n', (12432, 12460), False, 'import cv2\n'), ((19498, 19540), 'torch.nn.functional.softmax', 'F.softmax', (["table_vqa['scores'][0:1]"], {'dim': '(1)'}), "(table_vqa['scores'][0:1], dim=1)\n", (19507, 19540), True, 'import torch.nn.functional as F\n'), ((20969, 21005), 'cv2.resize', 'cv2.resize', (['actual_image', '(224, 224)'], {}), '(actual_image, (224, 224))\n', (20979, 21005), False, 'import cv2\n'), ((21064, 
21095), 'cv2.resize', 'cv2.resize', (['att_map', '(224, 224)'], {}), '(att_map, (224, 224))\n', (21074, 21095), False, 'import cv2\n'), ((21387, 21433), 'cv2.imwrite', 'cv2.imwrite', (['errmap_im_file_name', 'output_image'], {}), '(errmap_im_file_name, output_image)\n', (21398, 21433), False, 'import cv2\n'), ((25483, 25498), 'qa_classifier', 'qa_classifier', ([], {}), '()\n', (25496, 25498), True, 'import qa_classifier as qa_classifier\n'), ((26311, 26344), 'copy.deepcopy', 'copy.deepcopy', (['self.models[model]'], {}), '(self.models[model])\n', (26324, 26344), False, 'import copy\n'), ((27068, 27089), 'torch.stack', 'torch.stack', (['s'], {'dim': '(0)'}), '(s, dim=0)\n', (27079, 27089), False, 'import torch\n'), ((2324, 2339), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2337, 2339), False, 'import torch\n'), ((4797, 4815), 'numpy.prod', 'np.prod', (['atten_dim'], {}), '(atten_dim)\n', (4804, 4815), True, 'import numpy as np\n'), ((5135, 5202), 'torch.load', 'torch.load', (["('errorcam/checkpoints/' + exp_name + '/' + model_suffix)"], {}), "('errorcam/checkpoints/' + exp_name + '/' + model_suffix)\n", (5145, 5202), False, 'import torch\n'), ((5353, 5368), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (5363, 5368), False, 'import time\n'), ((6391, 6417), 'torch.LongTensor', 'torch.LongTensor', (['question'], {}), '(question)\n', (6407, 6417), False, 'import torch\n'), ((6537, 6552), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6550, 6552), False, 'import torch\n'), ((6797, 6819), 'torch.stack', 'torch.stack', (['It'], {'dim': '(0)'}), '(It, dim=0)\n', (6808, 6819), False, 'import torch\n'), ((7094, 7115), 'torch.cat', 'torch.cat', (['fvs'], {'dim': '(0)'}), '(fvs, dim=0)\n', (7103, 7115), False, 'import torch\n'), ((7244, 7264), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (7262, 7264), False, 'import caffe\n'), ((10842, 10862), 'torch.cat', 'torch.cat', (['fv'], {'dim': '(0)'}), '(fv, dim=0)\n', (10851, 10862), False, 'import torch\n'), ((10882, 10906), 'torch.cat', 'torch.cat', (['boxes_'], {'dim': '(0)'}), '(boxes_, dim=0)\n', (10891, 10906), False, 'import torch\n'), ((11261, 11276), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11274, 11276), False, 'import torch\n'), ((12300, 12323), 'numpy.uint8', 'numpy.uint8', (['(255 * attn)'], {}), '(255 * attn)\n', (12311, 12323), False, 'import numpy\n'), ((13021, 13071), 'numpy.repeat', 'numpy.repeat', (['mask[:, :, numpy.newaxis]', '(3)'], {'axis': '(2)'}), '(mask[:, :, numpy.newaxis], 3, axis=2)\n', (13033, 13071), False, 'import numpy\n'), ((13154, 13208), 'numpy.copyto', 'numpy.copyto', (['obj_image', 'actual_image'], {'where': '(mask == 1)'}), '(obj_image, actual_image, where=mask == 1)\n', (13166, 13208), False, 'import numpy\n'), ((13457, 13518), 'numpy.copyto', 'numpy.copyto', (['masked_image', 'obj_img_weighted'], {'where': '(mask == 1)'}), '(masked_image, obj_img_weighted, where=mask == 1)\n', (13469, 13518), False, 'import numpy\n'), ((14177, 14234), 'numpy.absolute', 'numpy.absolute', (['((box1[3] - box1[1]) * (box1[2] - box1[0]))'], {}), '((box1[3] - box1[1]) * (box1[2] - box1[0]))\n', (14191, 14234), False, 'import numpy\n'), ((14259, 14316), 'numpy.absolute', 'numpy.absolute', (['((box2[3] - box2[1]) * (box2[2] - box2[0]))'], {}), '((box2[3] - box2[1]) * (box2[2] - box2[0]))\n', (14273, 14316), False, 'import numpy\n'), ((14919, 14998), 'numpy.sqrt', 'numpy.sqrt', (['((cntr_box1_x - cntr_box2_x) ** 2 + (cntr_box1_y - cntr_box2_y) ** 2)'], {}), '((cntr_box1_x - cntr_box2_x) ** 2 
+ (cntr_box1_y - cntr_box2_y) ** 2)\n', (14929, 14998), False, 'import numpy\n'), ((18290, 18339), 'cv2.imwrite', 'cv2.imwrite', (['attn_fname', 'masked_image[:, :, ::-1]'], {}), '(attn_fname, masked_image[:, :, ::-1])\n', (18301, 18339), False, 'import cv2\n'), ((19818, 19847), 'numpy.average', 'np.average', (['avg_feats'], {'axis': '(0)'}), '(avg_feats, axis=0)\n', (19828, 19847), True, 'import numpy as np\n'), ((21168, 21191), 'numpy.uint8', 'np.uint8', (['(255 * att_map)'], {}), '(255 * att_map)\n', (21176, 21191), True, 'import numpy as np\n'), ((23731, 23750), 'torch.nn.functional.softmax', 'F.softmax', (['s'], {'dim': '(0)'}), '(s, dim=0)\n', (23740, 23750), True, 'import torch.nn.functional as F\n'), ((26452, 26480), 'torch.random.get_rng_state', 'torch.random.get_rng_state', ([], {}), '()\n', (26478, 26480), False, 'import torch\n'), ((26494, 26528), 'torch.random.manual_seed', 'torch.random.manual_seed', (['seeds[t]'], {}), '(seeds[t])\n', (26518, 26528), False, 'import torch\n'), ((26943, 26980), 'torch.random.set_rng_state', 'torch.random.set_rng_state', (['rng_state'], {}), '(rng_state)\n', (26969, 26980), False, 'import torch\n'), ((26997, 27017), 'torch.cat', 'torch.cat', (['st'], {'dim': '(0)'}), '(st, dim=0)\n', (27006, 27017), False, 'import torch\n'), ((2963, 2997), 'os.path.join', 'os.path.join', (["m['root']", '"""args.pt"""'], {}), "(m['root'], 'args.pt')\n", (2975, 2997), False, 'import os\n'), ((3143, 3189), 'os.path.join', 'os.path.join', (["m['root']", '"""model_checkpoint.pt"""'], {}), "(m['root'], 'model_checkpoint.pt')\n", (3155, 3189), False, 'import os\n'), ((3324, 3371), 'os.path.join', 'os.path.join', (["m['root']", '"""answer_dictionary.pt"""'], {}), "(m['root'], 'answer_dictionary.pt')\n", (3336, 3371), False, 'import os\n'), ((3478, 3511), 'os.path.join', 'os.path.join', (["m['root']", '"""qfv.pt"""'], {}), "(m['root'], 'qfv.pt')\n", (3490, 3511), False, 'import os\n'), ((4889, 4957), 'errorcam.models.attention_refine.atten_refine_network.uncertainatt_refinedatt_net_cam_bigger', 'att_refine.uncertainatt_refinedatt_net_cam_bigger', ([], {}), '(**model_init_args)\n', (4938, 4957), True, 'import errorcam.models.attention_refine.atten_refine_network as att_refine\n'), ((5853, 5868), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5866, 5868), False, 'import torch\n'), ((5895, 5923), 'torch.stack', 'torch.stack', (['question'], {'dim': '(0)'}), '(question, dim=0)\n', (5906, 5923), False, 'import torch\n'), ((6615, 6651), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['I', '(224, 224)'], {}), '(I, (224, 224))\n', (6636, 6651), True, 'import torch.nn.functional as F\n'), ((6669, 6739), 'torchvision.transforms.functional.normalize', 'Ft.normalize', (['I'], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(I, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (6681, 6739), True, 'import torchvision.transforms.functional as Ft\n'), ((12067, 12083), 'numpy.asarray', 'numpy.asarray', (['I'], {}), '(I)\n', (12080, 12083), False, 'import numpy\n'), ((12678, 12776), 'numpy.where', 'numpy.where', (['(mask == 1)', '(image[:, :, c] * (1 - alpha) + alpha * color[c] * 255)', 'image[:, :, c]'], {}), '(mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c] * \n 255, image[:, :, c])\n', (12689, 12776), False, 'import numpy\n'), ((13094, 13124), 'numpy.ones', 'numpy.ones', (['actual_image.shape'], {}), '(actual_image.shape)\n', (13104, 13124), False, 'import numpy\n'), ((13248, 13278), 'numpy.ones', 'numpy.ones', 
(['actual_image.shape'], {}), '(actual_image.shape)\n', (13258, 13278), False, 'import numpy\n'), ((16643, 16668), 'numpy.ones', 'numpy.ones', (['img_arr.shape'], {}), '(img_arr.shape)\n', (16653, 16668), False, 'import numpy\n'), ((16827, 16859), 'numpy.argsort', 'numpy.argsort', (['final_obj_weights'], {}), '(final_obj_weights)\n', (16840, 16859), False, 'import numpy\n'), ((17735, 17850), 'cv2.rectangle', 'cv2.rectangle', (['masked_image', '(origin_box[0], origin_box[1])', '(origin_box[2], origin_box[3])', '(100, 100, 100)', '(5)'], {}), '(masked_image, (origin_box[0], origin_box[1]), (origin_box[2],\n origin_box[3]), (100, 100, 100), 5)\n', (17748, 17850), False, 'import cv2\n'), ((20837, 20855), 'torchvision.transforms.functional.to_pil_image', 'Ft.to_pil_image', (['I'], {}), '(I)\n', (20852, 20855), True, 'import torchvision.transforms.functional as Ft\n'), ((20895, 20922), 'numpy.asarray', 'numpy.asarray', (['actual_image'], {}), '(actual_image)\n', (20908, 20922), False, 'import numpy\n'), ((26606, 26621), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26619, 26621), False, 'import torch\n'), ((27253, 27265), 'torch.exp', 'torch.exp', (['s'], {}), '(s)\n', (27262, 27265), False, 'import torch\n'), ((27266, 27282), 'torch.exp', 'torch.exp', (['sdiff'], {}), '(sdiff)\n', (27275, 27282), False, 'import torch\n'), ((2509, 2535), 'torch.nn.DataParallel', 'nn.DataParallel', (['resnet152'], {}), '(resnet152)\n', (2524, 2535), True, 'import torch.nn as nn\n'), ((3017, 3052), 'model_7x7.simple_vqa_model', 'base_model.simple_vqa_model', (['args_m'], {}), '(args_m)\n', (3044, 3052), True, 'import model_7x7 as base_model\n'), ((3079, 3101), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (3094, 3101), True, 'import torch.nn as nn\n'), ((7820, 7831), 'time.time', 'time.time', ([], {}), '()\n', (7829, 7831), False, 'import time\n'), ((7935, 7994), 'torch.stack', 'torch.stack', (['(im[:, :, 2], im[:, :, 1], im[:, :, 0])'], {'dim': '(2)'}), '((im[:, :, 2], im[:, :, 1], im[:, :, 0]), dim=2)\n', (7946, 7994), False, 'import torch\n'), ((8231, 8249), 'fast_rcnn.test.im_detect', 'im_detect', (['net', 'im'], {}), '(net, im)\n', (8240, 8249), False, 'from fast_rcnn.test import im_detect, _get_blobs\n'), ((8575, 8595), 'fast_rcnn.test._get_blobs', '_get_blobs', (['im', 'None'], {}), '(im, None)\n', (8585, 8595), False, 'from fast_rcnn.test import im_detect, _get_blobs\n'), ((9017, 9043), 'numpy.zeros', 'numpy.zeros', (['rois.shape[0]'], {}), '(rois.shape[0])\n', (9028, 9043), False, 'import numpy\n'), ((12203, 12218), 'numpy.min', 'numpy.min', (['attn'], {}), '(attn)\n', (12212, 12218), False, 'import numpy\n'), ((12225, 12240), 'numpy.max', 'numpy.max', (['attn'], {}), '(attn)\n', (12234, 12240), False, 'import numpy\n'), ((12241, 12256), 'numpy.min', 'numpy.min', (['attn'], {}), '(attn)\n', (12250, 12256), False, 'import numpy\n'), ((13669, 13688), 'numpy.asarray', 'numpy.asarray', (['box1'], {}), '(box1)\n', (13682, 13688), False, 'import numpy\n'), ((13730, 13749), 'numpy.asarray', 'numpy.asarray', (['box2'], {}), '(box2)\n', (13743, 13749), False, 'import numpy\n'), ((14546, 14565), 'numpy.asarray', 'numpy.asarray', (['box1'], {}), '(box1)\n', (14559, 14565), False, 'import numpy\n'), ((14607, 14626), 'numpy.asarray', 'numpy.asarray', (['box2'], {}), '(box2)\n', (14620, 14626), False, 'import numpy\n'), ((16248, 16277), 'torchvision.transforms.functional.to_pil_image', 'Ft.to_pil_image', (['actual_image'], {}), '(actual_image)\n', (16263, 16277), True, 'import 
torchvision.transforms.functional as Ft\n'), ((16374, 16402), 'numpy.max', 'numpy.max', (['final_obj_weights'], {}), '(final_obj_weights)\n', (16383, 16402), False, 'import numpy\n'), ((16566, 16593), 'numpy.asarray', 'numpy.asarray', (['actual_image'], {}), '(actual_image)\n', (16579, 16593), False, 'import numpy\n'), ((17134, 17157), 'numpy.zeros', 'numpy.zeros', (['(256, 256)'], {}), '((256, 256))\n', (17145, 17157), False, 'import numpy\n'), ((17957, 18021), 'cv2.rectangle', 'cv2.rectangle', (['masked_image', '(x0, y0)', '(x1, y1)', '(50, 50, 50)', '(1)'], {}), '(masked_image, (x0, y0), (x1, y1), (50, 50, 50), 1)\n', (17970, 18021), False, 'import cv2\n'), ((18121, 18192), 'cv2.arrowedLine', 'cv2.arrowedLine', (['masked_image', 'pt1', 'pt2', '(100, 100, 100)', '(2)', '(8)', '(0)', '(0.05)'], {}), '(masked_image, pt1, pt2, (100, 100, 100), 2, 8, 0, 0.05)\n', (18136, 18192), False, 'import cv2\n'), ((9691, 9776), 'numpy.where', 'numpy.where', (['(cls_scores[keep] > max_conf[keep])', 'cls_scores[keep]', 'max_conf[keep]'], {}), '(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep]\n )\n', (9702, 9776), False, 'import numpy\n'), ((9826, 9862), 'numpy.where', 'numpy.where', (['(max_conf >= conf_thresh)'], {}), '(max_conf >= conf_thresh)\n', (9837, 9862), False, 'import numpy\n'), ((11463, 11487), 'torch.stack', 'torch.stack', (['attn'], {'dim': '(1)'}), '(attn, dim=1)\n', (11474, 11487), False, 'import torch\n'), ((15602, 15620), 'numpy.sum', 'numpy.sum', (['all_iou'], {}), '(all_iou)\n', (15611, 15620), False, 'import numpy\n'), ((26845, 26873), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (26858, 26873), True, 'import torch.nn.functional as F\n'), ((9625, 9648), 'fast_rcnn.nms_wrapper.nms', 'nms', (['dets', 'cfg.TEST.NMS'], {}), '(dets, cfg.TEST.NMS)\n', (9628, 9648), False, 'from fast_rcnn.nms_wrapper import nms\n'), ((10312, 10351), 'torch.from_numpy', 'torch.from_numpy', (['cls_boxes[keep_boxes]'], {}), '(cls_boxes[keep_boxes])\n', (10328, 10351), False, 'import torch\n'), ((10472, 10507), 'torch.from_numpy', 'torch.from_numpy', (['pool5[keep_boxes]'], {}), '(pool5[keep_boxes])\n', (10488, 10507), False, 'import torch\n'), ((16458, 16486), 'numpy.log', 'numpy.log', (['final_obj_weights'], {}), '(final_obj_weights)\n', (16467, 16486), False, 'import numpy\n'), ((9955, 9978), 'numpy.argsort', 'numpy.argsort', (['max_conf'], {}), '(max_conf)\n', (9968, 9978), False, 'import numpy\n'), ((10402, 10436), 'torch.Tensor', 'torch.Tensor', (['[imw, imh, imw, imh]'], {}), '([imw, imh, imw, imh])\n', (10414, 10436), False, 'import torch\n'), ((16499, 16527), 'numpy.max', 'numpy.max', (['final_obj_weights'], {}), '(final_obj_weights)\n', (16508, 16527), False, 'import numpy\n'), ((25912, 25942), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['inputs'], {'dim': 'dim'}), '(inputs, dim=dim)\n', (25925, 25942), True, 'import torch.nn.functional as F\n'), ((8152, 8163), 'time.time', 'time.time', ([], {}), '()\n', (8161, 8163), False, 'import time\n'), ((8293, 8304), 'time.time', 'time.time', ([], {}), '()\n', (8302, 8304), False, 'import time\n'), ((8639, 8650), 'time.time', 'time.time', ([], {}), '()\n', (8648, 8650), False, 'import time\n'), ((9229, 9284), 'numpy.hstack', 'numpy.hstack', (['(cls_boxes, cls_scores[:, numpy.newaxis])'], {}), '((cls_boxes, cls_scores[:, numpy.newaxis]))\n', (9241, 9284), False, 'import numpy\n'), ((10088, 10111), 'numpy.argsort', 'numpy.argsort', (['max_conf'], {}), '(max_conf)\n', (10101, 
10111), False, 'import numpy\n'), ((10194, 10205), 'time.time', 'time.time', ([], {}), '()\n', (10203, 10205), False, 'import time\n'), ((10715, 10726), 'time.time', 'time.time', ([], {}), '()\n', (10724, 10726), False, 'import time\n'), ((9479, 9534), 'numpy.hstack', 'numpy.hstack', (['(cls_boxes, cls_scores[:, numpy.newaxis])'], {}), '((cls_boxes, cls_scores[:, numpy.newaxis]))\n', (9491, 9534), False, 'import numpy\n'), ((15794, 15812), 'numpy.sum', 'numpy.sum', (['all_iou'], {}), '(all_iou)\n', (15803, 15812), False, 'import numpy\n'), ((15709, 15731), 'numpy.asarray', 'numpy.asarray', (['all_iou'], {}), '(all_iou)\n', (15722, 15731), False, 'import numpy\n'), ((15745, 15771), 'numpy.asarray', 'numpy.asarray', (['all_weights'], {}), '(all_weights)\n', (15758, 15771), False, 'import numpy\n')]
|
import cv2
import numpy as np
from pyautogui import screenshot
from pyautogui import size as get_screen_size
from core.screen.screen_rectangle import ScreenRectangle
class ScreenshotImage:
def __init__(self, in_region: ScreenRectangle = None):
screen_width, screen_height = get_screen_size()
region_coordinates = (0, 0, screen_width, screen_height)
if in_region is not None:
region_coordinates = (in_region.start_point.x, in_region.start_point.y, in_region.width, in_region.height)
screen_pil_image = screenshot(region=region_coordinates)
self._gray_array = cv2.cvtColor(np.array(screen_pil_image), cv2.COLOR_BGR2GRAY)
height, width = self._gray_array.shape
self._width = width
self._height = height
@property
def image_gray_array(self):
return self._gray_array
@property
def width(self) -> int:
return self._width
@property
def height(self) -> int:
return self._height
def binarize(self):
# img2 = cv2.adaptiveThreshold(self._gray_array, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
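        # Editor's note (added): with cv2.THRESH_OTSU the fixed threshold argument (0) is ignored
        # and an optimal value is computed from the image histogram; [1] keeps only the binary image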
return cv2.threshold(self._gray_array, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
|
[
"cv2.threshold",
"numpy.array",
"pyautogui.screenshot",
"pyautogui.size"
] |
[((289, 306), 'pyautogui.size', 'get_screen_size', ([], {}), '()\n', (304, 306), True, 'from pyautogui import size as get_screen_size\n'), ((554, 591), 'pyautogui.screenshot', 'screenshot', ([], {'region': 'region_coordinates'}), '(region=region_coordinates)\n', (564, 591), False, 'from pyautogui import screenshot\n'), ((633, 659), 'numpy.array', 'np.array', (['screen_pil_image'], {}), '(screen_pil_image)\n', (641, 659), True, 'import numpy as np\n'), ((1167, 1243), 'cv2.threshold', 'cv2.threshold', (['self._gray_array', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(self._gray_array, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (1180, 1243), False, 'import cv2\n')]
|
import random
import logging
import numpy as np
import tensorflow as tf
class DeepQNetworkModel:
def __init__(self,
session,
layers_size,
memory,
default_batch_size=None,
default_learning_rate=None,
default_epsilon=None,
gamma=0.99,
min_samples_for_predictions=0,
double_dqn=False,
learning_procedures_to_q_target_switch=1000,
tau=1,
maximize_entropy=False,
var_scope_name=None):
"""
Create a new Deep Q Network model
:param session: a tf.Session to be used
:param layers_size: a list of numbers, representing the number of nodes in each layer of the network
:param memory: an instance of type memory_buffers.Memory
:param default_batch_size: the default batch size for training
:param default_learning_rate: the default learning rate for training
:param default_epsilon: the default epsilon to be used for the eps-greedy policy
:param gamma: the discount factor
        :param min_samples_for_predictions: the minimum number of seen state-transitions required to make predictions.
               random actions will be selected until this number has been reached
        :param double_dqn: boolean, whether a Double Deep Q Network should be used or not
        :param learning_procedures_to_q_target_switch: how many learning procedures are required before the main network
               is copied to the q-target network. Relevant only if double_dqn = True.
        :param tau: a number in the range [0,1] determining the mixture of the main network weights and q-target weights
               which will be written into the q-target network. tau=1 copies the main network weights to the q-target
               network as they are (as it should be according to the original paper). tau=0 will keep the q-target
               weights unchanged, meaning no knowledge will be transferred.
               Relevant only if double_dqn = True.
        :param maximize_entropy: boolean, determining whether the network should try to maximize the entropy of the Q values
        :param var_scope_name: when more than one model is generated, each needs its own variable scope. If two or more
               models are supposed to share their weights, they should all have the same variable scope name.
               This is irrelevant when only one instance of the model is used.
"""
self.output_size = layers_size[-1]
self.session = session
self.default_batch_size = default_batch_size
self.default_learning_rate = default_learning_rate
self.default_epsilon = default_epsilon
self.min_samples_for_predictions = min_samples_for_predictions
self.learning_procedures_to_q_target_switch = learning_procedures_to_q_target_switch
self.tau = tau
self.maximize_entropy = maximize_entropy
self.memory = memory
# print("Layers_size: ", layers_size)
# print("Output size: ", self.output_size)
# print("Input size: ", layers_size[0])
self.q_network = self.__create_q_network(input_size=layers_size[0], output_size=self.output_size,
hidden_layers_size=layers_size[1:-1], gamma=gamma,
maximize_entropy=maximize_entropy,
var_scope_name=var_scope_name,
layer_name_suffix='qnn')
if double_dqn:
self.target_q_network = self.__create_q_network(input_size=layers_size[0], output_size=self.output_size,
hidden_layers_size=layers_size[1:-1], gamma=gamma,
maximize_entropy=maximize_entropy,
var_scope_name=var_scope_name,
layer_name_suffix='qt')
else:
self.target_q_network = None
def __create_q_network(self, input_size, output_size, hidden_layers_size, gamma, maximize_entropy,
var_scope_name, layer_name_suffix):
scope_name = var_scope_name or tf.compat.v1.get_variable_scope().name
reuse = tf.compat.v1.AUTO_REUSE if var_scope_name else False
with tf.compat.v1.variable_scope(scope_name, reuse=reuse):
qnn = QNetwork(input_size=input_size, output_size=output_size, hidden_layers_size=hidden_layers_size,
gamma=gamma, maximize_entropy=maximize_entropy, layer_name_suffix=layer_name_suffix)
return qnn
def learn(self, learning_rate=None, batch_size=None):
"""
Initialize a learning attempt
:param learning_rate: a learning rate overriding default_learning_rate
:param batch_size: a batch_size overriding default_batch_size
:return: None if no learning was made, or the cost of learning if it did happen
"""
current_batch_size = batch_size if batch_size is not None else self.default_batch_size
if self.memory.counter % current_batch_size != 0 or self.memory.counter == 0:
logging.debug('Passing on learning procedure')
pass
else:
logging.debug('Starting learning procedure...')
batch = self.memory.sample(current_batch_size)
# print("batch: ", batch)
# print("batch.reshape(-1): ", batch.reshape(-1), " ", batch.reshape(-1).shape)
#print("self.target_q_network.states: ", self.target_q_network.states)
#print("self.__fetch_from_batch(batch, 'next_state'): ", self.__fetch_from_batch(batch, 'next_state'))
qt = self.session.run(self.target_q_network.output,
feed_dict={self.target_q_network.states: self.__fetch_from_batch(batch, 'next_state')})
#print(self.__fetch_from_batch(batch, 'is_terminal'))
terminals = self.__fetch_from_batch(batch, 'is_terminal')
for i in range(terminals.size):
if terminals[i]:
qt[i] = np.zeros(self.output_size)
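            # Editor's note (added): zeroing qt for terminal transitions removes the bootstrapped
            # future-value term, so the regression label in QNetwork reduces to the immediate
            # reward r for those samples (in the default reduce_max case).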
            lr = learning_rate if learning_rate is not None else self.default_learning_rate
            # single optimisation step over the whole batch
            _, cost = self.session.run([self.q_network.optimizer, self.q_network.cost],
                                       feed_dict={self.q_network.states: self.__fetch_from_batch(batch, 'state'),
                                                  self.q_network.r: self.__fetch_from_batch(batch, 'reward'),
                                                  self.q_network.enumerated_actions: self.__fetch_from_batch(batch, 'action', enum=True),
                                                  self.q_network.q_target: qt,
                                                  self.q_network.learning_rate: lr})
            logging.debug('Batch number: %s | Q-Network cost: %s | Learning rate: %s',
                          self.memory.counter // current_batch_size, cost, lr)
            if self.target_q_network is not None and self.memory.counter % (self.learning_procedures_to_q_target_switch * current_batch_size) == 0:
                logging.info('Copying Q-Network to Q-Target...')
                tf_vars = tf.compat.v1.trainable_variables()
                num_of_vars = len(tf_vars)
                operations = []
                for i, v in enumerate(tf_vars[0:num_of_vars // 2]):
                    operations.append(tf_vars[i + num_of_vars // 2].assign(
                        (v.value() * self.tau) + ((1 - self.tau) * tf_vars[i + num_of_vars // 2].value())))
                self.session.run(operations)
return cost
def act(self, state, epsilon=None):
"""
Select an action for the given state
:param state: a Numpy array representing a state
:param epsilon: an epsilon value to be used for the eps-greedy policy, overriding default_epsilon
:return: a number representing the selected action
"""
eps = epsilon if epsilon is not None else self.default_epsilon
rnd = random.random()
if rnd < eps or self.memory.counter < self.min_samples_for_predictions:
action = random.randint(0, self.output_size - 1)
logging.debug("Choosing a random action: %s [Epsilon = %s]", action, eps)
else:
prediction = self.session.run(self.q_network.output,
feed_dict={self.q_network.states: np.expand_dims(state, axis=0)})
prediction = np.squeeze(prediction)
action = np.argmax(prediction)
logging.debug("Predicted action for state %s is %s (network output: %s) [Epsilon = %s]",
state, action, prediction, eps)
return action
def add_to_memory(self, state, action, reward, next_state, is_terminal_state):
"""
Add new state-transition to memory
:param state: a Numpy array representing a state
:param action: an integer representing the selected action
:param reward: a number representing the received reward
:param next_state: a Numpy array representing the state reached after performing the action
:param is_terminal_state: boolean. mark state as a terminal_state. next_state will have no effect.
"""
self.memory.append({'state': state, 'action': action, 'reward': reward,
'next_state': next_state, 'is_terminal': is_terminal_state})
def __fetch_from_batch(self, batch, key, enum=False):
# print("batch: ", batch)
if key == 'next_state' or key == 'state':
if enum:
return np.array(list(enumerate(map(lambda x: x[key].reshape(-1), batch))))
else:
return np.array(list(map(lambda x: x[key].reshape(-1), batch)))
else:
if enum:
return np.array(list(enumerate(map(lambda x: x[key], batch))))
else:
return np.array(list(map(lambda x: x[key], batch)))
class QNetwork:
"""
A Q-Network implementation
"""
def __init__(self, input_size, output_size, hidden_layers_size, gamma, maximize_entropy, layer_name_suffix):
self.q_target = tf.compat.v1.placeholder(shape=(None, output_size), dtype=tf.float32)
self.r = tf.compat.v1.placeholder(shape=None, dtype=tf.float32)
self.states = tf.compat.v1.placeholder(shape=(None, input_size), dtype=tf.float32)
self.enumerated_actions = tf.compat.v1.placeholder(shape=(None, 2), dtype=tf.int32)
self.learning_rate = tf.compat.v1.placeholder(shape=[], dtype=tf.float32)
layer = self.states
for i in range(len(hidden_layers_size)):
layer = tf.compat.v1.layers.dense(inputs=layer, units=hidden_layers_size[i], activation=tf.nn.relu,
name='{}_dense_layer_{}'.format(layer_name_suffix,i),
kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"))
self.output = tf.compat.v1.layers.dense(inputs=layer, units=output_size,
name='{}_dense_layer_{}'.format(layer_name_suffix,len(hidden_layers_size)),
kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"))
self.predictions = tf.gather_nd(self.output, indices=self.enumerated_actions)
if maximize_entropy:
self.future_q = tf.math.log(tf.reduce_sum(input_tensor=tf.exp(self.q_target), axis=1))
else:
self.future_q = tf.reduce_max(input_tensor=self.q_target, axis=1)
self.labels = self.r + (gamma * self.future_q)
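        # Editor's illustration (added, hypothetical numbers): with gamma = 0.99, r = 1.0 and
        # future_q = 10.0 the regression label is 1.0 + 0.99 * 10.0 = 10.9.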
self.cost = tf.reduce_mean(input_tensor=tf.compat.v1.losses.mean_squared_error(labels=self.labels, predictions=self.predictions))
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
|
[
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.variable_scope",
"logging.debug",
"tensorflow.compat.v1.get_variable_scope",
"tensorflow.compat.v1.train.AdamOptimizer",
"numpy.argmax",
"logging.info",
"numpy.squeeze",
"tensorflow.reduce_max",
"tensorflow.exp",
"numpy.zeros",
"tensorflow.compat.v1.losses.mean_squared_error",
"tensorflow.compat.v1.keras.initializers.VarianceScaling",
"numpy.expand_dims",
"tensorflow.compat.v1.trainable_variables",
"random.random",
"tensorflow.gather_nd",
"random.randint"
] |
[((8348, 8363), 'random.random', 'random.random', ([], {}), '()\n', (8361, 8363), False, 'import random\n'), ((10521, 10590), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '(None, output_size)', 'dtype': 'tf.float32'}), '(shape=(None, output_size), dtype=tf.float32)\n', (10545, 10590), True, 'import tensorflow as tf\n'), ((10608, 10662), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': 'None', 'dtype': 'tf.float32'}), '(shape=None, dtype=tf.float32)\n', (10632, 10662), True, 'import tensorflow as tf\n'), ((10685, 10753), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '(None, input_size)', 'dtype': 'tf.float32'}), '(shape=(None, input_size), dtype=tf.float32)\n', (10709, 10753), True, 'import tensorflow as tf\n'), ((10788, 10845), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '(None, 2)', 'dtype': 'tf.int32'}), '(shape=(None, 2), dtype=tf.int32)\n', (10812, 10845), True, 'import tensorflow as tf\n'), ((10875, 10927), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'shape': '[]', 'dtype': 'tf.float32'}), '(shape=[], dtype=tf.float32)\n', (10899, 10927), True, 'import tensorflow as tf\n'), ((11741, 11799), 'tensorflow.gather_nd', 'tf.gather_nd', (['self.output'], {'indices': 'self.enumerated_actions'}), '(self.output, indices=self.enumerated_actions)\n', (11753, 11799), True, 'import tensorflow as tf\n'), ((4560, 4612), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['scope_name'], {'reuse': 'reuse'}), '(scope_name, reuse=reuse)\n', (4587, 4612), True, 'import tensorflow as tf\n'), ((5410, 5456), 'logging.debug', 'logging.debug', (['"""Passing on learning procedure"""'], {}), "('Passing on learning procedure')\n", (5423, 5456), False, 'import logging\n'), ((5500, 5547), 'logging.debug', 'logging.debug', (['"""Starting learning procedure..."""'], {}), "('Starting learning procedure...')\n", (5513, 5547), False, 'import logging\n'), ((7107, 7239), 'logging.debug', 'logging.debug', (['"""Batch number: %s | Q-Network cost: %s | Learning rate: %s"""', '(self.memory.counter // current_batch_size)', 'cost', 'lr'], {}), "('Batch number: %s | Q-Network cost: %s | Learning rate: %s', \n self.memory.counter // current_batch_size, cost, lr)\n", (7120, 7239), False, 'import logging\n'), ((8465, 8504), 'random.randint', 'random.randint', (['(0)', '(self.output_size - 1)'], {}), '(0, self.output_size - 1)\n', (8479, 8504), False, 'import random\n'), ((8517, 8590), 'logging.debug', 'logging.debug', (['"""Choosing a random action: %s [Epsilon = %s]"""', 'action', 'eps'], {}), "('Choosing a random action: %s [Epsilon = %s]', action, eps)\n", (8530, 8590), False, 'import logging\n'), ((8803, 8825), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (8813, 8825), True, 'import numpy as np\n'), ((8847, 8868), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (8856, 8868), True, 'import numpy as np\n'), ((8881, 9010), 'logging.debug', 'logging.debug', (['"""Predicted action for state %s is %s (network output: %s) [Epsilon = %s]"""', 'state', 'action', 'prediction', 'eps'], {}), "(\n 'Predicted action for state %s is %s (network output: %s) [Epsilon = %s]',\n state, action, prediction, eps)\n", (8894, 9010), False, 'import logging\n'), ((11970, 12019), 'tensorflow.reduce_max', 'tf.reduce_max', ([], {'input_tensor': 'self.q_target', 'axis': '(1)'}), '(input_tensor=self.q_target, axis=1)\n', (11983, 12019), 
True, 'import tensorflow as tf\n'), ((4439, 4472), 'tensorflow.compat.v1.get_variable_scope', 'tf.compat.v1.get_variable_scope', ([], {}), '()\n', (4470, 4472), True, 'import tensorflow as tf\n'), ((7425, 7473), 'logging.info', 'logging.info', (['"""Copying Q-Network to Q-Target..."""'], {}), "('Copying Q-Network to Q-Target...')\n", (7437, 7473), False, 'import logging\n'), ((7500, 7534), 'tensorflow.compat.v1.trainable_variables', 'tf.compat.v1.trainable_variables', ([], {}), '()\n', (7532, 7534), True, 'import tensorflow as tf\n'), ((11614, 11716), 'tensorflow.compat.v1.keras.initializers.VarianceScaling', 'tf.compat.v1.keras.initializers.VarianceScaling', ([], {'scale': '(1.0)', 'mode': '"""fan_avg"""', 'distribution': '"""uniform"""'}), "(scale=1.0, mode='fan_avg',\n distribution='uniform')\n", (11661, 11716), True, 'import tensorflow as tf\n'), ((12123, 12216), 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.compat.v1.losses.mean_squared_error', ([], {'labels': 'self.labels', 'predictions': 'self.predictions'}), '(labels=self.labels, predictions=self\n .predictions)\n', (12161, 12216), True, 'import tensorflow as tf\n'), ((12238, 12304), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (12270, 12304), True, 'import tensorflow as tf\n'), ((6362, 6388), 'numpy.zeros', 'np.zeros', (['self.output_size'], {}), '(self.output_size)\n', (6370, 6388), True, 'import numpy as np\n'), ((11262, 11364), 'tensorflow.compat.v1.keras.initializers.VarianceScaling', 'tf.compat.v1.keras.initializers.VarianceScaling', ([], {'scale': '(1.0)', 'mode': '"""fan_avg"""', 'distribution': '"""uniform"""'}), "(scale=1.0, mode='fan_avg',\n distribution='uniform')\n", (11309, 11364), True, 'import tensorflow as tf\n'), ((8746, 8775), 'numpy.expand_dims', 'np.expand_dims', (['state'], {'axis': '(0)'}), '(state, axis=0)\n', (8760, 8775), True, 'import numpy as np\n'), ((11896, 11917), 'tensorflow.exp', 'tf.exp', (['self.q_target'], {}), '(self.q_target)\n', (11902, 11917), True, 'import tensorflow as tf\n')]
|
# The original GA algorithm is here:
import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt
import math
class City:
def __init__(self, x, y):
self.x = x
self.y = y
def distance(self, city):
xDis = abs(self.x - city.x)
yDis = abs(self.y - city.y)
distance = np.sqrt((xDis ** 2) + (yDis ** 2))
return distance
def __repr__(self):
return "(" + str(self.x) + "," + str(self.y) + ")"
class Fitness:
def __init__(self, route):
self.route = route
self.distance = 0
self.fitness = 0.0
def routeDistance(self):
if self.distance == 0:
pathDistance = 0
for i in range(0, len(self.route)):
fromCity = self.route[i]
toCity = None
if i + 1 < len(self.route):
toCity = self.route[i + 1]
else:
toCity = self.route[0]
pathDistance += fromCity.distance(toCity)
self.distance = pathDistance
return self.distance
def routeFitness(self):
if self.fitness == 0:
dis = self.routeDistance()
self.fitness = dis
return self.fitness
def createRoute(cityList):
route = random.sample(cityList, len(cityList))
return route
def initialPopulation(popSize, cityList):
population = []
for i in range(0, popSize):
population.append(createRoute(cityList))
return population
def rankRoutes(population):
fitnessResults = {}
for i in range(0, len(population)):
fitnessResults[i] = Fitness(population[i]).routeFitness()
return sorted(fitnessResults.items(), key=operator.itemgetter(1), reverse=False)
def selection(popRanked, eliteSize):
selectionResults = []
for i in range(0, eliteSize):
selectionResults.append(popRanked[i][0])
popRanked_pre = popRanked[:len(popRanked)]
for i in range(0, len(popRanked) - eliteSize):
c1 = random.sample(popRanked_pre, 1)
c2 = random.sample(popRanked_pre, 1)
        winner = None
        # routeFitness() stores the route distance, so the tournament winner is the
        # candidate with the smaller value (i.e. the shorter route)
        if c1[0][1] < c2[0][1]:
            winner = c1
        else:
            winner = c2
selectionResults.append(winner[0][0])
return selectionResults
def matingPool(population, selectionResults):
matingpool = []
for i in range(0, len(selectionResults)):
index = selectionResults[i]
matingpool.append(population[index])
return matingpool
def breed(parent1, parent2):
child = []
childP1 = []
childP2 = []
geneA = int(random.random() * len(parent1))
geneB = int(random.random() * len(parent1))
startGene = min(geneA, geneB)
endGene = max(geneA, geneB)
for i in range(startGene, endGene):
childP1.append(parent1[i])
childP2 = [item for item in parent2 if item not in childP1]
child = childP1 + childP2
return child
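# Editor's illustration of breed() above (added, hypothetical cities A-E): with
# parent1 = [A, B, C, D, E], parent2 = [C, A, E, B, D] and the random slice 1..3,
# childP1 = [B, C], childP2 = [A, E, D] and the resulting child is [B, C, A, E, D].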
def breedPopulation(matingpool, eliteSize):
children = []
length = len(matingpool) - eliteSize
pool = random.sample(matingpool, len(matingpool))
for i in range(0, eliteSize):
children.append(matingpool[i])
for i in range(0, length):
child = breed(pool[i], pool[len(matingpool) - i - 1])
children.append(child)
return children
def mutate(individual, mutationRate):
for swapped in range(len(individual)):
if (random.random() < mutationRate):
swapWith = int(random.random() * len(individual))
city1 = individual[swapped]
city2 = individual[swapWith]
individual[swapped] = city2
individual[swapWith] = city1
return individual
def mutatePopulation(population, mutationRate):
mutatedPop = []
for ind in range(0, len(population)):
mutatedInd = mutate(population[ind], mutationRate)
mutatedPop.append(mutatedInd)
return mutatedPop
def nextGeneration(currentGen, eliteSize, mutationRate):
popRanked = rankRoutes(currentGen)
selectionResults = selection(popRanked, eliteSize)
matingpool = matingPool(currentGen, selectionResults)
children = breedPopulation(matingpool, eliteSize)
return children
def geneticAlgorithm(population, popSize, eliteSize, mutationRate, generations):
pop = initialPopulation(popSize, population)
print("Initial distance: " + str(1 / rankRoutes(pop)[0][1]))
for i in range(0, generations):
pop = nextGeneration(pop, eliteSize, mutationRate)
print("Final distance: " + str(1 / rankRoutes(pop)[0][1]))
bestRouteIndex = rankRoutes(pop)[0][0]
bestRoute = pop[bestRouteIndex]
return bestRoute
def plotting():
l1 = list()
for c in best:
l1.append([c.x, c.y])
l = np.asarray(l1)
plt.clf()
plt.scatter(l[:, 0].T, l[:, 1].T, s=10, c='k')
l1.append(l1[0])
l = np.asarray(l1)
plt.plot(l[:, 0].T, l[:, 1].T, 'r-')
# plt.show()
plt.savefig("berlin52_route.png")
def read_line(s):
l = s.split(' ')
return float(l[0]), float(l[1]), float(l[2])
def geneticAlgorithmPlot(population, popSize, eliteSize, mutationRate, generations):
pop = initialPopulation(popSize, population)
progress = []
progress.append(rankRoutes(pop)[0][1])
for i in range(0, generations):
pop = nextGeneration(pop, eliteSize, mutationRate)
print(i)
progress.append(rankRoutes(pop)[0][1])
plt.clf()
plt.plot(progress)
plt.ylabel('Distance')
plt.xlabel('Generation')
# plt.show()
plt.savefig("berlin52_distance.png")
print("Final distance: " + str(rankRoutes(pop)[0][1]))
bestRouteIndex = rankRoutes(pop)[0][0]
bestRoute = pop[bestRouteIndex]
return bestRoute
cityList = []
with open('./TSP_data', 'rt') as f:
for line in f:
a, b, c = read_line(line)
cityList.append(City(x=b, y=c))
best = geneticAlgorithmPlot(population=cityList, popSize=2000, eliteSize=1000, mutationRate=0.01, generations=2000)
plotting()
|
[
"random.sample",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"numpy.asarray",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"operator.itemgetter",
"random.random"
] |
[((4760, 4774), 'numpy.asarray', 'np.asarray', (['l1'], {}), '(l1)\n', (4770, 4774), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((4779, 4788), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4786, 4788), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((4793, 4839), 'matplotlib.pyplot.scatter', 'plt.scatter', (['l[:, 0].T', 'l[:, 1].T'], {'s': '(10)', 'c': '"""k"""'}), "(l[:, 0].T, l[:, 1].T, s=10, c='k')\n", (4804, 4839), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((4869, 4883), 'numpy.asarray', 'np.asarray', (['l1'], {}), '(l1)\n', (4879, 4883), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((4888, 4924), 'matplotlib.pyplot.plot', 'plt.plot', (['l[:, 0].T', 'l[:, 1].T', '"""r-"""'], {}), "(l[:, 0].T, l[:, 1].T, 'r-')\n", (4896, 4924), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((4946, 4979), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""berlin52_route.png"""'], {}), "('berlin52_route.png')\n", (4957, 4979), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((5432, 5441), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5439, 5441), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((5446, 5464), 'matplotlib.pyplot.plot', 'plt.plot', (['progress'], {}), '(progress)\n', (5454, 5464), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((5469, 5491), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance"""'], {}), "('Distance')\n", (5479, 5491), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((5496, 5520), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generation"""'], {}), "('Generation')\n", (5506, 5520), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((5542, 5578), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""berlin52_distance.png"""'], {}), "('berlin52_distance.png')\n", (5553, 5578), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((332, 362), 'numpy.sqrt', 'np.sqrt', (['(xDis ** 2 + yDis ** 2)'], {}), '(xDis ** 2 + yDis ** 2)\n', (339, 362), True, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((2023, 2054), 'random.sample', 'random.sample', (['popRanked_pre', '(1)'], {}), '(popRanked_pre, 1)\n', (2036, 2054), False, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((2068, 2099), 'random.sample', 'random.sample', (['popRanked_pre', '(1)'], {}), '(popRanked_pre, 1)\n', (2081, 2099), False, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((1724, 1746), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1743, 1746), False, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((2605, 2620), 'random.random', 'random.random', ([], {}), '()\n', (2618, 2620), False, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((2653, 2668), 'random.random', 'random.random', ([], {}), '()\n', (2666, 2668), False, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n'), ((3414, 3429), 'random.random', 'random.random', ([], {}), '()\n', (3427, 3429), False, 'import numpy as np, random, operator, pandas as pd, 
matplotlib.pyplot as plt\n'), ((3474, 3489), 'random.random', 'random.random', ([], {}), '()\n', (3487, 3489), False, 'import numpy as np, random, operator, pandas as pd, matplotlib.pyplot as plt\n')]
|
# ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor
# license agreements; and to You under the Apache License, Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
#
# ------------------------------------------------------------------------------
from mpi4py import MPI
import time
import numpy as np
from EBRAINS_InterscaleHUB.refactored_modular.Communicator import Communicator
from EBRAINS_InterscaleHUB.refactored_modular import interscalehub_utils
from EBRAINS_InterscaleHUB.refactored_modular import interscalehub_mediator as mediator
#from EBRAINS_InterscaleHUB.Interscale_hub.transformer import spiketorate
from EBRAINS_ConfigManager.global_configurations_manager.xml_parsers.default_directories_enum import DefaultDirectories
from EBRAINS_RichEndpoint.Application_Companion.common_enums import Response
# NestTvbPivot and TvbNestPivot classes:
# TODO: proper abstraction -> extract the usecase details from the general implementation
# -> Init, start, stop are pretty much the same every time
# -> incoming (receive) and outgoing (send) loops (M:N mapping)
# -> the analyse (method) should be
# a) pivot, as raw data to cosim data
# b) transform (might be trivial) and
# c) analysis (might be trivial)
# TODO: rework on the receive and send loops (both, general coding style and usecase specifics)
class CommunicatorNestTvb(Communicator):
'''
Implements the PivotBaseClass for abstracting the pivot operations and
the underlying communication protocol. This class provides wrappers
for receving the data from NEST simulator and sending it to TVB simulator
after processing/transforming to the required format.
'''
def __init__(self, configurations_manager, log_settings, name, databuffer,
intracomm, param, comm_receiver, comm_sender):
'''
'''
super().__init__(configurations_manager,
log_settings,
name,
databuffer
)
# Parameter for transformation and analysis
self.__param = param
# INTERcommunicator
# TODO: Revisit the protocol to TVB and NEST
# TODO: rank 0 and rank 1 hardcoded
if intracomm.Get_rank() == 0:
self.__comm_receiver = comm_receiver
self.__num_sending = self.__comm_receiver.Get_remote_size()
elif intracomm.Get_rank() == 1:
self.__comm_sender = comm_sender
self.__num_receiving = self.__comm_sender.Get_remote_size()
self.__logger.info("Initialised")
def start(self, intracomm):
'''
Starts the pivot operation.
M:N mapping of MPI ranks, receive data, further process data.
        Receive on rank 0, do the rest on the remaining ranks.
'''
if intracomm.Get_rank() == 0: # Receiver from input sim, rank 0
self._receive()
elif intracomm.Get_rank() == 1: # Science/analyse and sender to TVB, rank 1-x
self._send()
def stop(self):
'''
TODO: proper execution of stop command
'''
self.__stop = True
def _receive(self):
'''
Receive data on rank 0. Put it into the shared mem buffer.
Replaces the former 'receive' function.
NOTE: First refactored version -> not pretty, not final.
'''
# The last two buffer entries are used for shared information
# --> they replace the status_data variable from previous version
# --> find more elegant solution?
self.__logger.info("setting up buffers")
self.__databuffer[-1] = 1 # set buffer to 'ready to receive from nest'
self.__databuffer[-2] = 0 # marks the 'head' of the buffer
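        # Editor's sketch (added), summarising the shared-buffer protocol used below and in _send():
        #   databuffer[0:head] -> raw spike data received from NEST for the current step
        #   databuffer[-2]     -> head, i.e. the first buffer index WITHOUT data
        #   databuffer[-1]     -> state flag: 1 = ready to receive, 0 = ready to analyse/send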
# It seems the 'check' variable is used to receive tags from NEST, i.e. ready for send...
# change this in the future, also mentioned in the FatEndPoint solution from Wouter.
check = np.empty(1,dtype='b')
shape = np.empty(1, dtype='i')
count = 0
status_ = MPI.Status()
self.__logger.info("reading from buffer")
###########################################################
#TODO Refactor to move this functionality to appropriate location
#NOTE As per protocol, it should be the response message of 'init'
# command, and should return the PID and the port information
import os
from EBRAINS_RichEndpoint.Application_Companion.common_enums import INTEGRATED_SIMULATOR_APPLICATION as SIMULATOR
pid_and_local_minimum_step_size = \
{SIMULATOR.PID.name: os.getpid(),
SIMULATOR.LOCAL_MINIMUM_STEP_SIZE.name: 0.0}
print(f'{pid_and_local_minimum_step_size}')
###########################################################
# self.__logger.info("NESTtoTVB -- consumer/receiver -- Rank:"+str(self.__comm_receiver.Get_rank()))
while True:
head_ = 0 # head of the buffer, reset after each iteration
# TODO: This is still not correct. We only check for the Tag of the last rank.
# IF all ranks send always the same tag in one iteration (simulation step)
# then this works. But it should be handled differently!!!!
self.__comm_receiver.Recv([check, 1, MPI.CXX_BOOL], source=0, tag=MPI.ANY_TAG, status=status_)
status_rank_0 = status_.Get_tag()
for i in range(1, self.__num_sending):
# new: We do not care which source sends first, give MPI the freedom to send in whichever order.
# self.__comm_receiver.Recv([check, 1, MPI.CXX_BOOL], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status_)
# self.__logger.info("checking status")
self.__comm_receiver.Recv([check, 1, MPI.CXX_BOOL], source=i, tag=MPI.ANY_TAG, status=status_)
if status_rank_0 != status_.Get_tag():
# Case: the state of the NEST is different between the ranks
# Log the exception with traceback
interscalehub_utils.log_exception(
log_message="Abnormal state : the state of Nest is different between rank. Tag received: ",
mpi_tag_received=status_.Get_tag())
# Terminate with Error
return Response.ERROR
if status_.Get_tag() == 0:
# wait until ready to receive new data (i.e. the sender has cleared the buffer)
while self.__databuffer[-1] != 1: # TODO: use MPI, remove the sleep
time.sleep(0.001)
pass
for source in range(self.__num_sending):
# send 'ready' to the nest rank
# self.__logger.info("send ready")
self.__comm_receiver.Send([np.array(True,dtype='b'),MPI.BOOL],dest=source,tag=0)
# receive package size info
# self.__logger.info("DEBUG 121 ====> receiving size in NEST_TVB_PIVOT")
self.__comm_receiver.Recv([shape, 1, MPI.INT], source=source, tag=0, status=status_)
# self.__comm_receiver.Recv([shape, 1, MPI.INT], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status_)
# NEW: receive directly into the buffer
self.__comm_receiver.Recv([self.__databuffer[head_:], MPI.DOUBLE], source=source, tag=0, status=status_)
head_ += shape[0] # move head
# Mark as 'ready to do analysis'
self.__databuffer[-1] = 0
# important: head_ is first buffer index WITHOUT data.
self.__databuffer[-2] = head_
# continue receiving the data
continue
elif status_.Get_tag() == 1:
# increment the count and continue receiving the data
count += 1
continue
elif status_.Get_tag() == 2:
# NOTE: simulation ended
# everything goes fine, terminate the loop and respond with OK
return Response.OK
else:
# A 'bad' MPI tag is received,
# log the exception with traceback
interscalehub_utils.log_exception(
log_message="bad mpi tag :",
mpi_tag_received=status_.Get_tag())
# terminate with Error
return Response.ERROR
def _send(self):
'''
Send data to TVB (multiple MPI ranks possible).
Replaces the former 'send' function.
NOTE: First refactored version -> not pretty, not final.
'''
count=0 # simulation/iteration step
status_ = MPI.Status()
# self.__logger.info("NESTtoTVB -- producer/sender -- Rank:"+str(self.__comm_sender.Get_rank()))
while True:
# TODO: this communication has the 'rank 0' problem described in the beginning
accept = False
#logger.info("Nest to TVB : wait to send " )
while not accept:
req = self.__comm_sender.irecv(source=MPI.ANY_SOURCE,tag=MPI.ANY_TAG)
accept = req.wait(status_)
#logger.info(" Nest to TVB : send data status : " +str(status_.Get_tag()))
if status_.Get_tag() == 0:
# wait until the receiver has cleared the buffer, i.e. filled with new data
while self.__databuffer[-1] != 0: # TODO: use MPI, remove the sleep
time.sleep(0.001)
pass
# NOTE: calling the mediator which calls the corresponding transformer functions
times,data = mediator.spike_to_rate(self.__databuffer, count)
# Mark as 'ready to receive next simulation step'
self.__databuffer[-1] = 1
### OLD Code
#logger.info("Nest to TVB : send data :"+str(np.sum(data)) )
# time of sim step
self.__comm_sender.Send([times, MPI.DOUBLE], dest=status_.Get_source(), tag=0)
# send the size of the rate
size = np.array(int(data.shape[0]),dtype='i')
self.__comm_sender.Send([size,MPI.INT], dest=status_.Get_source(), tag=0)
# send the rates
self.__comm_sender.Send([data,MPI.DOUBLE], dest=status_.Get_source(), tag=0)
# increment the count
count+=1
# continue sending the data
continue
### OLD Code end
elif status_.Get_tag() == 1:
# NOTE: simulation ended
# everything goes fine, terminate the loop and respond with OK
return Response.OK
else:
# A 'bad' MPI tag is received,
# log the exception with traceback
interscalehub_utils.log_exception(
log_message="bad mpi tag :",
mpi_tag_received=status_.Get_tag())
# terminate with Error
return Response.ERROR
'''
def _transform(self, count):
#store: Python object, create the histogram
#analyse: Python object, calculate rates
spikerate = spiketorate(self.__param)
times, data = spikerate.spike_to_rate(count, self.__databuffer[-2], self.__databuffer)
return times, data
'''
|
[
"EBRAINS_InterscaleHUB.refactored_modular.interscalehub_mediator.spike_to_rate",
"time.sleep",
"numpy.array",
"mpi4py.MPI.Status",
"numpy.empty",
"os.getpid"
] |
[((4387, 4409), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""b"""'}), "(1, dtype='b')\n", (4395, 4409), True, 'import numpy as np\n'), ((4425, 4447), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""i"""'}), "(1, dtype='i')\n", (4433, 4447), True, 'import numpy as np\n'), ((4488, 4500), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (4498, 4500), False, 'from mpi4py import MPI\n'), ((9273, 9285), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (9283, 9285), False, 'from mpi4py import MPI\n'), ((5070, 5081), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5079, 5081), False, 'import os\n'), ((10253, 10301), 'EBRAINS_InterscaleHUB.refactored_modular.interscalehub_mediator.spike_to_rate', 'mediator.spike_to_rate', (['self.__databuffer', 'count'], {}), '(self.__databuffer, count)\n', (10275, 10301), True, 'from EBRAINS_InterscaleHUB.refactored_modular import interscalehub_mediator as mediator\n'), ((7088, 7105), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (7098, 7105), False, 'import time\n'), ((10067, 10084), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (10077, 10084), False, 'import time\n'), ((7342, 7367), 'numpy.array', 'np.array', (['(True)'], {'dtype': '"""b"""'}), "(True, dtype='b')\n", (7350, 7367), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
import resnet_block
def LeakyRelu(x, leak=0.2, name="LeakyRelu"):
with tf.variable_scope(name):
leak_c = tf.constant(0.1)
leak = tf.Variable(leak_c)
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * tf.abs(x)
def OurRelu(x, name="OurRelu"):
with tf.variable_scope(name):
leak_c = tf.constant(0.1)
leak = tf.Variable(leak_c)
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * tf.abs(x) - f2 * x
def Friend_relu(x):
x = tf.nn.relu(x)
Max = tf.constant([255.0])
return tf.minimum(x, Max)
#normalization
def Batch_normalization(X):
_mean, _var = tf.nn.moments(X, [0, 1, 2])
X = tf.nn.batch_normalization(X, _mean, _var, 0, 1, 0.0001)
return X
#group normalization
def GroupNorm(x,G=32,eps=1e-5):
N,H,W,C=x.shape
x=tf.reshape(x,[tf.cast(N,tf.int32),tf.cast(H,tf.int32),tf.cast(W,tf.int32),tf.cast(G,tf.int32),tf.cast(C//G,tf.int32)])
# x=tf.reshape(x,[N,H,W,G,C//G])
mean,var=tf.nn.moments(x,[1,2,4],keep_dims=True)
x=(x-mean)/tf.sqrt(var+eps)
x=tf.reshape(x,[tf.cast(N,tf.int32),tf.cast(H,tf.int32),tf.cast(W,tf.int32),tf.cast(C,tf.int32)])
gamma = tf.Variable(tf.ones(shape=[1,1,1,tf.cast(C,tf.int32)]), name="gamma")
beta = tf.Variable(tf.zeros(shape=[1,1,1,tf.cast(C,tf.int32)]), name="beta")
return x*gamma+beta
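# Editor's note (addition, not in the original file): GroupNorm above implements
# Wu & He's Group Normalization -- reshape NHWC to [N, H, W, G, C//G], take the
# mean/variance over (H, W, C//G) per group, normalise, then apply a learnable
# per-channel gamma/beta. A NumPy reference of the normalisation step (gamma=1,
# beta=0), assuming a statically known shape with C divisible by G, is:
def _groupnorm_numpy_reference(x, G=32, eps=1e-5):
    N, H, W, C = x.shape
    g = x.reshape(N, H, W, G, C // G)
    mean = g.mean(axis=(1, 2, 4), keepdims=True)
    var = g.var(axis=(1, 2, 4), keepdims=True)
    return ((g - mean) / np.sqrt(var + eps)).reshape(N, H, W, C)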
class Net:
def __init__(self):
pass
#kernel initial
def weight_variable(self, shape):
initial = tf.truncated_normal(shape, mean=0.0,stddev=np.sqrt(2.0/shape[2]))
return tf.Variable( initial)
#bias initial
def bias_variable(self,shape):
return tf.Variable(tf.random_normal(shape, stddev=0.1))
def model(self, input_X, training):
#Multi-scale Convolution
w_conv1_3 = self.weight_variable([3, 3, 3, 64])
x_conv1_3 = tf.nn.conv2d(input_X, w_conv1_3, strides=[1, 2, 2, 1], padding='SAME')#64 x 64 x64
w_conv1_5 = self.weight_variable([5, 5, 3, 32])
x_conv1_5 = tf.nn.conv2d(input_X, w_conv1_5, strides=[1, 2, 2, 1], padding='SAME')
w_conv1_7 = self.weight_variable([7, 7, 3, 32])
x_conv1_7 = tf.nn.conv2d(input_X, w_conv1_7, strides=[1, 2, 2, 1], padding='SAME')
x_conv1 = tf.concat([x_conv1_3, x_conv1_5, x_conv1_7],3)
x_conv1 = GroupNorm(x_conv1)
x_conv1 = LeakyRelu(x_conv1)
w_conv2 = self.weight_variable([3, 3, 128, 256])
x_conv2 = tf.nn.conv2d(x_conv1, w_conv2, strides=[1, 2, 2, 1], padding='SAME')#32 x32 x128
x_conv2 = GroupNorm(x_conv2)
x_conv2 = LeakyRelu(x_conv2)
w_conv4 = self.weight_variable([3, 3, 256, 512])
x_conv4 = tf.nn.conv2d(x_conv2, w_conv4, strides=[1, 2, 2, 1], padding='SAME')#16x16x256
x_conv4 = GroupNorm(x_conv4)
x_conv4 = LeakyRelu(x_conv4)
x_conv6 = resnet_block.identity_block(x_conv4, 3, 512, [256, 256, 512], stage=2, block='b', training=training )
x_conv7 = resnet_block.identity_block(x_conv6, 3, 512, [256, 256, 512], stage=2, block='c', training=training )
x_conv8 = resnet_block.identity_block(x_conv7, 3, 512, [256, 256, 512], stage=2, block='d', training=training )
x_conv8 = resnet_block.identity_block(x_conv8, 3, 512, [256, 256, 512], stage=2, block='e', training=training )
x_conv8 = resnet_block.identity_block(x_conv8, 3, 512, [256, 256, 512], stage=2, block='f', training=training )
x_conv8 = resnet_block.identity_block(x_conv8, 3, 512, [256, 256, 512], stage=2, block='g', training=training )
x_conv8 = resnet_block.identity_block(x_conv8, 3, 512, [256, 256, 512], stage=2, block='h', training=training )
w_deconv1 = self.weight_variable([1, 1, 512, 512])
x_conv9 = tf.nn.conv2d_transpose(x_conv8, w_deconv1,output_shape=tf.shape(x_conv4), strides=[1, 1, 1, 1], padding='VALID')#29x29x256
x_conv9 = GroupNorm(x_conv9)
x_conv9 = OurRelu(x_conv9)
x_conv9 = tf.concat([x_conv9, x_conv4],3)
w_conv9_1 = self.weight_variable([1, 1, 1024, 512])
x_conv9 = tf.nn.conv2d(x_conv9, w_conv9_1, strides=[1, 1, 1, 1], padding='VALID')
x_conv9 = GroupNorm(x_conv9)
x_conv9 = LeakyRelu(x_conv9)
w_deconv2 = self.weight_variable([3, 3, 256, 512])
x_conv10 = tf.nn.conv2d_transpose(x_conv9, w_deconv2,output_shape=tf.shape(x_conv2), strides=[1, 2, 2, 1], padding='SAME')
x_conv10 = GroupNorm(x_conv10)
x_conv10 = OurRelu(x_conv10)
x_conv10 = tf.concat([x_conv10, x_conv2],3)
w_conv10_1 = self.weight_variable([1, 1, 512, 256])
x_conv10 = tf.nn.conv2d(x_conv10, w_conv10_1, strides=[1, 1, 1, 1], padding='SAME')
x_conv10 = GroupNorm(x_conv10)
x_conv10 = LeakyRelu(x_conv10)
w_deconv3 = self.weight_variable([3, 3, 128, 256])
x_conv11 = tf.nn.conv2d_transpose(x_conv10, w_deconv3,output_shape=tf.shape(x_conv1), strides=[1, 2, 2, 1], padding='SAME')
x_conv11 = GroupNorm(x_conv11)
x_conv11 = OurRelu(x_conv11)
x_conv11 = tf.concat([x_conv11, x_conv1],3)
w_conv11_1 = self.weight_variable([1, 1, 256, 128])
x_conv11 = tf.nn.conv2d(x_conv11, w_conv11_1, strides=[1, 1, 1, 1], padding='VALID')
x_conv11 = GroupNorm(x_conv11)
x_conv11 = LeakyRelu(x_conv11)
w_deconv4 = self.weight_variable([3, 3, 3, 128])
x_conv12 = tf.nn.conv2d_transpose(x_conv11, w_deconv4,output_shape=tf.shape(input_X), strides=[1, 2, 2, 1], padding='SAME')
model = tf.add(x_conv12,input_X)
model = Friend_relu(model)
return input_X,x_conv12,model
if __name__ == "__main__":
    net = Net()
    # use a static batch size (GroupNorm casts the static shape) and remember to feed the placeholder
    input_X = tf.placeholder(tf.float32, [1, 128, 128, 3])
    _, _, model = net.model(input_X, training=True)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    pre = sess.run(model, feed_dict={input_X: np.zeros((1, 128, 128, 3), dtype=np.float32)})
    print(pre.shape)
|
[
"numpy.sqrt",
"tensorflow.shape",
"tensorflow.nn.moments",
"resnet_block.identity_block",
"tensorflow.cast",
"tensorflow.random_normal",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.concat",
"tensorflow.nn.conv2d",
"tensorflow.variable_scope",
"tensorflow.Variable",
"tensorflow.add",
"tensorflow.sqrt",
"tensorflow.nn.batch_normalization",
"tensorflow.minimum",
"tensorflow.nn.relu",
"tensorflow.global_variables_initializer",
"tensorflow.constant",
"tensorflow.abs"
] |
[((584, 597), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (594, 597), True, 'import tensorflow as tf\n'), ((608, 628), 'tensorflow.constant', 'tf.constant', (['[255.0]'], {}), '([255.0])\n', (619, 628), True, 'import tensorflow as tf\n'), ((640, 658), 'tensorflow.minimum', 'tf.minimum', (['x', 'Max'], {}), '(x, Max)\n', (650, 658), True, 'import tensorflow as tf\n'), ((725, 752), 'tensorflow.nn.moments', 'tf.nn.moments', (['X', '[0, 1, 2]'], {}), '(X, [0, 1, 2])\n', (738, 752), True, 'import tensorflow as tf\n'), ((761, 816), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['X', '_mean', '_var', '(0)', '(1)', '(0.0001)'], {}), '(X, _mean, _var, 0, 1, 0.0001)\n', (786, 816), True, 'import tensorflow as tf\n'), ((1084, 1127), 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[1, 2, 4]'], {'keep_dims': '(True)'}), '(x, [1, 2, 4], keep_dims=True)\n', (1097, 1127), True, 'import tensorflow as tf\n'), ((5787, 5834), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 128, 128, 3]'], {}), '(tf.float32, [None, 128, 128, 3])\n', (5801, 5834), True, 'import tensorflow as tf\n'), ((5889, 5922), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5920, 5922), True, 'import tensorflow as tf\n'), ((5934, 5946), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5944, 5946), True, 'import tensorflow as tf\n'), ((119, 142), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (136, 142), True, 'import tensorflow as tf\n'), ((161, 177), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {}), '(0.1)\n', (172, 177), True, 'import tensorflow as tf\n'), ((193, 212), 'tensorflow.Variable', 'tf.Variable', (['leak_c'], {}), '(leak_c)\n', (204, 212), True, 'import tensorflow as tf\n'), ((358, 381), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (375, 381), True, 'import tensorflow as tf\n'), ((400, 416), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {}), '(0.1)\n', (411, 416), True, 'import tensorflow as tf\n'), ((432, 451), 'tensorflow.Variable', 'tf.Variable', (['leak_c'], {}), '(leak_c)\n', (443, 451), True, 'import tensorflow as tf\n'), ((1139, 1157), 'tensorflow.sqrt', 'tf.sqrt', (['(var + eps)'], {}), '(var + eps)\n', (1146, 1157), True, 'import tensorflow as tf\n'), ((1657, 1677), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1668, 1677), True, 'import tensorflow as tf\n'), ((1964, 2034), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_X', 'w_conv1_3'], {'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(input_X, w_conv1_3, strides=[1, 2, 2, 1], padding='SAME')\n", (1976, 2034), True, 'import tensorflow as tf\n'), ((2123, 2193), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_X', 'w_conv1_5'], {'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(input_X, w_conv1_5, strides=[1, 2, 2, 1], padding='SAME')\n", (2135, 2193), True, 'import tensorflow as tf\n'), ((2270, 2340), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['input_X', 'w_conv1_7'], {'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(input_X, w_conv1_7, strides=[1, 2, 2, 1], padding='SAME')\n", (2282, 2340), True, 'import tensorflow as tf\n'), ((2359, 2406), 'tensorflow.concat', 'tf.concat', (['[x_conv1_3, x_conv1_5, x_conv1_7]', '(3)'], {}), '([x_conv1_3, x_conv1_5, x_conv1_7], 3)\n', (2368, 2406), True, 'import tensorflow as tf\n'), ((2555, 2623), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x_conv1', 'w_conv2'], {'strides': '[1, 2, 2, 1]', 'padding': 
'"""SAME"""'}), "(x_conv1, w_conv2, strides=[1, 2, 2, 1], padding='SAME')\n", (2567, 2623), True, 'import tensorflow as tf\n'), ((2785, 2853), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x_conv2', 'w_conv4'], {'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x_conv2, w_conv4, strides=[1, 2, 2, 1], padding='SAME')\n", (2797, 2853), True, 'import tensorflow as tf\n'), ((2956, 3060), 'resnet_block.identity_block', 'resnet_block.identity_block', (['x_conv4', '(3)', '(512)', '[256, 256, 512]'], {'stage': '(2)', 'block': '"""b"""', 'training': 'training'}), "(x_conv4, 3, 512, [256, 256, 512], stage=2,\n block='b', training=training)\n", (2983, 3060), False, 'import resnet_block\n'), ((3076, 3180), 'resnet_block.identity_block', 'resnet_block.identity_block', (['x_conv6', '(3)', '(512)', '[256, 256, 512]'], {'stage': '(2)', 'block': '"""c"""', 'training': 'training'}), "(x_conv6, 3, 512, [256, 256, 512], stage=2,\n block='c', training=training)\n", (3103, 3180), False, 'import resnet_block\n'), ((3196, 3300), 'resnet_block.identity_block', 'resnet_block.identity_block', (['x_conv7', '(3)', '(512)', '[256, 256, 512]'], {'stage': '(2)', 'block': '"""d"""', 'training': 'training'}), "(x_conv7, 3, 512, [256, 256, 512], stage=2,\n block='d', training=training)\n", (3223, 3300), False, 'import resnet_block\n'), ((3316, 3420), 'resnet_block.identity_block', 'resnet_block.identity_block', (['x_conv8', '(3)', '(512)', '[256, 256, 512]'], {'stage': '(2)', 'block': '"""e"""', 'training': 'training'}), "(x_conv8, 3, 512, [256, 256, 512], stage=2,\n block='e', training=training)\n", (3343, 3420), False, 'import resnet_block\n'), ((3436, 3540), 'resnet_block.identity_block', 'resnet_block.identity_block', (['x_conv8', '(3)', '(512)', '[256, 256, 512]'], {'stage': '(2)', 'block': '"""f"""', 'training': 'training'}), "(x_conv8, 3, 512, [256, 256, 512], stage=2,\n block='f', training=training)\n", (3463, 3540), False, 'import resnet_block\n'), ((3556, 3660), 'resnet_block.identity_block', 'resnet_block.identity_block', (['x_conv8', '(3)', '(512)', '[256, 256, 512]'], {'stage': '(2)', 'block': '"""g"""', 'training': 'training'}), "(x_conv8, 3, 512, [256, 256, 512], stage=2,\n block='g', training=training)\n", (3583, 3660), False, 'import resnet_block\n'), ((3676, 3780), 'resnet_block.identity_block', 'resnet_block.identity_block', (['x_conv8', '(3)', '(512)', '[256, 256, 512]'], {'stage': '(2)', 'block': '"""h"""', 'training': 'training'}), "(x_conv8, 3, 512, [256, 256, 512], stage=2,\n block='h', training=training)\n", (3703, 3780), False, 'import resnet_block\n'), ((4068, 4100), 'tensorflow.concat', 'tf.concat', (['[x_conv9, x_conv4]', '(3)'], {}), '([x_conv9, x_conv4], 3)\n', (4077, 4100), True, 'import tensorflow as tf\n'), ((4178, 4249), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x_conv9', 'w_conv9_1'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(x_conv9, w_conv9_1, strides=[1, 1, 1, 1], padding='VALID')\n", (4190, 4249), True, 'import tensorflow as tf\n'), ((4609, 4642), 'tensorflow.concat', 'tf.concat', (['[x_conv10, x_conv2]', '(3)'], {}), '([x_conv10, x_conv2], 3)\n', (4618, 4642), True, 'import tensorflow as tf\n'), ((4721, 4793), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x_conv10', 'w_conv10_1'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x_conv10, w_conv10_1, strides=[1, 1, 1, 1], padding='SAME')\n", (4733, 4793), True, 'import tensorflow as tf\n'), ((5158, 5191), 'tensorflow.concat', 'tf.concat', (['[x_conv11, x_conv1]', '(3)'], {}), '([x_conv11, x_conv1], 3)\n', 
(5167, 5191), True, 'import tensorflow as tf\n'), ((5270, 5343), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x_conv11', 'w_conv11_1'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(x_conv11, w_conv11_1, strides=[1, 1, 1, 1], padding='VALID')\n", (5282, 5343), True, 'import tensorflow as tf\n'), ((5627, 5652), 'tensorflow.add', 'tf.add', (['x_conv12', 'input_X'], {}), '(x_conv12, input_X)\n', (5633, 5652), True, 'import tensorflow as tf\n'), ((929, 949), 'tensorflow.cast', 'tf.cast', (['N', 'tf.int32'], {}), '(N, tf.int32)\n', (936, 949), True, 'import tensorflow as tf\n'), ((949, 969), 'tensorflow.cast', 'tf.cast', (['H', 'tf.int32'], {}), '(H, tf.int32)\n', (956, 969), True, 'import tensorflow as tf\n'), ((969, 989), 'tensorflow.cast', 'tf.cast', (['W', 'tf.int32'], {}), '(W, tf.int32)\n', (976, 989), True, 'import tensorflow as tf\n'), ((989, 1009), 'tensorflow.cast', 'tf.cast', (['G', 'tf.int32'], {}), '(G, tf.int32)\n', (996, 1009), True, 'import tensorflow as tf\n'), ((1009, 1034), 'tensorflow.cast', 'tf.cast', (['(C // G)', 'tf.int32'], {}), '(C // G, tf.int32)\n', (1016, 1034), True, 'import tensorflow as tf\n'), ((1176, 1196), 'tensorflow.cast', 'tf.cast', (['N', 'tf.int32'], {}), '(N, tf.int32)\n', (1183, 1196), True, 'import tensorflow as tf\n'), ((1196, 1216), 'tensorflow.cast', 'tf.cast', (['H', 'tf.int32'], {}), '(H, tf.int32)\n', (1203, 1216), True, 'import tensorflow as tf\n'), ((1216, 1236), 'tensorflow.cast', 'tf.cast', (['W', 'tf.int32'], {}), '(W, tf.int32)\n', (1223, 1236), True, 'import tensorflow as tf\n'), ((1236, 1256), 'tensorflow.cast', 'tf.cast', (['C', 'tf.int32'], {}), '(C, tf.int32)\n', (1243, 1256), True, 'import tensorflow as tf\n'), ((1764, 1799), 'tensorflow.random_normal', 'tf.random_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (1780, 1799), True, 'import tensorflow as tf\n'), ((302, 311), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (308, 311), True, 'import tensorflow as tf\n'), ((532, 541), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (538, 541), True, 'import tensorflow as tf\n'), ((1619, 1642), 'numpy.sqrt', 'np.sqrt', (['(2.0 / shape[2])'], {}), '(2.0 / shape[2])\n', (1626, 1642), True, 'import numpy as np\n'), ((3910, 3927), 'tensorflow.shape', 'tf.shape', (['x_conv4'], {}), '(x_conv4)\n', (3918, 3927), True, 'import tensorflow as tf\n'), ((4457, 4474), 'tensorflow.shape', 'tf.shape', (['x_conv2'], {}), '(x_conv2)\n', (4465, 4474), True, 'import tensorflow as tf\n'), ((5006, 5023), 'tensorflow.shape', 'tf.shape', (['x_conv1'], {}), '(x_conv1)\n', (5014, 5023), True, 'import tensorflow as tf\n'), ((5554, 5571), 'tensorflow.shape', 'tf.shape', (['input_X'], {}), '(input_X)\n', (5562, 5571), True, 'import tensorflow as tf\n'), ((1303, 1323), 'tensorflow.cast', 'tf.cast', (['C', 'tf.int32'], {}), '(C, tf.int32)\n', (1310, 1323), True, 'import tensorflow as tf\n'), ((1385, 1405), 'tensorflow.cast', 'tf.cast', (['C', 'tf.int32'], {}), '(C, tf.int32)\n', (1392, 1405), True, 'import tensorflow as tf\n')]
|
# Copyright (c) 2016-2018, University of Idaho
# All rights reserved.
#
# <NAME> (<EMAIL>)
#
# The project described was supported by NSF award number IIA-1301792
# from the NSF Idaho EPSCoR Program and by the National Science Foundation.
import os
from os.path import exists as _exists
from os.path import join as _join
from os.path import split as _split
from glob import glob
import shutil
# non-standard
import jsonpickle
import numpy as np
# wepppy submodules
from wepppy.nodb.watershed import Watershed
from wepppy.nodb.base import NoDbBase
from wepppy.rhem.out import RhemOutput, RhemSummary
class RhemPostNoDbLockedException(Exception):
pass
class RhemPost(NoDbBase):
"""
Manager that keeps track of project details
and coordinates access of NoDb instances.
"""
__name__ = 'RhemPost'
def __init__(self, wd, cfg_fn):
super(RhemPost, self).__init__(wd, cfg_fn)
self.lock()
# noinspection PyBroadException
try:
self.hill_summaries = None
self.periods = None
self.watershed_annuals = None
self.dump_and_unlock()
except Exception:
self.unlock('-f')
raise
#
# Required for NoDbBase Subclass
#
# noinspection PyPep8Naming
@staticmethod
def getInstance(wd):
with open(_join(wd, 'rhempost.nodb')) as fp:
db = jsonpickle.decode(fp.read())
assert isinstance(db, RhemPost), db
if _exists(_join(wd, 'READONLY')):
db.wd = os.path.abspath(wd)
return db
if os.path.abspath(wd) != os.path.abspath(db.wd):
db.wd = wd
db.lock()
db.dump_and_unlock()
return db
@property
def _nodb(self):
return _join(self.wd, 'rhempost.nodb')
@property
def _lock(self):
return _join(self.wd, 'rhempost.nodb.lock')
def run_post(self):
from wepppy.nodb import Rhem
wd = self.wd
self.lock()
# noinspection PyBroadException
try:
output_dir = self.output_dir
watershed = Watershed.getInstance(wd)
rhem = Rhem.getInstance(wd)
out_dir = rhem.output_dir
hill_summaries = {}
total_area = 0.0
runoff = 0.0
soil_yield = 0.0
soil_loss = 0.0
precip = 0.0
periods = None
ret_rain = None
ret_runoff = None
ret_yield = None
ret_loss = None
for topaz_id, summary in watershed.sub_iter():
area_ha = summary.area / 10000
total_area += area_ha
summary_fn = _join(out_dir, 'hill_{}.sum'.format(topaz_id))
hill_summaries[topaz_id] = RhemSummary(summary_fn, area_ha)
runoff += hill_summaries[topaz_id].annuals['Avg-Runoff (m^3/yr)']
soil_yield += hill_summaries[topaz_id].annuals['Avg-SY (tonne/yr)']
soil_loss += hill_summaries[topaz_id].annuals['Avg-Soil-Loss (tonne/yr)']
precip += hill_summaries[topaz_id].annuals['Avg. Precipitation (m^3/yr)']
if ret_rain is None:
ret_rain = np.array(hill_summaries[topaz_id].return_freqs['Rain (m^3)'])
else:
ret_rain += np.array(hill_summaries[topaz_id].return_freqs['Rain (m^3)'])
if ret_runoff is None:
ret_runoff = np.array(hill_summaries[topaz_id].return_freqs['Runoff (m^3)'])
else:
ret_runoff += np.array(hill_summaries[topaz_id].return_freqs['Runoff (m^3)'])
if ret_yield is None:
ret_yield = np.array(hill_summaries[topaz_id].return_freqs['Sediment-Yield (tonne)'])
else:
ret_yield += np.array(hill_summaries[topaz_id].return_freqs['Sediment-Yield (tonne)'])
if ret_loss is None:
ret_loss = np.array(hill_summaries[topaz_id].return_freqs['Soil-Loss (tonne)'])
else:
ret_loss += np.array(hill_summaries[topaz_id].return_freqs['Soil-Loss (tonne)'])
if periods is None:
periods = [v for v in hill_summaries[topaz_id].ret_freq_periods]
self.hill_summaries = hill_summaries
self.watershed_annuals = {'Avg-Runoff (m^3/yr)': runoff,
'Avg-Runoff (mm/yr)': runoff / (total_area * 10000) * 1000,
'Avg-SY (tonne/yr)': soil_yield,
'Avg-SY (tonne/ha/yr)': soil_yield/ total_area,
'Avg-Soil-Loss (tonne/yr)': soil_loss,
'Avg-Soil-Loss (tonne/ha/yr)': soil_loss / total_area,
'Avg. Precipitation (m^3/yr)': precip,
'Avg. Precipitation (mm/yr)': precip / (total_area * 10000) * 1000}
self.ret_freq_periods = periods
watershed_ret_freqs = {'Rain (m^3)': ret_rain,
'Rain (mm)': ret_rain / (total_area * 10000) * 1000,
'Runoff (m^3)': ret_runoff,
'Runoff (mm)': ret_runoff / (total_area * 10000) * 1000,
'Sediment-Yield (tonne)': ret_yield,
'Sediment-Yield (tonne/ha)': ret_yield / total_area,
'Soil-Loss (tonne)': ret_loss,
'Soil-Loss (tonne/ha)': ret_loss / total_area}
for k in watershed_ret_freqs:
watershed_ret_freqs[k] = [float(v) for v in watershed_ret_freqs[k]]
self.watershed_ret_freqs = watershed_ret_freqs
self.dump_and_unlock()
except Exception:
self.unlock('-f')
raise
def query_sub_val(self, measure):
_measure = measure.strip().lower()
key = None
if _measure == 'runoff':
key = 'Avg-Runoff (mm/yr)'
elif _measure == 'sed_yield':
key = 'Avg-SY (tonne/ha/yr)'
elif _measure == 'soil_loss':
key = 'Avg-Soil-Loss (tonne/ha/yr)'
assert key is not None
hill_summaries = self.hill_summaries
d = {}
for topaz_id in hill_summaries:
d[str(topaz_id)] = dict(
topaz_id=topaz_id,
value=hill_summaries[topaz_id].annuals[key])
return d
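# Editor's usage sketch (not part of the original module): typical call sequence,
# assuming a wepppy working directory `wd` that already contains RHEM outputs and
# a rhempost.nodb file created by the constructor.
def _example_rhem_post_usage(wd):
    rhem_post = RhemPost.getInstance(wd)
    rhem_post.run_post()                        # aggregate hillslope summaries
    return rhem_post.query_sub_val('runoff')    # {'<topaz_id>': {'topaz_id': ..., 'value': ...}}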
|
[
"wepppy.nodb.Rhem.getInstance",
"os.path.join",
"numpy.array",
"wepppy.nodb.watershed.Watershed.getInstance",
"os.path.abspath",
"wepppy.rhem.out.RhemSummary"
] |
[((1829, 1860), 'os.path.join', '_join', (['self.wd', '"""rhempost.nodb"""'], {}), "(self.wd, 'rhempost.nodb')\n", (1834, 1860), True, 'from os.path import join as _join\n'), ((1912, 1948), 'os.path.join', '_join', (['self.wd', '"""rhempost.nodb.lock"""'], {}), "(self.wd, 'rhempost.nodb.lock')\n", (1917, 1948), True, 'from os.path import join as _join\n'), ((2172, 2197), 'wepppy.nodb.watershed.Watershed.getInstance', 'Watershed.getInstance', (['wd'], {}), '(wd)\n', (2193, 2197), False, 'from wepppy.nodb.watershed import Watershed\n'), ((2217, 2237), 'wepppy.nodb.Rhem.getInstance', 'Rhem.getInstance', (['wd'], {}), '(wd)\n', (2233, 2237), False, 'from wepppy.nodb import Rhem\n'), ((1355, 1381), 'os.path.join', '_join', (['wd', '"""rhempost.nodb"""'], {}), "(wd, 'rhempost.nodb')\n", (1360, 1381), True, 'from os.path import join as _join\n'), ((1508, 1529), 'os.path.join', '_join', (['wd', '"""READONLY"""'], {}), "(wd, 'READONLY')\n", (1513, 1529), True, 'from os.path import join as _join\n'), ((1556, 1575), 'os.path.abspath', 'os.path.abspath', (['wd'], {}), '(wd)\n', (1571, 1575), False, 'import os\n'), ((1618, 1637), 'os.path.abspath', 'os.path.abspath', (['wd'], {}), '(wd)\n', (1633, 1637), False, 'import os\n'), ((1641, 1663), 'os.path.abspath', 'os.path.abspath', (['db.wd'], {}), '(db.wd)\n', (1656, 1663), False, 'import os\n'), ((2853, 2885), 'wepppy.rhem.out.RhemSummary', 'RhemSummary', (['summary_fn', 'area_ha'], {}), '(summary_fn, area_ha)\n', (2864, 2885), False, 'from wepppy.rhem.out import RhemOutput, RhemSummary\n'), ((3302, 3363), 'numpy.array', 'np.array', (["hill_summaries[topaz_id].return_freqs['Rain (m^3)']"], {}), "(hill_summaries[topaz_id].return_freqs['Rain (m^3)'])\n", (3310, 3363), True, 'import numpy as np\n'), ((3418, 3479), 'numpy.array', 'np.array', (["hill_summaries[topaz_id].return_freqs['Rain (m^3)']"], {}), "(hill_summaries[topaz_id].return_freqs['Rain (m^3)'])\n", (3426, 3479), True, 'import numpy as np\n'), ((3553, 3616), 'numpy.array', 'np.array', (["hill_summaries[topaz_id].return_freqs['Runoff (m^3)']"], {}), "(hill_summaries[topaz_id].return_freqs['Runoff (m^3)'])\n", (3561, 3616), True, 'import numpy as np\n'), ((3673, 3736), 'numpy.array', 'np.array', (["hill_summaries[topaz_id].return_freqs['Runoff (m^3)']"], {}), "(hill_summaries[topaz_id].return_freqs['Runoff (m^3)'])\n", (3681, 3736), True, 'import numpy as np\n'), ((3808, 3881), 'numpy.array', 'np.array', (["hill_summaries[topaz_id].return_freqs['Sediment-Yield (tonne)']"], {}), "(hill_summaries[topaz_id].return_freqs['Sediment-Yield (tonne)'])\n", (3816, 3881), True, 'import numpy as np\n'), ((3937, 4010), 'numpy.array', 'np.array', (["hill_summaries[topaz_id].return_freqs['Sediment-Yield (tonne)']"], {}), "(hill_summaries[topaz_id].return_freqs['Sediment-Yield (tonne)'])\n", (3945, 4010), True, 'import numpy as np\n'), ((4080, 4148), 'numpy.array', 'np.array', (["hill_summaries[topaz_id].return_freqs['Soil-Loss (tonne)']"], {}), "(hill_summaries[topaz_id].return_freqs['Soil-Loss (tonne)'])\n", (4088, 4148), True, 'import numpy as np\n'), ((4203, 4271), 'numpy.array', 'np.array', (["hill_summaries[topaz_id].return_freqs['Soil-Loss (tonne)']"], {}), "(hill_summaries[topaz_id].return_freqs['Soil-Loss (tonne)'])\n", (4211, 4271), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import decorators
from scipy import optimize
import settings
import utility_functions as utilfunc
import agent_mutation
import PySAM.Battwatts as battery
import PySAM.BatteryTools as batt_tools
import PySAM.Utilityrate5 as utility
import PySAM.Cashloan as cashloan
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
#%%
def calc_system_performance(kw, pv, utilityrate, loan, batt, costs, agent, en_batt=True, batt_simple_dispatch=0):
"""
Executes Battwatts, Utilityrate5, and Cashloan PySAM modules with system sizes (kw) as input
Parameters
----------
kw: Capacity (in kW)
pv: Dictionary with generation_hourly and consumption_hourly
utilityrate: PySAM Utilityrate5 module
loan: PySAM Cashloan module
batt: PySAM Battwatts module
costs: Dictionary with system costs
agent: pd.Series with agent attributes
en_batt: Enable battery
batt_simple_dispatch: batt.Battery.batt_simple_dispatch
- batt_simple_dispatch = 0 (peak shaving look ahead)
- batt_simple_dispatch = 1 (peak shaving look behind)
Returns
-------
-loan.Outputs.npv: the negative net present value of system + storage to be optimized for system sizing
"""
inv_eff = 0.96 # default SAM inverter efficiency for PV
gen_hourly = pv['generation_hourly']
load_hourly = pv['consumption_hourly'] # same field as 'load_kwh_per_customer_in_bin_initial' when summed
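    # Editor's note (addition): gen_hourly is a normalized hourly production profile
    # (kWh per kW of PV capacity), so scaling by kw and 1000 below gives DC power in W.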
dc = [(i * kw) * 1000 for i in gen_hourly] # W
ac = [i * inv_eff for i in dc] # W
gen = [i / 1000 for i in ac] # W to kW
# Set up battery, with system generation conditional on the battery generation being included
if en_batt:
batt.Battery.dc = dc
batt.Battery.ac = ac
batt.Battery.batt_simple_enable = 1
batt.Battery.batt_simple_chemistry = 1 # default value is 1: li ion for residential
batt.Battery.batt_simple_dispatch = batt_simple_dispatch
batt.Battery.batt_simple_meter_position = 0 # default value
batt.Battery.inverter_efficiency = 100 # recommended by Darice for dc-connected
batt.Battery.load = load_hourly
# PV to Battery ratio (kW) - From Ashreeta, 02/08/2020
pv_to_batt_ratio = 1.31372
batt_capacity_to_power_ratio = 2 # hours of operation
desired_size = kw / pv_to_batt_ratio # Default SAM value for residential systems is 10
desired_power = desired_size / batt_capacity_to_power_ratio
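        # Worked example (editor's addition): for a 6.57 kW PV system,
        # desired_size = 6.57 / 1.31372 ~= 5.0 kWh and desired_power = 5.0 / 2 = 2.5 kW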
batt_inputs = {
'batt_chem': batt.Battery.batt_simple_chemistry,
'batt_Qfull': 2.5, # default SAM value
'batt_Vnom_default': 3.6, # default SAM value
'batt_ac_or_dc': 0, # dc-connected
'desired_power': desired_power,
'desired_capacity': desired_size,
'desired_voltage': 500,
'size_by_ac_not_dc': 0, # dc-connected
'inverter_eff': batt.Battery.inverter_efficiency
# 'batt_dc_dc_efficiency': (optional)
}
# Default values for lead acid batteries
if batt.Battery.batt_simple_chemistry == 0:
batt_inputs['LeadAcid_q10'] = 93.2
batt_inputs['LeadAcid_q20'] = 100
batt_inputs['LeadAcid_qn'] = 58.12
# batt_inputs['LeadAcid_tn']: (optional)
# PySAM.BatteryTools.size_li_ion_battery is the same as dGen_battery_sizing_battwatts.py
batt_outputs = batt_tools.size_li_ion_battery(batt_inputs)
computed_size = batt_outputs['batt_computed_bank_capacity']
computed_power = batt_outputs['batt_power_discharge_max_kwdc']
batt.Battery.batt_simple_kwh = computed_size
batt.Battery.batt_simple_kw = computed_power
batt.execute()
# declare value for net billing sell rate
if agent.loc['compensation_style']=='none':
net_billing_sell_rate = 0.
else:
net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)
utilityrate.SystemOutput.gen = batt.Outputs.gen
loan.BatterySystem.en_batt = 1
loan.BatterySystem.batt_computed_bank_capacity = batt.Outputs.batt_bank_installed_capacity
loan.BatterySystem.batt_bank_replacement = batt.Outputs.batt_bank_replacement
# Battery capacity-based System Costs amount [$/kWhcap]
loan.BatterySystem.battery_per_kWh = costs['batt_capex_per_kwh']
# specify number of O&M types (1 = PV+batt)
loan.SystemCosts.add_om_num_types = 1
# specify O&M variables
loan.SystemCosts.om_capacity = [costs['system_om_per_kw'] + costs['system_variable_om_per_kw']]
loan.SystemCosts.om_capacity1 = [costs['batt_om_per_kw']]
loan.SystemCosts.om_production1 = [costs['batt_om_per_kwh'] * 1000]
loan.SystemCosts.om_replacement_cost1 = [0.]
# Battery capacity for System Costs values [kW]
loan.SystemCosts.om_capacity1_nameplate = batt.Battery.batt_simple_kw
# Battery production for System Costs values [kWh]
loan.SystemCosts.om_production1_values = [batt.Battery.batt_simple_kwh]
batt_costs = ((costs['batt_capex_per_kw']*batt.Battery.batt_simple_kw) +
(costs['batt_capex_per_kwh'] * batt.Battery.batt_simple_kwh))
else:
batt.Battery.batt_simple_enable = 0
loan.BatterySystem.en_batt = 0
computed_power = computed_size = 0
# declare value for net billing sell rate
if agent.loc['compensation_style']=='none':
net_billing_sell_rate = 0.
else:
net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)
utilityrate.SystemOutput.gen = gen
# specify number of O&M types (0 = PV only)
loan.SystemCosts.add_om_num_types = 0
# specify O&M variables
loan.SystemCosts.om_capacity = [costs['system_om_per_kw'] + costs['system_variable_om_per_kw']]
loan.SystemCosts.om_replacement_cost1 = [0.]
system_costs = costs['system_capex_per_kw'] * kw
batt_costs = 0
# Execute utility rate module
utilityrate.Load.load = load_hourly
utilityrate.execute()
# Process payment incentives
loan = process_incentives(loan, kw, computed_power, computed_size, gen_hourly, agent)
# Specify final Cashloan parameters
loan.FinancialParameters.system_capacity = kw
loan.SystemOutput.annual_energy_value = utilityrate.Outputs.annual_energy_value
loan.SystemOutput.gen = utilityrate.SystemOutput.gen
loan.ThirdPartyOwnership.elec_cost_with_system = utilityrate.Outputs.elec_cost_with_system
loan.ThirdPartyOwnership.elec_cost_without_system = utilityrate.Outputs.elec_cost_without_system
# Calculate system costs
direct_costs = (system_costs + batt_costs) * costs['cap_cost_multiplier']
sales_tax = 0
loan.SystemCosts.total_installed_cost = direct_costs + sales_tax
# Execute financial module
loan.execute()
return -loan.Outputs.npv
def calc_system_size_and_performance_pv(agent, sectors, rate_switch_table=None):
"""
Calculate the optimal system and battery size and generation profile, and resulting bill savings and financial metrics.
Parameters
----------
agent : 'pd.df'
individual agent object.
Returns
-------
agent: 'pd.df'
Adds several features to the agent dataframe:
- **agent_id**
- **system_kw** - system capacity selected by agent
- **batt_kw** - battery capacity selected by agent
- **batt_kwh** - battery energy capacity
- **npv** - net present value of system + storage
- **cash_flow** - array of annual cash flows from system adoption
- **batt_dispatch_profile** - array of hourly battery dispatch
- **annual_energy_production_kwh** - annual energy production (kwh) of system
- **naep** - normalized annual energy production (kwh/kW) of system
- **capacity_factor** - annual capacity factor
- **first_year_elec_bill_with_system** - first year electricity bill with adopted system ($/yr)
- **first_year_elec_bill_savings** - first year electricity bill savings with adopted system ($/yr)
- **first_year_elec_bill_savings_frac** - fraction of savings on electricity bill in first year of system adoption
- **max_system_kw** - maximum system size allowed as constrained by roof size or not exceeding annual consumption
- **first_year_elec_bill_without_system** - first year electricity bill without adopted system ($/yr)
- **avg_elec_price_cents_per_kwh** - first year electricity price (c/kwh)
- **cbi** - ndarray of capacity-based incentives applicable to agent
- **ibi** - ndarray of investment-based incentives applicable to agent
- **pbi** - ndarray of performance-based incentives applicable to agent
- **cash_incentives** - ndarray of cash-based incentives applicable to agent
- **export_tariff_result** - summary of structure of retail tariff applied to agent
"""
# Initialize new DB connection
model_settings = settings.init_model_settings()
con, cur = utilfunc.make_con(model_settings.pg_conn_string, model_settings.role)
# PV
pv = dict()
# Extract load profile after scaling hourly load to annual total
load_profile_df = agent_mutation.elec.get_and_apply_agent_load_profiles(con, agent)
pv['consumption_hourly'] = pd.Series(load_profile_df['consumption_hourly']).iloc[0]
del load_profile_df
# Using the scale offset factor of 1E6 for capacity factors
norm_scaled_pv_cf_profiles_df = agent_mutation.elec.get_and_apply_normalized_hourly_resource_solar(con, agent)
pv['generation_hourly'] = pd.Series(norm_scaled_pv_cf_profiles_df['solar_cf_profile'].iloc[0]) / 1e6
del norm_scaled_pv_cf_profiles_df
# Calculate normalized annual energy production
agent.loc['naep'] = float(np.sum(pv['generation_hourly']))
# Battwatts
if agent.loc['sector_abbr'] == 'res':
batt = battery.default("PVWattsBatteryResidential")
else:
batt = battery.default("PVWattsBatteryCommercial")
# Utilityrate5
if agent.loc['sector_abbr'] == 'res':
utilityrate = utility.default("PVWattsBatteryResidential")
else:
utilityrate = utility.default("PVWattsBatteryCommercial")
######################################
###--------- UTILITYRATE5 ---------###
###--- SYSTEM LIFETIME SETTINGS ---###
######################################
# Inflation rate [%]
utilityrate.Lifetime.inflation_rate = agent.loc['inflation_rate'] * 100
# Number of years in analysis [years]
utilityrate.Lifetime.analysis_period = agent.loc['economic_lifetime_yrs']
# Lifetime hourly system outputs [0/1]; Options: 0=hourly first year,1=hourly lifetime
utilityrate.Lifetime.system_use_lifetime_output = 0
######################################
###--------- UTILITYRATE5 ---------###
###---- DEGRADATION/ESCALATION ----###
######################################
# Annual energy degradation [%]
utilityrate.SystemOutput.degradation = [agent.loc['pv_degradation_factor'] * 100] # convert decimal to %
# Annual electricity rate escalation [%/year]
utilityrate.ElectricityRates.rate_escalation = [agent.loc['elec_price_escalator'] * 100] # convert decimal to %
######################################
###--------- UTILITYRATE5 ---------###
###---- NET METERING SETTINGS -----###
######################################
# Dictionary to map dGen compensation styles to PySAM options
nem_options = {'net metering':0, 'net billing':2, 'buy all sell all':4, 'none':2}
# Metering options [0=net energy metering,1=net energy metering with $ credits,2=net billing,3=net billing with carryover to next month,4=buy all - sell all]
utilityrate.ElectricityRates.ur_metering_option = nem_options[agent.loc['compensation_style']]
# Year end sell rate [$/kWh]
utilityrate.ElectricityRates.ur_nm_yearend_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
if agent.loc['compensation_style']=='none':
net_billing_sell_rate = 0.
else:
net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
######################################
###--------- UTILITYRATE5 ---------###
###-------- BUY/SELL RATES --------###
######################################
# Enable time step sell rates [0/1]
utilityrate.ElectricityRates.ur_en_ts_sell_rate = 0
# Time step sell rates [0/1]
utilityrate.ElectricityRates.ur_ts_sell_rate = [0.]
# Set sell rate equal to buy rate [0/1]
utilityrate.ElectricityRates.ur_sell_eq_buy = 0
######################################
###--------- UTILITYRATE5 ---------###
###-------- MISC. SETTINGS --------###
######################################
# Use single monthly peak for TOU demand charge; options: 0=use TOU peak,1=use flat peak
utilityrate.ElectricityRates.TOU_demand_single_peak = 0 # ?
# Optionally enable/disable electricity_rate [years]
utilityrate.ElectricityRates.en_electricity_rates = 1
######################################
###--------- UTILITYRATE5 ---------###
###----- TARIFF RESTRUCTURING -----###
######################################
utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)
######################################
###----------- CASHLOAN -----------###
###----- FINANCIAL PARAMETERS -----###
######################################
# Initiate cashloan model and set market-specific variables
# Assume res agents do not evaluate depreciation at all
# Assume non-res agents only evaluate federal depreciation (not state)
if agent.loc['sector_abbr'] == 'res':
loan = cashloan.default("PVWattsBatteryResidential")
loan.FinancialParameters.market = 0
else:
loan = cashloan.default("PVWattsBatteryCommercial")
loan.FinancialParameters.market = 1
loan.FinancialParameters.analysis_period = agent.loc['economic_lifetime_yrs']
loan.FinancialParameters.debt_fraction = 100 - (agent.loc['down_payment_fraction'] * 100)
loan.FinancialParameters.federal_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.7] # SAM default
loan.FinancialParameters.inflation_rate = agent.loc['inflation_rate'] * 100
loan.FinancialParameters.insurance_rate = 0
loan.FinancialParameters.loan_rate = agent.loc['loan_interest_rate'] * 100
loan.FinancialParameters.loan_term = agent.loc['loan_term_yrs']
loan.FinancialParameters.mortgage = 0 # default value - standard loan (no mortgage)
loan.FinancialParameters.prop_tax_assessed_decline = 5 # PySAM default
loan.FinancialParameters.prop_tax_cost_assessed_percent = 95 # PySAM default
loan.FinancialParameters.property_tax_rate = 0 # PySAM default
loan.FinancialParameters.real_discount_rate = agent.loc['real_discount_rate'] * 100
loan.FinancialParameters.salvage_percentage = 0
loan.FinancialParameters.state_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.3] # SAM default
loan.FinancialParameters.system_heat_rate = 0
######################################
###----------- CASHLOAN -----------###
###--------- SYSTEM COSTS ---------###
######################################
# System costs that are input to loan.SystemCosts will depend on system configuration (PV, batt, PV+batt)
# and are therefore specified in calc_system_performance()
system_costs = dict()
system_costs['system_capex_per_kw'] = agent.loc['system_capex_per_kw']
system_costs['system_om_per_kw'] = agent.loc['system_om_per_kw']
system_costs['system_variable_om_per_kw'] = agent.loc['system_variable_om_per_kw']
system_costs['cap_cost_multiplier'] = agent.loc['cap_cost_multiplier']
system_costs['batt_capex_per_kw'] = agent.loc['batt_capex_per_kw']
system_costs['batt_capex_per_kwh'] = agent.loc['batt_capex_per_kwh']
system_costs['batt_om_per_kw'] = agent.loc['batt_om_per_kw']
system_costs['batt_om_per_kwh'] = agent.loc['batt_om_per_kwh']
######################################
###----------- CASHLOAN -----------###
###---- DEPRECIATION PARAMETERS ---###
######################################
if agent.loc['sector_abbr'] == 'res':
loan.Depreciation.depr_fed_type = 0
loan.Depreciation.depr_sta_type = 0
else:
loan.Depreciation.depr_fed_type = 1
loan.Depreciation.depr_sta_type = 0
######################################
###----------- CASHLOAN -----------###
###----- TAX CREDIT INCENTIVES ----###
######################################
loan.TaxCreditIncentives.itc_fed_percent = agent.loc['itc_fraction_of_capex'] * 100
######################################
###----------- CASHLOAN -----------###
###-------- BATTERY SYSTEM --------###
######################################
loan.BatterySystem.batt_replacement_option = 2 # user schedule
batt_replacement_schedule = [0 for i in range(0, agent.loc['batt_lifetime_yrs'] - 1)] + [1]
loan.BatterySystem.batt_replacement_schedule = batt_replacement_schedule
######################################
###----------- CASHLOAN -----------###
###-------- SYSTEM OUTPUT ---------###
######################################
loan.SystemOutput.degradation = [agent.loc['pv_degradation_factor'] * 100]
######################################
###----------- CASHLOAN -----------###
###----------- LIFETIME -----------###
######################################
loan.Lifetime.system_use_lifetime_output = 0
# From dGen - calc_system_size_and_financial_performance()
max_size_load = agent.loc['load_kwh_per_customer_in_bin'] / agent.loc['naep']
max_size_roof = agent.loc['developable_roof_sqft'] * agent.loc['pv_kw_per_sqft']
max_system_kw = min(max_size_load, max_size_roof)
# set tolerance for minimize_scalar based on max_system_kw value
tol = min(0.25 * max_system_kw, 0.5)
# Calculate the PV system size that maximizes the agent's NPV, to a tolerance of 0.5 kW.
# Note that the optimization is technically minimizing negative NPV
    # ! As is, because of the tolerance, this function will not necessarily return a system size of 0 or the max PV size even if those are optimal
res_with_batt = optimize.minimize_scalar(calc_system_performance,
args = (pv, utilityrate, loan, batt, system_costs, True, 0),
bounds = (0, max_system_kw),
method = 'bounded',
tol = tol)
# PySAM Module outputs with battery
batt_loan_outputs = loan.Outputs.export()
batt_util_outputs = utilityrate.Outputs.export()
batt_annual_energy_kwh = np.sum(utilityrate.SystemOutput.gen)
batt_kw = batt.Battery.batt_simple_kw
batt_kwh = batt.Battery.batt_simple_kwh
batt_dispatch_profile = batt.Outputs.batt_power # ?
# Run without battery
res_no_batt = optimize.minimize_scalar(calc_system_performance,
args = (pv, utilityrate, loan, batt, system_costs, False, 0),
bounds = (0, max_system_kw),
method = 'bounded',
tol = tol)
# PySAM Module outputs without battery
no_batt_loan_outputs = loan.Outputs.export()
no_batt_util_outputs = utilityrate.Outputs.export()
no_batt_annual_energy_kwh = np.sum(utilityrate.SystemOutput.gen)
# Retrieve NPVs of system with batt and system without batt
npv_w_batt = batt_loan_outputs['npv']
npv_no_batt = no_batt_loan_outputs['npv']
# Choose the system with the higher NPV
if npv_w_batt >= npv_no_batt:
system_kw = res_with_batt.x
annual_energy_production_kwh = batt_annual_energy_kwh
first_year_elec_bill_with_system = batt_util_outputs['elec_cost_with_system_year1']
first_year_elec_bill_without_system = batt_util_outputs['elec_cost_without_system_year1']
npv = npv_w_batt
payback = batt_loan_outputs['payback']
cash_flow = list(batt_loan_outputs['cf_payback_with_expenses']) # ?
cbi_total = batt_loan_outputs['cbi_total']
cbi_total_fed = batt_loan_outputs['cbi_total_fed']
cbi_total_oth = batt_loan_outputs['cbi_total_oth']
cbi_total_sta = batt_loan_outputs['cbi_total_sta']
cbi_total_uti = batt_loan_outputs['cbi_total_uti']
ibi_total = batt_loan_outputs['ibi_total']
ibi_total_fed = batt_loan_outputs['ibi_total_fed']
ibi_total_oth = batt_loan_outputs['ibi_total_oth']
ibi_total_sta = batt_loan_outputs['ibi_total_sta']
ibi_total_uti = batt_loan_outputs['ibi_total_uti']
cf_pbi_total = batt_loan_outputs['cf_pbi_total']
pbi_total_fed = batt_loan_outputs['cf_pbi_total_fed']
pbi_total_oth = batt_loan_outputs['cf_pbi_total_oth']
pbi_total_sta = batt_loan_outputs['cf_pbi_total_sta']
pbi_total_uti = batt_loan_outputs['cf_pbi_total_uti']
else:
system_kw = res_no_batt.x
annual_energy_production_kwh = no_batt_annual_energy_kwh
first_year_elec_bill_with_system = no_batt_util_outputs['elec_cost_with_system_year1']
first_year_elec_bill_without_system = no_batt_util_outputs['elec_cost_without_system_year1']
npv = npv_no_batt
payback = no_batt_loan_outputs['payback']
cash_flow = list(no_batt_loan_outputs['cf_payback_with_expenses'])
batt_kw = 0
batt_kwh = 0
batt_dispatch_profile = np.nan
cbi_total = no_batt_loan_outputs['cbi_total']
cbi_total_fed = no_batt_loan_outputs['cbi_total_fed']
cbi_total_oth = no_batt_loan_outputs['cbi_total_oth']
cbi_total_sta = no_batt_loan_outputs['cbi_total_sta']
cbi_total_uti = no_batt_loan_outputs['cbi_total_uti']
ibi_total = no_batt_loan_outputs['ibi_total']
ibi_total_fed = no_batt_loan_outputs['ibi_total_fed']
ibi_total_oth = no_batt_loan_outputs['ibi_total_oth']
ibi_total_sta = no_batt_loan_outputs['ibi_total_sta']
ibi_total_uti = no_batt_loan_outputs['ibi_total_uti']
cf_pbi_total = no_batt_loan_outputs['cf_pbi_total']
pbi_total_fed = no_batt_loan_outputs['cf_pbi_total_fed']
pbi_total_oth = no_batt_loan_outputs['cf_pbi_total_oth']
pbi_total_sta = no_batt_loan_outputs['cf_pbi_total_sta']
pbi_total_uti = no_batt_loan_outputs['cf_pbi_total_uti']
# change 0 value to 1 to avoid divide by zero errors
if first_year_elec_bill_without_system == 0:
first_year_elec_bill_without_system = 1.0
# Add outputs to agent df
naep = annual_energy_production_kwh / system_kw
first_year_elec_bill_savings = first_year_elec_bill_without_system - first_year_elec_bill_with_system
first_year_elec_bill_savings_frac = first_year_elec_bill_savings / first_year_elec_bill_without_system
avg_elec_price_cents_per_kwh = first_year_elec_bill_without_system / agent.loc['load_kwh_per_customer_in_bin']
agent.loc['system_kw'] = system_kw
agent.loc['npv'] = npv
agent.loc['payback_period'] = np.round(np.where(np.isnan(payback), 30.1, payback), 1).astype(float)
agent.loc['cash_flow'] = cash_flow
agent.loc['annual_energy_production_kwh'] = annual_energy_production_kwh
agent.loc['naep'] = naep
agent.loc['capacity_factor'] = agent.loc['naep'] / 8760
agent.loc['first_year_elec_bill_with_system'] = first_year_elec_bill_with_system
agent.loc['first_year_elec_bill_savings'] = first_year_elec_bill_savings
agent.loc['first_year_elec_bill_savings_frac'] = first_year_elec_bill_savings_frac
agent.loc['max_system_kw'] = max_system_kw
agent.loc['first_year_elec_bill_without_system'] = first_year_elec_bill_without_system
agent.loc['avg_elec_price_cents_per_kwh'] = avg_elec_price_cents_per_kwh
agent.loc['batt_kw'] = batt_kw
agent.loc['batt_kwh'] = batt_kwh
agent.loc['batt_dispatch_profile'] = batt_dispatch_profile
# Financial outputs (find out which ones to include):
agent.loc['cbi'] = np.array({'cbi_total': cbi_total,
'cbi_total_fed': cbi_total_fed,
'cbi_total_oth': cbi_total_oth,
'cbi_total_sta': cbi_total_sta,
'cbi_total_uti': cbi_total_uti
})
agent.loc['ibi'] = np.array({'ibi_total': ibi_total,
'ibi_total_fed': ibi_total_fed,
'ibi_total_oth': ibi_total_oth,
'ibi_total_sta': ibi_total_sta,
'ibi_total_uti': ibi_total_uti
})
agent.loc['pbi'] = np.array({'pbi_total': cf_pbi_total,
'pbi_total_fed': pbi_total_fed,
'pbi_total_oth': pbi_total_oth,
'pbi_total_sta': pbi_total_sta,
'pbi_total_uti': pbi_total_uti
})
agent.loc['cash_incentives'] = ''
agent.loc['export_tariff_results'] = ''
out_cols = ['agent_id',
'system_kw',
'batt_kw',
'batt_kwh',
'npv',
'payback_period',
'cash_flow',
'batt_dispatch_profile',
'annual_energy_production_kwh',
'naep',
'capacity_factor',
'first_year_elec_bill_with_system',
'first_year_elec_bill_savings',
'first_year_elec_bill_savings_frac',
'max_system_kw',
'first_year_elec_bill_without_system',
'avg_elec_price_cents_per_kwh',
'cbi',
'ibi',
'pbi',
'cash_incentives',
'export_tariff_results'
]
return agent[out_cols]
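# Editor's illustration (not part of the original module): a hypothetical driver
# showing how the per-agent sizing function could be applied row-wise; the real
# dGen pipeline wires this up elsewhere.
def _size_all_agents(agent_df, sectors):
    return agent_df.apply(calc_system_size_and_performance_pv, axis=1, sectors=sectors)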
#%%
def calc_financial_performance_wind(agent, sectors, rate_switch_table=None):
"""
Calculate bill savings and financial metrics based on pre-selected wind system size.
Parameters
----------
agent : 'pd.df'
individual agent object.
Returns
-------
agent: 'pd.df'
Adds several features to the agent dataframe:
- **agent_id**
- **system_kw** - system capacity selected by agent
- **npv** - net present value of system + storage
- **cash_flow** - array of annual cash flows from system adoption
- **batt_dispatch_profile** - array of hourly battery dispatch
- **annual_energy_production_kwh** - annual energy production (kwh) of system
- **naep** - normalized annual energy production (kwh/kW) of system
- **capacity_factor** - annual capacity factor
- **first_year_elec_bill_with_system** - first year electricity bill with adopted system ($/yr)
- **first_year_elec_bill_savings** - first year electricity bill savings with adopted system ($/yr)
- **first_year_elec_bill_savings_frac** - fraction of savings on electricity bill in first year of system adoption
- **max_system_kw** - maximum system size allowed as constrained by roof size or not exceeding annual consumption
- **first_year_elec_bill_without_system** - first year electricity bill without adopted system ($/yr)
- **avg_elec_price_cents_per_kwh** - first year electricity price (c/kwh)
- **cbi** - ndarray of capacity-based incentives applicable to agent
- **ibi** - ndarray of investment-based incentives applicable to agent
- **pbi** - ndarray of performance-based incentives applicable to agent
- **cash_incentives** - ndarray of cash-based incentives applicable to agent
- **export_tariff_result** - summary of structure of retail tariff applied to agent
"""
# Initialize new DB connection
model_settings = settings.init_model_settings()
con, cur = utilfunc.make_con(model_settings.pg_conn_string, model_settings.role)
# Extract load profile after scaling hourly load to annual total
load_profile_df = agent_mutation.elec.get_and_apply_agent_load_profiles(con, agent)
consumption_hourly = pd.Series(load_profile_df['consumption_hourly']).iloc[0]
del load_profile_df
# Using the scale offset factor of 1E6 for capacity factors
norm_scaled_wind_profiles_df = agent_mutation.elec.get_and_apply_normalized_hourly_resource_wind(con, agent)
generation_hourly = pd.Series(norm_scaled_wind_profiles_df['generation_hourly']).iloc[0]
del norm_scaled_wind_profiles_df
# Instantiate utilityrate5 model based on agent sector
if agent.loc['sector_abbr'] == 'res':
utilityrate = utility.default('WindPowerResidential')
else:
utilityrate = utility.default('WindPowerCommercial')
######################################
###--------- UTILITYRATE5 ---------###
###------- ELECTRICITYRATES -------###
######################################
# Use single monthly peak for TOU demand charge; options: 0=use TOU peak,1=use flat peak
utilityrate.ElectricityRates.TOU_demand_single_peak = 0 # ?
# Optionally enable/disable electricity_rate [years]
utilityrate.ElectricityRates.en_electricity_rates = 1
# Annual electricity rate escalation [%/year]
utilityrate.ElectricityRates.rate_escalation = [agent.loc['elec_price_escalator'] * 100] # convert decimal to %
# Enable time step sell rates [0/1]
utilityrate.ElectricityRates.ur_en_ts_sell_rate = 0
# Time step sell rates [0/1]
utilityrate.ElectricityRates.ur_ts_sell_rate = [0.]
# Set sell rate equal to buy rate [0/1]
utilityrate.ElectricityRates.ur_sell_eq_buy = 0
# Dictionary to map dGen compensation styles to PySAM options
nem_options = {'net metering':0, 'net billing':2, 'buy all sell all':4, 'none':2}
# Metering options [0=net energy metering,1=net energy metering with $ credits,2=net billing,3=net billing with carryover to next month,4=buy all - sell all]
utilityrate.ElectricityRates.ur_metering_option = nem_options[agent.loc['compensation_style']]
# Year end sell rate [$/kWh]
utilityrate.ElectricityRates.ur_nm_yearend_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
if agent.loc['compensation_style']=='none':
net_billing_sell_rate = 0.
else:
net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
# Restructure tariff object for PySAM compatibility
utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)
######################################
###--------- UTILITYRATE5 ---------###
###----------- LIFETIME -----------###
######################################
# Number of years in analysis [years]
utilityrate.Lifetime.analysis_period = agent.loc['economic_lifetime_yrs']
# Inflation rate [%]
utilityrate.Lifetime.inflation_rate = agent.loc['inflation_rate'] * 100
# Lifetime hourly system outputs [0/1]; Options: 0=hourly first year,1=hourly lifetime
utilityrate.Lifetime.system_use_lifetime_output = 0
######################################
###--------- UTILITYRATE5 ---------###
###-------- SYSTEM OUTPUT ---------###
######################################
# Annual energy degradation [%] -- Wind degradation already applied via 'derate_factor'
utilityrate.SystemOutput.degradation = [0.]
# System power generated [kW]
utilityrate.SystemOutput.gen = generation_hourly
######################################
###--------- UTILITYRATE5 ---------###
###-------- SYSTEM OUTPUT ---------###
######################################
# Electricity load (year 1) [kW]
utilityrate.Load.load = consumption_hourly
######################################
###--------- UTILITYRATE5 ---------###
###------------ EXECUTE -----------###
######################################
utilityrate.execute()
######################################
###----------- CASHLOAN -----------###
###----- FINANCIAL PARAMETERS -----###
######################################
# Initiate cashloan model and set market-specific variables
if agent.loc['sector_abbr'] == 'res':
loan = cashloan.default('WindPowerResidential')
loan.FinancialParameters.market = 0
else:
loan = cashloan.default('WindPowerCommercial')
loan.FinancialParameters.market = 1
loan.FinancialParameters.analysis_period = agent.loc['economic_lifetime_yrs']
loan.FinancialParameters.debt_fraction = 100 - (agent.loc['down_payment_fraction'] * 100)
loan.FinancialParameters.federal_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.7] # SAM default
loan.FinancialParameters.inflation_rate = agent.loc['inflation_rate'] * 100
loan.FinancialParameters.insurance_rate = 0
loan.FinancialParameters.loan_rate = agent.loc['loan_interest_rate'] * 100
loan.FinancialParameters.loan_term = agent.loc['loan_term_yrs']
loan.FinancialParameters.mortgage = 0 # default value - standard loan (no mortgage)
loan.FinancialParameters.prop_tax_assessed_decline = 5 # PySAM default
loan.FinancialParameters.prop_tax_cost_assessed_percent = 95 # PySAM default
loan.FinancialParameters.property_tax_rate = 0 # PySAM default
loan.FinancialParameters.real_discount_rate = agent.loc['real_discount_rate'] * 100
loan.FinancialParameters.salvage_percentage = 0
loan.FinancialParameters.state_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.3] # SAM default
loan.FinancialParameters.system_heat_rate = 0
loan.FinancialParameters.system_capacity = agent.loc['system_size_kw']
######################################
###----------- CASHLOAN -----------###
###--------- SYSTEM COSTS ---------###
######################################
# specify number of O&M types (0 = system only)
loan.SystemCosts.add_om_num_types = 0
# specify O&M variables
loan.SystemCosts.om_capacity = [agent.loc['system_om_per_kw'] + agent.loc['system_variable_om_per_kw']]
# Calculate and specify system costs
system_costs = agent.loc['system_capex_per_kw'] * agent.loc['system_size_kw']
batt_costs = 0
sales_tax = 0
direct_costs = (system_costs + batt_costs) * agent.loc['cap_cost_multiplier']
loan.SystemCosts.total_installed_cost = direct_costs + sales_tax
######################################
###----------- CASHLOAN -----------###
###---- DEPRECIATION PARAMETERS ---###
######################################
# Federal and State depreciation type
# Options: 0=none, 1=MACRS half year, 2=straight-line, 3=custom
if agent.loc['sector_abbr'] == 'res':
loan.Depreciation.depr_fed_type = 0
loan.Depreciation.depr_sta_type = 0
else:
loan.Depreciation.depr_fed_type = 1
loan.Depreciation.depr_sta_type = 0
######################################
###----------- CASHLOAN -----------###
###----- TAX CREDIT INCENTIVES ----###
######################################
# Federal percentage-based ITC percent [%]
loan.TaxCreditIncentives.itc_fed_percent = agent.loc['itc_fraction_of_capex'] * 100
######################################
###----------- CASHLOAN -----------###
###------ PAYMENT INCENTIVES ------###
######################################
# Specify payment incentives within Cashloan object
loan = process_incentives(loan, agent.loc['system_size_kw'], 0, 0, generation_hourly, agent)
######################################
###----------- CASHLOAN -----------###
###-------- BATTERY SYSTEM --------###
######################################
# Enable battery storage model [0/1]
loan.BatterySystem.en_batt = 0
######################################
###----------- CASHLOAN -----------###
###-------- SYSTEM OUTPUT ---------###
######################################
# Energy value [$] -- i.e. "bill savings"
loan.SystemOutput.annual_energy_value = utilityrate.Outputs.annual_energy_value
# Annual energy degradation [%] -- Wind degradation already applied via 'derate_factor'
loan.SystemOutput.degradation = [0.]
# Power generated by renewable resource [kW]
loan.SystemOutput.gen = utilityrate.SystemOutput.gen
######################################
###----------- CASHLOAN -----------###
###----------- LIFETIME -----------###
######################################
loan.Lifetime.system_use_lifetime_output = 0
######################################
###----------- CASHLOAN -----------###
###----- THIRD PARTY OWNERSHIP ----###
######################################
# Energy value [$]
loan.ThirdPartyOwnership.elec_cost_with_system = utilityrate.Outputs.elec_cost_with_system
# Energy value [$]
loan.ThirdPartyOwnership.elec_cost_without_system = utilityrate.Outputs.elec_cost_without_system
######################################
###-------- POSTPROCESSING --------###
###------------ RESULTS -----------###
######################################
# Get outputs from Utilityrate5 model
util_outputs = utilityrate.Outputs.export()
# Assign variables from Utilityrate5 outputs, others
system_kw = agent.loc['system_size_kw']
first_year_elec_bill_with_system = util_outputs['elec_cost_with_system_year1']
first_year_elec_bill_without_system = util_outputs['elec_cost_without_system_year1']
# PySAM cannot evaluate system sizes of 0 kW -- check and manually assign values if system_size_kw = 0
if system_kw > 0:
# Execute Cashloan model
loan.execute()
loan_outputs = loan.Outputs.export()
npv = loan_outputs['npv']
payback = loan_outputs['payback']
cash_flow = list(loan_outputs['cf_payback_with_expenses'])
cbi_total = loan_outputs['cbi_total']
cbi_total_fed = loan_outputs['cbi_total_fed']
cbi_total_oth = loan_outputs['cbi_total_oth']
cbi_total_sta = loan_outputs['cbi_total_sta']
cbi_total_uti = loan_outputs['cbi_total_uti']
ibi_total = loan_outputs['ibi_total']
ibi_total_fed = loan_outputs['ibi_total_fed']
ibi_total_oth = loan_outputs['ibi_total_oth']
ibi_total_sta = loan_outputs['ibi_total_sta']
ibi_total_uti = loan_outputs['ibi_total_uti']
cf_pbi_total = loan_outputs['cf_pbi_total']
pbi_total_fed = loan_outputs['cf_pbi_total_fed']
pbi_total_oth = loan_outputs['cf_pbi_total_oth']
pbi_total_sta = loan_outputs['cf_pbi_total_sta']
pbi_total_uti = loan_outputs['cf_pbi_total_uti']
else:
npv = 0.
payback = 30.1
cash_flow = [0.] * (agent.loc['economic_lifetime_yrs'] + 1)
cbi_total = cbi_total_fed = cbi_total_oth = cbi_total_sta = cbi_total_uti = 0.
ibi_total = ibi_total_fed = ibi_total_oth = ibi_total_sta = ibi_total_uti = 0.
cf_pbi_total = pbi_total_fed = pbi_total_oth = pbi_total_sta = pbi_total_uti = 0.
# change 0 value to 1 to avoid divide by zero errors
if first_year_elec_bill_without_system == 0:
first_year_elec_bill_without_system = 1.0
# Add outputs to agent df
first_year_elec_bill_savings = first_year_elec_bill_without_system - first_year_elec_bill_with_system
first_year_elec_bill_savings_frac = first_year_elec_bill_savings / first_year_elec_bill_without_system
avg_elec_price_cents_per_kwh = first_year_elec_bill_without_system / agent.loc['load_kwh_per_customer_in_bin']
# Specify variables to write to agent df -- also write placeholder batt values
agent.loc['system_kw'] = system_kw
agent.loc['npv'] = npv
agent.loc['payback_period'] = np.round(np.where(np.isnan(payback), 30.1, payback), 1).astype(float)
agent.loc['cash_flow'] = cash_flow
agent.loc['first_year_elec_bill_with_system'] = first_year_elec_bill_with_system
agent.loc['first_year_elec_bill_savings'] = first_year_elec_bill_savings
agent.loc['first_year_elec_bill_savings_frac'] = first_year_elec_bill_savings_frac
agent.loc['first_year_elec_bill_without_system'] = first_year_elec_bill_without_system
agent.loc['avg_elec_price_cents_per_kwh'] = avg_elec_price_cents_per_kwh
agent.loc['batt_kw'] = 0.
agent.loc['batt_kwh'] = 0.
agent.loc['batt_dispatch_profile'] = np.nan
# Specify incentive outputs
agent.loc['cbi'] = np.array({'cbi_total': cbi_total,
'cbi_total_fed': cbi_total_fed,
'cbi_total_oth': cbi_total_oth,
'cbi_total_sta': cbi_total_sta,
'cbi_total_uti': cbi_total_uti
})
agent.loc['ibi'] = np.array({'ibi_total': ibi_total,
'ibi_total_fed': ibi_total_fed,
'ibi_total_oth': ibi_total_oth,
'ibi_total_sta': ibi_total_sta,
'ibi_total_uti': ibi_total_uti
})
agent.loc['pbi'] = np.array({'pbi_total': cf_pbi_total,
'pbi_total_fed': pbi_total_fed,
'pbi_total_oth': pbi_total_oth,
'pbi_total_sta': pbi_total_sta,
'pbi_total_uti': pbi_total_uti
})
agent.loc['cash_incentives'] = ''
agent.loc['export_tariff_results'] = ''
out_cols = ['agent_id',
'system_kw',
'npv',
'payback_period',
'cash_flow',
'first_year_elec_bill_with_system',
'first_year_elec_bill_savings',
'first_year_elec_bill_savings_frac',
'first_year_elec_bill_without_system',
'avg_elec_price_cents_per_kwh',
'cbi',
'ibi',
'pbi',
'cash_incentives',
'export_tariff_results',
'batt_kw',
'batt_kwh',
'batt_dispatch_profile'
]
return agent[out_cols]
#%%
def process_tariff(utilityrate, tariff_dict, net_billing_sell_rate):
"""
Configure the Utilityrate5 PySAM model, processing the agent's rate json attributes (tariff_dict) to conform with PySAM input formatting.
Parameters
----------
utilityrate : 'PySAM.Utilityrate5'
Utilityrate5 PySAM object to configure.
tariff_dict : 'dict'
Agent rate attributes parsed from the URDB rate json.
net_billing_sell_rate : 'float'
Sell rate applied to exported generation under net billing.
Returns
-------
utilityrate: 'PySAM.Utilityrate5'
"""
######################################
###--------- UTILITYRATE5 ---------###
###--- FIXED AND ANNUAL CHARGES ---###
######################################
# Monthly fixed charge [$]
utilityrate.ElectricityRates.ur_monthly_fixed_charge = tariff_dict['fixed_charge']
# Annual minimum charge [$]
utilityrate.ElectricityRates.ur_annual_min_charge = 0. # not currently tracked in URDB rate attribute downloads
# Monthly minimum charge [$]
utilityrate.ElectricityRates.ur_monthly_min_charge = 0. # not currently tracked in URDB rate attribute downloads
######################################
###--------- UTILITYRATE5 ---------###
###-------- DEMAND CHARGES --------###
######################################
# Enable demand charge
utilityrate.ElectricityRates.ur_dc_enable = (tariff_dict['d_flat_exists']) | (tariff_dict['d_tou_exists'])
if utilityrate.ElectricityRates.ur_dc_enable:
if tariff_dict['d_flat_exists']:
# Reformat demand charge table from dGen format
n_periods = len(tariff_dict['d_flat_levels'][0])
n_tiers = len(tariff_dict['d_flat_levels'])
ur_dc_flat_mat = []
for period in range(n_periods):
for tier in range(n_tiers):
row = [period, tier+1, tariff_dict['d_flat_levels'][tier][period], tariff_dict['d_flat_prices'][tier][period]]
ur_dc_flat_mat.append(row)
# Demand rates (flat) table
utilityrate.ElectricityRates.ur_dc_flat_mat = ur_dc_flat_mat
if tariff_dict['d_tou_exists']:
# Reformat demand charge table from dGen format
n_periods = len(tariff_dict['d_tou_levels'][0])
n_tiers = len(tariff_dict['d_tou_levels'])
ur_dc_tou_mat = []
for period in range(n_periods):
for tier in range(n_tiers):
row = [period+1, tier+1, tariff_dict['d_tou_levels'][tier][period], tariff_dict['d_tou_prices'][tier][period]]
ur_dc_tou_mat.append(row)
# Demand rates (TOU) table
utilityrate.ElectricityRates.ur_dc_tou_mat = ur_dc_tou_mat
# Reformat 12x24 tables - original are indexed to 0, PySAM needs index starting at 1
d_wkday_12by24 = []
for m in range(len(tariff_dict['d_wkday_12by24'])):
row = [x+1 for x in tariff_dict['d_wkday_12by24'][m]]
d_wkday_12by24.append(row)
d_wkend_12by24 = []
for m in range(len(tariff_dict['d_wkend_12by24'])):
row = [x+1 for x in tariff_dict['d_wkend_12by24'][m]]
d_wkend_12by24.append(row)
# Demand charge weekday schedule
utilityrate.ElectricityRates.ur_dc_sched_weekday = d_wkday_12by24
# Demand charge weekend schedule
utilityrate.ElectricityRates.ur_dc_sched_weekend = d_wkend_12by24
######################################
###--------- UTILITYRATE5 ---------###
###-------- ENERGY CHARGES --------###
######################################
if tariff_dict['e_exists']:
# Dictionary to map dGen max usage units to PySAM options
max_usage_dict = {'kWh':0, 'kWh/kW':1, 'kWh daily':2, 'kWh/kW daily':3}
# If max usage units are 'kWh daily', divide max usage by 30 -- rate download procedure converts daily to monthly
modifier = 30. if tariff_dict['energy_rate_unit'] == 'kWh daily' else 1.
# Reformat energy charge table from dGen format
n_periods = len(tariff_dict['e_levels'][0])
n_tiers = len(tariff_dict['e_levels'])
ur_ec_tou_mat = []
for period in range(n_periods):
for tier in range(n_tiers):
row = [period+1, tier+1, tariff_dict['e_levels'][tier][period]/modifier, max_usage_dict[tariff_dict['energy_rate_unit']], tariff_dict['e_prices'][tier][period], net_billing_sell_rate]
ur_ec_tou_mat.append(row)
# Energy rates table
utilityrate.ElectricityRates.ur_ec_tou_mat = ur_ec_tou_mat
# Reformat 12x24 tables - original are indexed to 0, PySAM needs index starting at 1
e_wkday_12by24 = []
for m in range(len(tariff_dict['e_wkday_12by24'])):
row = [x+1 for x in tariff_dict['e_wkday_12by24'][m]]
e_wkday_12by24.append(row)
e_wkend_12by24 = []
for m in range(len(tariff_dict['e_wkend_12by24'])):
row = [x+1 for x in tariff_dict['e_wkend_12by24'][m]]
e_wkend_12by24.append(row)
# Energy charge weekday schedule
utilityrate.ElectricityRates.ur_ec_sched_weekday = e_wkday_12by24
# Energy charge weekend schedule
utilityrate.ElectricityRates.ur_ec_sched_weekend = e_wkend_12by24
return utilityrate
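# Illustrative note (assumed example, not part of the original dGen source): each row of
# ur_ec_tou_mat assembled above uses PySAM's six-column energy-rate layout,
# [period, tier, max_usage, max_usage_units, buy_rate, sell_rate]. A hypothetical
# single-period, single-tier rate of $0.12/kWh with a $0.03/kWh net billing sell rate
# and an effectively unlimited tier cap (1e38 is a common "no limit" convention) could
# therefore be encoded as:
# utilityrate.ElectricityRates.ur_ec_tou_mat = [[1, 1, 1e38, 0, 0.12, 0.03]]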
#%%
def process_incentives(loan, kw, batt_kw, batt_kwh, generation_hourly, agent):
######################################
###----------- CASHLOAN -----------###
###------ PAYMENT INCENTIVES ------###
######################################
# Read incentive dataframe from agent attributes
incentive_df = agent.loc['state_incentives']
# Check dtype of incentive_df - process incentives if pd.DataFrame, otherwise do not assign incentive values to cashloan
if isinstance(incentive_df, pd.DataFrame):
# Fill NaNs in incentive_df - assume max incentive duration of 5 years and max incentive value of $10,000
incentive_df = incentive_df.fillna(value={'incentive_duration_yrs' : 5, 'max_incentive_usd' : 10000})
# Filter for CBI's in incentive_df
cbi_df = (incentive_df.loc[pd.notnull(incentive_df['cbi_usd_p_w'])]
.sort_values(['cbi_usd_p_w'], axis=0, ascending=False)
.reset_index(drop=True)
)
# For multiple CBIs that are applicable to the agent, cap at 2 and use PySAM's "state" and "other" option
if len(cbi_df) == 1:
loan.PaymentIncentives.cbi_sta_amount = cbi_df['cbi_usd_p_w'].iloc[0]
loan.PaymentIncentives.cbi_sta_deprbas_fed = 0
loan.PaymentIncentives.cbi_sta_deprbas_sta = 0
loan.PaymentIncentives.cbi_sta_maxvalue = cbi_df['max_incentive_usd'].iloc[0]
loan.PaymentIncentives.cbi_sta_tax_fed = 0
loan.PaymentIncentives.cbi_sta_tax_sta = 0
elif len(cbi_df) >= 2:
loan.PaymentIncentives.cbi_sta_amount = cbi_df['cbi_usd_p_w'].iloc[0]
loan.PaymentIncentives.cbi_sta_deprbas_fed = 0
loan.PaymentIncentives.cbi_sta_deprbas_sta = 0
loan.PaymentIncentives.cbi_sta_maxvalue = cbi_df['max_incentive_usd'].iloc[0]
loan.PaymentIncentives.cbi_sta_tax_fed = 1
loan.PaymentIncentives.cbi_sta_tax_sta = 1
loan.PaymentIncentives.cbi_oth_amount = cbi_df['cbi_usd_p_w'].iloc[1]
loan.PaymentIncentives.cbi_oth_deprbas_fed = 0
loan.PaymentIncentives.cbi_oth_deprbas_sta = 0
loan.PaymentIncentives.cbi_oth_maxvalue = cbi_df['max_incentive_usd'].iloc[1]
loan.PaymentIncentives.cbi_oth_tax_fed = 1
loan.PaymentIncentives.cbi_oth_tax_sta = 1
else:
pass
# Filter for PBI's in incentive_df
pbi_df = (incentive_df.loc[pd.notnull(incentive_df['pbi_usd_p_kwh'])]
.sort_values(['pbi_usd_p_kwh'], axis=0, ascending=False)
.reset_index(drop=True)
)
# For multiple PBIs that are applicable to the agent, cap at 2 and use PySAM's "state" and "other" option
if len(pbi_df) == 1:
# Amount input [$/kWh] requires sequence -- repeat pbi_usd_p_kwh using incentive_duration_yrs
loan.PaymentIncentives.pbi_sta_amount = [pbi_df['pbi_usd_p_kwh'].iloc[0]] * int(pbi_df['incentive_duration_yrs'].iloc[0])
loan.PaymentIncentives.pbi_sta_escal = 0.
loan.PaymentIncentives.pbi_sta_tax_fed = 1
loan.PaymentIncentives.pbi_sta_tax_sta = 1
loan.PaymentIncentives.pbi_sta_term = pbi_df['incentive_duration_yrs'].iloc[0]
elif len(pbi_df) >= 2:
# Amount input [$/kWh] requires sequence -- repeat pbi_usd_p_kwh using incentive_duration_yrs
loan.PaymentIncentives.pbi_sta_amount = [pbi_df['pbi_usd_p_kwh'].iloc[0]] * int(pbi_df['incentive_duration_yrs'].iloc[0])
loan.PaymentIncentives.pbi_sta_escal = 0.
loan.PaymentIncentives.pbi_sta_tax_fed = 1
loan.PaymentIncentives.pbi_sta_tax_sta = 1
loan.PaymentIncentives.pbi_sta_term = pbi_df['incentive_duration_yrs'].iloc[0]
# Amount input [$/kWh] requires sequence -- repeat pbi_usd_p_kwh using incentive_duration_yrs
loan.PaymentIncentives.pbi_oth_amount = [pbi_df['pbi_usd_p_kwh'].iloc[1]] * int(pbi_df['incentive_duration_yrs'].iloc[1])
loan.PaymentIncentives.pbi_oth_escal = 0.
loan.PaymentIncentives.pbi_oth_tax_fed = 1
loan.PaymentIncentives.pbi_oth_tax_sta = 1
loan.PaymentIncentives.pbi_oth_term = pbi_df['incentive_duration_yrs'].iloc[1]
else:
pass
# Filter for IBI's in incentive_df
ibi_df = (incentive_df.loc[pd.notnull(incentive_df['ibi_pct'])]
.sort_values(['ibi_pct'], axis=0, ascending=False)
.reset_index(drop=True)
)
# For multiple IBIs that are applicable to the agent, cap at 2 and use PySAM's "state" and "other" option
# NOTE: this specifies IBI percentage, instead of IBI absolute amount
if len(ibi_df) == 1:
loan.PaymentIncentives.ibi_sta_percent = ibi_df['ibi_pct'].iloc[0]
loan.PaymentIncentives.ibi_sta_percent_deprbas_fed = 0
loan.PaymentIncentives.ibi_sta_percent_deprbas_sta = 0
loan.PaymentIncentives.ibi_sta_percent_maxvalue = ibi_df['max_incentive_usd'].iloc[0]
loan.PaymentIncentives.ibi_sta_percent_tax_fed = 1
loan.PaymentIncentives.ibi_sta_percent_tax_sta = 1
elif len(ibi_df) >= 2:
loan.PaymentIncentives.ibi_sta_percent = ibi_df['ibi_pct'].iloc[0]
loan.PaymentIncentives.ibi_sta_percent_deprbas_fed = 0
loan.PaymentIncentives.ibi_sta_percent_deprbas_sta = 0
loan.PaymentIncentives.ibi_sta_percent_maxvalue = ibi_df['max_incentive_usd'].iloc[0]
loan.PaymentIncentives.ibi_sta_percent_tax_fed = 1
loan.PaymentIncentives.ibi_sta_percent_tax_sta = 1
loan.PaymentIncentives.ibi_oth_percent = ibi_df['ibi_pct'].iloc[1]
loan.PaymentIncentives.ibi_oth_percent_deprbas_fed = 0
loan.PaymentIncentives.ibi_oth_percent_deprbas_sta = 0
loan.PaymentIncentives.ibi_oth_percent_maxvalue = ibi_df['max_incentive_usd'].iloc[1]
loan.PaymentIncentives.ibi_oth_percent_tax_fed = 1
loan.PaymentIncentives.ibi_oth_percent_tax_sta = 1
else:
pass
else:
pass
return loan
#%%
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_max_market_share(dataframe, max_market_share_df):
in_cols = list(dataframe.columns)
dataframe = dataframe.reset_index()
dataframe['business_model'] = 'host_owned'
dataframe['metric'] = 'payback_period'
# Convert metric value to integer as a primary key, then bound within max market share ranges
max_payback = max_market_share_df[max_market_share_df.metric == 'payback_period'].payback_period.max()
min_payback = max_market_share_df[max_market_share_df.metric == 'payback_period'].payback_period.min()
max_mbs = max_market_share_df[max_market_share_df.metric == 'percent_monthly_bill_savings'].payback_period.max()
min_mbs = max_market_share_df[max_market_share_df.metric == 'percent_monthly_bill_savings'].payback_period.min()
# copy the metric values to a new column to store an edited version
payback_period_bounded = dataframe['payback_period'].values.copy()
# where the metric value exceeds the corresponding max market curve bounds, set the value to the corresponding bound
payback_period_bounded[np.where((dataframe.metric == 'payback_period') & (dataframe['payback_period'] < min_payback))] = min_payback
payback_period_bounded[np.where((dataframe.metric == 'payback_period') & (dataframe['payback_period'] > max_payback))] = max_payback
payback_period_bounded[np.where((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['payback_period'] < min_mbs))] = min_mbs
payback_period_bounded[np.where((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe['payback_period'] > max_mbs))] = max_mbs
dataframe['payback_period_bounded'] = np.round(payback_period_bounded.astype(float), 1)
# scale and round to nearest int
dataframe['payback_period_as_factor'] = (dataframe['payback_period_bounded'] * 100).round().astype('int')
# add a scaled key to the max_market_share dataframe too
max_market_share_df['payback_period_as_factor'] = (max_market_share_df['payback_period'] * 100).round().astype('int')
# Join the max_market_share table and dataframe in order to select the ultimate mms based on the metric value.
dataframe = pd.merge(dataframe, max_market_share_df[['sector_abbr', 'max_market_share', 'metric', 'payback_period_as_factor', 'business_model']],
how = 'left', on = ['sector_abbr', 'metric','payback_period_as_factor','business_model'])
out_cols = in_cols + ['max_market_share', 'metric']
return dataframe[out_cols]
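# Illustrative note (worked example, not part of the original dGen source): the merge
# key built above turns a raw payback_period of e.g. 7.83 years into a value bounded to
# the max-market-share curve's range, rounds it to 7.8, and scales it to
# payback_period_as_factor = 780, which then joins against the identically scaled key
# in max_market_share_df to look up max_market_share.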
|
[
"settings.init_model_settings",
"PySAM.Utilityrate5.default",
"numpy.array",
"agent_mutation.elec.get_and_apply_agent_load_profiles",
"pandas.notnull",
"PySAM.Battwatts.default",
"numpy.where",
"PySAM.Cashloan.default",
"scipy.optimize.minimize_scalar",
"PySAM.BatteryTools.size_li_ion_battery",
"pandas.merge",
"numpy.isnan",
"pandas.Series",
"decorators.fn_timer",
"utility_functions.get_logger",
"agent_mutation.elec.get_and_apply_normalized_hourly_resource_solar",
"numpy.sum",
"utility_functions.make_con",
"agent_mutation.elec.get_and_apply_normalized_hourly_resource_wind"
] |
[((411, 432), 'utility_functions.get_logger', 'utilfunc.get_logger', ([], {}), '()\n', (430, 432), True, 'import utility_functions as utilfunc\n'), ((55274, 55332), 'decorators.fn_timer', 'decorators.fn_timer', ([], {'logger': 'logger', 'tab_level': '(2)', 'prefix': '""""""'}), "(logger=logger, tab_level=2, prefix='')\n", (55293, 55332), False, 'import decorators\n'), ((9715, 9745), 'settings.init_model_settings', 'settings.init_model_settings', ([], {}), '()\n', (9743, 9745), False, 'import settings\n'), ((9761, 9830), 'utility_functions.make_con', 'utilfunc.make_con', (['model_settings.pg_conn_string', 'model_settings.role'], {}), '(model_settings.pg_conn_string, model_settings.role)\n', (9778, 9830), True, 'import utility_functions as utilfunc\n'), ((9949, 10014), 'agent_mutation.elec.get_and_apply_agent_load_profiles', 'agent_mutation.elec.get_and_apply_agent_load_profiles', (['con', 'agent'], {}), '(con, agent)\n', (10002, 10014), False, 'import agent_mutation\n'), ((10228, 10306), 'agent_mutation.elec.get_and_apply_normalized_hourly_resource_solar', 'agent_mutation.elec.get_and_apply_normalized_hourly_resource_solar', (['con', 'agent'], {}), '(con, agent)\n', (10294, 10306), False, 'import agent_mutation\n'), ((19336, 19507), 'scipy.optimize.minimize_scalar', 'optimize.minimize_scalar', (['calc_system_performance'], {'args': '(pv, utilityrate, loan, batt, system_costs, True, 0)', 'bounds': '(0, max_system_kw)', 'method': '"""bounded"""', 'tol': 'tol'}), "(calc_system_performance, args=(pv, utilityrate,\n loan, batt, system_costs, True, 0), bounds=(0, max_system_kw), method=\n 'bounded', tol=tol)\n", (19360, 19507), False, 'from scipy import optimize\n'), ((19856, 19892), 'numpy.sum', 'np.sum', (['utilityrate.SystemOutput.gen'], {}), '(utilityrate.SystemOutput.gen)\n', (19862, 19892), True, 'import numpy as np\n'), ((20081, 20253), 'scipy.optimize.minimize_scalar', 'optimize.minimize_scalar', (['calc_system_performance'], {'args': '(pv, utilityrate, loan, batt, system_costs, False, 0)', 'bounds': '(0, max_system_kw)', 'method': '"""bounded"""', 'tol': 'tol'}), "(calc_system_performance, args=(pv, utilityrate,\n loan, batt, system_costs, False, 0), bounds=(0, max_system_kw), method=\n 'bounded', tol=tol)\n", (20105, 20253), False, 'from scipy import optimize\n'), ((20607, 20643), 'numpy.sum', 'np.sum', (['utilityrate.SystemOutput.gen'], {}), '(utilityrate.SystemOutput.gen)\n', (20613, 20643), True, 'import numpy as np\n'), ((25301, 25471), 'numpy.array', 'np.array', (["{'cbi_total': cbi_total, 'cbi_total_fed': cbi_total_fed, 'cbi_total_oth':\n cbi_total_oth, 'cbi_total_sta': cbi_total_sta, 'cbi_total_uti':\n cbi_total_uti}"], {}), "({'cbi_total': cbi_total, 'cbi_total_fed': cbi_total_fed,\n 'cbi_total_oth': cbi_total_oth, 'cbi_total_sta': cbi_total_sta,\n 'cbi_total_uti': cbi_total_uti})\n", (25309, 25471), True, 'import numpy as np\n'), ((25547, 25717), 'numpy.array', 'np.array', (["{'ibi_total': ibi_total, 'ibi_total_fed': ibi_total_fed, 'ibi_total_oth':\n ibi_total_oth, 'ibi_total_sta': ibi_total_sta, 'ibi_total_uti':\n ibi_total_uti}"], {}), "({'ibi_total': ibi_total, 'ibi_total_fed': ibi_total_fed,\n 'ibi_total_oth': ibi_total_oth, 'ibi_total_sta': ibi_total_sta,\n 'ibi_total_uti': ibi_total_uti})\n", (25555, 25717), True, 'import numpy as np\n'), ((25793, 25966), 'numpy.array', 'np.array', (["{'pbi_total': cf_pbi_total, 'pbi_total_fed': pbi_total_fed, 'pbi_total_oth':\n pbi_total_oth, 'pbi_total_sta': pbi_total_sta, 'pbi_total_uti':\n pbi_total_uti}"], {}), "({'pbi_total': 
cf_pbi_total, 'pbi_total_fed': pbi_total_fed,\n 'pbi_total_oth': pbi_total_oth, 'pbi_total_sta': pbi_total_sta,\n 'pbi_total_uti': pbi_total_uti})\n", (25801, 25966), True, 'import numpy as np\n'), ((28935, 28965), 'settings.init_model_settings', 'settings.init_model_settings', ([], {}), '()\n', (28963, 28965), False, 'import settings\n'), ((28981, 29050), 'utility_functions.make_con', 'utilfunc.make_con', (['model_settings.pg_conn_string', 'model_settings.role'], {}), '(model_settings.pg_conn_string, model_settings.role)\n', (28998, 29050), True, 'import utility_functions as utilfunc\n'), ((29143, 29208), 'agent_mutation.elec.get_and_apply_agent_load_profiles', 'agent_mutation.elec.get_and_apply_agent_load_profiles', (['con', 'agent'], {}), '(con, agent)\n', (29196, 29208), False, 'import agent_mutation\n'), ((29415, 29492), 'agent_mutation.elec.get_and_apply_normalized_hourly_resource_wind', 'agent_mutation.elec.get_and_apply_normalized_hourly_resource_wind', (['con', 'agent'], {}), '(con, agent)\n', (29480, 29492), False, 'import agent_mutation\n'), ((41873, 42043), 'numpy.array', 'np.array', (["{'cbi_total': cbi_total, 'cbi_total_fed': cbi_total_fed, 'cbi_total_oth':\n cbi_total_oth, 'cbi_total_sta': cbi_total_sta, 'cbi_total_uti':\n cbi_total_uti}"], {}), "({'cbi_total': cbi_total, 'cbi_total_fed': cbi_total_fed,\n 'cbi_total_oth': cbi_total_oth, 'cbi_total_sta': cbi_total_sta,\n 'cbi_total_uti': cbi_total_uti})\n", (41881, 42043), True, 'import numpy as np\n'), ((42119, 42289), 'numpy.array', 'np.array', (["{'ibi_total': ibi_total, 'ibi_total_fed': ibi_total_fed, 'ibi_total_oth':\n ibi_total_oth, 'ibi_total_sta': ibi_total_sta, 'ibi_total_uti':\n ibi_total_uti}"], {}), "({'ibi_total': ibi_total, 'ibi_total_fed': ibi_total_fed,\n 'ibi_total_oth': ibi_total_oth, 'ibi_total_sta': ibi_total_sta,\n 'ibi_total_uti': ibi_total_uti})\n", (42127, 42289), True, 'import numpy as np\n'), ((42365, 42538), 'numpy.array', 'np.array', (["{'pbi_total': cf_pbi_total, 'pbi_total_fed': pbi_total_fed, 'pbi_total_oth':\n pbi_total_oth, 'pbi_total_sta': pbi_total_sta, 'pbi_total_uti':\n pbi_total_uti}"], {}), "({'pbi_total': cf_pbi_total, 'pbi_total_fed': pbi_total_fed,\n 'pbi_total_oth': pbi_total_oth, 'pbi_total_sta': pbi_total_sta,\n 'pbi_total_uti': pbi_total_uti})\n", (42373, 42538), True, 'import numpy as np\n'), ((57521, 57750), 'pandas.merge', 'pd.merge', (['dataframe', "max_market_share_df[['sector_abbr', 'max_market_share', 'metric',\n 'payback_period_as_factor', 'business_model']]"], {'how': '"""left"""', 'on': "['sector_abbr', 'metric', 'payback_period_as_factor', 'business_model']"}), "(dataframe, max_market_share_df[['sector_abbr', 'max_market_share',\n 'metric', 'payback_period_as_factor', 'business_model']], how='left',\n on=['sector_abbr', 'metric', 'payback_period_as_factor', 'business_model'])\n", (57529, 57750), True, 'import pandas as pd\n'), ((3624, 3667), 'PySAM.BatteryTools.size_li_ion_battery', 'batt_tools.size_li_ion_battery', (['batt_inputs'], {}), '(batt_inputs)\n', (3654, 3667), True, 'import PySAM.BatteryTools as batt_tools\n'), ((10337, 10405), 'pandas.Series', 'pd.Series', (["norm_scaled_pv_cf_profiles_df['solar_cf_profile'].iloc[0]"], {}), "(norm_scaled_pv_cf_profiles_df['solar_cf_profile'].iloc[0])\n", (10346, 10405), True, 'import pandas as pd\n'), ((10538, 10569), 'numpy.sum', 'np.sum', (["pv['generation_hourly']"], {}), "(pv['generation_hourly'])\n", (10544, 10569), True, 'import numpy as np\n'), ((10645, 10689), 'PySAM.Battwatts.default', 'battery.default', 
(['"""PVWattsBatteryResidential"""'], {}), "('PVWattsBatteryResidential')\n", (10660, 10689), True, 'import PySAM.Battwatts as battery\n'), ((10715, 10758), 'PySAM.Battwatts.default', 'battery.default', (['"""PVWattsBatteryCommercial"""'], {}), "('PVWattsBatteryCommercial')\n", (10730, 10758), True, 'import PySAM.Battwatts as battery\n'), ((10843, 10887), 'PySAM.Utilityrate5.default', 'utility.default', (['"""PVWattsBatteryResidential"""'], {}), "('PVWattsBatteryResidential')\n", (10858, 10887), True, 'import PySAM.Utilityrate5 as utility\n'), ((10920, 10963), 'PySAM.Utilityrate5.default', 'utility.default', (['"""PVWattsBatteryCommercial"""'], {}), "('PVWattsBatteryCommercial')\n", (10935, 10963), True, 'import PySAM.Utilityrate5 as utility\n'), ((14666, 14711), 'PySAM.Cashloan.default', 'cashloan.default', (['"""PVWattsBatteryResidential"""'], {}), "('PVWattsBatteryResidential')\n", (14682, 14711), True, 'import PySAM.Cashloan as cashloan\n'), ((14781, 14825), 'PySAM.Cashloan.default', 'cashloan.default', (['"""PVWattsBatteryCommercial"""'], {}), "('PVWattsBatteryCommercial')\n", (14797, 14825), True, 'import PySAM.Cashloan as cashloan\n'), ((29755, 29794), 'PySAM.Utilityrate5.default', 'utility.default', (['"""WindPowerResidential"""'], {}), "('WindPowerResidential')\n", (29770, 29794), True, 'import PySAM.Utilityrate5 as utility\n'), ((29827, 29865), 'PySAM.Utilityrate5.default', 'utility.default', (['"""WindPowerCommercial"""'], {}), "('WindPowerCommercial')\n", (29842, 29865), True, 'import PySAM.Utilityrate5 as utility\n'), ((33490, 33530), 'PySAM.Cashloan.default', 'cashloan.default', (['"""WindPowerResidential"""'], {}), "('WindPowerResidential')\n", (33506, 33530), True, 'import PySAM.Cashloan as cashloan\n'), ((33600, 33639), 'PySAM.Cashloan.default', 'cashloan.default', (['"""WindPowerCommercial"""'], {}), "('WindPowerCommercial')\n", (33616, 33639), True, 'import PySAM.Cashloan as cashloan\n'), ((56424, 56523), 'numpy.where', 'np.where', (["((dataframe.metric == 'payback_period') & (dataframe['payback_period'] <\n min_payback))"], {}), "((dataframe.metric == 'payback_period') & (dataframe[\n 'payback_period'] < min_payback))\n", (56432, 56523), True, 'import numpy as np\n'), ((56561, 56660), 'numpy.where', 'np.where', (["((dataframe.metric == 'payback_period') & (dataframe['payback_period'] >\n max_payback))"], {}), "((dataframe.metric == 'payback_period') & (dataframe[\n 'payback_period'] > max_payback))\n", (56569, 56660), True, 'import numpy as np\n'), ((56702, 56811), 'numpy.where', 'np.where', (["((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe[\n 'payback_period'] < min_mbs))"], {}), "((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe[\n 'payback_period'] < min_mbs))\n", (56710, 56811), True, 'import numpy as np\n'), ((56845, 56954), 'numpy.where', 'np.where', (["((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe[\n 'payback_period'] > max_mbs))"], {}), "((dataframe.metric == 'percent_monthly_bill_savings') & (dataframe[\n 'payback_period'] > max_mbs))\n", (56853, 56954), True, 'import numpy as np\n'), ((10046, 10094), 'pandas.Series', 'pd.Series', (["load_profile_df['consumption_hourly']"], {}), "(load_profile_df['consumption_hourly'])\n", (10055, 10094), True, 'import pandas as pd\n'), ((29234, 29282), 'pandas.Series', 'pd.Series', (["load_profile_df['consumption_hourly']"], {}), "(load_profile_df['consumption_hourly'])\n", (29243, 29282), True, 'import pandas as pd\n'), ((29517, 29577), 'pandas.Series', 'pd.Series', 
(["norm_scaled_wind_profiles_df['generation_hourly']"], {}), "(norm_scaled_wind_profiles_df['generation_hourly'])\n", (29526, 29577), True, 'import pandas as pd\n'), ((24362, 24379), 'numpy.isnan', 'np.isnan', (['payback'], {}), '(payback)\n', (24370, 24379), True, 'import numpy as np\n'), ((41200, 41217), 'numpy.isnan', 'np.isnan', (['payback'], {}), '(payback)\n', (41208, 41217), True, 'import numpy as np\n'), ((49596, 49635), 'pandas.notnull', 'pd.notnull', (["incentive_df['cbi_usd_p_w']"], {}), "(incentive_df['cbi_usd_p_w'])\n", (49606, 49635), True, 'import pandas as pd\n'), ((51355, 51396), 'pandas.notnull', 'pd.notnull', (["incentive_df['pbi_usd_p_kwh']"], {}), "(incentive_df['pbi_usd_p_kwh'])\n", (51365, 51396), True, 'import pandas as pd\n'), ((53387, 53422), 'pandas.notnull', 'pd.notnull', (["incentive_df['ibi_pct']"], {}), "(incentive_df['ibi_pct'])\n", (53397, 53422), True, 'import pandas as pd\n')]
|
from pathlib import Path
import os
import re
from decimal import Decimal
import csv
import numpy
from Utils import TextProcessingUtils
from Utils import DefinedConstants
def readEmbeddingsFromTxtFile(inFile):
w2v = {}
with open(inFile, "r") as f:
for l in f.readlines():
if not l.strip():
continue
if l:
ar = l.strip().split()
# ar[0] is the token; the remaining fields are the vector components
v = [Decimal(ar[i]) for i in range(1, len(ar))]
w2v[ar[0]] = v
return w2v
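# Illustrative note (assumed example, not part of the original source): the input file
# is expected in the classical word2vec text format, one token followed by its vector
# components per line, e.g. a hypothetical 4-dimensional entry
# casa 0.418 0.24968 -0.41242 0.1217
# is stored as w2v['casa'] = [Decimal('0.418'), Decimal('0.24968'), Decimal('-0.41242'), Decimal('0.1217')].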
def readEmbeddingsFromTxtFileUsingVocab(inFile, vocab):
w2v = {}
with open(inFile, "r") as f:
for l in f.readlines():
if not l.strip():
continue
if l:
ar = l.strip().split()
if ar[0] in vocab:
# ar[0] is the token; the remaining fields are the vector components
v = [Decimal(ar[i]) for i in range(1, len(ar))]
w2v[ar[0]] = v
return w2v
def readTextFile(inFile):
out = []
with open(inFile, "r") as f:
for i in f.readlines():
if not i.strip():
continue
if i:
out.append(i.rstrip('\n') + '\n')  # readlines() keeps '\n'; avoid doubling it
return ''.join(out)
def saveAlignments(alingments, outFile, fileEncoding="utf-8"):
if len(alingments)>0:
with open(outFile, 'w',encoding=fileEncoding) as f:
for match in alingments:
f.write(match.toString()+"\n\n")
def readNewselaEmbeddingVocabulary(inFolder, language):
vocab = set()
regFilter = r'^.*\.'+language+'.0.txt$'
for dirpath, dirs, files in os.walk(inFolder):
for filename in files:
if re.match(regFilter, filename):
fname = os.path.join(dirpath,filename)
text = readTextFile(fname)
print("Read file "+fname)
vocab.update(TextProcessingUtils.getCleanEmbeddingModelTokens(text))
for i in range(1, 5):
# substitute from the original ".0" filename each time so all levels 1-4 are read
level_filename = re.sub("." + language + ".0.txt", "." + language + "." + str(i) + ".txt", filename)
fname = os.path.join(dirpath, level_filename)
text = readTextFile(fname)
if text:
vocab.update(TextProcessingUtils.getCleanEmbeddingModelTokens(text))
return vocab
def displayAlignments(alignments, detailed=True):
print ("Alignments:")
for alignment in alignments:
if detailed:
print(alignment.toString())
else:
print(alignment.getIndexAlignmentString())
print("")
def readTwoTextPerLineFileEmbeddingVocabulary(inFile, fistSentIndex, secondSentIndex):
vocab = set()
with open(inFile, "r") as f:
for l in f.readlines():
if not l.strip():
continue
if l:
ar = l.strip().split("\t")
vocab.update(TextProcessingUtils.getCleanEmbeddingModelTokens(ar[fistSentIndex]))
vocab.update(TextProcessingUtils.getCleanEmbeddingModelTokens(ar[secondSentIndex]))
return vocab
def convertArgToOption(param2value, args, key):
if args:
param2value[key] = args
def parseOptions(args):
param2value = {}
convertArgToOption(param2value, args.i, "input")
convertArgToOption(param2value, args.o, "output")
convertArgToOption(param2value, args.l, "language")
convertArgToOption(param2value, args.s, "similarity")
convertArgToOption(param2value, args.a, "aLv")
convertArgToOption(param2value, args.t, "aSt")
convertArgToOption(param2value, args.u, "aSt2")
convertArgToOption(param2value, args.e, "emb")
convertArgToOption(param2value, args.ll, "linelevel")
return param2value
def showNewselaUsageMessage():
print("Usage:\nprogram -i inFolder -o outFolder -l language -s similarityStrategy -a alignmentLevel -t alignmentStrategy"
+ " {-u SubLevelalignmentStrategy} {-e embeddingsTxtFile}\n"
+ "\"inFolder\" is the folder with the original newsela texts.\n"
+ "\"outFolder\" is the folder where the alignments will be stored.\n"
+ "\"language\" can be \""+DefinedConstants.SpanishLanguage+"\" or \""+DefinedConstants.EnglishLanguage+"\". Default: \""+DefinedConstants.EnglishLanguage+"\".\n"
+ "\"similarityStrategy\" can be \""+DefinedConstants.CNGstrategy+"\", \""+DefinedConstants.WAVGstrategy+"\", or \""+DefinedConstants.CWASAstrategy+"\", where the N in \""+DefinedConstants.CNGstrategy+"\" should be replaced for the desired n-gram size, e.g. \""+DefinedConstants.CNGstrategy.replace("N", 3+"")+"\". Default: \""+DefinedConstants.CNGstrategy.replace("N", 3+"")+"\".\n"
+ "\"alignmentLevel\" can be \""+DefinedConstants.ParagraphSepEmptyLineLevel+"\", \""+DefinedConstants.SentenceLevel+"\", or \""+DefinedConstants.ParagraphSepEmptyLineAndSentenceLevel+"\". Default: \""+DefinedConstants.SentenceLevel+"\".\n"
+ "\"alignmentStrategy\" can be \""+DefinedConstants.closestSimStrategy+"\" or \""+DefinedConstants.closestSimKeepingSeqStrategy+"\". Default: \""+DefinedConstants.closestSimStrategy+"\".\n"
+ "\"SubLevelalignmentStrategy\" can be \""+DefinedConstants.closestSimStrategy+"\" or \""+DefinedConstants.closestSimKeepingSeqStrategy+"\". Default: \""+DefinedConstants.closestSimStrategy+"\".\n"
+ "\"embeddingsTxtFile\" is the file with the embeddings using the classical word2vec txt format.\n"
)
def showCustomModelUsageMessage():
print("Usage:\nprogram -i inFile -o outFile -s similarityStrategy {-e embeddingsTxtFile}\n"
"\"inFile\" is a file with two tab-separated texts per line. The program will output a similarity score for each one of these text pairs.\n"
"\"outFile\" contains the original \"inFile\" tab-separated texts plus their similarity score.\n"
"\"similarityStrategy\" can be \""+DefinedConstants.CNGstrategy+"\", \""+DefinedConstants.WAVGstrategy+"\", or \""+DefinedConstants.CWASAstrategy+"\", where the N in \""+DefinedConstants.CNGstrategy+"\" should be replaced for the desired n-gram size, e.g. \""+DefinedConstants.CNGstrategy.replace("N", str(3)+"")+"\". Default: \""+DefinedConstants.CNGstrategy.replace("N", str(3)+"")+"\".\n"
"\"embeddingsTxtFile\" is the file with the embeddings using the classical word2vec txt format.\n"
)
def getOutputFileName(inFile, alignmentLevel, similarityStrategy, nGramSize):
simStr = similarityStrategy
if similarityStrategy == DefinedConstants.CNGstrategy:
simStr = simStr.replace("N", str(nGramSize))  # str.replace returns a new string
return inFile+"_"+ alignmentLevel+"_"+ simStr
def saveAlignmentsToCVS(alingments, outFile, fileEncoding="utf-8"):
with open(outFile, 'w',encoding=fileEncoding) as f:
for alingment in alingments:
f.write(alingment.toCVS()+"\n\n")
def getStats(alingments, nbrOfLineOrginal, nbrOfLineSimple, outFile):
data = numpy.zeros(len(alingments)).tolist()
for i in range(len(alingments)):
data[i] = alingments[i].getSimilarity()
histogram = calcHistogram(data, 0.0, 1.0, 10)
out = ""
out = outFile+";"+str(len(nbrOfLineOrginal))+"/"+str(getTotalWord(nbrOfLineOrginal))+";"
out += str(len(nbrOfLineSimple))+"/"+str(getTotalWord(nbrOfLineSimple))+";"
total =0.0
aboveTrashord=0.0
for i in range(len(histogram)):
total+=histogram[i]
if i>=4:
aboveTrashord+=histogram[i]
out += str(aboveTrashord)+";"
out += str(((aboveTrashord)/(total))) + "%;"
for i in range(len(histogram)):
out += str(histogram[i])+" ["+"{:.2f}".format((histogram[i]/total)*100.0)+"%]"+";"
return out
def getTotalWord(nbrOfLineOrginal):
x = 0
for sentence in nbrOfLineOrginal:
x+= sentence.getNbrOfWords()
return x
def calcHistogram(data, min, max, numBins):
result = numpy.zeros(numBins).tolist()
binSize = (max - min)/numBins
for d in data:
bin = ((d - min) / binSize)
if bin < 0:
bin=0
elif bin >= numBins:
bin = numBins -1
result[int(bin)] += 1
return result
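# Illustrative note (worked example, not part of the original source): with the values
# used in getStats (min=0.0, max=1.0, numBins=10), calcHistogram bins similarity scores
# into ten 0.1-wide buckets; e.g. data = [0.05, 0.42, 0.97, 1.0] yields
# [1, 0, 0, 0, 1, 0, 0, 0, 0, 2], since values >= max are clamped into the last bin.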
|
[
"Utils.DefinedConstants.CNGstrategy.replace",
"Utils.TextProcessingUtils.getCleanEmbeddingModelTokens",
"os.walk",
"os.path.join",
"re.match",
"numpy.zeros",
"decimal.Decimal"
] |
[((1639, 1656), 'os.walk', 'os.walk', (['inFolder'], {}), '(inFolder)\n', (1646, 1656), False, 'import os\n'), ((1706, 1735), 're.match', 're.match', (['regFilter', 'filename'], {}), '(regFilter, filename)\n', (1714, 1735), False, 'import re\n'), ((7973, 7993), 'numpy.zeros', 'numpy.zeros', (['numBins'], {}), '(numBins)\n', (7984, 7993), False, 'import numpy\n'), ((1761, 1792), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (1773, 1792), False, 'import os\n'), ((496, 510), 'decimal.Decimal', 'Decimal', (['ar[i]'], {}), '(ar[i])\n', (503, 510), False, 'from decimal import Decimal\n'), ((1906, 1960), 'Utils.TextProcessingUtils.getCleanEmbeddingModelTokens', 'TextProcessingUtils.getCleanEmbeddingModelTokens', (['text'], {}), '(text)\n', (1954, 1960), False, 'from Utils import TextProcessingUtils\n'), ((2142, 2173), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (2154, 2173), False, 'import os\n'), ((2934, 3001), 'Utils.TextProcessingUtils.getCleanEmbeddingModelTokens', 'TextProcessingUtils.getCleanEmbeddingModelTokens', (['ar[fistSentIndex]'], {}), '(ar[fistSentIndex])\n', (2982, 3001), False, 'from Utils import TextProcessingUtils\n'), ((3032, 3101), 'Utils.TextProcessingUtils.getCleanEmbeddingModelTokens', 'TextProcessingUtils.getCleanEmbeddingModelTokens', (['ar[secondSentIndex]'], {}), '(ar[secondSentIndex])\n', (3080, 3101), False, 'from Utils import TextProcessingUtils\n'), ((946, 960), 'decimal.Decimal', 'Decimal', (['ar[i]'], {}), '(ar[i])\n', (953, 960), False, 'from decimal import Decimal\n'), ((2286, 2340), 'Utils.TextProcessingUtils.getCleanEmbeddingModelTokens', 'TextProcessingUtils.getCleanEmbeddingModelTokens', (['text'], {}), '(text)\n', (2334, 2340), False, 'from Utils import TextProcessingUtils\n'), ((4683, 4732), 'Utils.DefinedConstants.CNGstrategy.replace', 'DefinedConstants.CNGstrategy.replace', (['"""N"""', "(3 + '')"], {}), "('N', 3 + '')\n", (4719, 4732), False, 'from Utils import DefinedConstants\n'), ((4617, 4666), 'Utils.DefinedConstants.CNGstrategy.replace', 'DefinedConstants.CNGstrategy.replace', (['"""N"""', "(3 + '')"], {}), "('N', 3 + '')\n", (4653, 4666), False, 'from Utils import DefinedConstants\n')]
|
from ai_safety_gridworlds.environments.shared import safety_game
from collections import defaultdict
import experiments.environment_helper as environment_helper
import numpy as np
class ModelFreeAUPAgent:
name = "Model-free AUP"
pen_epsilon, AUP_epsilon = .2, .9 # chance of choosing greedy action in training
default = {'lambd': 1./1.501, 'discount': .996, 'rpenalties': 30, 'episodes': 6000}
def __init__(self, env, lambd=default['lambd'], state_attainable=False, num_rewards=default['rpenalties'],
discount=default['discount'], episodes=default['episodes'], trials=50, use_scale=False):
"""Trains using the simulator and e-greedy exploration to determine a greedy policy.
:param env: Simulator.
:param lambd: Impact tuning parameter.
:param state_attainable: True - generate state indicator rewards; false - random rewards.
:param num_rewards: Size of the attainable set, |\mathcal{R}|.
:param discount: Discount factor used in the TD updates.
:param episodes: Number of training episodes per trial.
:param trials: Number of independent training trials.
"""
self.actions = range(env.action_spec().maximum + 1)
self.probs = [[1.0 / (len(self.actions) - 1) if i != k else 0 for i in self.actions] for k in self.actions]
self.discount = discount
self.episodes = episodes
self.trials = trials
self.lambd = lambd
self.state_attainable = state_attainable
self.use_scale = use_scale
if state_attainable:
self.name = 'Relative reachability'
self.attainable_set = environment_helper.derive_possible_rewards(env)
else:
self.attainable_set = [defaultdict(np.random.random) for _ in range(num_rewards)]
if len(self.attainable_set) == 0:
self.name = 'Standard' # no penalty applied!
self.train(env)
def train(self, env):
self.performance = np.zeros((self.trials, self.episodes // 10))
# 0: high-impact, incomplete; 1: high-impact, complete; 2: low-impact, incomplete; 3: low-impact, complete
self.counts = np.zeros(4)
for trial in range(self.trials):
self.attainable_Q = defaultdict(lambda: np.zeros((len(self.attainable_set), len(self.actions))))
self.AUP_Q = defaultdict(lambda: np.zeros(len(self.actions)))
if not self.state_attainable:
self.attainable_set = [defaultdict(np.random.random) for _ in range(len(self.attainable_set))]
self.epsilon = self.pen_epsilon
for episode in range(self.episodes):
if episode > 2.0 / 3 * self.episodes: # begin greedy exploration
self.epsilon = self.AUP_epsilon
time_step = env.reset()
while not time_step.last():
last_board = str(time_step.observation['board'])
action = self.behavior_action(last_board)
time_step = env.step(action)
self.update_greedy(last_board, action, time_step)
if episode % 10 == 0:
_, actions, self.performance[trial][episode // 10], _ = environment_helper.run_episode(self, env)
self.counts[int(self.performance[trial, -1]) + 2] += 1 # -2 goes to idx 0
env.reset()
def act(self, obs):
return self.AUP_Q[str(obs['board'])].argmax()
def behavior_action(self, board):
"""Returns the e-greedy action for the state board string."""
greedy = self.AUP_Q[board].argmax()
if np.random.random() < self.epsilon or len(self.actions) == 1:
return greedy
else: # choose anything else
return np.random.choice(self.actions, p=self.probs[greedy])
def get_penalty(self, board, action):
if len(self.attainable_set) == 0: return 0
action_attainable = self.attainable_Q[board][:, action]
null_attainable = self.attainable_Q[board][:, safety_game.Actions.NOTHING]
diff = action_attainable - null_attainable
# Scaling number or vector (per-AU)
if self.use_scale:
scale = sum(abs(null_attainable))
if scale == 0:
scale = 1
penalty = sum(abs(diff) / scale)
else:
scale = np.copy(null_attainable)
scale[scale == 0] = 1 # avoid division by zero
penalty = np.average(np.divide(abs(diff), scale))
# Scaled difference between taking action and doing nothing
return self.lambd * penalty # ImpactUnit is 0!
def update_greedy(self, last_board, action, time_step):
"""Perform TD update on observed reward."""
learning_rate = 1
new_board = str(time_step.observation['board'])
def calculate_update(attainable_idx=None):
"""Do the update for the main function (or the attainable function at the given index)."""
if attainable_idx is not None:
reward = self.attainable_set[attainable_idx](new_board) if self.state_attainable \
else self.attainable_set[attainable_idx][new_board]
new_Q, old_Q = self.attainable_Q[new_board][attainable_idx].max(), \
self.attainable_Q[last_board][attainable_idx, action]
else:
reward = time_step.reward - self.get_penalty(last_board, action)
new_Q, old_Q = self.AUP_Q[new_board].max(), self.AUP_Q[last_board][action]
return learning_rate * (reward + self.discount * new_Q - old_Q)
# Learn the attainable reward functions
for attainable_idx in range(len(self.attainable_set)):
self.attainable_Q[last_board][attainable_idx, action] += calculate_update(attainable_idx)
if self.state_attainable:
self.attainable_Q[last_board][:, action] = np.clip(self.attainable_Q[last_board][:, action], 0, 1)
self.AUP_Q[last_board][action] += calculate_update()
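# Illustrative note (summary sketch, not part of the original file): the penalty
# computed in get_penalty above is, per state-action pair,
# penalty = lambd * mean_i( |Q_i(s, a) - Q_i(s, noop)| / scale_i )
# taken over the attainable-utility Q-functions Q_i (or a single summed scale when
# use_scale is True), and it is subtracted from the environment reward before the
# TD update on AUP_Q in update_greedy.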
|
[
"numpy.clip",
"numpy.copy",
"numpy.random.choice",
"numpy.random.random",
"experiments.environment_helper.run_episode",
"numpy.zeros",
"experiments.environment_helper.derive_possible_rewards",
"collections.defaultdict"
] |
[((1883, 1926), 'numpy.zeros', 'np.zeros', (['(self.trials, self.episodes / 10)'], {}), '((self.trials, self.episodes / 10))\n', (1891, 1926), True, 'import numpy as np\n'), ((2065, 2076), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2073, 2076), True, 'import numpy as np\n'), ((1547, 1594), 'experiments.environment_helper.derive_possible_rewards', 'environment_helper.derive_possible_rewards', (['env'], {}), '(env)\n', (1589, 1594), True, 'import experiments.environment_helper as environment_helper\n'), ((3667, 3719), 'numpy.random.choice', 'np.random.choice', (['self.actions'], {'p': 'self.probs[greedy]'}), '(self.actions, p=self.probs[greedy])\n', (3683, 3719), True, 'import numpy as np\n'), ((4262, 4286), 'numpy.copy', 'np.copy', (['null_attainable'], {}), '(null_attainable)\n', (4269, 4286), True, 'import numpy as np\n'), ((5837, 5892), 'numpy.clip', 'np.clip', (['self.attainable_Q[last_board][:, action]', '(0)', '(1)'], {}), '(self.attainable_Q[last_board][:, action], 0, 1)\n', (5844, 5892), True, 'import numpy as np\n'), ((1644, 1673), 'collections.defaultdict', 'defaultdict', (['np.random.random'], {}), '(np.random.random)\n', (1655, 1673), False, 'from collections import defaultdict\n'), ((3523, 3541), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3539, 3541), True, 'import numpy as np\n'), ((2383, 2412), 'collections.defaultdict', 'defaultdict', (['np.random.random'], {}), '(np.random.random)\n', (2394, 2412), False, 'from collections import defaultdict\n'), ((3130, 3171), 'experiments.environment_helper.run_episode', 'environment_helper.run_episode', (['self', 'env'], {}), '(self, env)\n', (3160, 3171), True, 'import experiments.environment_helper as environment_helper\n')]
|
import six
import time
import signal
import multiprocessing
from functools import partial
import numpy as np
from astropy.utils.console import (_get_stdout, isatty, isiterable,
human_file_size, _CAN_RESIZE_TERMINAL,
terminal_size, color_print, human_time)
import contextlib
import warnings
try:
import builtins
except ImportError:
# python2
import __builtin__ as builtins
'''
Copyright (c) 2011-2016, Astropy Developers
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the Astropy Team nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
class ProgressBar(six.Iterator):
"""
A class to display a progress bar in the terminal.
It is designed to be used either with the ``with`` statement::
with ProgressBar(len(items)) as bar:
for item in enumerate(items):
bar.update()
or as a generator::
for item in ProgressBar(items):
item.process()
"""
def __init__(self, total_or_items, ipython_widget=False, file=None):
"""
Parameters
----------
total_or_items : int or sequence
If an int, the number of increments in the process being
tracked. If a sequence, the items to iterate over.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like object, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If `file` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the progress bar will be
completely silent.
"""
ipython_widget = False
# if ipython_widget:
# # Import only if ipython_widget, i.e., widget in IPython
# # notebook
# if ipython_major_version < 4:
# from IPython.html import widgets
# else:
# from ipywidgets import widgets
# from IPython.display import display
if file is None:
file = _get_stdout()
if not isatty(file) and not ipython_widget:
self.update = self._silent_update
self._silent = True
else:
self._silent = False
if isiterable(total_or_items):
self._items = iter(total_or_items)
self._total = len(total_or_items)
else:
try:
self._total = int(total_or_items)
except TypeError:
raise TypeError("First argument must be int or sequence")
else:
self._items = iter(range(self._total))
self._file = file
self._start_time = time.time()
self._human_total = human_file_size(self._total)
self._ipython_widget = ipython_widget
self._signal_set = False
if not ipython_widget:
self._should_handle_resize = (
_CAN_RESIZE_TERMINAL and self._file.isatty())
self._handle_resize()
if self._should_handle_resize:
signal.signal(signal.SIGWINCH, self._handle_resize)
self._signal_set = True
self.update(0)
def _handle_resize(self, signum=None, frame=None):
terminal_width = terminal_size(self._file)[1]
self._bar_length = terminal_width - 37
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._silent:
if exc_type is None:
self.update(self._total)
self._file.write('\n')
self._file.flush()
if self._signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
def __iter__(self):
return self
def __next__(self):
try:
rv = next(self._items)
except StopIteration:
self.__exit__(None, None, None)
raise
else:
self.update()
return rv
def update(self, value=None):
"""
Update progress bar via the console or notebook accordingly.
"""
# Update self.value
if value is None:
value = self._current_value + 1
self._current_value = value
# Choose the appropriate environment
if self._ipython_widget:
self._update_ipython_widget(value)
else:
self._update_console(value)
def _update_console(self, value=None):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
if self._total == 0:
frac = 1.0
else:
frac = float(value) / float(self._total)
file = self._file
write = file.write
if frac > 1:
bar_fill = int(self._bar_length)
else:
bar_fill = int(float(self._bar_length) * frac)
write('\r|')
color_print('=' * bar_fill, 'blue', file=file, end='')
if bar_fill < self._bar_length:
color_print('>', 'green', file=file, end='')
write('-' * (self._bar_length - bar_fill - 1))
write('|')
if value >= self._total:
t = time.time() - self._start_time
prefix = ' '
elif value <= 0:
t = None
prefix = ''
else:
t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
prefix = ' ETA '
write(' {0:>4s}/{1:>4s}'.format(
human_file_size(value),
self._human_total))
write(' ({0:>6s}%)'.format('{0:.2f}'.format(frac * 100.0)))
write(prefix)
if t is not None:
write(human_time(t))
self._file.flush()
def _update_ipython_widget(self, value=None):
"""
Update the progress bar to the given value (out of a total
given to the constructor).
This method is for use in the IPython notebook 2+.
"""
pass
# Create and display an empty progress bar widget,
# if none exists.
# if not hasattr(self, '_widget'):
# # Import only if an IPython widget, i.e., widget in iPython NB
# if ipython_major_version < 4:
# from IPython.html import widgets
# self._widget = widgets.FloatProgressWidget()
# else:
# from ipywidgets import widgets
# self._widget = widgets.FloatProgress()
# from IPython.display import display
# display(self._widget)
# self._widget.value = 0
# # Calculate percent completion, and update progress bar
# percent = (value / self._total) * 100
# self._widget.value = percent
# self._widget.description = \
# ' ({0:>6s}%)'.format('{0:.2f}'.format(percent))
def _silent_update(self, value=None):
pass
@classmethod
def map(cls, function, items, multiprocess=False, file=None, chunksize=100,
item_len=None, nprocesses=None, **pool_kwargs):
"""
Does a `map` operation while displaying a progress bar with
percentage complete.
::
def work(i):
print(i)
ProgressBar.map(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, optional
If `True`, use the `multiprocessing` module to distribute each
task to a different processor core.
file : writeable file-like object, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If `file` is not a tty (as determined by
calling its `isatty` member, if any), the scrollbar will
be completely silent.
chunksize : int, optional
Update the progress bar at least every *chunksize* steps (default: 100).
If ``multiprocess`` is `True`, this is also the size of the chunks
of ``items`` that are submitted as separate tasks to the process
pool. A large chunksize may make the job complete faster if
``items`` is very long.
"""
results = []
if file is None:
file = _get_stdout()
if item_len is not None:
assert isinstance(item_len, int)
if hasattr(items, "__len__"):
assert item_len == len(items)
else:
if hasattr(items, "__len__"):
item_len = len(items)
else:
# Will convert to iterable. Not a good thing to do with
# large inputs.
items = list(items)
item_len = len(items)
with cls(item_len, file=file) as bar:
if not multiprocess:
# Here chunksize is just how frequently the progress gets
# updated
if chunksize is None:
chunksize = np.floor(item_len / 100.).astype(int)
for i, item in enumerate(items):
results.append(function(item))
if (i % chunksize) == 0:
bar.update(i)
else:
max_proc = multiprocessing.cpu_count()
if nprocesses is None:
nprocesses = max_proc
elif nprocesses > max_proc:
nprocesses = max_proc
if chunksize is None:
chunksize = choose_chunksize(nprocesses, item_len)
pool = multiprocessing.Pool(nprocesses, **pool_kwargs)
for i, out in enumerate(pool.imap_unordered(function,
items,
chunksize=chunksize)):
bar.update(i)
results.append(out)
pool.close()
pool.join()
return results
'''
Copyright (c) 2014, spectral-cube developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
@contextlib.contextmanager
def _map_context(numcores, verbose=False, num_jobs=None, chunksize=None,
**pool_kwargs):
"""
Mapping context manager to allow parallel mapping or regular mapping
depending on the number of cores specified.
"""
if verbose:
if numcores is not None and numcores > 1:
parallel = True
else:
numcores = 1
parallel = False
map = lambda func, items: \
ProgressBar.map(func, items,
nprocesses=numcores,
multiprocess=parallel,
item_len=num_jobs,
chunksize=chunksize,
**pool_kwargs)
else:
if numcores is not None and numcores > 1:
try:
import multiprocessing
pool = multiprocessing.Pool(processes=numcores, **pool_kwargs)
if chunksize is None:
chunksize = 1
map = partial(pool.map, chunksize=chunksize)
parallel = True
except ImportError:
map = builtins.map
warnings.warn("Could not import multiprocessing. "
"map will be non-parallel.")
parallel = False
else:
parallel = False
map = builtins.map
try:
yield map
finally:
# ProgressBar.map already closes the pool
if not verbose and parallel:
pool.close()
pool.join()
def choose_chunksize(nprocesses, njobs):
'''
Split the chunks into roughly equal portions.
'''
# Auto split into close to equal chunks
if njobs % nprocesses == 0:
chunksize = njobs // nprocesses
else:
# Split into smaller chunks that are still
# roughly equal, but won't have any small
# leftovers that would slow things down
chunksize = njobs // (nprocesses + 1)
return chunksize if chunksize > 0 else 1
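# Illustrative note (usage sketch, not part of the original file):
# with _map_context(numcores=4, num_jobs=len(items)) as map_fn:
#     results = list(map_fn(work, items))
# falls back to the builtin (serial) map when numcores is None or 1, uses a
# multiprocessing.Pool otherwise, and routes through ProgressBar.map when verbose=True
# so a progress bar is displayed.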
|
[
"astropy.utils.console.color_print",
"astropy.utils.console.terminal_size",
"signal.signal",
"astropy.utils.console.human_time",
"astropy.utils.console.human_file_size",
"numpy.floor",
"multiprocessing.cpu_count",
"astropy.utils.console.isatty",
"functools.partial",
"multiprocessing.Pool",
"astropy.utils.console.isiterable",
"warnings.warn",
"astropy.utils.console._get_stdout",
"time.time"
] |
[((3767, 3793), 'astropy.utils.console.isiterable', 'isiterable', (['total_or_items'], {}), '(total_or_items)\n', (3777, 3793), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((4200, 4211), 'time.time', 'time.time', ([], {}), '()\n', (4209, 4211), False, 'import time\n'), ((4240, 4268), 'astropy.utils.console.human_file_size', 'human_file_size', (['self._total'], {}), '(self._total)\n', (4255, 4268), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((6449, 6503), 'astropy.utils.console.color_print', 'color_print', (["('=' * bar_fill)", '"""blue"""'], {'file': 'file', 'end': '""""""'}), "('=' * bar_fill, 'blue', file=file, end='')\n", (6460, 6503), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((3563, 3576), 'astropy.utils.console._get_stdout', '_get_stdout', ([], {}), '()\n', (3574, 3576), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((4775, 4800), 'astropy.utils.console.terminal_size', 'terminal_size', (['self._file'], {}), '(self._file)\n', (4788, 4800), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((6556, 6600), 'astropy.utils.console.color_print', 'color_print', (['""">"""', '"""green"""'], {'file': 'file', 'end': '""""""'}), "('>', 'green', file=file, end='')\n", (6567, 6600), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((9954, 9967), 'astropy.utils.console._get_stdout', '_get_stdout', ([], {}), '()\n', (9965, 9967), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((3593, 3605), 'astropy.utils.console.isatty', 'isatty', (['file'], {}), '(file)\n', (3599, 3605), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((4578, 4629), 'signal.signal', 'signal.signal', (['signal.SIGWINCH', 'self._handle_resize'], {}), '(signal.SIGWINCH, self._handle_resize)\n', (4591, 4629), False, 'import signal\n'), ((5172, 5218), 'signal.signal', 'signal.signal', (['signal.SIGWINCH', 'signal.SIG_DFL'], {}), '(signal.SIGWINCH, signal.SIG_DFL)\n', (5185, 5218), False, 'import signal\n'), ((6729, 6740), 'time.time', 'time.time', ([], {}), '()\n', (6738, 6740), False, 'import time\n'), ((7028, 7050), 'astropy.utils.console.human_file_size', 'human_file_size', (['value'], {}), '(value)\n', (7043, 7050), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((7218, 7231), 'astropy.utils.console.human_time', 'human_time', (['t'], {}), '(t)\n', (7228, 7231), False, 'from astropy.utils.console import _get_stdout, isatty, isiterable, human_file_size, _CAN_RESIZE_TERMINAL, terminal_size, color_print, human_time\n'), ((10941, 10968), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (10966, 10968), False, 'import 
multiprocessing\n'), ((11270, 11317), 'multiprocessing.Pool', 'multiprocessing.Pool', (['nprocesses'], {}), '(nprocesses, **pool_kwargs)\n', (11290, 11317), False, 'import multiprocessing\n'), ((14088, 14143), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'numcores'}), '(processes=numcores, **pool_kwargs)\n', (14108, 14143), False, 'import multiprocessing\n'), ((14239, 14277), 'functools.partial', 'partial', (['pool.map'], {'chunksize': 'chunksize'}), '(pool.map, chunksize=chunksize)\n', (14246, 14277), False, 'from functools import partial\n'), ((14393, 14470), 'warnings.warn', 'warnings.warn', (['"""Could not import multiprocessing. map will be non-parallel."""'], {}), "('Could not import multiprocessing. map will be non-parallel.')\n", (14406, 14470), False, 'import warnings\n'), ((6891, 6902), 'time.time', 'time.time', ([], {}), '()\n', (6900, 6902), False, 'import time\n'), ((10675, 10701), 'numpy.floor', 'np.floor', (['(item_len / 100.0)'], {}), '(item_len / 100.0)\n', (10683, 10701), True, 'import numpy as np\n')]
|
import numpy
from keras.models import Sequential
from keras.layers import Dense
# load the Pima Indians diabetes dataset from the CSV file
# fix random seed for reproducibility
numpy.random.seed(7)
dataset = numpy.loadtxt(
"./data/pima-indians-diabetes.csv", delimiter=","
)
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
#create model
model = Sequential()
model.add(Dense(12, input_dim=8, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
#compile model
model.compile(
loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']
)
#fit the model
model.fit(X, Y, epochs=150, batch_size=10)
#evaluate the model
scores = model.evaluate(X, Y)
print(
"\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100)
)
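# Added illustrative follow-up (not part of the original script): use the trained model
# for inference; model.predict returns sigmoid probabilities, which are thresholded at
# 0.5 to obtain 0/1 class labels.
predictions = (model.predict(X) > 0.5).astype(int)
print(predictions[:10].flatten())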
|
[
"keras.layers.Dense",
"numpy.loadtxt",
"numpy.random.seed",
"keras.models.Sequential"
] |
[((162, 182), 'numpy.random.seed', 'numpy.random.seed', (['(7)'], {}), '(7)\n', (179, 182), False, 'import numpy\n'), ((193, 257), 'numpy.loadtxt', 'numpy.loadtxt', (['"""./data/pima-indians-diabetes.csv"""'], {'delimiter': '""","""'}), "('./data/pima-indians-diabetes.csv', delimiter=',')\n", (206, 257), False, 'import numpy\n'), ((366, 378), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (376, 378), False, 'from keras.models import Sequential\n'), ((389, 430), 'keras.layers.Dense', 'Dense', (['(12)'], {'input_dim': '(8)', 'activation': '"""relu"""'}), "(12, input_dim=8, activation='relu')\n", (394, 430), False, 'from keras.layers import Dense\n'), ((442, 469), 'keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (447, 469), False, 'from keras.layers import Dense\n'), ((481, 511), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (486, 511), False, 'from keras.layers import Dense\n')]
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
#################Create EvalCallBack ########################
"""
import numpy as np
from mindspore.train.callback import Callback
from mindspore.train.serialization import load_param_into_net, load_checkpoint
from mindspore.communication.management import get_rank
from mindspore import Tensor, save_checkpoint
from src.c3d_model import C3D
from src.model_utils.config import config
from src.dataset import classification_dataset
class EvalCallBack(Callback):
"""EvalCallBack"""
def __init__(self, model, eval_per_epoch, epoch_per_eval, save_ckpt_path, train_batch_num):
config.load_type = 'test'
self.model = model
self.rank = get_rank() if config.is_distributed else 0
self.eval_per_epoch = eval_per_epoch
self.epoch_per_eval = epoch_per_eval
self.save_ckpt_path = save_ckpt_path
self.eval_dataset, self.eval_dataset_len = classification_dataset(config.batch_size, 1, shuffle=True,
repeat_num=1, drop_remainder=True)
self.best_ckpt = 0
self.best_acc = 0
self.train_batch_num = train_batch_num
def epoch_end(self, run_context):
"""culculate acc"""
network = C3D(config.num_classes)
cb_param = run_context.original_args()
cur_epoch = cb_param.cur_epoch_num
save_ckpt_path = self.save_ckpt_path + str(self.rank) + '-' + str(cur_epoch) + '_' \
+ str(self.train_batch_num) + '.ckpt'
# pre_trained
param_dict = load_checkpoint(save_ckpt_path)
param_not_load = load_param_into_net(network, param_dict)
batch_num = self.eval_dataset.get_dataset_size()
print('ckpt:', save_ckpt_path)
print('param_not_load', param_not_load)
if cur_epoch % self.eval_per_epoch == 0:
network.set_train(mode=False)
acc_sum, sample_num = 0, 0
for idnum, (input_data, label) in enumerate(self.eval_dataset):
predictions = network(Tensor(input_data))
predictions, label = predictions.asnumpy(), label.asnumpy()
acc = np.sum(np.argmax(predictions, 1) == label[:, -1])
batch_size = label.shape[0]
acc_sum += acc
sample_num += batch_size
if idnum % 20 == 0:
print("setep: {}/{}, acc: {}".format(idnum + 1, batch_num, acc / batch_size))
top_1 = acc_sum / sample_num
print('eval result: top_1 {:.3f}%'.format(top_1 * 100))
if self.best_acc < top_1:
self.best_acc = top_1
self.best_ckpt = cur_epoch
best_ckpt_file = 'best_acc.ckpt'
best_ckpt_file = self.save_ckpt_path + str(self.rank) + best_ckpt_file
save_checkpoint(network, best_ckpt_file)
print('best result: top_1 {:.3f}%'.format(self.best_acc * 100))
print('best ckpt:{}'.format(self.best_ckpt))
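# Illustrative usage sketch (added; not part of the original file). The callback is
# typically passed to Model.train together with a checkpoint callback; the names
# model, epoch_size, train_dataset, ckpt_cb, ckpt_dir and batch_num are placeholders.
# eval_cb = EvalCallBack(model, eval_per_epoch=1, epoch_per_eval={},
#                        save_ckpt_path=ckpt_dir, train_batch_num=batch_num)
# model.train(epoch_size, train_dataset, callbacks=[ckpt_cb, eval_cb])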
|
[
"mindspore.train.serialization.load_checkpoint",
"numpy.argmax",
"src.dataset.classification_dataset",
"mindspore.save_checkpoint",
"mindspore.train.serialization.load_param_into_net",
"src.c3d_model.C3D",
"mindspore.Tensor",
"mindspore.communication.management.get_rank"
] |
[((1565, 1662), 'src.dataset.classification_dataset', 'classification_dataset', (['config.batch_size', '(1)'], {'shuffle': '(True)', 'repeat_num': '(1)', 'drop_remainder': '(True)'}), '(config.batch_size, 1, shuffle=True, repeat_num=1,\n drop_remainder=True)\n', (1587, 1662), False, 'from src.dataset import classification_dataset\n'), ((1918, 1941), 'src.c3d_model.C3D', 'C3D', (['config.num_classes'], {}), '(config.num_classes)\n', (1921, 1941), False, 'from src.c3d_model import C3D\n'), ((2231, 2262), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['save_ckpt_path'], {}), '(save_ckpt_path)\n', (2246, 2262), False, 'from mindspore.train.serialization import load_param_into_net, load_checkpoint\n'), ((2288, 2328), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['network', 'param_dict'], {}), '(network, param_dict)\n', (2307, 2328), False, 'from mindspore.train.serialization import load_param_into_net, load_checkpoint\n'), ((1336, 1346), 'mindspore.communication.management.get_rank', 'get_rank', ([], {}), '()\n', (1344, 1346), False, 'from mindspore.communication.management import get_rank\n'), ((3516, 3556), 'mindspore.save_checkpoint', 'save_checkpoint', (['network', 'best_ckpt_file'], {}), '(network, best_ckpt_file)\n', (3531, 3556), False, 'from mindspore import Tensor, save_checkpoint\n'), ((2717, 2735), 'mindspore.Tensor', 'Tensor', (['input_data'], {}), '(input_data)\n', (2723, 2735), False, 'from mindspore import Tensor, save_checkpoint\n'), ((2842, 2867), 'numpy.argmax', 'np.argmax', (['predictions', '(1)'], {}), '(predictions, 1)\n', (2851, 2867), True, 'import numpy as np\n')]
|
# Ising Model in Python.
# 28-03-2019.
# Written by <NAME>.
# Python 3.7.
# NumPy has been installed and used in this project.
# Numba has been installed and used in this project.
# Tools used: Visual Studio Code, GitHub Desktop.
from Input_param_reader import Ising_input # Python Function in the same directory as the Main.py File
from Montecarlo import Monte_Carlo # Python Function in the same directory as the Main.py File
from numba import jit # Python Package to be downloaded manually
from Path import Output_Path_Set # Python Function to create output folder by date and time and set it as working directory
import random
import numpy
import time
import math
import csv
import os
time_start = time.perf_counter() # For program runtime profiling. time.clock() has been deprecated
i=0 # Dummy Integer
j=0 # Dummy Integer
k=0 # Dummy Integer
m=0 # Dummy Integer
n=0 # Dummy Integer
d=0 # Dummy Integer
nrows=0 # Number of Rows in A
ncols=0 # Number of Columns in A
nlayers=0 # Number of Layers in Quasi 3D Matrix
temp=0 # Temperature
beta=0 # Inverse Temperature
ConfigType=0 # Starting Configuration type
npass=0 # number of passes for MC algorithm
ipass=0 # the current pass number
nequil=0 # number of equilibration steps
trial_spin=0 # values of changed spin
high_temp=0 # starting temp for scan
low_temp=0 # final temp for scan
temp_interval=0 # interval between scan points
nscans=0 # number of scans (each at diff T)
iscan=1 # current number
iscan1=0 # current number
DeltaU=0 # change in energy between 2 configs
log_eta=0 # log of random number to compare to
magnetization=0 # magnetization of all spins in lattice
magnetization_ave=0 # cumulative average magnetization
magnetization2_ave=0 # cumulative average of mag. squared
energy=0 # energy of all spins in lattice
energy_ave=0 # cumulative average of energy
energy2_ave=0 # cumulative average of energy squared
output_count=0 # Number of times things have been added to averages
ran0=0 # T B C
iterator=0 # to be used with for loop / dummy operation
iterator2=0 # to be used for loop / dummy operations
print("\n")
print("MONTE CARLO QUASI 3D ISING MODEL\n")
print("Monte Carlo Statistics for Quasi 3D Ising Model with periodic boundary conditions\n")
print("The critical temperature is approximately 2.3, as seen on Chandler p. 123.\n")
# This section is for reading input parameters and assigning it to global variables
nrows, ncols, nlayers, npass, nequil, high_temp, low_temp, temp_interval, ConfigType=Ising_input()
# End of input parameter reader section
iterator = nrows # Setting iterator to be used as number of rows value
iterator2 = ncols # Setting iterator to be used as number of columns value
if(nrows%2!=0):
iterator+=1
if(ncols%2!=0):
iterator2+=1
print("Running program for %d rows, %d columns and %d layers\n" % (iterator,iterator2,nlayers))
# Matrix arrays are stored as a[depth,row,column] manner in Numpy
a=numpy.ones((nlayers,iterator,iterator2),dtype=int)
start_matrix=a
# Functions
# Function to generate uniform random numbers
@jit(nopython=True)
def pick_random(ran0):
ran0=round(random.uniform(0,1),12)
return ran0
# End of function
# Function to obtain magnetization value
@jit(nopython=True)
def magnetization_sum(nlayers,iterator,iterator2,a):
return numpy.sum(a[0:nlayers,1:iterator-1,1:iterator-1])/(nlayers*iterator*iterator2*1.0)
# End of function
path=Output_Path_Set()
input_config=open("Input_Config.csv","w+") # To write input configuration to output folder in a seperate file for future use.
input_config.write("Number of Rows :"+str(nrows))
input_config.write("\nNumber of Columns :"+str(ncols))
input_config.write("\nValue of npass :"+str(npass))
input_config.write("\nValue of nequil :"+str(nequil))
input_config.write("\nValue of high_temp :"+str(high_temp))
input_config.write("\nValue of low_temp :"+str(low_temp))
input_config.write("\nValue of temp_interval :"+str(temp_interval))
input_config.write("\nConfigType :"+str(ConfigType))
input_config.close()
spin_attribute = open("spin_array_attribute.csv", "w")
spin_attribute.write("number of rows :"+str(nrows))
spin_attribute.write("\nnumber of columns :"+str(ncols))
spin_attribute.write("\nnumber of layers :"+str(nlayers))
nscans=int((high_temp-low_temp)/temp_interval+1) # Determining the number of scans
spin_attribute.write("\nnumber of scans :"+str(nscans))
spin_attribute.write("\n2")
spin_attribute.close()
spin = open("spin_array.csv","w+")
spin_writer=csv.writer(spin)
spin_row=["temp","i","j","k","a[i,j]"]
spin_writer.writerow(spin_row)
magnet = open("magnetization.csv","w+")
magnet.write("Temp , Ave_magnetization , Ave_magnetization^2 , Susceptibility")
magnet.write("\n")
magnet_writer=csv.writer(magnet)
energyObj = open("energy.csv","w+")
energyObj.write("Temp , Ave_energy , Ave_energy^2 , C_v")
energyObj.write("\n")
energy_writer=csv.writer(energyObj)
# Section for choosing Configtype
if(ConfigType==1):
# Checkerboard Pattern Matrix
start_matrix[1::2,::2,::2] = -1 # Depth
start_matrix[::2,1::2,::2] = -1 # Row
start_matrix[::2,::2,1::2] = -1 # Column
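    # Added note: the three slice assignments above flip the sign on alternating depth,
    # row and column indices, producing a 3D checkerboard of +1/-1 spins.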
elif(ConfigType==2):
# Interface Pattern Matrix
for k in range(0,nlayers): # Depth
for i in range(0,iterator): # Row
for j in range(0,iterator2): # Column
if(j>=iterator2/2):
dummyval=-1
else:
dummyval=1
start_matrix[:,:,j]=dummyval
dummyval=0
elif(ConfigType==3):
# Unequal Interface Pattern Matrix
for k in range(0,nlayers): # Depth
for i in range(0,iterator): # Row
for j in range(0,iterator2): # Column
if(j>=iterator2/4):
dummyval=-1
else:
dummyval=1
start_matrix[:,:,j]=dummyval
dummyval=0
elif(ConfigType==4):
# Random Pattern Matrix
for k in range(0,nlayers): # Depth
for i in range(0,iterator): # Row
for j in range(0,iterator2): # Column
dummy=pick_random(ran0)
if(dummy>=0.5):
dummy=1
else:
dummy=-1
start_matrix[k,i,j]=dummy
else:
print("Error! Check ConfigType parameter in ising.in")
# Scan Loop
for iscan in range(1,nscans+1): # Main for loop
temp = float(round((high_temp - temp_interval*(iscan-1)), 3)) # rounding off to two decimal places for optimisation purposes
print("Running Program for Temperature : "+str(temp)+"\n")
    beta = 1.0/temp # Resetting variables to initial values
output_count = 0
energy_ave = 0.0
energy2_ave = 0.0
magnetization_ave = 0.0
magnetization2_ave = 0.0
    a=start_matrix # Resetting matrix a to the initial configuration
# Main loop containing Monte Carlo algorithm
m , n , d , i , j , k , ipass , npass , nequil , iterator , iterator2 , nlayers , ran0 , a , magnetization , magnetization_ave , magnetization2_ave , energy , beta , DeltaU , output_count , energy_ave , energy2_ave = Monte_Carlo( m , n , d , i , j , k , ipass , npass , nequil , iterator , iterator2 , nlayers , ran0 , a , magnetization , magnetization_ave , magnetization2_ave , energy , beta , DeltaU , output_count,energy_ave,energy2_ave )
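    # Added note: inside Monte_Carlo() a Metropolis acceptance rule is applied, i.e. a
    # trial spin flip with energy change DeltaU is accepted when log(eta) <= -beta*DeltaU
    # for a uniform random eta in (0, 1]. A rough illustrative sketch (not the actual
    # implementation in Montecarlo.py):
    #   if DeltaU <= 0 or math.log(random.uniform(1e-12, 1)) <= -beta*DeltaU:
    #       a[k, i, j] = trial_spin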
    # End of Monte Carlo passes
for k in range(0,nlayers): # Depth
for i in range(0,iterator): # Rows
for j in range(0,iterator2): # Columns
spin_row=[temp,k,i,j,a[k,i,j]]
spin_writer.writerow(spin_row)
magnet_row=[temp , abs(magnetization_ave/output_count) , magnetization2_ave/output_count , beta*(magnetization2_ave/output_count - (magnetization_ave/output_count)**2)]
magnet_writer.writerow(magnet_row)
energy_row=[temp , energy_ave/output_count , energy2_ave/output_count , (beta**2)*(energy2_ave/output_count - (energy_ave/output_count)**2)]
energy_writer.writerow(energy_row)
# End Scan Loop
print("\nProgram completed.\n\nOpen folder",path,"to view output.\n\n")
spin.close() # Closing open files.This part is important as open files may not allow writing of new data
magnet.close()
energyObj.close()
Profiler = open("Program_Profile.csv","a+")
time_elapsed=(time.perf_counter()-time_start) # Program execution time profiler
time_elapsed=round(time_elapsed,5)
Profiler.write("\nProgram FInished running in "+str(time_elapsed)+" Seconds on "+str(time.ctime()))
Profiler.close()
# THE END
|
[
"Path.Output_Path_Set",
"random.uniform",
"time.ctime",
"numpy.ones",
"Montecarlo.Monte_Carlo",
"csv.writer",
"Input_param_reader.Ising_input",
"time.perf_counter",
"numpy.sum",
"numba.jit"
] |
[((806, 825), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (823, 825), False, 'import time\n'), ((3146, 3159), 'Input_param_reader.Ising_input', 'Ising_input', ([], {}), '()\n', (3157, 3159), False, 'from Input_param_reader import Ising_input\n'), ((3604, 3657), 'numpy.ones', 'numpy.ones', (['(nlayers, iterator, iterator2)'], {'dtype': 'int'}), '((nlayers, iterator, iterator2), dtype=int)\n', (3614, 3657), False, 'import numpy\n'), ((3740, 3758), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3743, 3758), False, 'from numba import jit\n'), ((3915, 3933), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3918, 3933), False, 'from numba import jit\n'), ((4108, 4125), 'Path.Output_Path_Set', 'Output_Path_Set', ([], {}), '()\n', (4123, 4125), False, 'from Path import Output_Path_Set\n'), ((5323, 5339), 'csv.writer', 'csv.writer', (['spin'], {}), '(spin)\n', (5333, 5339), False, 'import csv\n'), ((5564, 5582), 'csv.writer', 'csv.writer', (['magnet'], {}), '(magnet)\n', (5574, 5582), False, 'import csv\n'), ((5714, 5735), 'csv.writer', 'csv.writer', (['energyObj'], {}), '(energyObj)\n', (5724, 5735), False, 'import csv\n'), ((8326, 8539), 'Montecarlo.Monte_Carlo', 'Monte_Carlo', (['m', 'n', 'd', 'i', 'j', 'k', 'ipass', 'npass', 'nequil', 'iterator', 'iterator2', 'nlayers', 'ran0', 'a', 'magnetization', 'magnetization_ave', 'magnetization2_ave', 'energy', 'beta', 'DeltaU', 'output_count', 'energy_ave', 'energy2_ave'], {}), '(m, n, d, i, j, k, ipass, npass, nequil, iterator, iterator2,\n nlayers, ran0, a, magnetization, magnetization_ave, magnetization2_ave,\n energy, beta, DeltaU, output_count, energy_ave, energy2_ave)\n', (8337, 8539), False, 'from Montecarlo import Monte_Carlo\n'), ((9609, 9628), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9626, 9628), False, 'import time\n'), ((3802, 3822), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3816, 3822), False, 'import random\n'), ((3998, 4053), 'numpy.sum', 'numpy.sum', (['a[0:nlayers, 1:iterator - 1, 1:iterator - 1]'], {}), '(a[0:nlayers, 1:iterator - 1, 1:iterator - 1])\n', (4007, 4053), False, 'import numpy\n'), ((9803, 9815), 'time.ctime', 'time.ctime', ([], {}), '()\n', (9813, 9815), False, 'import time\n')]
|
'''
===============================================================================
ENGR 133 Program Description
This function takes an image array, a kernel size and a blur (standard deviation) value and returns an array containing the blurred image
Assignment Information
Assignment: Python Group Project
Author: <NAME>, <EMAIL>
<NAME>, <EMAIL>
<NAME>, <EMAIL>
<NAME>, <EMAIL>
Team ID: 002-10
===============================================================================
'''
## UNIMPLEMENTED IN MAIN
import math
import numpy as np
def process(imageData, blur, size):
    channelCount = len(imageData[0][0])  # determine if RGB or RGBA
    rowCount = len(imageData)  # for progress display
if(size%2 == 0):
size += 1
kernel = getKernel(size, blur)
outimage = np.empty([len(imageData), len(imageData[0]), channelCount])#create empty image with same dimensions as original
for i in range(0, len(imageData)):
print(f"Row {i}/{rowCount}")
for j in range(0, len(imageData[0])):#for each pixel in image
weightedAvg = np.zeros(channelCount)
for h in range(0, len(kernel)):
for k in range(0, len(kernel[0])):#for each number in kernel
                    dx = h - len(kernel)//2  # relative change in pixel x (centred on the kernel middle)
                    dy = k - len(kernel)//2  # relative change in pixel y (centred on the kernel middle)
if i+dx >=0 and i+dx < len(imageData):#if pixel is out of bounds, extend the image
pixelX = i+dx
elif i+dx < 0:
pixelX = 0
elif i+dx >= len(imageData):
pixelX = -1
if j+dy >= 0 and j+dy < len(imageData[0]):
pixelY = j+dy
elif j+dy < 0:
pixelY = 0
                    elif j+dy >= len(imageData[0]):
pixelY = -1
pixel = imageData[pixelX][pixelY]#get pixel data for target pixel
weightedAvg += np.multiply(kernel[h][k], pixel)#sum the corresponding ARGB or RGB numbers
outimage[i][j] = weightedAvg
return outimage
def getKernel(size, stddev):#odd integer size of square kernel, spread in both directions
kernel = []
    center = size // 2  # index of the central kernel element
for j in range(0, size):
row = []
for i in range(0, size):
x = i-center
y = j-center
row.append(gaussian2D(x, y, stddev))
kernel.append(row)
kernel = normalizeArray(kernel)
return kernel
def gaussian2D(x, y, stddev):#return the gaussian2D function evaluated at x and y
A = 1/(2*math.pi*stddev*stddev)
return A*math.exp(-(x*x+y*y)/(2*stddev*stddev))
def normalizeArray(array):#2D array input
total = 0
for i in range(len(array[0])):
for j in range(len(array)):
total += array[i][j]
#print(sum(sum(array,[])))
for i in range(len(array[0])):
for j in range(len(array)):
array[i][j] /= total
#print(sum(sum(array,[])))
return(array)
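# Illustrative usage sketch (added; not part of the original module). The file name is a
# placeholder; any RGB/RGBA float image array works.
# import matplotlib.image as mpimg
# img = mpimg.imread('input.png')             # H x W x 3 (or 4) array
# blurred = process(img, blur=2.0, size=5)    # 5x5 Gaussian kernel with stddev 2.0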
|
[
"math.exp",
"numpy.multiply",
"numpy.zeros"
] |
[((2771, 2821), 'math.exp', 'math.exp', (['(-(x * x + y * y) / (2 * stddev * stddev))'], {}), '(-(x * x + y * y) / (2 * stddev * stddev))\n', (2779, 2821), False, 'import math\n'), ((1129, 1151), 'numpy.zeros', 'np.zeros', (['channelCount'], {}), '(channelCount)\n', (1137, 1151), True, 'import numpy as np\n'), ((2109, 2141), 'numpy.multiply', 'np.multiply', (['kernel[h][k]', 'pixel'], {}), '(kernel[h][k], pixel)\n', (2120, 2141), True, 'import numpy as np\n')]
|
import os
from typing import Dict, Optional
import numpy as np
import pandas as pd
from scipy.signal import correlate
from . import ShakeExtractor, helpers
from .abstract_extractor import AbstractExtractor
from .helpers import normalize, get_equidistant_signals
from .log import logger
from .synchronization_errors import StartEqualsEndError
from .types import SourceDict, ResultTableSpec, SyncPairTimeshift, SyncPairs
class Synchronizer:
@property
def extractor(self) -> AbstractExtractor:
"""Get the current extractor"""
return self._extractor
@extractor.setter
def extractor(self, value: AbstractExtractor):
if not issubclass(type(value), AbstractExtractor):
raise TypeError("Extractor needs to be a subclass of AbstractExtractor.")
self._extractor = value
def __init__(
self,
sources: SourceDict,
reference_source_name: str,
extractor: Optional[AbstractExtractor] = None,
sampling_freq: Optional[float] = None,
):
"""
        Create a new synchronizer. Synchronizer objects remove constant offsets and clock drift between recordings by
        stretching and shifting the signals based on reference points detected by an extractor.
:param sources: A SourceDict to describe the input data
:param reference_source_name: name of the sensor to be used as reference.
Other sensors will be made synchronous to this sensor, and data from this sensor will not be modified.
:param extractor: This will be used to find synchronization points in the source data. If None, it defaults to
a ShakeExtractor instance
:param sampling_freq: Override the frequency used to resample input data. If None, it defaults to the maximum
input frequency
"""
self.sources = sources
self.ref_source_name = reference_source_name
self._check_sources()
self.extractor = extractor if extractor is not None else ShakeExtractor()
self.ref_signals = self._prepare_ref_signals()
self.sampling_freq = (
sampling_freq
if sampling_freq is not None
else helpers.get_max_ref_frequency(self.ref_signals)
)
def _check_sources(self):
"""Verifies that the source dict adheres to the required format and that the reference source is available"""
for source_name, source in self.sources.items():
if "data" not in source or "ref_column" not in source:
raise ValueError(
"Each source needs to have a `data` and a `ref_column` property"
)
if not isinstance(source["data"], pd.DataFrame):
raise ValueError(
"The `data` property of each source must contain a DatFrame"
)
if not isinstance(source["data"].index, pd.DatetimeIndex):
raise ValueError(
"The `data` DataFrame must have a pd.DatetimeIndex for each source"
)
if source["data"].index.duplicated().any():
raise ValueError(
"The input dataframe must not have duplicate index values, "
"convert the data into a normalized wide format"
)
if (
not isinstance(source["ref_column"], str)
or source["ref_column"] not in source["data"].columns
):
raise ValueError(
"Each source must have a string specifying the reference column, and the reference"
"column must be available in the source's DataFrame"
)
if self.ref_source_name not in self.sources.keys():
raise ValueError(
"The reference source name must be available in the source dict"
)
def _prepare_ref_signals(self) -> pd.DataFrame:
"""
Collect the reference columns from all sources and join them into a single dataframe.
Each reference column is named equal to the name of the source it comes from.
:return: normalized reference signals
"""
reference_signals = pd.DataFrame()
for source_name, source in self.sources.items():
signal = source["data"][source["ref_column"]].dropna()
reference_signals = reference_signals.join(signal, how="outer")
reference_signals.rename(
columns={source["ref_column"]: source_name}, inplace=True
)
reference_signals = reference_signals.apply(normalize)
return reference_signals
@staticmethod
def _get_timeshift_pair(
dataframe: pd.DataFrame, ref_col: str, sig_col: str, segments: SyncPairs
) -> SyncPairTimeshift:
"""
Returns timeshifts to synchronize sig_col to ref_col.
Expects equidistant sampled signals.
:param dataframe: reference signal dataframe
:param ref_col: name of the reference signal in segments
:param sig_col: name of the target signal in segments
:param segments: all detected synchronization pairs
:return: timeshift to align the first and second synchronization point
for the target signal to the reference signal
"""
timeshifts = {}
for index, segment in enumerate(["first", "second"]):
logger.debug(
f"Calculate timeshift of {segment} segment "
f"for {sig_col} to {ref_col}."
)
# reference signal segment data extraction
ref_start, ref_end, ref_data = helpers.get_segment_data(
dataframe, segments, ref_col, segment
)
sig_start, sig_end, sig_data = helpers.get_segment_data(
dataframe, segments, sig_col, segment
)
# calculate cross-correlation of segments
cross_corr = correlate(ref_data, sig_data)
shift_in_samples = np.argmax(cross_corr) - len(sig_data) + 1
# get timestamp at which sig_segment must start to sync signals
max_corr_ts = dataframe.index[
dataframe.index.get_loc(ref_start, method="nearest") + shift_in_samples
]
logger.debug(
f"Highest correlation with start at "
f"{max_corr_ts} with {np.max(cross_corr)}."
)
# calculate timeshift to move signal to maximize correlation
timeshifts[segment] = max_corr_ts - sig_start
logger.debug("Timeshift is {}.".format(str(timeshifts[segment])))
return timeshifts
def _calculate_stretch_factors(self) -> pd.DataFrame:
"""
Calculate the stretch factor that aligns each reference signal to the reference
signal of the reference source. It immediately applies these stretch factors
to a copy of ``self.ref_signals``.
:return: a copy of self.ref_signals with the stretch factors applied.
"""
ref_signals = self.ref_signals.copy()
start_time = ref_signals.index.min()
# Get equidistantly sampled reference signals for the cross correlation to work
df_equidistant = get_equidistant_signals(ref_signals, self.sampling_freq)
sync_pairs = self.extractor.get_segments(df_equidistant)
helpers.verify_segments(ref_signals.columns, sync_pairs)
for source in df_equidistant.columns:
if source == self.ref_source_name:
continue
timeshifts = Synchronizer._get_timeshift_pair(
df_equidistant, self.ref_source_name, source, sync_pairs
)
logger.debug(
f"Timedelta between shifts before stretching: "
f"{timeshifts['first'] - timeshifts['second']}"
)
try:
stretch_factor = helpers.get_stretch_factor(
sync_pairs[source], timeshifts
)
except ZeroDivisionError:
raise StartEqualsEndError(
"First and last segment have been identified as exactly the same. Bad window, maybe?"
)
logger.info(f"Stretch factor for {source}: {stretch_factor}")
# stretch signal and exchange it in dataframe
signal_stretched = helpers.stretch_signals(
pd.DataFrame(ref_signals[source]).dropna(),
stretch_factor,
start_time,
)
ref_signals = (
ref_signals.drop(source, axis="columns")
.join(signal_stretched, how="outer")
.astype(pd.SparseDtype("float"))
)
self.sources[source]["stretch_factor"] = stretch_factor
return ref_signals
def _calculate_timeshifts(self, stretched_ref_signals: pd.DataFrame):
"""
Calculate the shift necessary to align the stretched reference signals to the not-stretched reference sensor.
:param stretched_ref_signals: a copy of self.ref_signals that has been stretched to align the duration between
the synchronization points to the duration between them in the reference sensor
"""
# Resample again with stretched signal
df_equi = get_equidistant_signals(stretched_ref_signals, self.sampling_freq)
segments = self.extractor.get_segments(df_equi)
helpers.verify_segments(stretched_ref_signals.columns, segments)
for source in df_equi.columns:
if source == self.ref_source_name:
continue
timeshifts = Synchronizer._get_timeshift_pair(
df_equi, self.ref_source_name, source, segments
)
timedelta = timeshifts["first"] - timeshifts["second"]
if timedelta > pd.Timedelta(0):
logger.warning(
f"Timedelta between shifts after stretching: {timedelta}."
f"This should be very small: the timedelta to the reference signal"
f"should be equal for both start and end so a simple offset aligns the"
f"signals perfectly."
)
logger.info("Timeshift for {}: {}".format(source, timeshifts["first"]))
self.sources[source]["timeshift"] = timeshifts["first"]
def _calculate_sync_params(self):
"""
This function calculates the synchronization parameters to sync all signals to the reference signal.
It stores the result in ``self.sources``, in the keys ``timeshift`` and ``stretch_factor``.
"""
self.sources[self.ref_source_name]["timeshift"] = None
self.sources[self.ref_source_name]["stretch_factor"] = 1
# Firstly, determine stretch factor and get stretched reference signals
stretched_ref_signals = self._calculate_stretch_factors()
# Secondly, get timeshift for the stretched signals
self._calculate_timeshifts(stretched_ref_signals)
def get_sync_params(self, recalculate: bool = False):
"""
Get the synchronization params. If they have not been calculated yet, they will be.
:param recalculate: force calculation, even if it was already done before
:return: the synchronization params for each source, i.e., each timeshift and stretch factor
"""
selected_keys = ["timeshift", "stretch_factor"]
if recalculate or "timeshift" not in self.sources[self.ref_source_name]:
self._calculate_sync_params()
return {
source_name: {
key: value for key, value in source.items() if key in selected_keys
}
for source_name, source in self.sources.items()
}
def get_synced_data(self, recalculate: bool = False) -> Dict[str, pd.DataFrame]:
"""
Synchronize the input data.
:param recalculate: force recalculating the synchronization parameters
:return: a dictionary of the shifted and stretched source signals
"""
self.get_sync_params(recalculate)
synced_data = {}
start_time = self.ref_signals.index.min()
for source_name, source in self.sources.items():
data = source["data"].copy()
stretch_factor, timeshift = source["stretch_factor"], source["timeshift"]
if stretch_factor != 1:
data = helpers.stretch_signals(data, stretch_factor, start_time)
if timeshift is not None:
data = data.shift(1, freq=timeshift)
synced_data[source_name] = data
return synced_data
def save_pickles(self, target_dir: str) -> Dict[str, pd.DataFrame]:
"""
Save a pickled, synced, dataframe for each source file.
Does not save a total table.
Sync parameters are saved as ``SYNC.csv``.
:param target_dir: target directory for the export files
:return: the synced data, plus a sync parameter dataframe in the dictionary entry with the key "SYNC".
"""
sync_params = pd.DataFrame(self.get_sync_params())
synced_data = self.get_synced_data()
sync_params.to_csv(os.path.join(target_dir, "SYNC.csv"))
for source_name, synced_df in synced_data.items():
synced_df.to_pickle(
os.path.join(target_dir, f"{source_name.upper()}.PICKLE")
)
return {**synced_data, "SYNC": sync_params}
def save_data(
self,
target_dir: str,
tables: Optional[ResultTableSpec] = None,
save_total_table: bool = True,
):
"""
Export synchronized data.
Two formats are possible: if ``tables`` is given, a file for each root key is created containing the columns
from the sensors specified as the keys on the second level. This can be used to create a file for each sensor
type, see ``ResultTableSpec`` for an example.
A ``SYNC.csv`` is always exported to store the synchronization parameters that have been calculated.
:param target_dir: target directory for the export files
:param tables: ResultTableSpec to specify the export format, or None
:param save_total_table: exports an outer join over all synchronized dataframes
"""
if tables is not None and "SYNC" in tables.keys():
raise ValueError(
"SYNC must not be one of the table names. "
"It is reserved for the synchronization parameters."
)
if save_total_table and tables is not None:
if "TOTAL" in tables.keys():
raise ValueError(
"TOTAL must not be one of the table names, "
"if the table with all data should be saved."
)
sync_params = self.get_sync_params()
synced_data = self.get_synced_data()
# Save sync params
pd.DataFrame(sync_params).to_csv(os.path.join(target_dir, "SYNC.csv"))
# Save custom tables
logger.info(tables)
if tables is not None:
for table_name, table_spec in tables.items():
if len(table_spec) == 0:
logger.warning(
f"Table entry {table_name} is missing any requested columns"
)
continue
table_df = pd.DataFrame()
for source_name, source_columns in table_spec.items():
# create dataframe for each source
source_df = pd.DataFrame()
for column in source_columns:
try:
data = synced_data[source_name][column]
except KeyError:
raise ValueError(
f"Requested non-existing {source_name}->{column}"
)
# join selected signals to device dataframe
source_df = source_df.join(data, how="outer")
if not source_df.empty:
# add device signals to general dataframe
source_df = source_df.rename(
lambda col_name: f"{source_name}_{col_name}",
axis="columns",
)
table_df = table_df.join(source_df, how="outer")
table_df.dropna(axis="index", how="all", inplace=True)
table_df.to_csv(os.path.join(target_dir, f"{table_name}.csv"))
# Save table with total data
if save_total_table:
total_table = pd.DataFrame()
for source_name, data in synced_data.items():
source_df = data.rename(
lambda col_name: f"{source_name}_{col_name}",
axis="columns",
)
total_table = total_table.join(source_df, how="outer")
total_table.to_csv(os.path.join(target_dir, "TOTAL.csv"))
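# Illustrative usage sketch (added; not part of the original module). The DataFrames,
# source names and column names are placeholders; each DataFrame needs a pd.DatetimeIndex
# and must contain the given reference column.
# sources = {
#     "chest": {"data": chest_df, "ref_column": "acc_x"},
#     "wrist": {"data": wrist_df, "ref_column": "acc_x"},
# }
# synchronizer = Synchronizer(sources, reference_source_name="chest")
# synced = synchronizer.get_synced_data()  # dict of shifted and stretched DataFrames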
|
[
"pandas.Timedelta",
"pandas.SparseDtype",
"os.path.join",
"scipy.signal.correlate",
"numpy.argmax",
"numpy.max",
"pandas.DataFrame"
] |
[((4212, 4226), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4224, 4226), True, 'import pandas as pd\n'), ((5964, 5993), 'scipy.signal.correlate', 'correlate', (['ref_data', 'sig_data'], {}), '(ref_data, sig_data)\n', (5973, 5993), False, 'from scipy.signal import correlate\n'), ((13268, 13304), 'os.path.join', 'os.path.join', (['target_dir', '"""SYNC.csv"""'], {}), "(target_dir, 'SYNC.csv')\n", (13280, 13304), False, 'import os\n'), ((15052, 15088), 'os.path.join', 'os.path.join', (['target_dir', '"""SYNC.csv"""'], {}), "(target_dir, 'SYNC.csv')\n", (15064, 15088), False, 'import os\n'), ((16776, 16790), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16788, 16790), True, 'import pandas as pd\n'), ((8721, 8744), 'pandas.SparseDtype', 'pd.SparseDtype', (['"""float"""'], {}), "('float')\n", (8735, 8744), True, 'import pandas as pd\n'), ((9893, 9908), 'pandas.Timedelta', 'pd.Timedelta', (['(0)'], {}), '(0)\n', (9905, 9908), True, 'import pandas as pd\n'), ((15019, 15044), 'pandas.DataFrame', 'pd.DataFrame', (['sync_params'], {}), '(sync_params)\n', (15031, 15044), True, 'import pandas as pd\n'), ((15478, 15492), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15490, 15492), True, 'import pandas as pd\n'), ((17113, 17150), 'os.path.join', 'os.path.join', (['target_dir', '"""TOTAL.csv"""'], {}), "(target_dir, 'TOTAL.csv')\n", (17125, 17150), False, 'import os\n'), ((6025, 6046), 'numpy.argmax', 'np.argmax', (['cross_corr'], {}), '(cross_corr)\n', (6034, 6046), True, 'import numpy as np\n'), ((15652, 15666), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15664, 15666), True, 'import pandas as pd\n'), ((16636, 16681), 'os.path.join', 'os.path.join', (['target_dir', 'f"""{table_name}.csv"""'], {}), "(target_dir, f'{table_name}.csv')\n", (16648, 16681), False, 'import os\n'), ((6407, 6425), 'numpy.max', 'np.max', (['cross_corr'], {}), '(cross_corr)\n', (6413, 6425), True, 'import numpy as np\n'), ((8441, 8474), 'pandas.DataFrame', 'pd.DataFrame', (['ref_signals[source]'], {}), '(ref_signals[source])\n', (8453, 8474), True, 'import pandas as pd\n')]
|
# ! /usr/bin/python3
"""### Provides tools for maps and heightmaps
This module contains functions to:
* Calculate a heightmap ideal for building
* Visualise numpy arrays
"""
__all__ = ['calcGoodHeightmap']
# __version__
import cv2
import matplotlib.pyplot as plt
import numpy as np
def calcGoodHeightmap(worldSlice):
"""**Calculates a heightmap ideal for building.**
Trees are ignored and water is considered ground.
Args:
worldSlice (WorldSlice): an instance of the WorldSlice class containing the raw heightmaps and block data
Returns:
any: numpy array containing the calculated heightmap
"""
hm_mbnl = worldSlice.heightmaps["MOTION_BLOCKING_NO_LEAVES"]
heightmapNoTrees = hm_mbnl[:]
area = worldSlice.rect
for x in range(area[2]):
for z in range(area[3]):
while True:
y = heightmapNoTrees[x, z]
block = worldSlice.getBlockAt(
(area[0] + x, y - 1, area[1] + z))
if block[-4:] == '_log':
heightmapNoTrees[x, z] -= 1
else:
break
return np.array(np.minimum(hm_mbnl, heightmapNoTrees))
def visualize(*arrays, title=None, autonormalize=True):
"""**Visualizes one or multiple numpy arrays.**
Args:
title (str, optional): display title. Defaults to None.
autonormalize (bool, optional): Normalizes the array to be between 0 (black) and 255 (white). Defaults to True.
"""
for array in arrays:
if autonormalize:
array = (normalize(array) * 255).astype(np.uint8)
plt.figure()
if title:
plt.title(title)
plt_image = cv2.cvtColor(array, cv2.COLOR_BGR2RGB)
imgplot = plt.imshow(plt_image)
plt.show()
def normalize(array):
"""**Normalizes the array to contain values from 0 to 1.**"""
return (array - array.min()) / (array.max() - array.min())
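# Illustrative usage sketch (added; not part of the original module). `worldSlice` stands
# for an already-constructed WorldSlice instance from the surrounding framework.
# heightmap = calcGoodHeightmap(worldSlice)
# visualize(heightmap, title="buildable heightmap")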
|
[
"matplotlib.pyplot.imshow",
"numpy.minimum",
"matplotlib.pyplot.figure",
"cv2.cvtColor",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] |
[((1790, 1800), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1798, 1800), True, 'import matplotlib.pyplot as plt\n'), ((1153, 1190), 'numpy.minimum', 'np.minimum', (['hm_mbnl', 'heightmapNoTrees'], {}), '(hm_mbnl, heightmapNoTrees)\n', (1163, 1190), True, 'import numpy as np\n'), ((1627, 1639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1637, 1639), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1745), 'cv2.cvtColor', 'cv2.cvtColor', (['array', 'cv2.COLOR_BGR2RGB'], {}), '(array, cv2.COLOR_BGR2RGB)\n', (1719, 1745), False, 'import cv2\n'), ((1764, 1785), 'matplotlib.pyplot.imshow', 'plt.imshow', (['plt_image'], {}), '(plt_image)\n', (1774, 1785), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1686), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1679, 1686), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python
"""
Generate sample olfactory model stimulus.
"""
import numpy as np
import h5py
osn_num = 1375
dt = 1e-4 # time step
Ot = 2000 # number of data points per odor on/off segment (see commented-out u_on/u_off below)
Rt = 1000 # number of data points per reset segment (see commented-out u_reset below)
#Nt = 4*Ot + 3*Rt # number of data points in time
#Nt = 10000
#t = np.arange(0, dt*Nt, dt)
I = 0.5195 # amplitude of odorant concentration
u_1 = np.zeros(500, np.float64)
u_2 = I*np.ones(5000, np.float64)
u_3 = np.zeros(4500,np.float64)
u_4 = I*np.ones(1000,np.float64)
u_5 = np.zeros(1000, np.float64)
u_6 = I*np.ones(1500, np.float64)
u_7 = np.zeros(500,np.float64)
#u_on = I*np.ones(Ot, dtype=np.float64)
#u_off = np.zeros(Ot, dtype=np.float64)
#u_reset = np.zeros(Rt, dtype=np.float64)
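# Added note: the concatenated segments below form an on/off pulse train (dt = 1e-4 s):
# 0.05 s rest, 0.5 s odor, 0.45 s rest, 0.1 s odor, 0.1 s rest, 0.15 s odor, 0.05 s rest.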
u = np.concatenate((u_1,u_2,u_3,u_4,u_5,u_6,u_7))
Nt = u.size
#print Nt
u_all = np.transpose(np.kron(np.ones((osn_num, 1)), u))
with h5py.File('olfactory_input.h5', 'w') as f:
f.create_dataset('array', (Nt, osn_num),
dtype=np.float64,
data=u_all)
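# Added verification sketch (illustrative, not part of the original script): re-open the
# file and check that the stored stimulus has one row per time step and one column per OSN.
with h5py.File('olfactory_input.h5', 'r') as f:
    assert f['array'].shape == (Nt, osn_num)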
|
[
"numpy.zeros",
"numpy.ones",
"numpy.concatenate",
"h5py.File"
] |
[((412, 437), 'numpy.zeros', 'np.zeros', (['(500)', 'np.float64'], {}), '(500, np.float64)\n', (420, 437), True, 'import numpy as np\n'), ((478, 504), 'numpy.zeros', 'np.zeros', (['(4500)', 'np.float64'], {}), '(4500, np.float64)\n', (486, 504), True, 'import numpy as np\n'), ((543, 569), 'numpy.zeros', 'np.zeros', (['(1000)', 'np.float64'], {}), '(1000, np.float64)\n', (551, 569), True, 'import numpy as np\n'), ((610, 635), 'numpy.zeros', 'np.zeros', (['(500)', 'np.float64'], {}), '(500, np.float64)\n', (618, 635), True, 'import numpy as np\n'), ((772, 823), 'numpy.concatenate', 'np.concatenate', (['(u_1, u_2, u_3, u_4, u_5, u_6, u_7)'], {}), '((u_1, u_2, u_3, u_4, u_5, u_6, u_7))\n', (786, 823), True, 'import numpy as np\n'), ((446, 471), 'numpy.ones', 'np.ones', (['(5000)', 'np.float64'], {}), '(5000, np.float64)\n', (453, 471), True, 'import numpy as np\n'), ((512, 537), 'numpy.ones', 'np.ones', (['(1000)', 'np.float64'], {}), '(1000, np.float64)\n', (519, 537), True, 'import numpy as np\n'), ((578, 603), 'numpy.ones', 'np.ones', (['(1500)', 'np.float64'], {}), '(1500, np.float64)\n', (585, 603), True, 'import numpy as np\n'), ((904, 940), 'h5py.File', 'h5py.File', (['"""olfactory_input.h5"""', '"""w"""'], {}), "('olfactory_input.h5', 'w')\n", (913, 940), False, 'import h5py\n'), ((871, 892), 'numpy.ones', 'np.ones', (['(osn_num, 1)'], {}), '((osn_num, 1))\n', (878, 892), True, 'import numpy as np\n')]
|
import pytest
from numpy.testing import assert_almost_equal, assert_array_equal, \
assert_array_almost_equal
from ctapipe.calib.camera.r1 import (
CameraR1CalibratorFactory,
HESSIOR1Calibrator,
TargetIOR1Calibrator,
NullR1Calibrator
)
from ctapipe.io.eventsource import EventSource
from ctapipe.io.simteleventsource import SimTelEventSource
from ctapipe.io.targetioeventsource import TargetIOEventSource
from ctapipe.utils import get_dataset_path
def test_hessio_r1_calibrator(example_event):
telid = 11
calibrator = HESSIOR1Calibrator()
calibrator.calibrate(example_event)
r1 = example_event.r1.tel[telid].waveform
assert_almost_equal(r1[0, 0, 0], -0.091, 3)
def test_null_r1_calibrator(example_event):
telid = 11
calibrator = NullR1Calibrator()
calibrator.calibrate(example_event)
r0 = example_event.r0.tel[telid].waveform
r1 = example_event.r1.tel[telid].waveform
assert_array_equal(r0, r1)
def test_targetio_calibrator():
pytest.importorskip("target_calib")
url_r0 = get_dataset_path("targetmodule_r0.tio")
url_r1 = get_dataset_path("targetmodule_r1.tio")
pedpath = get_dataset_path("targetmodule_ped.tcal")
source_r0 = TargetIOEventSource(input_url=url_r0)
source_r1 = TargetIOEventSource(input_url=url_r1)
r1c = CameraR1CalibratorFactory.produce(eventsource=source_r0)
event_r0 = source_r0._get_event_by_index(0)
event_r1 = source_r1._get_event_by_index(0)
r1c.calibrate(event_r0)
assert_array_equal(event_r0.r0.tel[0].waveform,
event_r0.r1.tel[0].waveform)
r1c = CameraR1CalibratorFactory.produce(
eventsource=source_r0,
pedestal_path=pedpath
)
r1c.calibrate(event_r0)
assert_array_almost_equal(event_r0.r1.tel[0].waveform,
event_r1.r1.tel[0].waveform, 1)
def test_targetio_calibrator_wrong_file(example_event):
pytest.importorskip("target_calib")
r1c = TargetIOR1Calibrator()
with pytest.raises(ValueError):
r1c.calibrate(example_event)
def test_check_r0_exists(example_event):
telid = 11
calibrator = HESSIOR1Calibrator()
assert (calibrator.check_r0_exists(example_event, telid) is True)
example_event.r0.tel[telid].waveform = None
assert (calibrator.check_r0_exists(example_event, telid) is False)
def test_factory_from_product():
calibrator = CameraR1CalibratorFactory.produce(
product="NullR1Calibrator"
)
assert isinstance(calibrator, NullR1Calibrator)
calibrator = CameraR1CalibratorFactory.produce(
product="HESSIOR1Calibrator"
)
assert isinstance(calibrator, HESSIOR1Calibrator)
def test_factory_default():
calibrator = CameraR1CalibratorFactory.produce()
assert isinstance(calibrator, NullR1Calibrator)
def test_factory_from_eventsource():
dataset = get_dataset_path("gamma_test.simtel.gz")
eventsource = SimTelEventSource(input_url=dataset)
calibrator = CameraR1CalibratorFactory.produce(eventsource=eventsource)
assert isinstance(calibrator, HESSIOR1Calibrator)
def test_factory_from_eventsource_override():
dataset = get_dataset_path("gamma_test.simtel.gz")
eventsource = SimTelEventSource(input_url=dataset)
calibrator = CameraR1CalibratorFactory.produce(
eventsource=eventsource,
product="NullR1Calibrator"
)
assert isinstance(calibrator, NullR1Calibrator)
class UnknownEventSource(EventSource):
"""
Simple working EventSource
"""
def _generator(self):
return range(len(self.input_url))
@staticmethod
def is_compatible(file_path):
return False
def test_factory_from_unknown_eventsource():
dataset = get_dataset_path("gamma_test.simtel.gz")
eventsource = UnknownEventSource(input_url=dataset)
calibrator = CameraR1CalibratorFactory.produce(eventsource=eventsource)
assert isinstance(calibrator, NullR1Calibrator)
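# Added note (not part of the original test module): these tests are meant to be collected
# by pytest; the TargetIO-related cases skip themselves automatically via
# pytest.importorskip when the optional "target_calib" dependency is not installed.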
|
[
"ctapipe.utils.get_dataset_path",
"numpy.testing.assert_array_almost_equal",
"ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce",
"ctapipe.calib.camera.r1.TargetIOR1Calibrator",
"numpy.testing.assert_almost_equal",
"pytest.importorskip",
"ctapipe.io.simteleventsource.SimTelEventSource",
"ctapipe.calib.camera.r1.NullR1Calibrator",
"ctapipe.io.targetioeventsource.TargetIOEventSource",
"pytest.raises",
"ctapipe.calib.camera.r1.HESSIOR1Calibrator",
"numpy.testing.assert_array_equal"
] |
[((549, 569), 'ctapipe.calib.camera.r1.HESSIOR1Calibrator', 'HESSIOR1Calibrator', ([], {}), '()\n', (567, 569), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((660, 703), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['r1[0, 0, 0]', '(-0.091)', '(3)'], {}), '(r1[0, 0, 0], -0.091, 3)\n', (679, 703), False, 'from numpy.testing import assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((783, 801), 'ctapipe.calib.camera.r1.NullR1Calibrator', 'NullR1Calibrator', ([], {}), '()\n', (799, 801), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((938, 964), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['r0', 'r1'], {}), '(r0, r1)\n', (956, 964), False, 'from numpy.testing import assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1003, 1038), 'pytest.importorskip', 'pytest.importorskip', (['"""target_calib"""'], {}), "('target_calib')\n", (1022, 1038), False, 'import pytest\n'), ((1052, 1091), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""targetmodule_r0.tio"""'], {}), "('targetmodule_r0.tio')\n", (1068, 1091), False, 'from ctapipe.utils import get_dataset_path\n'), ((1105, 1144), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""targetmodule_r1.tio"""'], {}), "('targetmodule_r1.tio')\n", (1121, 1144), False, 'from ctapipe.utils import get_dataset_path\n'), ((1159, 1200), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""targetmodule_ped.tcal"""'], {}), "('targetmodule_ped.tcal')\n", (1175, 1200), False, 'from ctapipe.utils import get_dataset_path\n'), ((1218, 1255), 'ctapipe.io.targetioeventsource.TargetIOEventSource', 'TargetIOEventSource', ([], {'input_url': 'url_r0'}), '(input_url=url_r0)\n', (1237, 1255), False, 'from ctapipe.io.targetioeventsource import TargetIOEventSource\n'), ((1272, 1309), 'ctapipe.io.targetioeventsource.TargetIOEventSource', 'TargetIOEventSource', ([], {'input_url': 'url_r1'}), '(input_url=url_r1)\n', (1291, 1309), False, 'from ctapipe.io.targetioeventsource import TargetIOEventSource\n'), ((1321, 1377), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'source_r0'}), '(eventsource=source_r0)\n', (1354, 1377), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((1508, 1584), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['event_r0.r0.tel[0].waveform', 'event_r0.r1.tel[0].waveform'], {}), '(event_r0.r0.tel[0].waveform, event_r0.r1.tel[0].waveform)\n', (1526, 1584), False, 'from numpy.testing import assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1619, 1698), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'source_r0', 'pedestal_path': 'pedpath'}), '(eventsource=source_r0, pedestal_path=pedpath)\n', (1652, 1698), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((1753, 1844), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['event_r0.r1.tel[0].waveform', 'event_r1.r1.tel[0].waveform', '(1)'], {}), '(event_r0.r1.tel[0].waveform, event_r1.r1.tel[0].\n waveform, 1)\n', (1778, 1844), False, 'from numpy.testing import 
assert_almost_equal, assert_array_equal, assert_array_almost_equal\n'), ((1932, 1967), 'pytest.importorskip', 'pytest.importorskip', (['"""target_calib"""'], {}), "('target_calib')\n", (1951, 1967), False, 'import pytest\n'), ((1978, 2000), 'ctapipe.calib.camera.r1.TargetIOR1Calibrator', 'TargetIOR1Calibrator', ([], {}), '()\n', (1998, 2000), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2150, 2170), 'ctapipe.calib.camera.r1.HESSIOR1Calibrator', 'HESSIOR1Calibrator', ([], {}), '()\n', (2168, 2170), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2412, 2473), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'product': '"""NullR1Calibrator"""'}), "(product='NullR1Calibrator')\n", (2445, 2473), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2557, 2620), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'product': '"""HESSIOR1Calibrator"""'}), "(product='HESSIOR1Calibrator')\n", (2590, 2620), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2736, 2771), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {}), '()\n', (2769, 2771), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2877, 2917), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""gamma_test.simtel.gz"""'], {}), "('gamma_test.simtel.gz')\n", (2893, 2917), False, 'from ctapipe.utils import get_dataset_path\n'), ((2936, 2972), 'ctapipe.io.simteleventsource.SimTelEventSource', 'SimTelEventSource', ([], {'input_url': 'dataset'}), '(input_url=dataset)\n', (2953, 2972), False, 'from ctapipe.io.simteleventsource import SimTelEventSource\n'), ((2990, 3048), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'eventsource'}), '(eventsource=eventsource)\n', (3023, 3048), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((3165, 3205), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""gamma_test.simtel.gz"""'], {}), "('gamma_test.simtel.gz')\n", (3181, 3205), False, 'from ctapipe.utils import get_dataset_path\n'), ((3224, 3260), 'ctapipe.io.simteleventsource.SimTelEventSource', 'SimTelEventSource', ([], {'input_url': 'dataset'}), '(input_url=dataset)\n', (3241, 3260), False, 'from ctapipe.io.simteleventsource import SimTelEventSource\n'), ((3278, 3369), 'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'eventsource', 'product': '"""NullR1Calibrator"""'}), "(eventsource=eventsource, product=\n 'NullR1Calibrator')\n", (3311, 3369), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((3731, 3771), 'ctapipe.utils.get_dataset_path', 'get_dataset_path', (['"""gamma_test.simtel.gz"""'], {}), "('gamma_test.simtel.gz')\n", (3747, 3771), False, 'from ctapipe.utils import get_dataset_path\n'), ((3845, 3903), 
'ctapipe.calib.camera.r1.CameraR1CalibratorFactory.produce', 'CameraR1CalibratorFactory.produce', ([], {'eventsource': 'eventsource'}), '(eventsource=eventsource)\n', (3878, 3903), False, 'from ctapipe.calib.camera.r1 import CameraR1CalibratorFactory, HESSIOR1Calibrator, TargetIOR1Calibrator, NullR1Calibrator\n'), ((2010, 2035), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2023, 2035), False, 'import pytest\n')]
|
####
#
# The MIT License (MIT)
#
# Copyright 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####
import os
import argparse
import pandas as pd
import numpy as np
import sqlite3
import logging
import gzip
from tqdm import tqdm
# ================
# Setup the Logger
LOGGER = logging.getLogger("Get CFM-ID Candidates")
LOGGER.setLevel(logging.INFO)
LOGGER.propagate = False
CH = logging.StreamHandler()
CH.setLevel(logging.INFO)
FORMATTER = logging.Formatter('[%(levelname)s] %(name)s : %(message)s')
CH.setFormatter(FORMATTER)
LOGGER.addHandler(CH)
# ================
IONIZATION_MODES = ["neg", "pos"]
def fopener(fn: str):
# Output writer
if args.gzip:
return gzip.open(fn, "wt")
else:
return open(fn, "w")
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("base_output_dir")
arg_parser.add_argument("--massbank_db_fn", help="Filepath of the Massbank database.", default="./massbank.sqlite")
arg_parser.add_argument("--gzip", action="store_true")
arg_parser.add_argument("--store_candidates_separately", action="store_true")
args = arg_parser.parse_args()
# Read in training molecules (inchikeys) and their left-out cv-folds
df_train = {}
for imode in IONIZATION_MODES:
df_train[imode] = pd.read_csv(os.path.join(args.base_output_dir, imode, "mol_list_cv.tsv"), sep="\t")
df_train[imode]["INCHIKEY1"] = [ikey.split("-")[0] for ikey in df_train[imode]["INCHIKEY"]]
# There is a candidate set for each CFM-ID model
candidates = {imode: [set() for _ in range(10)] for imode in IONIZATION_MODES}
# Track which model was used for which spectrum
df_spec2model = {imode: [] for imode in IONIZATION_MODES}
# Connect to db
conn = sqlite3.connect(args.massbank_db_fn)
try:
# Get all spectrum ids and the corresponding InChIKey(1)s
rows = conn.execute(
"SELECT accession, cid, inchikey1, precursor_type FROM scored_spectra_meta"
" INNER JOIN molecules m on m.cid = scored_spectra_meta.molecule"
).fetchall()
for idx, (acc, cid, ikey1, ptype) in tqdm(enumerate(rows), desc="Process spectra", total=len(rows)):
            # Determine the ionization mode
if ptype.endswith("+"):
imode = "pos"
elif ptype.endswith("-"):
imode = "neg"
else:
raise ValueError("Cannot determine ionization mode from precursor type: '%s'." % ptype)
# Check for the spectrum, whether it is used for the CFM-ID training and if yes in which fold
try:
idx = df_train[imode]["INCHIKEY1"].tolist().index(ikey1)
cv_fold = df_train[imode].iloc[idx]["CV"]
except ValueError:
cv_fold = np.random.RandomState(idx).randint(0, 10) # Use a random fold as fallback
# Get the candidates for the current spectrum
for cid_can, smi_cnd in conn.execute(
"SELECT cid, smiles_iso FROM candidates_spectra "
" INNER JOIN molecules m ON m.cid = candidates_spectra.candidate"
" WHERE spectrum IS ?", (acc, )
):
# Add the molecule and its isomeric SMILES representation to prediction list for the current model
candidates[imode][cv_fold] |= {(cid_can, smi_cnd)}
# Track spectra information and their corresponding models
df_spec2model[imode].append((acc, cid, cv_fold, imode, ikey1))
finally:
conn.close()
# Write out which model is used for which spectrum
for imode in IONIZATION_MODES:
pd.DataFrame(df_spec2model[imode], columns=["accession", "cid", "cv_fold", "ionization", "inchikey1"]) \
.to_csv(os.path.join(args.base_output_dir, imode, "spec2model.tsv"), sep="\t", index=False)
# Write out the model specific candidate sets
if args.store_candidates_separately:
for imode in IONIZATION_MODES:
for cv_fold in tqdm(range(10), desc="Write out candidate files (%s)" % imode):
if len(candidates[imode][cv_fold]) > 0:
for cid, smi in candidates[imode][cv_fold]:
ofn = os.path.join(args.base_output_dir, imode, "%d__cv=%d.cand" % (cid, cv_fold))
with open(ofn, "w") as ofile:
ofile.write("%s %s\n" % (cid, smi))
else:
for imode in IONIZATION_MODES:
for cv_fold in tqdm(range(10), desc="Write out candidate files (%s)" % imode):
if len(candidates[imode][cv_fold]) > 0:
ofn = os.path.join(args.base_output_dir, imode, "candidates__cv=%d.csv" % cv_fold)
if args.gzip:
ofn += ".gz"
with fopener(ofn) as ofile:
for cid, smi in candidates[imode][cv_fold]:
ofile.write("%s %s\n" % (cid, smi))
|
[
"logging.getLogger",
"logging.StreamHandler",
"sqlite3.connect",
"argparse.ArgumentParser",
"gzip.open",
"logging.Formatter",
"os.path.join",
"pandas.DataFrame",
"numpy.random.RandomState"
] |
[((1309, 1351), 'logging.getLogger', 'logging.getLogger', (['"""Get CFM-ID Candidates"""'], {}), "('Get CFM-ID Candidates')\n", (1326, 1351), False, 'import logging\n'), ((1413, 1436), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1434, 1436), False, 'import logging\n'), ((1476, 1535), 'logging.Formatter', 'logging.Formatter', (['"""[%(levelname)s] %(name)s : %(message)s"""'], {}), "('[%(levelname)s] %(name)s : %(message)s')\n", (1493, 1535), False, 'import logging\n'), ((1822, 1847), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1845, 1847), False, 'import argparse\n'), ((2812, 2848), 'sqlite3.connect', 'sqlite3.connect', (['args.massbank_db_fn'], {}), '(args.massbank_db_fn)\n', (2827, 2848), False, 'import sqlite3\n'), ((1717, 1736), 'gzip.open', 'gzip.open', (['fn', '"""wt"""'], {}), "(fn, 'wt')\n", (1726, 1736), False, 'import gzip\n'), ((2356, 2416), 'os.path.join', 'os.path.join', (['args.base_output_dir', 'imode', '"""mol_list_cv.tsv"""'], {}), "(args.base_output_dir, imode, 'mol_list_cv.tsv')\n", (2368, 2416), False, 'import os\n'), ((4846, 4905), 'os.path.join', 'os.path.join', (['args.base_output_dir', 'imode', '"""spec2model.tsv"""'], {}), "(args.base_output_dir, imode, 'spec2model.tsv')\n", (4858, 4905), False, 'import os\n'), ((4721, 4827), 'pandas.DataFrame', 'pd.DataFrame', (['df_spec2model[imode]'], {'columns': "['accession', 'cid', 'cv_fold', 'ionization', 'inchikey1']"}), "(df_spec2model[imode], columns=['accession', 'cid', 'cv_fold',\n 'ionization', 'inchikey1'])\n", (4733, 4827), True, 'import pandas as pd\n'), ((5719, 5795), 'os.path.join', 'os.path.join', (['args.base_output_dir', 'imode', "('candidates__cv=%d.csv' % cv_fold)"], {}), "(args.base_output_dir, imode, 'candidates__cv=%d.csv' % cv_fold)\n", (5731, 5795), False, 'import os\n'), ((5302, 5378), 'os.path.join', 'os.path.join', (['args.base_output_dir', 'imode', "('%d__cv=%d.cand' % (cid, cv_fold))"], {}), "(args.base_output_dir, imode, '%d__cv=%d.cand' % (cid, cv_fold))\n", (5314, 5378), False, 'import os\n'), ((3860, 3886), 'numpy.random.RandomState', 'np.random.RandomState', (['idx'], {}), '(idx)\n', (3881, 3886), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
'''Module that defines classes and functions for Brillouin zone sampling
'''
import os
import re
from copy import deepcopy
import numpy as np
from mykit.core._control import (build_tag_map_obj, extract_from_tagdict,
parse_to_tagdict, prog_mapper, tags_mapping)
from mykit.core.log import Verbose
from mykit.core.numeric import Prec
# from mykit.core.utils import if_vec_same_direction
# Allowed pattern for kpoint symbols and kpath string
KSYM_PATTERN = r'[A-Z]{1,2}'
KPATH_PATTERN = r'^('+ KSYM_PATTERN + r'-)+' + KSYM_PATTERN + r'$'
class KmeshError(Exception):
pass
class kmesh_control(Verbose, prog_mapper):
_meta = os.path.join(os.path.dirname(__file__), 'metadata', 'kmeshmap.json')
_tagMaps = build_tag_map_obj(_meta, "mykit", "json")
_kmeshTagMaps = _tagMaps
_kmeshValMaps = {}
def __init__(self, progName, **kmargs):
self._kmeshTags = {}
self._parse_kmeshtags(progName, **kmargs)
def parse_tags(self, progName, **kmtags):
'''
'''
self._parse_kmeshtags(progName, **kmtags)
def _parse_kmeshtags(self, progName, **kmtags):
if len(kmtags) == 0:
return
parse_to_tagdict(self._kmeshTags, self._kmeshTagMaps, progName, **kmtags)
def delete_tags(self, progName, *tags):
self._pop_kmeshtags(progName, *tags)
def pop_tags(self, progName, *tags):
return self._pop_kmeshtags(progName, *tags)
def _pop_kmeshtags(self, progName, *tags):
vals = self._kmeshtag_vals(progName, *tags, delete=True)
return vals
def _get_one_mykit_tag(self, kmTagName):
return self._get_one_kmeshtag(kmTagName)
def _get_one_kmeshtag(self, kmTagName):
return self._kmeshTags.get(kmTagName, None)
def tag_vals(self, progName, *tags):
return self._kmeshtag_vals(progName, *tags)
def _kmeshtag_vals(self, progName, *tags, delete=False):
if len(tags) == 0:
return []
vals = extract_from_tagdict(kmesh_control, self._kmeshTags, progName, *tags, delete=delete)
return vals
@property
def kmeshTags(self):
return self._kmeshTags
@property
def kmode(self):
return self._kmeshTags.get("kmode")
@property
def kdiv(self):
return self._kmeshTags.get("div")
@classmethod
def map_tags(cls, *tags, progFrom="mykit", progTo="mykit", getAll=False):
'''
'''
_pF = progFrom.lower()
_pT = progTo.lower()
return tags_mapping(cls._kmeshTagMaps, _pF, _pF, *tags, getAll=getAll)
def kpath_decoder(kpath):
    '''Decode a string of kpoint symbols into a list of strings.
    Entries with even and odd indices (counting from 0) are the starting and
    ending points of each segment in reciprocal space, respectively.
The path can have more than one continuous path in reciprocal space,
separated by space.
However, each continuous path should match the ``KPATH_PATTERN``,
otherwise `KmeshError` will be raised.
Args:
        kpath (str): the string containing kpoint symbols and
            representing a trajectory in reciprocal space
Examples:
>>> kpath_decoder("A-B-C D-E")
["A", "B", "B", "C", "D", "E"]
>>> kpath_decoder("GM-W-X-L-GM-X")
["GM", "W", "W", "X", "X", "L", "L", "GM", "GM", "X"]
'''
try:
_klines = kpath.split()
except (AttributeError, SyntaxError):
raise KmeshError("Input kpath should be string: {}".format(kpath))
# the pattern of each path segment
linePat = re.compile(KPATH_PATTERN)
ksegs = []
for kline in _klines:
if not re.match(linePat, kline):
raise KmeshError("Invalid kpath line string: {}".format(kline))
symbols = kline.split('-')
nSyms = len(symbols)
for i in range(nSyms-1):
# ksegs.append('{}-{}'.format(symbols[i], symbols[i+1]))
if symbols[i] == symbols[i+1]:
raise KmeshError("kpath with zero length: {}-{}".format(symbols[i], symbols[i+1]))
ksegs.extend([symbols[i], symbols[i+1]])
return ksegs
def kpath_encoder(ksyms):
'''Encode a list/tuple of strings to a complete kpath string.
Args:
ksyms (list or tuple): container of kpath symbols, must have an even length
'''
try:
assert isinstance(ksyms, (list, tuple))
except AssertionError:
raise KmeshError("require list or tuple, received {}".format(type(ksyms)))
try:
assert len(ksyms)%2 == 0
except AssertionError:
raise KmeshError("require even length, received {}".format(len(ksyms)))
nLineSeg = int(len(ksyms)/2)
kpath = ''
symPat = re.compile(r'^' + KSYM_PATTERN + r'$')
lastSym = ''
for _i in range(nLineSeg):
st = ksyms[2*_i]
ed = ksyms[2*_i+1]
if st == ed:
raise KmeshError("kpath with zero length: {}-{}".format(st, ed))
seg = (st, ed)
for ksym in seg:
if not re.match(symPat, ksym):
raise KmeshError("Invalid kpoint symbol: {}".format(ksym))
if _i == 0:
kpath += '-'.join(seg)
else:
if st == lastSym:
kpath += '-' + ed
else:
kpath += ' ' + '-'.join(seg)
lastSym = ed
return kpath
def _check_valid_ksym_coord_pair(ksym, coord):
if not re.match(r"^" + KSYM_PATTERN + r"$", ksym):
raise KeyError("Invalid kpoint symbol: {}".format(ksym))
try:
shape = np.shape(coord)
except ValueError:
raise ValueError("Invalid kpoint coordinate for symbol {}".format(ksym))
else:
if shape != (3,):
raise ValueError("Invalid kpoint coordinate for symbol {}".format(ksym))
def _check_valid_kpath_dict(kpathDict):
try:
assert isinstance(kpathDict, dict)
except AssertionError:
raise TypeError("kpath must be dictionary.")
try:
assert set(["symbols", "coordinates"]) == set(kpathDict.keys())
except AssertionError:
raise KeyError("\"symbols\", \"coordinates\" keys not found. Please check")
for (ksym, coord) in zip(kpathDict["symbols"],kpathDict["coordinates"]):
try:
_check_valid_ksym_coord_pair(ksym, coord)
except (KeyError, ValueError) as _err:
raise _err
def check_kvecs_form_kpath(kvec):
'''Check if the kpoint vectors form several line segments in the reciprocal space
Usually, the number of kpoints on one line segments is no less than 3.
Args:
kvec (array-like): the kpoint vectors to analysis, shape, (n,3)
Returns:
list, with tuple as members. Each tuple has 2 int members,
the indices of kpoint vectors at the beginning and end of
a line segment
'''
segs = []
# check the shape of kvec
try:
shape = np.shape(kvec)
assert len(shape) == 2
assert shape[1] == 3
except (TypeError, AssertionError):
return segs
nkpt = shape[0]
if nkpt < 3:
return segs
_kvec = np.array(kvec, dtype=Prec._dtype)
dkvec = _kvec[1:, :] - _kvec[:-1, :]
# normalize the dkvec,
n = np.linalg.norm(dkvec, axis=1)
for i in range(nkpt-1):
if np.isclose(n[i], 0):
dkvec[i,:] = 1000.0
else:
dkvec[i,:] = dkvec[i,:]/n[i]
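    # dot products of consecutive unit direction vectors; a value close to 1 means the two steps are collinear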
dp = np.sum(dkvec[:-1,:] * dkvec[1:,:], axis=1)
st = 0
ed = 2
while ed < nkpt:
if not np.isclose(dp[ed-2], 1):
if ed - st > 2:
segs.append((st, ed-1))
st = ed - 1
ed = ed + 1
if ed - st > 2:
segs.append((st, ed-1))
return segs
# the mapping from kpoint symbol to LaTeX commands
#pylint: disable=anomalous-backslash-in-string
KSYMBOL_LATEX = {
"GM": "$\Gamma$",
"LM": "$\lambda$",
}
|
[
"numpy.isclose",
"re.compile",
"mykit.core._control.build_tag_map_obj",
"mykit.core._control.extract_from_tagdict",
"re.match",
"mykit.core._control.tags_mapping",
"numpy.array",
"numpy.sum",
"os.path.dirname",
"numpy.linalg.norm",
"mykit.core._control.parse_to_tagdict",
"numpy.shape"
] |
[((777, 818), 'mykit.core._control.build_tag_map_obj', 'build_tag_map_obj', (['_meta', '"""mykit"""', '"""json"""'], {}), "(_meta, 'mykit', 'json')\n", (794, 818), False, 'from mykit.core._control import build_tag_map_obj, extract_from_tagdict, parse_to_tagdict, prog_mapper, tags_mapping\n'), ((3609, 3634), 're.compile', 're.compile', (['KPATH_PATTERN'], {}), '(KPATH_PATTERN)\n', (3619, 3634), False, 'import re\n'), ((4747, 4783), 're.compile', 're.compile', (["('^' + KSYM_PATTERN + '$')"], {}), "('^' + KSYM_PATTERN + '$')\n", (4757, 4783), False, 'import re\n'), ((7133, 7166), 'numpy.array', 'np.array', (['kvec'], {'dtype': 'Prec._dtype'}), '(kvec, dtype=Prec._dtype)\n', (7141, 7166), True, 'import numpy as np\n'), ((7244, 7273), 'numpy.linalg.norm', 'np.linalg.norm', (['dkvec'], {'axis': '(1)'}), '(dkvec, axis=1)\n', (7258, 7273), True, 'import numpy as np\n'), ((7430, 7474), 'numpy.sum', 'np.sum', (['(dkvec[:-1, :] * dkvec[1:, :])'], {'axis': '(1)'}), '(dkvec[:-1, :] * dkvec[1:, :], axis=1)\n', (7436, 7474), True, 'import numpy as np\n'), ((706, 731), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (721, 731), False, 'import os\n'), ((1233, 1306), 'mykit.core._control.parse_to_tagdict', 'parse_to_tagdict', (['self._kmeshTags', 'self._kmeshTagMaps', 'progName'], {}), '(self._kmeshTags, self._kmeshTagMaps, progName, **kmtags)\n', (1249, 1306), False, 'from mykit.core._control import build_tag_map_obj, extract_from_tagdict, parse_to_tagdict, prog_mapper, tags_mapping\n'), ((2056, 2144), 'mykit.core._control.extract_from_tagdict', 'extract_from_tagdict', (['kmesh_control', 'self._kmeshTags', 'progName', '*tags'], {'delete': 'delete'}), '(kmesh_control, self._kmeshTags, progName, *tags,\n delete=delete)\n', (2076, 2144), False, 'from mykit.core._control import build_tag_map_obj, extract_from_tagdict, parse_to_tagdict, prog_mapper, tags_mapping\n'), ((2588, 2651), 'mykit.core._control.tags_mapping', 'tags_mapping', (['cls._kmeshTagMaps', '_pF', '_pF', '*tags'], {'getAll': 'getAll'}), '(cls._kmeshTagMaps, _pF, _pF, *tags, getAll=getAll)\n', (2600, 2651), False, 'from mykit.core._control import build_tag_map_obj, extract_from_tagdict, parse_to_tagdict, prog_mapper, tags_mapping\n'), ((5445, 5485), 're.match', 're.match', (["('^' + KSYM_PATTERN + '$')", 'ksym'], {}), "('^' + KSYM_PATTERN + '$', ksym)\n", (5453, 5485), False, 'import re\n'), ((5579, 5594), 'numpy.shape', 'np.shape', (['coord'], {}), '(coord)\n', (5587, 5594), True, 'import numpy as np\n'), ((6928, 6942), 'numpy.shape', 'np.shape', (['kvec'], {}), '(kvec)\n', (6936, 6942), True, 'import numpy as np\n'), ((7313, 7332), 'numpy.isclose', 'np.isclose', (['n[i]', '(0)'], {}), '(n[i], 0)\n', (7323, 7332), True, 'import numpy as np\n'), ((3692, 3716), 're.match', 're.match', (['linePat', 'kline'], {}), '(linePat, kline)\n', (3700, 3716), False, 'import re\n'), ((7531, 7556), 'numpy.isclose', 'np.isclose', (['dp[ed - 2]', '(1)'], {}), '(dp[ed - 2], 1)\n', (7541, 7556), True, 'import numpy as np\n'), ((5052, 5074), 're.match', 're.match', (['symPat', 'ksym'], {}), '(symPat, ksym)\n', (5060, 5074), False, 'import re\n')]
|
from pathlib import Path
from tqdm.auto import tqdm
import numpy as np
import pickle
import os
from astropy.table import Table
import pickle as pkl
from multiprocessing import Pool, Manager
from threading import Lock
from .cones import make_cone_density
from .utils import load_data
from .cones import make_cone
from .constants import x_isgri, x_picsit
class BaseDataset:
"""
    Base Dataset class
"""
def __init__(self):
self.basedir = Path(__file__).parent.joinpath('data')
def generate(self, src_dir):
"""
        Generate the dataset from a source directory of *.npy files.
:param src_dir: path
"""
self.data = None
raise NotImplementedError("This is only the base class, supercharge this method please")
return self.data
def save(self, filename='basefile.pickle'):
self.filepath = self.basedir.joinpath(filename)
with open(self.filepath, 'wb') as file:
pickle.dump(self.data, file)
def load(self, filename=None):
if filename is not None:
self.filepath = self.basedir.joinpath(filename)
with open(self.filepath, 'rb') as file:
self.data = pickle.load(file)
"""Generation of the Cone Density Dataset with a single source
"""
class SingleSourceDensityDataset:
target_filename = "single_source_density_dataset.pkl"
source_directory = "save_Compton"
max_threads = 1
n = 100
def __init__(self, filename=None):
if filename is not None:
self.filename = filename
pass
def generate(self):
"""Create the datafile
"""
# get cone density data for all files in dataset
manager = Manager()
data = manager.list()
labels = manager.list()
lock = Lock()
def get_data(filename):
for i in range(self.n):
print("Loading from {} {}".format(filename, i))
if filename.endswith(".npy"):
_, theta_source, _, phi_source = filename.replace(".npy", "").split("_")
lock.acquire()
labels.append([float(theta_source), float(phi_source)])
data.append(make_cone_density(theta_source, phi_source, x_isgri, x_picsit, progress=False,
n_events=[100, 2000]))
lock.release()
if len(data) % 100 == 0:
print("Aquiring lock")
lock.acquire()
# load data already available
                    x, y = pkl.load(open(self.target_filename, "rb"))
new_x, new_y = np.array(list(data)), np.array(list(labels))
x = np.concatenate((x, new_x), axis=0)
y = np.concatenate((y, new_y), axis=0)
pkl.dump((x, y), open(self.target_filename, "wb"))
# clear the data and label lists
data.clear()
labels.clear()
lock.release()
print("Realeased lock")
with Pool(self.max_threads, maxtasksperchild=10) as p:
for t in p.imap(get_data, os.listdir("save_Compton"), chunksize=365):
pass
@staticmethod
def load(filename=None):
"""Load the dataset from the pickle file
"""
        if filename is not None:
            return pkl.load(open(filename, "rb"))
        return pkl.load(open(SingleSourceDensityDataset.target_filename, "rb"))
|
[
"os.listdir",
"pickle.dump",
"pathlib.Path",
"threading.Lock",
"pickle.load",
"multiprocessing.Pool",
"numpy.concatenate",
"multiprocessing.Manager"
] |
[((1710, 1719), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (1717, 1719), False, 'from multiprocessing import Pool, Manager\n'), ((1797, 1803), 'threading.Lock', 'Lock', ([], {}), '()\n', (1801, 1803), False, 'from threading import Lock\n'), ((964, 992), 'pickle.dump', 'pickle.dump', (['self.data', 'file'], {}), '(self.data, file)\n', (975, 992), False, 'import pickle\n'), ((1194, 1211), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1205, 1211), False, 'import pickle\n'), ((3122, 3165), 'multiprocessing.Pool', 'Pool', (['self.max_threads'], {'maxtasksperchild': '(10)'}), '(self.max_threads, maxtasksperchild=10)\n', (3126, 3165), False, 'from multiprocessing import Pool, Manager\n'), ((3210, 3236), 'os.listdir', 'os.listdir', (['"""save_Compton"""'], {}), "('save_Compton')\n", (3220, 3236), False, 'import os\n'), ((462, 476), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'from pathlib import Path\n'), ((2743, 2777), 'numpy.concatenate', 'np.concatenate', (['(x, new_x)'], {'axis': '(0)'}), '((x, new_x), axis=0)\n', (2757, 2777), True, 'import numpy as np\n'), ((2802, 2836), 'numpy.concatenate', 'np.concatenate', (['(y, new_y)'], {'axis': '(0)'}), '((y, new_y), axis=0)\n', (2816, 2836), True, 'import numpy as np\n')]
|
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
import numpy as np
import pandas as pd
import pickle as pi
class Classifier:
def __init__(self):
#Array für alle Ergebnisse
self.ergebnis = []
def train_models(self, X_train, X_test, y_train, y_test, models):
for self.model in models:
#-----------------------
#Knn-Classifier
#-----------------------
if self.model == 'knn':
                # Determine the optimal kNN classifier
error = []
for i in range(1, 40):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred_i = knn.predict(X_test)
error.append(np.mean(pred_i != y_test))
                # Train the kNN classifier
knnclf = KNeighborsClassifier(n_neighbors=7)
knnclf.fit(X_train, y_train)
                # Determine the kNN classifier's accuracy
score = knnclf.score(X_test,y_test)
self.ergebnis.append(['knn-classifier', score, knnclf])
#-----------------------
#-----------------------
#Decision Tree
#-----------------------
elif self.model == 'dt':
                # use class_weight for DT and RF
                # Determine the optimal decision tree
                # Decision tree parameters to test
dt = DecisionTreeClassifier()
tree_para = {'criterion':['gini','entropy'],'max_depth':[i for i in range(1,20)], 'min_samples_split':[i for i in range (2,20)]}
#GridSearchCV
grd_clf = GridSearchCV(dt, tree_para, cv=5)
grd_clf.fit(X_train, y_train)
                # Hand over the best decision tree found
dt_clf = grd_clf.best_estimator_
score = dt_clf.score(X_test,y_test)
self.ergebnis.append(['decision tree', score, dt_clf])
#-----------------------
#-----------------------
#Random Forest
#-----------------------
elif self.model == 'rf':
#rf = RandomForestClassifier(max_depth=8, criterion="entropy", min_samples_split=9)
rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train,y_train)
score = rf.score(X_test,y_test)
self.ergebnis.append(['random forest', score, rf])
#-----------------------
#-----------------------
#Support Vector Machine
#-----------------------
elif self.model == 'svm':
svm = SVC(kernel = 'poly')
svm.fit(X_train, y_train)
score = svm.score(X_test,y_test)
self.ergebnis.append(['support vector machine', score, svm])
#-----------------------
#MLP
#-----------------------
elif self.model == 'mlp':
mlp = MLPClassifier(hidden_layer_sizes=[100,100], max_iter=5000, solver='sgd'
, learning_rate='adaptive', learning_rate_init=0.01, n_iter_no_change=200, early_stopping=True)
mlp.fit(X_train, y_train)
score = mlp.score(X_test,y_test)
self.ergebnis.append(['multi-layer perceptron', score, mlp])
print("iterations: {}; layers: {}; loss: {}".format(mlp.n_iter_, mlp.n_layers_, mlp.loss_))
epochs = np.linspace(1,mlp.n_iter_, mlp.n_iter_)
#plt.plot(epochs, mlp.loss_curve_, label="Fehlerfunktion")
#plt.plot(weight,2* weight,label="Ableitung")
#plt.show()
return self.ergebnis
|
[
"sklearn.model_selection.GridSearchCV",
"numpy.mean",
"sklearn.neural_network.MLPClassifier",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"numpy.linspace",
"sklearn.svm.SVC"
] |
[((1092, 1127), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(7)'}), '(n_neighbors=7)\n', (1112, 1127), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((832, 867), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'i'}), '(n_neighbors=i)\n', (852, 867), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1722, 1746), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1744, 1746), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1950, 1983), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['dt', 'tree_para'], {'cv': '(5)'}), '(dt, tree_para, cv=5)\n', (1962, 1983), False, 'from sklearn.model_selection import GridSearchCV\n'), ((996, 1021), 'numpy.mean', 'np.mean', (['(pred_i != y_test)'], {}), '(pred_i != y_test)\n', (1003, 1021), True, 'import numpy as np\n'), ((2560, 2600), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (2582, 2600), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2964, 2982), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""poly"""'}), "(kernel='poly')\n", (2967, 2982), False, 'from sklearn.svm import SVC\n'), ((3305, 3480), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '[100, 100]', 'max_iter': '(5000)', 'solver': '"""sgd"""', 'learning_rate': '"""adaptive"""', 'learning_rate_init': '(0.01)', 'n_iter_no_change': '(200)', 'early_stopping': '(True)'}), "(hidden_layer_sizes=[100, 100], max_iter=5000, solver='sgd',\n learning_rate='adaptive', learning_rate_init=0.01, n_iter_no_change=200,\n early_stopping=True)\n", (3318, 3480), False, 'from sklearn.neural_network import MLPClassifier\n'), ((3790, 3830), 'numpy.linspace', 'np.linspace', (['(1)', 'mlp.n_iter_', 'mlp.n_iter_'], {}), '(1, mlp.n_iter_, mlp.n_iter_)\n', (3801, 3830), True, 'import numpy as np\n')]
|
def brute_force_root_finder(f, a, b, n):
from numpy import linspace
x = linspace(a, b, n)
y = f(x)
roots = []
for i in range(n-1):
if y[i]*y[i+1] < 0:
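            # sign change between neighbouring samples: estimate the root by linear interpolation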
root = x[i] - (x[i+1] - x[i])/(y[i+1] - y[i])*y[i]
roots.append(root)
elif y[i] == 0:
root = x[i]
roots.append(root)
return roots
def demo():
from numpy import exp, cos
roots = brute_force_root_finder(
lambda x: exp(-x**2)*cos(4*x), 0, 4, 1001)
if roots:
print(roots)
else:
print('Could not find any roots')
if __name__ == '__main__':
demo()
|
[
"numpy.exp",
"numpy.linspace",
"numpy.cos"
] |
[((82, 99), 'numpy.linspace', 'linspace', (['a', 'b', 'n'], {}), '(a, b, n)\n', (90, 99), False, 'from numpy import linspace\n'), ((491, 503), 'numpy.exp', 'exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (494, 503), False, 'from numpy import exp, cos\n'), ((502, 512), 'numpy.cos', 'cos', (['(4 * x)'], {}), '(4 * x)\n', (505, 512), False, 'from numpy import exp, cos\n')]
|
import geo.geo_utils
import geo.raster_lookup
from progress.null_callback import NullCallback
from progress.progress import Progress
import glob
import numpy as np
class Heightmap:
def __init__(self):
self.pixels = []
self.heightmap = None
self.nodata_fillin = 0
self.out_of_bounds_count = 0
self.nodata_count = 0
def createFromRaster(self,
raster_lookup,
geo_transform,
heightmap_size,
progress_callback=NullCallback()):
pixel_count = heightmap_size[0] * heightmap_size[1]
self.pixels = [0 for i in range(pixel_count)]
for y in range(heightmap_size[1]):
for x in range(heightmap_size[0]):
geo_pos = geo_transform.transformPixelLocationToGeoLocation(x, y)
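                # row-major index of this pixel in the flat pixel list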
pixel_index = x + y*heightmap_size[0]
if raster_lookup.locationInBounds(geo_pos[0], geo_pos[1]):
elevation = raster_lookup.getElevationAtPosition(geo_pos[0], geo_pos[1])
if elevation is not None:
self.pixels[pixel_index] = elevation
else:
self.pixels[pixel_index] = self.nodata_fillin
self.nodata_count += 1
else:
self.out_of_bounds_count += 1
progress_callback(Progress(progress=pixel_index + 1,
message="Creating heightmap",
max_progress=heightmap_size[0] * heightmap_size[1],))
raster_matrix = np.array(self.pixels).reshape(heightmap_size)
self.heightmap = raster_matrix
return self
def pixelCount(self):
return self.heightmap.shape[0] * self.heightmap.shape[1]
def getStatistics(self):
return {
'out_of_bounds_percentage':
100.0 * float(self.out_of_bounds_count) / self.pixelCount(),
'nodata_percentage':
100.0 * float(self.nodata_count) / self.pixelCount()
}
def loadFromFile(self, file_name):
self.heightmap = np.load(file_name)
self.pixels = list(self.heightmap.reshape(self.heightmap.shape[0] * self.heightmap.shape[1]))
return self
def writeToFile(self, file_name):
if self.heightmap is None or len(self.pixels) <= 0:
raise Exception("Heigtmap is not loaded")
np.save(file_name, self.heightmap)
def getWidth(self):
return self.heightmap.shape[0]
def getHeight(self):
return self.heightmap.shape[1]
def getHeightmap(self):
return self.heightmap
|
[
"progress.null_callback.NullCallback",
"numpy.array",
"numpy.load",
"progress.progress.Progress",
"numpy.save"
] |
[((557, 571), 'progress.null_callback.NullCallback', 'NullCallback', ([], {}), '()\n', (569, 571), False, 'from progress.null_callback import NullCallback\n'), ((2163, 2181), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (2170, 2181), True, 'import numpy as np\n'), ((2466, 2500), 'numpy.save', 'np.save', (['file_name', 'self.heightmap'], {}), '(file_name, self.heightmap)\n', (2473, 2500), True, 'import numpy as np\n'), ((1624, 1645), 'numpy.array', 'np.array', (['self.pixels'], {}), '(self.pixels)\n', (1632, 1645), True, 'import numpy as np\n'), ((1440, 1560), 'progress.progress.Progress', 'Progress', ([], {'progress': '(pixel_index + 1)', 'message': '"""Creating heightmap"""', 'max_progress': '(heightmap_size[0] * heightmap_size[1])'}), "(progress=pixel_index + 1, message='Creating heightmap',\n max_progress=heightmap_size[0] * heightmap_size[1])\n", (1448, 1560), False, 'from progress.progress import Progress\n')]
|
from distutils.core import setup
from Cython.Build import cythonize
setup(
name = "tax",
ext_modules = cythonize('tax.pyx'),
script_name = 'setup.py',
script_args = ['build_ext', '--inplace']
)
import tax
import numpy as np
print(tax.tax(np.ones(10)))
|
[
"Cython.Build.cythonize",
"numpy.ones"
] |
[((112, 132), 'Cython.Build.cythonize', 'cythonize', (['"""tax.pyx"""'], {}), "('tax.pyx')\n", (121, 132), False, 'from Cython.Build import cythonize\n'), ((255, 266), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (262, 266), True, 'import numpy as np\n')]
|
import os, pdb
# ______________________________________NLPDV____________________________________
# _______________________________________________________________________
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from transformers import *
import _pickle as pkl
import shutil
import numpy as np
from tqdm import trange, tqdm
# _______________________________________________________________________
# ______________________________________NLPDV____________________________________
# GPUs 0 and 2 on NLP9 are problematic, as are GPUs 3 and 4 on nlp8
CUDA_VISIBLE_DEVICES = [0,1,3,4,5,6,7]
BASE_DATA_DIR = '/local/rizwan/UDTree/'
run_file = './examples/run_multi_domain_pos.py'
model_type = 'bert'
train_model_name_or_path = 'bert-base-multilingual-cased' # 'bert-large-uncased-whole-word-masking'
do_lower_case = False
num_train_epochs = 4.0
num_eval_epochs = 1.0
per_gpu_eval_batch_size = 32
per_gpu_train_batch_size = 32
learning_rate = 5e-5
max_seq_length = 128
fp16 = True
overwrite_cache = False
evaluate_during_training = True
#batch sizes: 8, 16, 32, 64, 128 (for max seq 128, max batch size is 32)
#learning rates: 3e-4, 1e-4, 5e-5, 3e-5, 2e-5
'''
Runs:
'''
ALL_EVAL_TASKS = [
'UD_ARABIC',
'UD_BASQUE',
'UD_BULGARIAN',
'UD_CATALAN',
'UD_CHINESE',
'UD_CROATIAN',
'UD_CZECH',
'UD_DANISH',
'UD_DUTCH',
'UD_ENGLISH',
'UD_FINNISH',
'UD_FRENCH',
'UD_GERMAN',
'UD_HEBREW',
'UD_HINDI',
'UD_INDONESIAN',
'UD_ITALIAN',
'UD_JAPANESE',
'UD_KOREAN',
'UD_NORWEGIAN',
'UD_PERSIAN',
'UD_POLISH',
'UD_PORTUGUESE',
'UD_ROMANIAN',
'UD_RUSSIAN',
'UD_SERBIAN',
'UD_SLOVAK',
'UD_SLOVENIAN',
'UD_SPANISH',
'UD_SWEDISH',
'UD_TURKISH']
shpley_removals = {
'UD_ARABIC': [0, 3, 7, 8, 11, 13, 16, 21, 29],
'UD_BASQUE': [17, 19],
'UD_BULGARIAN': [3, 19], #[3, 13, 17, 19],
'UD_CATALAN': [ 0, 3, 17, 19, 20],
'UD_CHINESE':[5, 13, 20, 25, 26],
'UD_CROATIAN': [13, 17, 19],
'UD_CZECH': [13, 17, 19],
'UD_DANISH': [13],
'UD_DUTCH': [17,19],
'UD_ENGLISH': [0, 3, 5, 6, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 26, 29],
'UD_FINNISH': [13, 17, 19],
'UD_FRENCH': [17],
'UD_GERMAN': [ 17, 19], # Try with [3, 5, 16, 17, 19, 20]
'UD_HEBREW': [17],
'UD_HINDI': [ 0, 17, 19],
'UD_INDONESIAN': [ 0, 13, 17, 19],
'UD_ITALIAN': [ 5, 17, 19, 20],
'UD_JAPANESE': [19],
'UD_KOREAN': [ 0, 13, 19],
'UD_NORWEGIAN': [0, 13, 19],
'UD_PERSIAN': [4, 17, 19],
'UD_POLISH': [13, 17, 19],
'UD_PORTUGUESE': [17],
'UD_ROMANIAN': [ 13, 17, 19],
'UD_RUSSIAN': [ 13, 17, 19],
'UD_SERBIAN': [ 13, 17, 19],
'UD_SLOVAK': [ 13, 17, 19],
'UD_SLOVENIAN': [17],
'UD_SPANISH':[5, 17, 19],
'UD_SWEDISH':[17],
'UD_TURKISH': [13, 17, 19]
}
all_acc_shapley = {eval_task_name:[] for eval_task_name in ALL_EVAL_TASKS}
all_acc_baseline = {eval_task_name:[] for eval_task_name in ALL_EVAL_TASKS}
all_acc_baseline_s = {eval_task_name:[] for eval_task_name in ALL_EVAL_TASKS}
is_tune=True
BASELINES_S = 'baseline-s'
if not is_tune: num_train_epochs=4.0
for eval_task_name in ['UD_FINNISH']:
if len(shpley_removals[eval_task_name])<1: continue
for i in range(1):
seed = 43
np.random.seed(seed)
for is_few_shot in [False]:
best_shapley_learning_rate = None
best_shapley_per_gpu_train_batch_size = None
best_baseline_learning_rate = None
best_baseline_per_gpu_train_batch_size = None
best_baseline_s_learning_rate = None
best_baseline_s_per_gpu_train_batch_size = None
BEST_BASELINE_ACC = None
BEST_SHAPLEY_ACC = None
for is_Shapley in [ BASELINES_S,]:
best_learning_rate = None
best_per_gpu_train_batch_size = None
best_acc = -1
if BEST_BASELINE_ACC and BEST_SHAPLEY_ACC and BEST_BASELINE_ACC > BEST_SHAPLEY_ACC: continue
# _______________________________________________________________________
# ______________________________________NLPDV____________________________________
ALL_BINARY_TASKS = [
'UD_ARABIC',
'UD_BASQUE',
'UD_BULGARIAN',
'UD_CATALAN',
'UD_CHINESE',
'UD_CROATIAN',
'UD_CZECH',
'UD_DANISH',
'UD_DUTCH',
'UD_ENGLISH',
'UD_FINNISH',
'UD_FRENCH',
'UD_GERMAN',
'UD_HEBREW',
'UD_HINDI',
'UD_INDONESIAN',
'UD_ITALIAN',
'UD_JAPANESE',
'UD_KOREAN',
'UD_NORWEGIAN',
'UD_PERSIAN',
'UD_POLISH',
'UD_PORTUGUESE',
'UD_ROMANIAN',
'UD_RUSSIAN',
'UD_SERBIAN',
'UD_SLOVAK',
'UD_SLOVENIAN',
'UD_SPANISH',
'UD_SWEDISH',
'UD_TURKISH']
DOMAIN_TRANSFER = True
# _______________________________________________________________________
# ______________________________________NLPDV____________________________________
if eval_task_name in ALL_BINARY_TASKS: ALL_BINARY_TASKS.remove(eval_task_name)
if is_Shapley==BASELINES_S:
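                # baseline-s: randomly drop as many source domains as the Shapley analysis removed for this task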
raddom_domains = np.random.choice(np.arange(len(ALL_BINARY_TASKS)), \
len(shpley_removals[eval_task_name]), replace=False)
learning_rates = [ 2e-5, 3e-5, 5e-5]
bz_szs = [ 16, 32]
for learning_rate in learning_rates:
for per_gpu_train_batch_size in bz_szs:
train_task_name = eval_task_name
if is_Shapley=='LOO': train_output_dir = 'temp/' + train_task_name + '_output_LOO_'+str(per_gpu_train_batch_size) + '_'+str(learning_rate) #+str(seed)+'/'
elif is_Shapley==True:
train_output_dir = 'temp/' + train_task_name + '_output_Shapley_'+str(per_gpu_train_batch_size) + '_'+str(learning_rate) #+str(seed)+'/'
elif is_Shapley == BASELINES_S:
train_output_dir = 'temp/' + train_task_name + '_output_baseline-s_' + str(
per_gpu_train_batch_size) + '_' + str(learning_rate) #+str(seed)+'/'
else:
train_output_dir = 'temp/' + train_task_name + '_output_baseline_'+str(per_gpu_train_batch_size) + '_'+str(learning_rate) #+str(seed)+'/'
eval_output_dir = train_output_dir +'/best'
train_data_dir = BASE_DATA_DIR
eval_data_dir = BASE_DATA_DIR
directory = eval_output_dir
if not os.path.exists(train_output_dir) :
os.makedirs(directory)
os.makedirs(os.path.join(directory, 'plots'))
if not os.path.exists(directory) :
os.makedirs(directory)
os.makedirs(os.path.join(directory, 'plots'))
def write_indices_to_delete(indices_to_delete_file_path, ids):
with open(indices_to_delete_file_path, "w") as writer:
print(f"***** Writing ids to {str(indices_to_delete_file_path)} *****", flush=True)
for id in ids:
writer.write("%s " % (id))
indices_to_delete_file_path = directory + '/indices_to_delete_file_path' + '.json'
if is_Shapley == True and eval_task_name != 'UD_TURKISH':
write_indices_to_delete(indices_to_delete_file_path, shpley_removals[eval_task_name])
if is_Shapley == BASELINES_S and eval_task_name != 'UD_TURKISH':
print('-eval_task_name: ', eval_task_name, flush=True)
print('raddom_removal_domains: ', raddom_domains,\
'shapley removals: ', shpley_removals[eval_task_name], flush=True)
write_indices_to_delete(indices_to_delete_file_path, raddom_domains )
# if is_Shapley == False and eval_task_name == 'UD_ENGLISH':
# write_indices_to_delete(indices_to_delete_file_path,\
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 28, 29])
# else is_Shapley: continue
run_command = "CUDA_VISIBLE_DEVICES=" + str(CUDA_VISIBLE_DEVICES[0])
for i in CUDA_VISIBLE_DEVICES[1:]:
run_command += ',' + str(i)
run_command += ' python '
if len(CUDA_VISIBLE_DEVICES) > 1: run_command += '-m torch.distributed.launch --nproc_per_node ' \
+ str(len(CUDA_VISIBLE_DEVICES))
run_command += ' ' + run_file + ' ' + ' --model_type ' + model_type + \
' --max_seq_length ' + str(max_seq_length) + ' --per_gpu_eval_batch_size=' + str(
per_gpu_eval_batch_size) + \
' --per_gpu_train_batch_size=' + str(per_gpu_train_batch_size) + ' --learning_rate ' + str(learning_rate) \
+ ' --overwrite_output_dir '
if do_lower_case: run_command += '--do_lower_case '
if fp16: run_command += ' --fp16 '
if overwrite_cache:
run_command += ' --overwrite_cache '
if evaluate_during_training: run_command += ' --evaluate_during_training '
# For training:
train_run_command = run_command + ' --do_train --task_name ' + train_task_name + \
' --data_dir ' + train_data_dir + ' --output_dir ' + \
train_output_dir + ' --model_name_or_path ' + train_model_name_or_path
if is_Shapley: train_run_command += ' --indices_to_delete_file_path ' + indices_to_delete_file_path
if is_few_shot : train_run_command += ' --is_few_shot'
command = train_run_command + ' --num_train_epochs 1'
print(command, flush=True)
if not os.path.exists(os.path.join(eval_output_dir,"pytorch_model.bin")):
os.system(command)
# initial Eval on whole dataset
# For eval:
run_command = "CUDA_VISIBLE_DEVICES=" + str(CUDA_VISIBLE_DEVICES[0])
run_command += ' python '
run_command += ' ' + run_file + ' ' + ' --model_type ' + model_type + \
' --max_seq_length ' + str(max_seq_length) + ' --per_gpu_eval_batch_size=' + str(
per_gpu_eval_batch_size) + \
' --per_gpu_train_batch_size=' + str(
per_gpu_train_batch_size) + ' --learning_rate ' + str(learning_rate) \
+ ' --overwrite_output_dir '
if do_lower_case: run_command += '--do_lower_case '
if fp16: run_command += ' --fp16 '
if overwrite_cache:
run_command += ' --overwrite_cache '
if evaluate_during_training: run_command += ' --evaluate_during_training '
eval_run_command = run_command + ' --do_eval --task_name ' + eval_task_name + \
' --data_dir ' + eval_data_dir + ' --output_dir ' + eval_output_dir + \
' --model_name_or_path ' + eval_output_dir
command = eval_run_command
print(command, flush=True)
os.system(command)
try:
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "r") as reader:
for line in reader:
line = line.strip().split()
key = line[0]
value = line[-1]
if key in ['acc']:
acc = float(value)
except:
acc = 0
print('-'*100, flush=True)
print("Task: ", train_task_name, flush=True)
print("learning_rate: ", learning_rate, flush=True)
print("per_gpu_train_batch_size: ", per_gpu_train_batch_size, flush=True)
print("Acc: ", acc, flush=True)
print("Shapely: ", str(is_Shapley), flush=True)
print('-'*100, flush=True)
if is_Shapley==True:
all_acc_shapley[eval_task_name].append(acc)
elif is_Shapley==False:
all_acc_baseline[eval_task_name].append(acc)
else:
all_acc_baseline_s[eval_task_name].append(acc)
if acc>best_acc:
best_per_gpu_train_batch_size = per_gpu_train_batch_size
best_learning_rate = learning_rate
best_acc=acc
print('-'*100, flush=True)
print('-Task: ', eval_task_name, flush=True)
print('-is_Shapley: ', is_Shapley, flush=True)
print('-best lr: ', best_learning_rate, '\n-bz sz: ', best_per_gpu_train_batch_size, \
'\n-best acc: ', best_acc, '\n-all_acc_shapley: ', all_acc_shapley, \
'\n-all_acc_shapley_baseline_s: ',all_acc_baseline_s,'\n- all_acc_baseline: ', all_acc_baseline, flush=True)
print('-'*100, flush=True)
# For Test:
train_task_name = eval_task_name
if is_Shapley == 'LOO':
train_output_dir = 'temp/' + train_task_name + '_output_LOO_' + str(
best_per_gpu_train_batch_size) + '_' + str(
best_learning_rate) # +str(seed)+'/'
elif is_Shapley == True:
train_output_dir = 'temp/' + train_task_name + '_output_Shapley_' + str(
best_per_gpu_train_batch_size) + '_' + str(best_learning_rate) # +str(seed)+'/'
elif is_Shapley == BASELINES_S:
train_output_dir = 'temp/' + train_task_name + '_output_baseline-s_' + str(
best_per_gpu_train_batch_size) + '_' + str(best_learning_rate) # +str(seed)+'/'
else:
train_output_dir = 'temp/' + train_task_name + '_output_baseline_' + str(
best_per_gpu_train_batch_size) + '_' + str(best_learning_rate) # +str(seed)+'/'
eval_output_dir = train_output_dir + '/best/'
run_command = "CUDA_VISIBLE_DEVICES=" + str(CUDA_VISIBLE_DEVICES[0])
for i in CUDA_VISIBLE_DEVICES[1:]:
run_command += ',' + str(i)
run_command += ' python '
if len(CUDA_VISIBLE_DEVICES) > 1: run_command += '-m torch.distributed.launch --nproc_per_node ' \
+ str(len(CUDA_VISIBLE_DEVICES))
run_command += ' ' + run_file + ' ' + ' --model_type ' + model_type + \
' --max_seq_length ' + str(max_seq_length) + ' --per_gpu_eval_batch_size=' + str(
per_gpu_eval_batch_size) + \
' --per_gpu_train_batch_size=' + str(
best_per_gpu_train_batch_size) + ' --learning_rate ' + str(
best_learning_rate) \
+ ' --overwrite_output_dir '
if do_lower_case: run_command += '--do_lower_case '
if fp16: run_command += ' --fp16 '
if overwrite_cache:
run_command += ' --overwrite_cache '
if evaluate_during_training: run_command += ' --evaluate_during_training '
train_run_command = run_command + ' --do_train --task_name ' + train_task_name + \
' --data_dir ' + train_data_dir + ' --output_dir ' + \
train_output_dir + ' --model_name_or_path ' + train_model_name_or_path
# For eval:
run_command = "CUDA_VISIBLE_DEVICES=" + str(CUDA_VISIBLE_DEVICES[0])
run_command += ' python '
run_command += ' ' + run_file + ' ' + ' --model_type ' + model_type + \
' --max_seq_length ' + str(max_seq_length) + ' --per_gpu_eval_batch_size=' + str(
per_gpu_eval_batch_size) + \
' --per_gpu_train_batch_size=' + str(
best_per_gpu_train_batch_size) + ' --learning_rate ' + str(
best_learning_rate) \
+ ' --overwrite_output_dir '
if do_lower_case: run_command += '--do_lower_case '
if fp16: run_command += ' --fp16 '
if overwrite_cache:
run_command += ' --overwrite_cache '
eval_run_command = run_command + ' --do_predict --task_name ' + eval_task_name + \
' --data_dir ' + eval_data_dir + ' --output_dir ' + eval_output_dir + \
' --model_name_or_path ' + eval_output_dir
indices_to_delete_file_path = eval_output_dir + '/indices_to_delete_file_path' + '.json'
if is_Shapley: train_run_command += ' --indices_to_delete_file_path ' + indices_to_delete_file_path
command = train_run_command + ' --num_train_epochs ' + str(num_train_epochs)
print(command, flush=True)
os.system(command)
# initial Eval on whole dataset
command = eval_run_command
print(command, flush=True)
os.system(command)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "r") as reader:
for line in reader:
line = line.strip().split()
key = line[0]
value = line[-1]
if key in ['acc']:
acc = float(value)
print('-' * 100, flush=True)
print("Task: ", train_task_name, flush=True)
print("best_learning_rate: ", best_learning_rate, flush=True)
print("best_per_gpu_train_batch_size: ", best_per_gpu_train_batch_size, flush=True)
print("BEST TEST Acc: ", acc, flush=True)
print("Shapely: ", str(is_Shapley), flush=True)
print('-' * 100, flush=True)
if is_Shapley==True:
best_shapley_learning_rate = best_learning_rate
best_shapley_per_gpu_train_batch_size = best_per_gpu_train_batch_size
BEST_SHAPLEY_ACC = acc
elif is_Shapley==BASELINES_S:
best_baseline_s_learning_rate = best_learning_rate
best_baseline_s_per_gpu_train_batch_size = best_per_gpu_train_batch_size
else:
best_baseline_learning_rate = best_learning_rate
best_baseline_per_gpu_train_batch_size = best_per_gpu_train_batch_size
BEST_BASELINE_ACC = acc
best_shapley_dir = 'temp/'+eval_task_name+'_output_Shapley_'+str(best_shapley_per_gpu_train_batch_size)+'_'+\
str(best_shapley_learning_rate)+'/best/'
gold = best_shapley_dir+'test_gold.txt'
shapley = best_shapley_dir+'test_predictions.txt'
baseline = 'temp/'+eval_task_name+'_output_baseline_'+str(best_baseline_per_gpu_train_batch_size)+'_'+\
str(best_baseline_learning_rate)+'/best/'+'test_predictions.txt'
baseline_s = 'temp/'+eval_task_name+'_output_baseline-s_'+str(best_baseline_s_per_gpu_train_batch_size)+'_'+\
str(best_baseline_s_learning_rate)+'/best/'+'test_predictions.txt'
print('-'*100, flush=True)
            print('Bootstrap paired test of Shapley with baseline!', flush=True)
command = "python script_t_test.py "+ gold + ' '+ shapley + ' ' + baseline
print(command, flush=True)
print('-' * 50, flush=True)
os.system(command)
print('-' * 50, flush=True)
print('-' * 50, flush=True)
            print('Bootstrap paired test of Shapley with baseline-s!', flush=True)
command = "python script_t_test.py " + gold + ' ' + shapley + ' ' + baseline_s
print(command, flush=True)
print('-' * 50, flush=True)
os.system(command)
print('-' * 100, flush=True)
|
[
"os.path.exists",
"os.makedirs",
"matplotlib.use",
"os.path.join",
"numpy.random.seed",
"os.system"
] |
[((191, 212), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (205, 212), False, 'import matplotlib\n'), ((3438, 3458), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3452, 3458), True, 'import numpy as np\n'), ((22098, 22116), 'os.system', 'os.system', (['command'], {}), '(command)\n', (22107, 22116), False, 'import os, pdb\n'), ((22461, 22479), 'os.system', 'os.system', (['command'], {}), '(command)\n', (22470, 22479), False, 'import os, pdb\n'), ((19369, 19387), 'os.system', 'os.system', (['command'], {}), '(command)\n', (19378, 19387), False, 'import os, pdb\n'), ((19540, 19558), 'os.system', 'os.system', (['command'], {}), '(command)\n', (19549, 19558), False, 'import os, pdb\n'), ((19595, 19644), 'os.path.join', 'os.path.join', (['eval_output_dir', '"""eval_results.txt"""'], {}), "(eval_output_dir, 'eval_results.txt')\n", (19607, 19644), False, 'import os, pdb\n'), ((12973, 12991), 'os.system', 'os.system', (['command'], {}), '(command)\n', (12982, 12991), False, 'import os, pdb\n'), ((7397, 7429), 'os.path.exists', 'os.path.exists', (['train_output_dir'], {}), '(train_output_dir)\n', (7411, 7429), False, 'import os, pdb\n'), ((7460, 7482), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (7471, 7482), False, 'import os, pdb\n'), ((7589, 7614), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (7603, 7614), False, 'import os, pdb\n'), ((7645, 7667), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (7656, 7667), False, 'import os, pdb\n'), ((11422, 11440), 'os.system', 'os.system', (['command'], {}), '(command)\n', (11431, 11440), False, 'import os, pdb\n'), ((13069, 13118), 'os.path.join', 'os.path.join', (['eval_output_dir', '"""eval_results.txt"""'], {}), "(eval_output_dir, 'eval_results.txt')\n", (13081, 13118), False, 'import os, pdb\n'), ((7523, 7555), 'os.path.join', 'os.path.join', (['directory', '"""plots"""'], {}), "(directory, 'plots')\n", (7535, 7555), False, 'import os, pdb\n'), ((7708, 7740), 'os.path.join', 'os.path.join', (['directory', '"""plots"""'], {}), "(directory, 'plots')\n", (7720, 7740), False, 'import os, pdb\n'), ((11342, 11392), 'os.path.join', 'os.path.join', (['eval_output_dir', '"""pytorch_model.bin"""'], {}), "(eval_output_dir, 'pytorch_model.bin')\n", (11354, 11392), False, 'import os, pdb\n')]
|