code | apis | extract_api
---|---|---
import numpy as np
import pandas as pd
from collections import OrderedDict
import tabulate

# keep LaTeX markup unescaped in the generated tables
del tabulate.LATEX_ESCAPE_RULES[u'$']
del tabulate.LATEX_ESCAPE_RULES[u'\\']
del tabulate.LATEX_ESCAPE_RULES[u'{']
del tabulate.LATEX_ESCAPE_RULES[u'}']
del tabulate.LATEX_ESCAPE_RULES[u'^']

data = {}
scens = ["SPEAR-SWV", "SPEAR-IBM", "CPLEX-RCW", "CPLEX-REG", "CPLEX-CORLAT"]
models = ["RF", "DNN"]
EVA_BUDGETs = [1, 3600]
WC_BUDGET = 172800  # sec
RUNS = 3

for scen in scens:
    for model in models:
        for EVA_BUDGET in EVA_BUDGETs:
            rep_options = [True] if model == "DNN" else [False]
            for reg in rep_options:
                print(scen, model, EVA_BUDGET, reg)
                data[scen] = data.get(scen, {})
                data[scen][model] = data[scen].get(model, {})
                data[scen][model][EVA_BUDGET] = data[scen][model].get(EVA_BUDGET, {})
                # index 0 holds an inf sentinel so argmin stays well-defined
                # even if a run logged nothing
                t = [np.inf]
                rmse_I = [np.inf]
                rmsle_I = [np.inf]
                rmse_II = [np.inf]
                rmsle_II = [np.inf]
                rmse_III = [np.inf]
                rmsle_III = [np.inf]
                rmse_IV = [np.inf]
                rmsle_IV = [np.inf]
                for seed in range(1, RUNS + 1):
                    with open("{0}_{1}_{2}_{3}_{4}_{5}.log".format(scen, model, reg, EVA_BUDGET, WC_BUDGET, seed)) as fp:
                        for line in fp:
                            if line.startswith("Training Time: "):
                                t.append(float(line.split(":")[1]))
                            elif line.startswith("RMSE (I)"):
                                rmse_I.append(float(line.split(":")[1]))
                            elif line.startswith("RMSLE (I)"):
                                rmsle_I.append(float(line.split(":")[1]))
                            elif line.startswith("RMSE (II)"):
                                rmse_II.append(float(line.split(":")[1]))
                            elif line.startswith("RMSLE (II)"):
                                rmsle_II.append(float(line.split(":")[1]))
                            elif line.startswith("RMSE (III)"):
                                rmse_III.append(float(line.split(":")[1]))
                            elif line.startswith("RMSLE (III)"):
                                rmsle_III.append(float(line.split(":")[1]))
                            elif line.startswith("RMSE (IV)"):
                                rmse_IV.append(float(line.split(":")[1]))
                            elif line.startswith("RMSLE (IV)"):
                                rmsle_IV.append(float(line.split(":")[1]))
                # pick the run with the lowest RMSE on quadrant I
                best_run = np.argmin(rmse_I)
                data[scen][model][EVA_BUDGET]["time"] = t[best_run]
                data[scen][model][EVA_BUDGET]["RMSE (I)"] = rmse_I[best_run]
                data[scen][model][EVA_BUDGET]["RMSLE (I)"] = rmsle_I[best_run]
                data[scen][model][EVA_BUDGET]["RMSE (II)"] = rmse_II[best_run]
                data[scen][model][EVA_BUDGET]["RMSLE (II)"] = rmsle_II[best_run]
                data[scen][model][EVA_BUDGET]["RMSE (III)"] = rmse_III[best_run]
                data[scen][model][EVA_BUDGET]["RMSLE (III)"] = rmsle_III[best_run]
                data[scen][model][EVA_BUDGET]["RMSE (IV)"] = rmse_IV[best_run]
                data[scen][model][EVA_BUDGET]["RMSLE (IV)"] = rmsle_IV[best_run]

for budget in EVA_BUDGETs:
    table_data = [["", "", "\\multicolumn{2}{c}{$\\conf_{\\text{train}}$}", "\\multicolumn{2}{c}{$\\conf_{\\text{test}}$}"],
                  ["Domain", "Instances", "RF", "DNN", "RF", "DNN"]
                  ]
    for scen in scens:
        row = [scen, "$\\insts_{\\text{train}}$"]
        for quad in ["I", "III"]:
            for model in models:
                row.append("$%.2f$" % (data[scen][model][budget]["RMSLE (%s)" % (quad)]))
        table_data.append(row)
        row = [scen, "$\\insts_{\\text{test}}$"]
        for quad in ["II", "IV"]:
            for model in models:
                row.append("$%.2f$" % (data[scen][model][budget]["RMSLE (%s)" % (quad)]))
        table_data.append(row)
    print(tabulate.tabulate(tabular_data=table_data, tablefmt="latex_booktabs"))
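
# Expected log-file format, reconstructed from the parsing above (illustrative):
#   Training Time: 123.4
#   RMSE (I): 0.42
#   RMSLE (I): 0.13
#   ...one "RMSE (Q)"/"RMSLE (Q)" pair per quadrant Q in I-IV,
#   read from files named "<scen>_<model>_<reg>_<eva_budget>_<wc_budget>_<seed>.log"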
|
[
"numpy.argmin",
"tabulate.tabulate"
] |
[((4159, 4228), 'tabulate.tabulate', 'tabulate.tabulate', ([], {'tabular_data': 'table_data', 'tablefmt': '"""latex_booktabs"""'}), "(tabular_data=table_data, tablefmt='latex_booktabs')\n", (4176, 4228), False, 'import tabulate\n'), ((2679, 2696), 'numpy.argmin', 'np.argmin', (['rmse_I'], {}), '(rmse_I)\n', (2688, 2696), True, 'import numpy as np\n')]
|
"""
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from fuse.losses.loss_base import FuseLossBase
from fuse.utils.utils_hierarchical_dict import FuseUtilsHierarchicalDict
from typing import Callable, Dict, Optional
def make_one_hot(input, num_classes, device='cuda'):
"""Convert class index tensor to one hot encoding tensor.
Args:
input: A tensor of shape [N, 1, *]
num_classes: An int of number of class
Returns:
A tensor of shape [N, num_classes, *]
"""
shape = np.array(input.shape)
shape[1] = num_classes
shape = tuple(shape)
result = torch.zeros(shape, device=device)
result = result.scatter_(1, input, 1)
return result
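
# Hedged example of the scatter-based encoding above (illustrative values):
#   idx = torch.tensor([[0], [2]])                      # shape [2, 1]
#   one_hot = make_one_hot(idx, num_classes=3, device='cpu')
#   # one_hot -> tensor([[1., 0., 0.], [0., 0., 1.]])   # shape [2, 3]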

class BinaryDiceLoss(nn.Module):
    def __init__(self, power: int = 1, eps: float = 1., reduction: str = 'mean'):
        '''
        :param power: Denominator exponent: \\sum{x^p} + \\sum{y^p}, default: 1
        :param eps: A float number to smooth the loss and avoid NaN errors, default: 1
        :param reduction: Reduction method to apply: return mean over batch if 'mean',
            return sum if 'sum', return a tensor of shape [N,] if 'none'
        Returns: Loss tensor according to arg reduction
        Raise: Exception if unexpected reduction
        '''
        super().__init__()
        self.p = power
        self.reduction = reduction
        self.eps = eps

    def __call__(self, predict, target):
        assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
        # flatten everything except the batch dimension
        predict = predict.contiguous().view(predict.shape[0], -1)
        target = target.contiguous().view(target.shape[0], -1)
        if target.dtype == torch.int64:
            target = target.type(torch.float32).to(target.device)
        # soft dice: 1 - (2*|X*Y| + eps) / (|X|^p + |Y|^p + eps)
        num = 2 * torch.sum(torch.mul(predict, target), dim=1) + self.eps
        den = torch.sum(predict.pow(self.p) + target.pow(self.p), dim=1) + self.eps
        loss = 1 - num / den
        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        elif self.reduction == 'none':
            return loss
        else:
            raise Exception('Unexpected reduction {}'.format(self.reduction))
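
# Usage sketch (illustrative, not from the original file):
#   loss_fn = BinaryDiceLoss(power=2, reduction='mean')
#   pred = torch.rand(4, 1, 8, 8)                 # probabilities in [0, 1]
#   tgt = (torch.rand(4, 1, 8, 8) > 0.5).float()  # binary ground truth
#   loss = loss_fn(pred, tgt)                     # scalar tensor in [0, 1]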

class FuseDiceLoss(FuseLossBase):
    def __init__(self, pred_name,
                 target_name,
                 filter_func: Optional[Callable] = None,
                 class_weights=None,
                 ignore_cls_index_list=None,
                 resize_mode: str = 'maxpool',
                 **kwargs):
        '''
        :param pred_name: batch_dict key for predicted output (e.g., class probabilities after softmax).
            Expected Tensor shape = [batch, num_classes, height, width]
        :param target_name: batch_dict key for target (e.g., ground truth label). Expected Tensor shape = [batch, height, width]
        :param filter_func: function that filters batch_dict. The function gets an input batch_dict and returns a filtered batch_dict
        :param class_weights: An array of shape [num_classes,]
        :param ignore_cls_index_list: class indices to ignore (list)
        :param resize_mode: Resize mode - either using a max pooling kernel ('maxpool', default), or using PyTorch
            interpolation ('interpolate')
        :param kwargs: args passed to BinaryDiceLoss
        '''
        super().__init__(pred_name, target_name, class_weights)
        self.class_weights = class_weights
        self.filter_func = filter_func
        self.kwargs = kwargs
        # default to an empty list so the membership test below cannot fail on None
        self.ignore_cls_index_list = ignore_cls_index_list if ignore_cls_index_list is not None else []
        self.resize_mode = resize_mode
        self.dice = BinaryDiceLoss(**self.kwargs)

    def __call__(self, batch_dict):
        if self.filter_func is not None:
            batch_dict = self.filter_func(batch_dict)
        predict = FuseUtilsHierarchicalDict.get(batch_dict, self.pred_name).float()
        target = FuseUtilsHierarchicalDict.get(batch_dict, self.target_name).long()
        n, c, h, w = predict.shape
        tar_shape = target.shape
        if len(tar_shape) < 4:
            target = target.unsqueeze(1)
        nt, ct, ht, wt = target.shape
        if h != ht or w != wt:  # resize the target to the prediction resolution
            if self.resize_mode == 'maxpool':
                block_height = int(ht / h)
                block_width = int(wt / w)
                residual_h = int((ht - (block_height * h)) / 2)
                residual_w = int((wt - (block_width * w)) / 2)
                target = torch.nn.functional.max_pool2d(target[:, :, residual_h:ht - residual_h, residual_w:wt - residual_w],
                                                        kernel_size=(block_height, block_width))
            elif self.resize_mode == 'interpolate':
                target = torch.nn.functional.interpolate(target, size=(h, w))
            else:
                raise Exception('Unexpected resize_mode {}'.format(self.resize_mode))
        total_loss = 0
        n_classes = predict.shape[1]
        # Convert target to one hot encoding
        if n_classes > 1 and target.shape[1] != n_classes:
            target = make_one_hot(target, n_classes)
        assert predict.shape == target.shape, 'predict & target shape do not match'
        total_class_weights = sum(self.class_weights) if self.class_weights is not None else n_classes
        for cls_index in range(n_classes):
            if cls_index not in self.ignore_cls_index_list:
                dice_loss = self.dice(predict[:, cls_index, :, :], target[:, cls_index, :, :])
                if self.class_weights is not None:
                    assert self.class_weights.shape[0] == n_classes, \
                        'Expect weight shape [{}], got [{}]'.format(n_classes, self.class_weights.shape[0])
                    dice_loss *= self.class_weights[cls_index]
                total_loss += dice_loss
        total_loss /= total_class_weights
        return self.weight * total_loss
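
# Shape walkthrough of the 'maxpool' resize path above (illustrative):
#   predict : [N, C, 64, 64]
#   target  : [N, 1, 256, 256] -> max_pool2d(kernel_size=(4, 4)) -> [N, 1, 64, 64]
# The residual crop removes border rows/columns when ht/wt are not exact
# multiples of h/w.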
|
[
"torch.mul",
"numpy.array",
"fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict.get",
"torch.nn.functional.interpolate",
"torch.nn.functional.max_pool2d",
"torch.zeros"
] |
[((1137, 1158), 'numpy.array', 'np.array', (['input.shape'], {}), '(input.shape)\n', (1145, 1158), True, 'import numpy as np\n'), ((1224, 1257), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device'}), '(shape, device=device)\n', (1235, 1257), False, 'import torch\n'), ((4650, 4707), 'fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict.get', 'FuseUtilsHierarchicalDict.get', (['batch_dict', 'self.pred_name'], {}), '(batch_dict, self.pred_name)\n', (4679, 4707), False, 'from fuse.utils.utils_hierarchical_dict import FuseUtilsHierarchicalDict\n'), ((4733, 4792), 'fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict.get', 'FuseUtilsHierarchicalDict.get', (['batch_dict', 'self.target_name'], {}), '(batch_dict, self.target_name)\n', (4762, 4792), False, 'from fuse.utils.utils_hierarchical_dict import FuseUtilsHierarchicalDict\n'), ((5307, 5452), 'torch.nn.functional.max_pool2d', 'torch.nn.functional.max_pool2d', (['target[:, :, residual_h:ht - residual_h, residual_w:wt - residual_w]'], {'kernel_size': '(block_height, block_width)'}), '(target[:, :, residual_h:ht - residual_h,\n residual_w:wt - residual_w], kernel_size=(block_height, block_width))\n', (5337, 5452), False, 'import torch\n'), ((2444, 2470), 'torch.mul', 'torch.mul', (['predict', 'target'], {}), '(predict, target)\n', (2453, 2470), False, 'import torch\n'), ((5583, 5635), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['target'], {'size': '(h, w)'}), '(target, size=(h, w))\n', (5614, 5635), False, 'import torch\n')]
|
from typing import Optional

import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import Model
from tensorflow.keras.layers import Layer
import numpy as np

import rinokeras as rk
from rinokeras.layers import WeightNormDense as Dense
from rinokeras.layers import LayerNorm, Stack


class RandomReplaceMask(Layer):
    """ Copied from rinokeras because we're going to potentially have
    different replace masks.
    Replaces some percentage of the input with a mask token. Used for
    implementing BERT-style models. This is actually slightly more complex - it
    does one of three things for each selected position: substitute the mask
    token, substitute a random symbol, or keep the original input.
    Based on https://arxiv.org/abs/1810.04805.
    Args:
        percentage (float): Percentage of input tokens to mask
        mask_token (int): Token to replace masked input with
    """

    def __init__(self,
                 percentage: float,
                 mask_token: int,
                 n_symbols: Optional[int] = None,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        if not 0 <= percentage < 1:
            raise ValueError("Masking percentage must be in [0, 1). "
                             "Received {}".format(percentage))
        self.percentage = percentage
        self.mask_token = mask_token
        self.n_symbols = n_symbols

    def _generate_bert_mask(self, inputs):
        # select each position independently with probability `percentage`
        mask_shape = K.shape(inputs)
        bert_mask = K.random_uniform(mask_shape) < self.percentage
        return bert_mask

    def call(self,
             inputs: tf.Tensor,
             mask: Optional[tf.Tensor] = None):
        """
        Args:
            inputs (tf.Tensor[ndims=2, int]): Tensor of values to mask
            mask (Optional[tf.Tensor[bool]]): Locations in the inputs that are valid
                (i.e. not padding, start tokens, etc.)
        Returns:
            masked_inputs (tf.Tensor[ndims=2, int]): Tensor of masked values
            bert_mask: Locations in the input that were masked
        """
        bert_mask = self._generate_bert_mask(inputs)
        if mask is not None:
            bert_mask &= mask
        # zero out the selected positions, then add back one of three replacements
        masked_inputs = inputs * tf.cast(~bert_mask, inputs.dtype)
        token_bert_mask = K.random_uniform(K.shape(bert_mask)) < 0.8
        random_bert_mask = (K.random_uniform(
            K.shape(bert_mask)) < 0.1) & ~token_bert_mask
        true_bert_mask = ~token_bert_mask & ~random_bert_mask
        token_bert_mask = tf.cast(token_bert_mask & bert_mask, inputs.dtype)
        random_bert_mask = tf.cast(random_bert_mask & bert_mask, inputs.dtype)
        true_bert_mask = tf.cast(true_bert_mask & bert_mask, inputs.dtype)
        masked_inputs += self.mask_token * token_bert_mask  # type: ignore
        masked_inputs += K.random_uniform(
            K.shape(bert_mask), 0, self.n_symbols, dtype=inputs.dtype) * random_bert_mask
        masked_inputs += inputs * true_bert_mask
        return masked_inputs, bert_mask
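
# Note on the split implied by the two independent uniform draws above: among
# selected positions, ~80% get the mask token, ~2% (0.2 * 0.1) get a random
# symbol, and ~18% keep their original value; a strict BERT 80/10/10 split
# would compare the second draw against 0.5 instead of 0.1.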

class ContiguousReplaceMask(Layer):
    """ Copied from rinokeras because we're going to potentially have
    different replace masks.
    Replaces some percentage of the input with a mask token, selecting
    contiguous runs of tokens rather than independent positions. Used for
    implementing BERT-style models.
    Based on https://arxiv.org/abs/1810.04805.
    Args:
        percentage (float): Percentage of input tokens to mask
        mask_token (int): Token to replace masked input with
    """

    def __init__(self,
                 percentage: float,
                 mask_token: int,
                 n_symbols: Optional[int] = None,
                 avg_seq_len: int = 3,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        if not 0 <= percentage < 1:
            raise ValueError("Masking percentage must be in [0, 1). "
                             "Received {}".format(percentage))
        self.percentage = percentage
        self.mask_token = mask_token
        self.avg_seq_len = avg_seq_len
        self.n_symbols = n_symbols

    def _generate_bert_mask(self, inputs):
        def _numpy_generate_contiguous_mask(array):
            # cut each row into runs with average length `avg_seq_len`
            mask = np.random.random(array.shape) < (1 / self.avg_seq_len)
            mask = np.cumsum(mask, 1)
            seqvals = np.max(mask)
            mask_prob = self.percentage * array.shape[1] / seqvals  # increase probability because fewer sequences
            vals_to_mask = np.arange(seqvals)[np.random.random((seqvals,)) < mask_prob]
            indices_to_mask = np.isin(mask, vals_to_mask)
            mask[indices_to_mask] = 1
            mask[~indices_to_mask] = 0
            return np.asarray(mask, np.bool)

        bert_mask = tf.py_func(_numpy_generate_contiguous_mask, [inputs], tf.bool)
        bert_mask.set_shape(inputs.shape)
        return bert_mask


class RandomSequenceMask(Model):
    def __init__(self,
                 n_symbols: int,
                 mask_token: int,
                 mask_percentage: float = 0.15,
                 mask_type: str = 'random'):
        super().__init__()
        if mask_type == 'random':
            self.bert_mask = RandomReplaceMask(mask_percentage, mask_token, n_symbols)
        elif mask_type == 'contiguous':
            self.bert_mask = ContiguousReplaceMask(mask_percentage, mask_token, n_symbols)
        else:
            raise ValueError("Unrecognized mask_type: {}".format(mask_type))

    def call(self, inputs):
        """
        Args:
            inputs: dict with
                'primary': tf.Tensor[int32] - Amino acid sequence,
                    a padded tensor with shape [batch_size, MAX_PROTEIN_LENGTH]
                'protein_length': tf.Tensor[int32] - Length of each protein in the sequence,
                    a tensor with shape [batch_size]
        Output:
            the same dict, extended with 'original_sequence' (the unmasked input),
            a masked 'primary', and 'bert_mask' (the masked locations)
        """
        sequence = inputs['primary']
        protein_length = inputs['protein_length']
        sequence_mask = rk.utils.convert_sequence_length_to_sequence_mask(
            sequence, protein_length)
        masked_sequence, bert_mask = self.bert_mask(sequence, sequence_mask)
        inputs['original_sequence'] = sequence
        inputs['primary'] = masked_sequence
        inputs['bert_mask'] = bert_mask
        return inputs
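
# Usage sketch (illustrative; tensor names are placeholders):
#   masker = RandomSequenceMask(n_symbols=25, mask_token=24, mask_percentage=0.15)
#   batch = {'primary': sequence_tensor, 'protein_length': length_tensor}
#   batch = masker(batch)   # masks 'primary', adds 'original_sequence' and 'bert_mask'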
|
[
"tensorflow.keras.backend.shape",
"numpy.random.random",
"numpy.asarray",
"numpy.isin",
"numpy.max",
"rinokeras.utils.convert_sequence_length_to_sequence_mask",
"numpy.cumsum",
"tensorflow.keras.backend.random_uniform",
"tensorflow.py_func",
"tensorflow.cast",
"numpy.arange"
] |
[((1368, 1383), 'tensorflow.keras.backend.shape', 'K.shape', (['inputs'], {}), '(inputs)\n', (1375, 1383), True, 'import tensorflow.keras.backend as K\n'), ((2467, 2517), 'tensorflow.cast', 'tf.cast', (['(token_bert_mask & bert_mask)', 'inputs.dtype'], {}), '(token_bert_mask & bert_mask, inputs.dtype)\n', (2474, 2517), True, 'import tensorflow as tf\n'), ((2545, 2596), 'tensorflow.cast', 'tf.cast', (['(random_bert_mask & bert_mask)', 'inputs.dtype'], {}), '(random_bert_mask & bert_mask, inputs.dtype)\n', (2552, 2596), True, 'import tensorflow as tf\n'), ((2622, 2671), 'tensorflow.cast', 'tf.cast', (['(true_bert_mask & bert_mask)', 'inputs.dtype'], {}), '(true_bert_mask & bert_mask, inputs.dtype)\n', (2629, 2671), True, 'import tensorflow as tf\n'), ((4697, 4759), 'tensorflow.py_func', 'tf.py_func', (['_numpy_generate_contiguous_mask', '[inputs]', 'tf.bool'], {}), '(_numpy_generate_contiguous_mask, [inputs], tf.bool)\n', (4707, 4759), True, 'import tensorflow as tf\n'), ((6044, 6119), 'rinokeras.utils.convert_sequence_length_to_sequence_mask', 'rk.utils.convert_sequence_length_to_sequence_mask', (['sequence', 'protein_length'], {}), '(sequence, protein_length)\n', (6093, 6119), True, 'import rinokeras as rk\n'), ((1404, 1432), 'tensorflow.keras.backend.random_uniform', 'K.random_uniform', (['mask_shape'], {}), '(mask_shape)\n', (1420, 1432), True, 'import tensorflow.keras.backend as K\n'), ((2170, 2203), 'tensorflow.cast', 'tf.cast', (['(~bert_mask)', 'inputs.dtype'], {}), '(~bert_mask, inputs.dtype)\n', (2177, 2203), True, 'import tensorflow as tf\n'), ((4238, 4256), 'numpy.cumsum', 'np.cumsum', (['mask', '(1)'], {}), '(mask, 1)\n', (4247, 4256), True, 'import numpy as np\n'), ((4279, 4291), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (4285, 4291), True, 'import numpy as np\n'), ((4525, 4552), 'numpy.isin', 'np.isin', (['mask', 'vals_to_mask'], {}), '(mask, vals_to_mask)\n', (4532, 4552), True, 'import numpy as np\n'), ((4650, 4675), 'numpy.asarray', 'np.asarray', (['mask', 'np.bool'], {}), '(mask, np.bool)\n', (4660, 4675), True, 'import numpy as np\n'), ((2248, 2266), 'tensorflow.keras.backend.shape', 'K.shape', (['bert_mask'], {}), '(bert_mask)\n', (2255, 2266), True, 'import tensorflow.keras.backend as K\n'), ((2804, 2822), 'tensorflow.keras.backend.shape', 'K.shape', (['bert_mask'], {}), '(bert_mask)\n', (2811, 2822), True, 'import tensorflow.keras.backend as K\n'), ((4164, 4193), 'numpy.random.random', 'np.random.random', (['array.shape'], {}), '(array.shape)\n', (4180, 4193), True, 'import numpy as np\n'), ((4434, 4452), 'numpy.arange', 'np.arange', (['seqvals'], {}), '(seqvals)\n', (4443, 4452), True, 'import numpy as np\n'), ((2332, 2350), 'tensorflow.keras.backend.shape', 'K.shape', (['bert_mask'], {}), '(bert_mask)\n', (2339, 2350), True, 'import tensorflow.keras.backend as K\n'), ((4453, 4481), 'numpy.random.random', 'np.random.random', (['(seqvals,)'], {}), '((seqvals,))\n', (4469, 4481), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob as glb
import os
import cv2
import pickle

#################################################################################################################
def create_new_folder(new_dir):
    if not os.path.exists(new_dir):
        os.makedirs(new_dir)
    return

#################################################################################################################
def save_transform_matrix(img_filename, saveas_dir):
    img = np.copy(mpimg.imread(img_filename))
    img_size = img.shape[1::-1]  # (width, height)
    # prepare source and destination points to calculate the transform matrix
    dim_x = img_size[0]
    pt1 = (219, 720)
    pt2 = (1110, 720)
    pt3 = (675, 442)
    pt4 = (602, 442)
    pts = (pt1, pt2, pt3, pt4)
    src = np.float32(pts).reshape(-1, 2)
    dst = np.copy(src)
    dst[0][0] = 400
    dst[1][0] = dim_x - 400
    dst[3][0] = 400
    dst[2][0] = dim_x - 400
    dst[3][1] = 0
    dst[2][1] = 0
    # calculate transform matrix
    M = cv2.getPerspectiveTransform(src, dst)
    # calculate inverse transform matrix
    Minv = cv2.getPerspectiveTransform(dst, src)
    # save M and Minv in binary format
    with open(saveas_dir + 'mtx_transform', 'wb') as db_file:
        db = {'TM': M, 'TMinv': Minv}
        pickle.dump(db, db_file)
    return
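
# Hedged sketch of consuming the saved matrices later (names illustrative):
#   with open(saveas_dir + 'mtx_transform', 'rb') as fp:
#       db = pickle.load(fp)
#   warped = cv2.warpPerspective(img, db['TM'], img_size)        # bird's-eye view
#   restored = cv2.warpPerspective(warped, db['TMinv'], img_size)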

# get images from the folder
images = glb.glob('./*.jpg')
# create a folder to save the transform matrix in
new_dir = './transform_matrix/'
create_new_folder(new_dir)
# calculate the transform matrix (using the first image)
test = images[0]
save_transform_matrix(test, new_dir)
|
[
"numpy.copy",
"os.path.exists",
"pickle.dump",
"os.makedirs",
"cv2.getPerspectiveTransform",
"matplotlib.image.imread",
"numpy.float32",
"glob.glob"
] |
[((1438, 1457), 'glob.glob', 'glb.glob', (['"""./*.jpg"""'], {}), "('./*.jpg')\n", (1446, 1457), True, 'import glob as glb\n'), ((878, 890), 'numpy.copy', 'np.copy', (['src'], {}), '(src)\n', (885, 890), True, 'import numpy as np\n'), ((1066, 1103), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (1093, 1103), False, 'import cv2\n'), ((1157, 1194), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (1184, 1194), False, 'import cv2\n'), ((1347, 1371), 'pickle.dump', 'pickle.dump', (['db', 'db_file'], {}), '(db, db_file)\n', (1358, 1371), False, 'import pickle\n'), ((298, 321), 'os.path.exists', 'os.path.exists', (['new_dir'], {}), '(new_dir)\n', (312, 321), False, 'import os\n'), ((331, 351), 'os.makedirs', 'os.makedirs', (['new_dir'], {}), '(new_dir)\n', (342, 351), False, 'import os\n'), ((549, 575), 'matplotlib.image.imread', 'mpimg.imread', (['img_filename'], {}), '(img_filename)\n', (561, 575), True, 'import matplotlib.image as mpimg\n'), ((837, 852), 'numpy.float32', 'np.float32', (['pts'], {}), '(pts)\n', (847, 852), True, 'import numpy as np\n')]
|
import logging
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
from pathlib import Path
from time import sleep

import cv2
import numpy as np
import pandas as pd
from PyQt5.QtCore import Qt, QTimer, pyqtSlot
from PyQt5.QtGui import QColor, QImage, QPixmap
from PyQt5.QtWidgets import QMessageBox, QStyle, QWidget

from .view import VideoAppViewer


class VideoApp(VideoAppViewer):
    def __init__(self, videopath: str, outpath: str, **config):
        self.videopath = videopath
        self.outpath = outpath
        self.config = config
        self.title = self.config.get('title', 'PyQt5 video labeling viewer')
        super().__init__(title=self.title)

        # draw config
        if self.config.get('draw') and isinstance(self.config['draw'], dict):
            draw_config = self.config['draw']
            self.label_frame.draw_color = draw_config.get('color', QColor(0, 0, 0))
            self.label_frame.draw_thickness = draw_config.get('thickness', 2)
            self.label_frame.draw_style = draw_config.get('style', Qt.SolidLine)
        if self.config.get('select') and isinstance(self.config['select'], dict):
            select_config = self.config['select']
            self.label_frame.select_color = select_config.get('color', QColor(0, 0, 0))
            self.label_frame.select_thickness = select_config.get('thickness', 3)
            self.label_frame.select_style = select_config.get('style', Qt.SolidLine)

        # record config
        check_label = self.config.get('label')
        label_color = self.config['label'].get('color', (0, 0, 0)) if check_label else None
        label_thickness = self.config['label'].get('thickness', 2) if check_label else None
        self.label_color = label_color
        self.label_thickness = label_thickness
        self.limit_nlabel = self.config.get('limit_nlabel', None)
        self.records = []

        # read video
        self.cap = cv2.VideoCapture(self.videopath)
        self.target_frame_idx = 0     # ready to update
        self.render_frame_idx = None  # rendered
        self.scale_height = self.scale_width = None
        self.is_playing_video = False
        self.is_force_update = False
        self._update_video_info()
        self._update_frame()

        # widget binding
        self.slider_video.setRange(0, self.frame_count - 1)
        self.slider_video.sliderMoved.connect(self.on_slider_moved)
        self.slider_video.sliderReleased.connect(self.on_slider_released)
        self.btn_play_video.clicked.connect(self.on_play_video_clicked)
        self.label_frame.mousePressEvent = self.event_frame_mouse_press
        self.label_frame.mouseMoveEvent = self.event_frame_mouse_move
        self.label_frame.mouseReleaseEvent = self.event_frame_mouse_release
        self.btn_previous_record.clicked.connect(self._goto_previous_record)
        self.btn_next_record.clicked.connect(self._goto_next_record)
        self.btn_export_records.clicked.connect(self.save_file)
        self.table_preview_records.doubleClicked.connect(self.event_preview_double_clicked)
        self.show()
    @property
    def frame_count(self):
        return int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) if self.cap else None

    @property
    def frame_height(self):
        return int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) if self.cap else None

    @property
    def frame_width(self):
        return int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) if self.cap else None

    @property
    def video_fps(self):
        return int(self.cap.get(cv2.CAP_PROP_FPS)) if self.cap else None

    def _ndarray_to_qimage(self, image: np.ndarray):
        """convert cv2 image to pyqt5 image
        Arguments:
            image {np.ndarray} -- original RGB image
        Returns:
            {QImage} -- pyqt5 image format
        """
        return QImage(image, image.shape[1], image.shape[0], QImage.Format_RGB888)

    def _frame_idx_to_hmsf(self, frame_idx: int):
        """convert a frame index to an H:M:S.f timestamp using the video fps"""
        assert self.video_fps
        base = datetime.strptime('00:00:00.000000', '%H:%M:%S.%f')
        delta = timedelta(seconds=frame_idx / self.video_fps)
        return (base + delta).strftime('%H:%M:%S.%f')

    def _frame_idx_to_hms(self, frame_idx: int):
        """convert a frame index to an H:M:S timestamp using the video fps
        (e.g. frame 150 at 30 fps -> '00:00:05')"""
        assert self.video_fps
        base = datetime.strptime('00:00:00', '%H:%M:%S')
        delta = timedelta(seconds=frame_idx // self.video_fps)
        return (base + delta).strftime('%H:%M:%S')
    def _read_frame(self, frame_idx: int):
        """check the frame index and read status, then return the frame
        Arguments:
            frame_idx {int} -- frame index
        Returns:
            {np.ndarray} -- RGB image in (h, w, c)
        """
        if frame_idx >= self.frame_count:
            self.logger.exception('frame index %d should be less than %d', frame_idx, self.frame_count)
        else:
            self.target_frame_idx = frame_idx
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
            read_success, frame = self.cap.read()
            if read_success:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                return frame
            self.logger.exception('read #%d frame failed', frame_idx)

    def _play_video(self):
        """play video when button clicked"""
        if self.is_playing_video and self.video_fps:
            frame_idx = min(self.render_frame_idx + 1, self.frame_count)
            if frame_idx == self.frame_count:
                self.on_play_video_clicked()
            else:
                self.target_frame_idx = frame_idx
            # QTimer.singleShot expects milliseconds; 1/fps seconds would round to 0
            QTimer.singleShot(1000 // self.video_fps, self._play_video)

    def _check_coor_in_frame(self, coor_x: int, coor_y: int):
        """check that the coordinate of a mouse event lies inside the frame"""
        return 0 < coor_x < self.scale_width and 0 < coor_y < self.scale_height
    def _update_video_info(self):
        shape = str((self.frame_width, self.frame_height))
        self.label_video_path.setText(self.videopath)
        self.label_video_shape.setText(shape)
        self.label_video_fps.setText(str(self.video_fps))

    def _update_frame(self):
        """read and update image to label"""
        if self.target_frame_idx != self.render_frame_idx or self.is_force_update:
            self.is_force_update = False
            frame = self._read_frame(self.target_frame_idx)
            if frame is not None:
                # draw, convert, resize pixmap
                frame = self.draw_rects(self.target_frame_idx, frame)
                pixmap = QPixmap(self._ndarray_to_qimage(frame))
                self.scale_width = int(min(pixmap.width(), self.screen.width() * 0.8))
                self.scale_height = int(pixmap.height() * (self.scale_width / pixmap.width()))
                pixmap = pixmap.scaled(self.scale_width, self.scale_height, Qt.KeepAspectRatio)
                self.label_frame.setPixmap(pixmap)
                self.label_frame.resize(self.scale_width, self.scale_height)

                # sync, update related information
                self._update_frame_status(self.target_frame_idx)
                self.render_frame_idx = self.target_frame_idx
                self.slider_video.setValue(self.render_frame_idx)
        QTimer.singleShot(1000 // self.video_fps, self._update_frame)

    def _update_frame_status(self, frame_idx: int, err: str = ''):
        """update frame status
        Arguments:
            frame_idx {int} -- frame index
        Keyword Arguments:
            err {str} -- status to show on exceptions (default: '')
        """
        msg = '#frame ({}/{})'.format(frame_idx, self.frame_count - 1)
        if err:
            msg += '\n{}'.format(err)
        self.label_video_status.setText(msg)

    def _get_records_by_frame_idx(self, frame_idx=None):
        """return the records of a specific frame index (default: current frame)"""
        # explicit None check so that frame index 0 is not treated as "unset"
        if frame_idx is None:
            frame_idx = self.render_frame_idx
        return list(filter(lambda x: x['frame_idx'] == frame_idx, self.records))

    def _get_nrecord_in_current_frame(self):
        """get the number of records in the current frame"""
        current_records = self._get_records_by_frame_idx()
        return len(current_records) if current_records else None
    def _get_closest_record_in_current_frame(self, coor_x: int, coor_y: int):
        """get the closest record containing the given coordinate in the current frame
        Arguments:
            coor_x {int} -- coordinate x
            coor_y {int} -- coordinate y
        Returns:
            {OrderedDict} -- the closest record
        """
        current_records = deepcopy(self._get_records_by_frame_idx())
        for rid, record in enumerate(current_records):
            pt1, pt2 = (record['x1'], record['y1']), (record['x2'], record['y2'])
            if pt1[0] < coor_x < pt2[0] and pt1[1] < coor_y < pt2[1]:
                # distance from the coordinate to the record's center
                center = np.array(((pt2[0] + pt1[0]) / 2, (pt2[1] + pt1[1]) / 2))
                dist = np.linalg.norm(center - np.array((coor_x, coor_y)))
                current_records[rid]['dist'] = dist
        current_records = list(filter(lambda x: 'dist' in x, current_records))
        if current_records:
            return sorted(current_records, key=lambda x: x['dist'])[0]

    def _remove_record(self, frame_idx: int, pt1: tuple, pt2: tuple):
        """remove a record matching the given values
        Arguments:
            frame_idx {int} -- record frame index
            pt1 {tuple} -- record (x1, y1)
            pt2 {tuple} -- record (x2, y2)
        """
        current_records = self._get_records_by_frame_idx(frame_idx)
        target_record = None
        for record in current_records:
            src_pt1, src_pt2 = (record['x1'], record['y1']), (record['x2'], record['y2'])
            if src_pt1 == pt1 and src_pt2 == pt2:
                target_record = record
        if target_record:
            target_row_idx = self.records.index(target_record)
            self.records.remove(target_record)
            self.remove_record_from_preview(target_row_idx)
    @pyqtSlot()
    def _goto_previous_record(self):
        rest_records = list(filter(lambda x: x['frame_idx'] < self.render_frame_idx, self.records))
        if not rest_records:
            QMessageBox.information(self, 'Info', 'no previous record', QMessageBox.Ok)
        else:
            self.target_frame_idx = rest_records[-1]['frame_idx']

    @pyqtSlot()
    def _goto_next_record(self):
        rest_records = list(filter(lambda x: x['frame_idx'] > self.render_frame_idx, self.records))
        if not rest_records:
            QMessageBox.information(self, 'Info', 'no next record', QMessageBox.Ok)
        else:
            self.target_frame_idx = rest_records[0]['frame_idx']

    @pyqtSlot()
    def on_slider_released(self):
        """update frame and frame status when the slider is released"""
        self.target_frame_idx = self.slider_video.value()

    @pyqtSlot()
    def on_slider_moved(self):
        """update frame status only while the slider is being moved"""
        self._update_frame_status(frame_idx=self.slider_video.value())

    @pyqtSlot()
    def on_play_video_clicked(self):
        """toggle between playing and pausing the video"""
        self.is_playing_video = not self.is_playing_video
        if self.is_playing_video:
            self.btn_play_video.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))
            self._play_video()
        else:
            self.btn_play_video.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))

    @pyqtSlot()
    def event_frame_mouse_press(self, event):
        """label frame mouse-press event
        - Qt.LeftButton: start drawing
        - Qt.RightButton: select a record to delete
        Arguments:
            event {PyQt5.QtGui.QMouseEvent} -- event object
        """
        if self._check_coor_in_frame(event.x(), event.y()) and not self.is_playing_video:
            if event.button() == Qt.LeftButton:
                nrecords = self._get_nrecord_in_current_frame()
                if self.limit_nlabel and nrecords and self.limit_nlabel <= nrecords:
                    self.logger.warning('not available to add a new record (exist=%d, limit=%d)',
                                        nrecords, self.limit_nlabel)
                else:
                    self.label_frame.is_drawing = True
                    self.label_frame.is_selecting = False
                    self.logger.debug('press mouse at (%d, %d)', event.x(), event.y())
                    self.label_frame.pt1 = (event.x(), event.y())
            elif event.button() == Qt.RightButton:
                closest_record = self._get_closest_record_in_current_frame(event.x(), event.y())
                if closest_record:
                    pt1 = (closest_record['x1'], closest_record['y1'])
                    pt2 = (closest_record['x2'], closest_record['y2'])
                    message = '<b>Do you want to delete the record ?</b><br/><br/> \
                    frame index -\t{} <br/> position -\t{} {}'.format(
                        closest_record['frame_idx'], str(pt1), str(pt2))
                    reply = QMessageBox.question(self, 'Delete Record', message,
                                                 QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                    if reply == QMessageBox.Yes:
                        self._remove_record(closest_record['frame_idx'], pt1, pt2)
                        self.is_force_update = True
                        self.update()
    @pyqtSlot()
    def event_frame_mouse_move(self, event):
        if self.label_frame.is_drawing and self._check_coor_in_frame(event.x(), event.y()):
            self.logger.debug('move mouse at (%d, %d)', event.x(), event.y())
            self.label_frame.pt2 = (event.x(), event.y())
            self.update()
        elif not self.label_frame.is_drawing and not self.is_playing_video:
            # highlight the record under the cursor
            closest_record = self._get_closest_record_in_current_frame(event.x(), event.y())
            if closest_record:
                self.label_frame.is_selecting = True
                self.label_frame.select_pt1 = (closest_record['x1'], closest_record['y1'])
                self.label_frame.select_pt2 = (closest_record['x2'], closest_record['y2'])
            else:
                self.label_frame.is_selecting = False
                self.label_frame.select_pt1 = self.label_frame.select_pt2 = None
            self.update()

    @pyqtSlot()
    def event_frame_mouse_release(self, event):
        if self.label_frame.is_drawing:
            self.label_frame.is_drawing = False
            self.logger.debug('release mouse at (%d, %d)', event.x(), event.y())
            if self._check_coor_in_frame(event.x(), event.y()):
                self.label_frame.pt2 = (event.x(), event.y())
            pt1, pt2 = self.label_frame.revise_coor(self.label_frame.pt1, self.label_frame.pt2)
            record = OrderedDict([
                ('timestamp_hms', self._frame_idx_to_hms(self.render_frame_idx)),
                ('timestamp_hmsf', self._frame_idx_to_hmsf(self.render_frame_idx)),
                ('frame_idx', self.render_frame_idx), ('fps', self.video_fps),
                ('frame_height', self.frame_height), ('frame_width', self.frame_width),
                ('scale_height', self.scale_height), ('scale_width', self.scale_width),
                ('x1', pt1[0]), ('y1', pt1[1]), ('x2', pt2[0]), ('y2', pt2[1]),
                ('center_x', (pt1[0] + pt2[0]) // 2), ('center_y', (pt1[1] + pt2[1]) // 2)
            ])
            self.records.append(record)
            self.records = sorted(self.records, key=lambda x: x['frame_idx'])
            self.add_record_to_preview(record['timestamp_hms'],
                                       record['frame_idx'],
                                       (record['x1'], record['y1']),
                                       (record['x2'], record['y2']))
            self.label_frame.pt1 = self.label_frame.pt2 = None
            self.is_force_update = True
            self.update()
    @pyqtSlot()
    def event_preview_double_clicked(self):
        row = self.table_preview_records.currentRow()
        frame_idx = int(self.table_preview_records.item(row, 1).text())
        self.target_frame_idx = frame_idx

    def draw_rects(self, frame_idx: int, frame: np.ndarray):
        rest_records = list(filter(lambda x: x['frame_idx'] == frame_idx, self.records))
        if not rest_records:
            return frame
        for record in rest_records:
            pt1, pt2 = (record['x1'], record['y1']), (record['x2'], record['y2'])
            cv2.rectangle(frame, pt1, pt2, self.label_color, self.label_thickness)
        return frame

    def save_file(self):
        """export records to the default output path
        - click Ok to only close the message box
        - click Close to close the PyQt program
        """
        exist_msg = 'File <b>{}</b> exists.<br/><br/>\
                     Do you want to replace it?'.format(self.outpath)
        info_msg = 'Saved at <b>{}</b><br/>\
                    total records: {}'.format(self.outpath, len(self.records))

        # check whether the file exists
        exist_reply = QMessageBox.No
        if Path(self.outpath).exists():
            exist_reply = QMessageBox.question(self, 'File Exist', exist_msg,
                                               QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if not Path(self.outpath).exists() or exist_reply == QMessageBox.Yes:
            df_labels = pd.DataFrame.from_records(self.records)
            df_labels.to_csv(self.outpath, index=False)

        # check if the application is going to close
        QMessageBox.about(self, 'Info', info_msg)
        self.close()

    def keyPressEvent(self, event):
        """global keyboard event"""
        if event.key() in [Qt.Key_Space, Qt.Key_P]:
            self.on_play_video_clicked()
        elif event.key() in [Qt.Key_Right, Qt.Key_D]:
            self.target_frame_idx = min(self.target_frame_idx + self.video_fps, self.frame_count - 1)
        elif event.key() in [Qt.Key_Left, Qt.Key_A]:
            self.target_frame_idx = max(0, self.target_frame_idx - self.video_fps)
        else:
            self.logger.debug('clicked %s but no related binding event', str(event.key()))
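
# Hypothetical launcher sketch (file names are placeholders; assumes the
# companion VideoAppViewer module provides the widgets referenced above):
#   import sys
#   from PyQt5.QtWidgets import QApplication
#   app = QApplication(sys.argv)
#   viewer = VideoApp('video.mp4', 'records.csv', title='video labeler')
#   sys.exit(app.exec_())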
|
[
"cv2.rectangle",
"PyQt5.QtCore.QTimer.singleShot",
"datetime.datetime.strptime",
"pathlib.Path",
"PyQt5.QtGui.QColor",
"PyQt5.QtCore.pyqtSlot",
"PyQt5.QtGui.QImage",
"PyQt5.QtWidgets.QMessageBox.information",
"numpy.array",
"PyQt5.QtWidgets.QMessageBox.question",
"PyQt5.QtWidgets.QMessageBox.about",
"cv2.VideoCapture",
"cv2.cvtColor",
"pandas.DataFrame",
"datetime.timedelta"
] |
[((10100, 10110), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (10108, 10110), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((10451, 10461), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (10459, 10461), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((10793, 10803), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (10801, 10803), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((10971, 10981), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (10979, 10981), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((11151, 11161), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (11159, 11161), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((11568, 11578), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (11576, 11578), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((13541, 13551), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (13549, 13551), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((14471, 14481), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (14479, 14481), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((16076, 16086), 'PyQt5.QtCore.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (16084, 16086), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((1967, 1999), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.videopath'], {}), '(self.videopath)\n', (1983, 1999), False, 'import cv2\n'), ((3879, 3946), 'PyQt5.QtGui.QImage', 'QImage', (['image', 'image.shape[1]', 'image.shape[0]', 'QImage.Format_RGB888'], {}), '(image, image.shape[1], image.shape[0], QImage.Format_RGB888)\n', (3885, 3946), False, 'from PyQt5.QtGui import QColor, QImage, QPixmap\n'), ((4110, 4161), 'datetime.datetime.strptime', 'datetime.strptime', (['"""00:00:00.000000"""', '"""%H:%M:%S.%f"""'], {}), "('00:00:00.000000', '%H:%M:%S.%f')\n", (4127, 4161), False, 'from datetime import datetime, timedelta\n'), ((4178, 4223), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(frame_idx / self.video_fps)'}), '(seconds=frame_idx / self.video_fps)\n', (4187, 4223), False, 'from datetime import datetime, timedelta\n'), ((4437, 4478), 'datetime.datetime.strptime', 'datetime.strptime', (['"""00:00:00"""', '"""%H:%M:%S"""'], {}), "('00:00:00', '%H:%M:%S')\n", (4454, 4478), False, 'from datetime import datetime, timedelta\n'), ((4495, 4541), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(frame_idx // self.video_fps)'}), '(seconds=frame_idx // self.video_fps)\n', (4504, 4541), False, 'from datetime import datetime, timedelta\n'), ((5695, 5750), 'PyQt5.QtCore.QTimer.singleShot', 'QTimer.singleShot', (['(1 / self.video_fps)', 'self._play_video'], {}), '(1 / self.video_fps, self._play_video)\n', (5712, 5750), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((7327, 7387), 'PyQt5.QtCore.QTimer.singleShot', 'QTimer.singleShot', (['(1000 / self.video_fps)', 'self._update_frame'], {}), '(1000 / self.video_fps, self._update_frame)\n', (7344, 7387), False, 'from PyQt5.QtCore import Qt, QTimer, pyqtSlot\n'), ((17701, 17742), 'PyQt5.QtWidgets.QMessageBox.about', 'QMessageBox.about', (['self', '"""Info"""', 'info_msg'], {}), "(self, 'Info', info_msg)\n", (17718, 17742), False, 'from PyQt5.QtWidgets import QMessageBox, QStyle, QWidget\n'), ((10289, 10364), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""Info"""', '"""no previous record"""', 'QMessageBox.Ok'], {}), "(self, 'Info', 'no previous record', QMessageBox.Ok)\n", (10312, 10364), 
False, 'from PyQt5.QtWidgets import QMessageBox, QStyle, QWidget\n'), ((10636, 10707), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', (['self', '"""Info"""', '"""no next record"""', 'QMessageBox.Ok'], {}), "(self, 'Info', 'no next record', QMessageBox.Ok)\n", (10659, 10707), False, 'from PyQt5.QtWidgets import QMessageBox, QStyle, QWidget\n'), ((16634, 16704), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'pt1', 'pt2', 'self.label_color', 'self.label_thickness'], {}), '(frame, pt1, pt2, self.label_color, self.label_thickness)\n', (16647, 16704), False, 'import cv2\n'), ((17280, 17385), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""File Exist"""', 'exist_msg', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.No'], {}), "(self, 'File Exist', exist_msg, QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n", (17300, 17385), False, 'from PyQt5.QtWidgets import QMessageBox, QStyle, QWidget\n'), ((929, 944), 'PyQt5.QtGui.QColor', 'QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (935, 944), False, 'from PyQt5.QtGui import QColor, QImage, QPixmap\n'), ((1308, 1323), 'PyQt5.QtGui.QColor', 'QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1314, 1323), False, 'from PyQt5.QtGui import QColor, QImage, QPixmap\n'), ((5193, 5231), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (5205, 5231), False, 'import cv2\n'), ((8947, 9003), 'numpy.array', 'np.array', (['((pt2[0] + pt1[0]) / 2, (pt2[1] + pt1[1]) / 2)'], {}), '(((pt2[0] + pt1[0]) / 2, (pt2[1] + pt1[1]) / 2))\n', (8955, 9003), True, 'import numpy as np\n'), ((17225, 17243), 'pathlib.Path', 'Path', (['self.outpath'], {}), '(self.outpath)\n', (17229, 17243), False, 'from pathlib import Path\n'), ((17533, 17547), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (17545, 17547), True, 'import pandas as pd\n'), ((9043, 9069), 'numpy.array', 'np.array', (['(coor_x, coor_y)'], {}), '((coor_x, coor_y))\n', (9051, 9069), True, 'import numpy as np\n'), ((13159, 13265), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', '"""Delete Record"""', 'message', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.No'], {}), "(self, 'Delete Record', message, QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n", (13179, 13265), False, 'from PyQt5.QtWidgets import QMessageBox, QStyle, QWidget\n'), ((17446, 17464), 'pathlib.Path', 'Path', (['self.outpath'], {}), '(self.outpath)\n', (17450, 17464), False, 'from pathlib import Path\n')]
|
import numpy as np
import pandas as pd

from .scm import SCM


class DataGenerator:
    def generate(self, scm: SCM, n_samples: int, seed: int):
        pass


class SimpleDataGenerator(DataGenerator):
    def generate(self, scm: SCM, n_samples: int, seed: int):
        """
        Generates data according to the given Structural Causal Model.
        This generator assumes that variables are normally distributed.
        The noise is distributed according to the standard normal distribution.
        :param scm: instance of SCM
        :param n_samples: number of samples to generate
        :param seed: random seed
        :return: a pandas DataFrame with one column per variable
        """
        np.random.seed(seed)
        data = {}
        for equation in scm.equations:
            data[equation["output_variable"].name] = np.zeros(n_samples)
            # linear combination of the already-generated parent variables
            for input_variable, coeff in equation["input_variables"].items():
                if input_variable.name not in data:
                    raise AttributeError(
                        f"No data generated for dependent variable {input_variable.name}"
                    )
                data[equation["output_variable"].name] += (
                    data[input_variable.name] * coeff
                )
            mean = 0
            std = 1.0
            if isinstance(equation["output_variable"].config, dict):
                mean = equation["output_variable"].config.get("mean", 0)
                std = equation["output_variable"].config.get("std", 1.0)
            data[equation["output_variable"].name] += np.random.normal(
                loc=mean, scale=std, size=n_samples
            )
            if (
                isinstance(equation["output_variable"].config, dict)
                and "mask" in equation["output_variable"].config
            ):
                # threshold and binarize the output values
                out_val = data[equation["output_variable"].name]
                out_val[out_val < equation["output_variable"].config["mask"]] = 0
                out_val[out_val > 0] = 1
                data[equation["output_variable"].name] = out_val
        return pd.DataFrame.from_dict(data)
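
# Usage sketch (hypothetical stand-ins for the SCM/variable types defined in .scm):
#   from types import SimpleNamespace
#   x = SimpleNamespace(name="x", config=None)
#   y = SimpleNamespace(name="y", config={"mean": 0, "std": 1.0})
#   scm = SimpleNamespace(equations=[
#       {"output_variable": x, "input_variables": {}},
#       {"output_variable": y, "input_variables": {x: 2.0}},   # y = 2x + noise
#   ])
#   df = SimpleDataGenerator().generate(scm, n_samples=100, seed=0)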
|
[
"numpy.random.normal",
"numpy.zeros",
"numpy.random.seed",
"pandas.DataFrame.from_dict"
] |
[((653, 673), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (667, 673), True, 'import numpy as np\n'), ((2057, 2085), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (2079, 2085), True, 'import pandas as pd\n'), ((786, 805), 'numpy.zeros', 'np.zeros', (['n_samples'], {}), '(n_samples)\n', (794, 805), True, 'import numpy as np\n'), ((1537, 1590), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': 'std', 'size': 'n_samples'}), '(loc=mean, scale=std, size=n_samples)\n', (1553, 1590), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import numpy

from rl.agents.policy.policy_agent import PolicyAgent


class Random(PolicyAgent):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def act(self, state: numpy.ndarray, available_actions: numpy.ndarray):
        """
        Uses a uniform random distribution to determine its action given a state
        TODO: Act according to different distributions
        :param state: The state of the environment
        :param available_actions: A list of available possible actions (positions on the board to mark)
        :return: a random action
        """
        return numpy.random.choice(available_actions)
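
# Usage sketch (illustrative; PolicyAgent constructor arguments are not shown here):
#   action = agent.act(state=board, available_actions=numpy.array([0, 4, 8]))
#   # returns 0, 4 or 8 with equal probability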
|
[
"numpy.random.choice"
] |
[((644, 682), 'numpy.random.choice', 'numpy.random.choice', (['available_actions'], {}), '(available_actions)\n', (663, 682), False, 'import numpy\n')]
|
# ---------------------------------------------------
# Intermediate Python - Loops
# 22 Sep 2020
# VNTBJR
# ---------------------------------------------------
#
# Load packages
library(reticulate)
# while loop -------------------------------------------
# Basic while loop
# Initialize offset
offset = 8
# Code the while loop
while offset != 0 :
  print("correcting...")
  offset = offset - 1
  print(offset)
quit()
# Add conditionals
# Initialize offset
offset = -6
# Code the while loop
while offset != 0 :
  print("correcting...")
  if offset > 0 :
    offset = offset - 1
  else :
    offset = offset + 1
  print(offset)
quit()
######################################################################
# for loop -------------------------------------------
# Loop over a list
# areas list
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Code the for loop
for area in areas :
  print(area)
quit()
# Indexes and values
# areas list
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Change for loop to use enumerate() and update print()
for index, a in enumerate(areas) :
  print("room " + str(index) + ": " + str(a))
quit()
# Indexes and values (2)
# areas list
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Code the for loop
for index, area in enumerate(areas) :
  print("room " + str(index + 1) + ": " + str(area))
quit()
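# Alternative (not in the original course code): enumerate() can start at 1
# directly, avoiding the manual "+ 1" offset
for index, area in enumerate(areas, start=1) :
  print("room " + str(index) + ": " + str(area))
quit()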
# Loop over list of lists
# house list of lists
house = [["hallway", 11.25],
         ["kitchen", 18.0],
         ["living room", 20.0],
         ["bedroom", 10.75],
         ["bathroom", 9.50]]
# Build a for loop from scratch
for room, area in house :
  print("the " + str(room) + " is " + str(area) + " sqm")
quit()
######################################################################
# Loop Data Structures Part 1 -------------------------------------------
# Loop over dictionary
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin',
          'norway':'oslo', 'italy':'rome', 'poland':'warsaw', 'austria':'vienna' }
# Iterate over europe
for key, value in europe.items() :
  print("the capital of " + str(key) + " is " + str(value))
quit()
# Loop over Numpy array
# Import numpy as np
import numpy as np
import pandas as pd
# Load data
mlb = pd.read_csv("Datasets/MLB.csv", sep = ",")
# Create data for the exercise
np_height = np.array(mlb[["Height"]])
np_weight = np.array(mlb[["Weight"]])
np_baseball = []
# note: the nested loops pair every height with every weight (a cartesian product)
for height in np.nditer(np_height) :
  for weight in np.nditer(np_weight) :
    np_baseball.append([height, weight])
quit()
np_baseball = np.array(np_baseball)
type(np_baseball)
# For loop over np_height
for height in np.nditer(np_height) :
  print(str(height) + " inches")
quit()
# For loop over np_baseball
for hw in np.nditer(np_baseball) :
  print(hw)
quit()
######################################################################
# Loop Data Structures Part 2 -------------------------------------------
# Loop over DataFrame (1)
# Import cars data
import pandas as pd
cars = pd.read_csv('Datasets/Cars.csv', index_col = 0)
# Iterate over rows of cars
for lab, row in cars.iterrows() :
  print(lab)
  print(row)
quit()
# Loop over DataFrame (2)
# Adapt for loop
for lab, row in cars.iterrows() :
  print(str(lab) + ": " + str(row["cars_per_cap"]))
quit()
# Add column (1)
# Code for loop that adds COUNTRY column
for lab, row in cars.iterrows() :
  cars.loc[lab, "COUNTRY"] = row["country"].upper()
quit()
# Print cars
print(cars)
# Add column (2)
# Import cars data
import pandas as pd
cars = pd.read_csv('Datasets/Cars.csv', index_col = 0)
# Use .apply(str.upper)
cars["COUNTRY"] = cars["country"].apply(str.upper)
print(cars)
######################################################################
|
[
"numpy.array",
"numpy.nditer",
"pandas.read_csv"
] |
[((2262, 2302), 'pandas.read_csv', 'pd.read_csv', (['"""Datasets/MLB.csv"""'], {'sep': '""","""'}), "('Datasets/MLB.csv', sep=',')\n", (2273, 2302), True, 'import pandas as pd\n'), ((2349, 2374), 'numpy.array', 'np.array', (["mlb[['Height']]"], {}), "(mlb[['Height']])\n", (2357, 2374), True, 'import numpy as np\n'), ((2387, 2412), 'numpy.array', 'np.array', (["mlb[['Weight']]"], {}), "(mlb[['Weight']])\n", (2395, 2412), True, 'import numpy as np\n'), ((2445, 2465), 'numpy.nditer', 'np.nditer', (['np_height'], {}), '(np_height)\n', (2454, 2465), True, 'import numpy as np\n'), ((2569, 2590), 'numpy.array', 'np.array', (['np_baseball'], {}), '(np_baseball)\n', (2577, 2590), True, 'import numpy as np\n'), ((2650, 2670), 'numpy.nditer', 'np.nditer', (['np_height'], {}), '(np_height)\n', (2659, 2670), True, 'import numpy as np\n'), ((2753, 2775), 'numpy.nditer', 'np.nditer', (['np_baseball'], {}), '(np_baseball)\n', (2762, 2775), True, 'import numpy as np\n'), ((3017, 3062), 'pandas.read_csv', 'pd.read_csv', (['"""Datasets/Cars.csv"""'], {'index_col': '(0)'}), "('Datasets/Cars.csv', index_col=0)\n", (3028, 3062), True, 'import pandas as pd\n'), ((3542, 3587), 'pandas.read_csv', 'pd.read_csv', (['"""Datasets/Cars.csv"""'], {'index_col': '(0)'}), "('Datasets/Cars.csv', index_col=0)\n", (3553, 3587), True, 'import pandas as pd\n'), ((2484, 2504), 'numpy.nditer', 'np.nditer', (['np_weight'], {}), '(np_weight)\n', (2493, 2504), True, 'import numpy as np\n')]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import functools
import gzip
import hashlib
import json
import logging
import os
import random
import warnings
from collections import defaultdict
from dataclasses import dataclass, field
from itertools import islice
from pathlib import Path
from typing import (
    ClassVar,
    Dict,
    List,
    Optional,
    Sequence,
    Tuple,
    Type,
    TypedDict,
    Union,
)

import numpy as np
import torch
from iopath.common.file_io import PathManager
from PIL import Image
from pytorch3d.io import IO
from pytorch3d.renderer.cameras import PerspectiveCameras
from pytorch3d.structures.pointclouds import Pointclouds

from . import types
from .dataset_base import DatasetBase, FrameData

logger = logging.getLogger(__name__)


class FrameAnnotsEntry(TypedDict):
    subset: Optional[str]
    frame_annotation: types.FrameAnnotation


@dataclass(eq=False)
class JsonIndexDataset(DatasetBase):
    """
    A dataset with annotations in json files like the Common Objects in 3D
    (CO3D) dataset.

    Args:
        frame_annotations_file: A zipped json file containing metadata of the
            frames in the dataset, serialized List[types.FrameAnnotation].
        sequence_annotations_file: A zipped json file containing metadata of the
            sequences in the dataset, serialized List[types.SequenceAnnotation].
        subset_lists_file: A json file containing the lists of frames
            corresponding to different subsets (e.g. train/val/test) of the dataset;
            format: {subset: (sequence_name, frame_id, file_path)}.
        subsets: Restrict frames/sequences only to the given list of subsets
            as defined in subset_lists_file (see above).
        limit_to: Limit the dataset to the first #limit_to frames (after other
            filters have been applied).
        limit_sequences_to: Limit the dataset to the first
            #limit_sequences_to sequences (after other sequence filters have been
            applied but before frame-based filters).
        pick_sequence: A list of sequence names to restrict the dataset to.
        exclude_sequence: A list of the names of the sequences to exclude.
        limit_category_to: Restrict the dataset to the given list of categories.
        dataset_root: The root folder of the dataset; all the paths in jsons are
            specified relative to this root (but not json paths themselves).
        load_images: Enable loading the frame RGB data.
        load_depths: Enable loading the frame depth maps.
        load_depth_masks: Enable loading the frame depth map masks denoting the
            depth values used for evaluation (the points consistent across views).
        load_masks: Enable loading frame foreground masks.
        load_point_clouds: Enable loading sequence-level point clouds.
        max_points: Cap on the number of loaded points in the point cloud;
            if reached, they are randomly sampled without replacement.
        mask_images: Whether to mask the images with the loaded foreground masks;
            0 value is used for background.
        mask_depths: Whether to mask the depth maps with the loaded foreground
            masks; 0 value is used for background.
        image_height: The height of the returned images, masks, and depth maps;
            aspect ratio is preserved during cropping/resizing.
        image_width: The width of the returned images, masks, and depth maps;
            aspect ratio is preserved during cropping/resizing.
        box_crop: Enable cropping of the image around the bounding box inferred
            from the foreground region of the loaded segmentation mask; masks
            and depth maps are cropped accordingly; cameras are corrected.
        box_crop_mask_thr: The threshold used to separate pixels into foreground
            and background based on the foreground_probability mask; if no value
            is greater than this threshold, the loader lowers it and repeats.
        box_crop_context: The amount of additional padding added to each
            dimension of the cropping bounding box, relative to box size.
        remove_empty_masks: Removes the frames with no active foreground pixels
            in the segmentation mask after thresholding (see box_crop_mask_thr).
        n_frames_per_sequence: If > 0, randomly samples #n_frames_per_sequence
            frames in each sequence uniformly without replacement if it has
            more frames than that; applied before other frame-level filters.
        seed: The seed of the random generator sampling #n_frames_per_sequence
            random frames per sequence.
        sort_frames: Enable frame annotations sorting to group frames from the
            same sequences together and order them by timestamps.
        eval_batches: A list of batches that form the evaluation set;
            list of batch-sized lists of indices corresponding to __getitem__
            of this class, thus it can be used directly as a batch sampler.
    """

    frame_annotations_type: ClassVar[
        Type[types.FrameAnnotation]
    ] = types.FrameAnnotation

    path_manager: Optional[PathManager] = None
    frame_annotations_file: str = ""
    sequence_annotations_file: str = ""
    subset_lists_file: str = ""
    subsets: Optional[List[str]] = None
    limit_to: int = 0
    limit_sequences_to: int = 0
    pick_sequence: Sequence[str] = ()
    exclude_sequence: Sequence[str] = ()
    limit_category_to: Sequence[int] = ()
    dataset_root: str = ""
    load_images: bool = True
    load_depths: bool = True
    load_depth_masks: bool = True
    load_masks: bool = True
    load_point_clouds: bool = False
    max_points: int = 0
    mask_images: bool = False
    mask_depths: bool = False
    image_height: Optional[int] = 256
    image_width: Optional[int] = 256
    box_crop: bool = False
    box_crop_mask_thr: float = 0.4
    box_crop_context: float = 1.0
    remove_empty_masks: bool = False
    n_frames_per_sequence: int = -1
    seed: int = 0
    sort_frames: bool = False
    eval_batches: Optional[List[List[int]]] = None
    frame_annots: List[FrameAnnotsEntry] = field(init=False)
    seq_annots: Dict[str, types.SequenceAnnotation] = field(init=False)

    def __post_init__(self) -> None:
        # pyre-fixme[16]: `JsonIndexDataset` has no attribute `subset_to_image_path`.
        self.subset_to_image_path = None
        self._load_frames()
        self._load_sequences()
        if self.sort_frames:
            self._sort_frames()
        self._load_subset_lists()
        self._filter_db()  # also computes sequence indices
        logger.info(str(self))
def seq_frame_index_to_dataset_index(
self,
        seq_frame_index: List[
            List[Union[Tuple[str, int, str], Tuple[str, int]]]
        ],
) -> List[List[int]]:
"""
Obtain indices into the dataset object given a list of frames specified as
`seq_frame_index = List[List[Tuple[sequence_name:str, frame_number:int]]]`.
"""
# TODO: check the frame numbers are unique
_dataset_seq_frame_n_index = {
seq: {
self.frame_annots[idx]["frame_annotation"].frame_number: idx
for idx in seq_idx
}
for seq, seq_idx in self._seq_to_idx.items()
}
def _get_batch_idx(seq_name, frame_no, path=None) -> int:
idx = _dataset_seq_frame_n_index[seq_name][frame_no]
if path is not None:
# Check that the loaded frame path is consistent
# with the one stored in self.frame_annots.
assert os.path.normpath(
self.frame_annots[idx]["frame_annotation"].image.path
) == os.path.normpath(
path
), f"Inconsistent batch {seq_name, frame_no, path}."
return idx
batches_idx = [[_get_batch_idx(*b) for b in batch] for batch in seq_frame_index]
return batches_idx
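    # Illustrative example (sequence names and index values are made up, not
    # taken from any real dataset): with
    # seq_frame_index = [[("seq_a", 3), ("seq_a", 7)], [("seq_b", 0)]]
    # this returns nested dataset indices such as [[12, 16], [40]], which can
    # be consumed directly by a torch.utils.data.BatchSampler-style sampler.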
def __str__(self) -> str:
return f"JsonIndexDataset #frames={len(self.frame_annots)}"
def __len__(self) -> int:
return len(self.frame_annots)
def _get_frame_type(self, entry: FrameAnnotsEntry) -> Optional[str]:
return entry["subset"]
def __getitem__(self, index) -> FrameData:
if index >= len(self.frame_annots):
raise IndexError(f"index {index} out of range {len(self.frame_annots)}")
entry = self.frame_annots[index]["frame_annotation"]
point_cloud = self.seq_annots[entry.sequence_name].point_cloud
frame_data = FrameData(
frame_number=_safe_as_tensor(entry.frame_number, torch.long),
frame_timestamp=_safe_as_tensor(entry.frame_timestamp, torch.float),
sequence_name=entry.sequence_name,
sequence_category=self.seq_annots[entry.sequence_name].category,
camera_quality_score=_safe_as_tensor(
self.seq_annots[entry.sequence_name].viewpoint_quality_score,
torch.float,
),
point_cloud_quality_score=_safe_as_tensor(
point_cloud.quality_score, torch.float
)
if point_cloud is not None
else None,
)
# The rest of the fields are optional
frame_data.frame_type = self._get_frame_type(self.frame_annots[index])
(
frame_data.fg_probability,
frame_data.mask_path,
frame_data.bbox_xywh,
clamp_bbox_xyxy,
) = self._load_crop_fg_probability(entry)
scale = 1.0
if self.load_images and entry.image is not None:
# original image size
frame_data.image_size_hw = _safe_as_tensor(entry.image.size, torch.long)
(
frame_data.image_rgb,
frame_data.image_path,
frame_data.mask_crop,
scale,
) = self._load_crop_images(
entry, frame_data.fg_probability, clamp_bbox_xyxy
)
if self.load_depths and entry.depth is not None:
(
frame_data.depth_map,
frame_data.depth_path,
frame_data.depth_mask,
) = self._load_mask_depth(entry, clamp_bbox_xyxy, frame_data.fg_probability)
if entry.viewpoint is not None:
frame_data.camera = self._get_pytorch3d_camera(
entry,
scale,
clamp_bbox_xyxy,
)
if self.load_point_clouds and point_cloud is not None:
frame_data.sequence_point_cloud_path = pcl_path = os.path.join(
self.dataset_root, point_cloud.path
)
frame_data.sequence_point_cloud = _load_pointcloud(
self._local_path(pcl_path), max_points=self.max_points
)
return frame_data
def _load_crop_fg_probability(
self, entry: types.FrameAnnotation
) -> Tuple[
Optional[torch.Tensor],
Optional[str],
Optional[torch.Tensor],
Optional[torch.Tensor],
]:
fg_probability, full_path, bbox_xywh, clamp_bbox_xyxy = (
None,
None,
None,
None,
)
if (self.load_masks or self.box_crop) and entry.mask is not None:
full_path = os.path.join(self.dataset_root, entry.mask.path)
mask = _load_mask(self._local_path(full_path))
if mask.shape[-2:] != entry.image.size:
raise ValueError(
f"bad mask size: {mask.shape[-2:]} vs {entry.image.size}!"
)
bbox_xywh = torch.tensor(_get_bbox_from_mask(mask, self.box_crop_mask_thr))
if self.box_crop:
clamp_bbox_xyxy = _get_clamp_bbox(bbox_xywh, self.box_crop_context)
mask = _crop_around_box(mask, clamp_bbox_xyxy, full_path)
fg_probability, _, _ = self._resize_image(mask, mode="nearest")
return fg_probability, full_path, bbox_xywh, clamp_bbox_xyxy
def _load_crop_images(
self,
entry: types.FrameAnnotation,
fg_probability: Optional[torch.Tensor],
clamp_bbox_xyxy: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, str, torch.Tensor, float]:
assert self.dataset_root is not None and entry.image is not None
path = os.path.join(self.dataset_root, entry.image.path)
image_rgb = _load_image(self._local_path(path))
if image_rgb.shape[-2:] != entry.image.size:
raise ValueError(
f"bad image size: {image_rgb.shape[-2:]} vs {entry.image.size}!"
)
if self.box_crop:
assert clamp_bbox_xyxy is not None
image_rgb = _crop_around_box(image_rgb, clamp_bbox_xyxy, path)
image_rgb, scale, mask_crop = self._resize_image(image_rgb)
if self.mask_images:
assert fg_probability is not None
image_rgb *= fg_probability
return image_rgb, path, mask_crop, scale
def _load_mask_depth(
self,
entry: types.FrameAnnotation,
clamp_bbox_xyxy: Optional[torch.Tensor],
fg_probability: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, str, torch.Tensor]:
entry_depth = entry.depth
assert entry_depth is not None
path = os.path.join(self.dataset_root, entry_depth.path)
depth_map = _load_depth(self._local_path(path), entry_depth.scale_adjustment)
if self.box_crop:
assert clamp_bbox_xyxy is not None
depth_bbox_xyxy = _rescale_bbox(
clamp_bbox_xyxy, entry.image.size, depth_map.shape[-2:]
)
depth_map = _crop_around_box(depth_map, depth_bbox_xyxy, path)
depth_map, _, _ = self._resize_image(depth_map, mode="nearest")
if self.mask_depths:
assert fg_probability is not None
depth_map *= fg_probability
if self.load_depth_masks:
assert entry_depth.mask_path is not None
mask_path = os.path.join(self.dataset_root, entry_depth.mask_path)
depth_mask = _load_depth_mask(self._local_path(mask_path))
if self.box_crop:
assert clamp_bbox_xyxy is not None
depth_mask_bbox_xyxy = _rescale_bbox(
clamp_bbox_xyxy, entry.image.size, depth_mask.shape[-2:]
)
depth_mask = _crop_around_box(
depth_mask, depth_mask_bbox_xyxy, mask_path
)
depth_mask, _, _ = self._resize_image(depth_mask, mode="nearest")
else:
depth_mask = torch.ones_like(depth_map)
return depth_map, path, depth_mask
def _get_pytorch3d_camera(
self,
entry: types.FrameAnnotation,
scale: float,
clamp_bbox_xyxy: Optional[torch.Tensor],
) -> PerspectiveCameras:
entry_viewpoint = entry.viewpoint
assert entry_viewpoint is not None
# principal point and focal length
principal_point = torch.tensor(
entry_viewpoint.principal_point, dtype=torch.float
)
focal_length = torch.tensor(entry_viewpoint.focal_length, dtype=torch.float)
half_image_size_wh_orig = (
torch.tensor(list(reversed(entry.image.size)), dtype=torch.float) / 2.0
)
# first, we convert from the dataset's NDC convention to pixels
format = entry_viewpoint.intrinsics_format
if format.lower() == "ndc_norm_image_bounds":
# this is e.g. currently used in CO3D for storing intrinsics
rescale = half_image_size_wh_orig
elif format.lower() == "ndc_isotropic":
rescale = half_image_size_wh_orig.min()
else:
raise ValueError(f"Unknown intrinsics format: {format}")
# principal point and focal length in pixels
principal_point_px = half_image_size_wh_orig - principal_point * rescale
focal_length_px = focal_length * rescale
if self.box_crop:
assert clamp_bbox_xyxy is not None
principal_point_px -= clamp_bbox_xyxy[:2]
# now, convert from pixels to PyTorch3D v0.5+ NDC convention
if self.image_height is None or self.image_width is None:
out_size = list(reversed(entry.image.size))
else:
out_size = [self.image_width, self.image_height]
half_image_size_output = torch.tensor(out_size, dtype=torch.float) / 2.0
half_min_image_size_output = half_image_size_output.min()
# rescaled principal point and focal length in ndc
principal_point = (
half_image_size_output - principal_point_px * scale
) / half_min_image_size_output
focal_length = focal_length_px * scale / half_min_image_size_output
return PerspectiveCameras(
focal_length=focal_length[None],
principal_point=principal_point[None],
R=torch.tensor(entry_viewpoint.R, dtype=torch.float)[None],
T=torch.tensor(entry_viewpoint.T, dtype=torch.float)[None],
)
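    # Worked example of the intrinsics conversion above (illustrative numbers
    # only): for a 200x100 (W x H) image stored in "ndc_norm_image_bounds"
    # format with principal_point = (0, 0) and focal_length = (2, 2):
    #   half_image_size_wh_orig = (100, 50)  ->  rescale = (100, 50)
    #   focal_length_px = (200, 100); principal_point_px = (100, 50)
    # i.e. the principal point lands at the pixel center, and both pixel
    # quantities are then renormalized by half of the smaller output dimension
    # to match the PyTorch3D v0.5+ NDC convention.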
def _load_frames(self) -> None:
logger.info(f"Loading Co3D frames from {self.frame_annotations_file}.")
local_file = self._local_path(self.frame_annotations_file)
with gzip.open(local_file, "rt", encoding="utf8") as zipfile:
frame_annots_list = types.load_dataclass(
zipfile, List[self.frame_annotations_type]
)
if not frame_annots_list:
raise ValueError("Empty dataset!")
self.frame_annots = [
FrameAnnotsEntry(frame_annotation=a, subset=None) for a in frame_annots_list
]
def _load_sequences(self) -> None:
logger.info(f"Loading Co3D sequences from {self.sequence_annotations_file}.")
local_file = self._local_path(self.sequence_annotations_file)
with gzip.open(local_file, "rt", encoding="utf8") as zipfile:
seq_annots = types.load_dataclass(zipfile, List[types.SequenceAnnotation])
if not seq_annots:
raise ValueError("Empty sequences file!")
self.seq_annots = {entry.sequence_name: entry for entry in seq_annots}
def _load_subset_lists(self) -> None:
logger.info(f"Loading Co3D subset lists from {self.subset_lists_file}.")
if not self.subset_lists_file:
return
with open(self._local_path(self.subset_lists_file), "r") as f:
subset_to_seq_frame = json.load(f)
frame_path_to_subset = {
path: subset
for subset, frames in subset_to_seq_frame.items()
for _, _, path in frames
}
for frame in self.frame_annots:
frame["subset"] = frame_path_to_subset.get(
frame["frame_annotation"].image.path, None
)
if frame["subset"] is None:
warnings.warn(
"Subset lists are given but don't include "
+ frame["frame_annotation"].image.path
)
def _sort_frames(self) -> None:
# Sort frames to have them grouped by sequence, ordered by timestamp
self.frame_annots = sorted(
self.frame_annots,
key=lambda f: (
f["frame_annotation"].sequence_name,
f["frame_annotation"].frame_timestamp or 0,
),
)
def _filter_db(self) -> None:
if self.remove_empty_masks:
logger.info("Removing images with empty masks.")
old_len = len(self.frame_annots)
msg = "remove_empty_masks needs every MaskAnnotation.mass to be set."
def positive_mass(frame_annot: types.FrameAnnotation) -> bool:
mask = frame_annot.mask
if mask is None:
return False
if mask.mass is None:
raise ValueError(msg)
return mask.mass > 1
self.frame_annots = [
frame
for frame in self.frame_annots
if positive_mass(frame["frame_annotation"])
]
logger.info("... filtered %d -> %d" % (old_len, len(self.frame_annots)))
# this has to be called after joining with categories!!
subsets = self.subsets
if subsets:
if not self.subset_lists_file:
raise ValueError(
"Subset filter is on but subset_lists_file was not given"
)
logger.info(f"Limiting Co3D dataset to the '{subsets}' subsets.")
# truncate the list of subsets to the valid one
self.frame_annots = [
entry for entry in self.frame_annots if entry["subset"] in subsets
]
if len(self.frame_annots) == 0:
raise ValueError(f"There are no frames in the '{subsets}' subsets!")
self._invalidate_indexes(filter_seq_annots=True)
if len(self.limit_category_to) > 0:
logger.info(f"Limiting dataset to categories: {self.limit_category_to}")
self.seq_annots = {
name: entry
for name, entry in self.seq_annots.items()
if entry.category in self.limit_category_to
}
# sequence filters
for prefix in ("pick", "exclude"):
orig_len = len(self.seq_annots)
attr = f"{prefix}_sequence"
arr = getattr(self, attr)
if len(arr) > 0:
logger.info(f"{attr}: {str(arr)}")
self.seq_annots = {
name: entry
for name, entry in self.seq_annots.items()
if (name in arr) == (prefix == "pick")
}
logger.info("... filtered %d -> %d" % (orig_len, len(self.seq_annots)))
if self.limit_sequences_to > 0:
self.seq_annots = dict(
islice(self.seq_annots.items(), self.limit_sequences_to)
)
# retain only frames from retained sequences
self.frame_annots = [
f
for f in self.frame_annots
if f["frame_annotation"].sequence_name in self.seq_annots
]
self._invalidate_indexes()
if self.n_frames_per_sequence > 0:
logger.info(f"Taking max {self.n_frames_per_sequence} per sequence.")
keep_idx = []
for seq, seq_indices in self._seq_to_idx.items():
# infer the seed from the sequence name, this is reproducible
# and makes the selection differ for different sequences
seed = _seq_name_to_seed(seq) + self.seed
seq_idx_shuffled = random.Random(seed).sample(
sorted(seq_indices), len(seq_indices)
)
keep_idx.extend(seq_idx_shuffled[: self.n_frames_per_sequence])
logger.info(
"... filtered %d -> %d" % (len(self.frame_annots), len(keep_idx))
)
self.frame_annots = [self.frame_annots[i] for i in keep_idx]
self._invalidate_indexes(filter_seq_annots=False)
# sequences are not decimated, so self.seq_annots is valid
if self.limit_to > 0 and self.limit_to < len(self.frame_annots):
logger.info(
"limit_to: filtered %d -> %d" % (len(self.frame_annots), self.limit_to)
)
self.frame_annots = self.frame_annots[: self.limit_to]
self._invalidate_indexes(filter_seq_annots=True)
def _invalidate_indexes(self, filter_seq_annots: bool = False) -> None:
        # update _seq_to_idx and filter seq_annots according to frame_annots change
        # if filter_seq_annots, also updates seq_annots based on the changed _seq_to_idx
self._invalidate_seq_to_idx()
if filter_seq_annots:
self.seq_annots = {
k: v for k, v in self.seq_annots.items() if k in self._seq_to_idx
}
def _invalidate_seq_to_idx(self) -> None:
seq_to_idx = defaultdict(list)
for idx, entry in enumerate(self.frame_annots):
seq_to_idx[entry["frame_annotation"].sequence_name].append(idx)
self._seq_to_idx = seq_to_idx
def _resize_image(
self, image, mode="bilinear"
) -> Tuple[torch.Tensor, float, torch.Tensor]:
image_height, image_width = self.image_height, self.image_width
if image_height is None or image_width is None:
# skip the resizing
imre_ = torch.from_numpy(image)
return imre_, 1.0, torch.ones_like(imre_[:1])
# takes numpy array, returns pytorch tensor
minscale = min(
image_height / image.shape[-2],
image_width / image.shape[-1],
)
imre = torch.nn.functional.interpolate(
torch.from_numpy(image)[None],
# pyre-ignore[6]
scale_factor=minscale,
mode=mode,
align_corners=False if mode == "bilinear" else None,
recompute_scale_factor=True,
)[0]
imre_ = torch.zeros(image.shape[0], self.image_height, self.image_width)
imre_[:, 0 : imre.shape[1], 0 : imre.shape[2]] = imre
mask = torch.zeros(1, self.image_height, self.image_width)
mask[:, 0 : imre.shape[1] - 1, 0 : imre.shape[2] - 1] = 1.0
return imre_, minscale, mask
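    # Example (illustrative shapes): a 300x400 (H x W) input with a 256x256
    # target gives minscale = min(256/300, 256/400) = 0.64, so the image is
    # resized to 192x256, pasted into the top-left corner of a 256x256 zero
    # canvas, and the returned mask flags the region covered by real pixels.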
def _local_path(self, path: str) -> str:
if self.path_manager is None:
return path
return self.path_manager.get_local_path(path)
def get_frame_numbers_and_timestamps(
self, idxs: Sequence[int]
) -> List[Tuple[int, float]]:
out: List[Tuple[int, float]] = []
for idx in idxs:
frame_annotation = self.frame_annots[idx]["frame_annotation"]
out.append(
(frame_annotation.frame_number, frame_annotation.frame_timestamp)
)
return out
def get_eval_batches(self) -> Optional[List[List[int]]]:
return self.eval_batches
def _seq_name_to_seed(seq_name) -> int:
return int(hashlib.sha1(seq_name.encode("utf-8")).hexdigest(), 16)
def _load_image(path) -> np.ndarray:
with Image.open(path) as pil_im:
im = np.array(pil_im.convert("RGB"))
im = im.transpose((2, 0, 1))
im = im.astype(np.float32) / 255.0
return im
def _load_16big_png_depth(depth_png) -> np.ndarray:
with Image.open(depth_png) as depth_pil:
# the image is stored with 16-bit depth but PIL reads it as I (32 bit).
# we cast it to uint16, then reinterpret as float16, then cast to float32
depth = (
np.frombuffer(np.array(depth_pil, dtype=np.uint16), dtype=np.float16)
.astype(np.float32)
.reshape((depth_pil.size[1], depth_pil.size[0]))
)
return depth
def _load_1bit_png_mask(file: str) -> np.ndarray:
with Image.open(file) as pil_im:
mask = (np.array(pil_im.convert("L")) > 0.0).astype(np.float32)
return mask
def _load_depth_mask(path) -> np.ndarray:
if not path.lower().endswith(".png"):
raise ValueError('unsupported depth mask file name "%s"' % path)
m = _load_1bit_png_mask(path)
return m[None] # fake feature channel
def _load_depth(path, scale_adjustment) -> np.ndarray:
if not path.lower().endswith(".png"):
raise ValueError('unsupported depth file name "%s"' % path)
d = _load_16big_png_depth(path) * scale_adjustment
d[~np.isfinite(d)] = 0.0
return d[None] # fake feature channel
def _load_mask(path) -> np.ndarray:
with Image.open(path) as pil_im:
mask = np.array(pil_im)
mask = mask.astype(np.float32) / 255.0
return mask[None] # fake feature channel
def _get_1d_bounds(arr) -> Tuple[int, int]:
nz = np.flatnonzero(arr)
return nz[0], nz[-1]
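# e.g. _get_1d_bounds(np.array([0, 0, 3, 1, 0])) == (2, 3): the first and
# last indices holding a non-zero value.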
def _get_bbox_from_mask(
mask, thr, decrease_quant: float = 0.05
) -> Tuple[int, int, int, int]:
# bbox in xywh
masks_for_box = np.zeros_like(mask)
while masks_for_box.sum() <= 1.0:
masks_for_box = (mask > thr).astype(np.float32)
thr -= decrease_quant
if thr <= 0.0:
warnings.warn(f"Empty masks_for_bbox (thr={thr}) => using full image.")
x0, x1 = _get_1d_bounds(masks_for_box.sum(axis=-2))
y0, y1 = _get_1d_bounds(masks_for_box.sum(axis=-1))
return x0, y0, x1 - x0, y1 - y0
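# Example (assumed mask): if the pixels above `thr` span columns 3..8 and
# rows 2..5, the x-bounds are (3, 8) and the y-bounds (2, 5), so the function
# returns the xywh box (3, 2, 5, 3).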
def _get_clamp_bbox(
bbox: torch.Tensor, box_crop_context: float = 0.0, impath: str = ""
) -> torch.Tensor:
# box_crop_context: rate of expansion for bbox
# returns possibly expanded bbox xyxy as float
# increase box size
if box_crop_context > 0.0:
c = box_crop_context
bbox = bbox.float()
bbox[0] -= bbox[2] * c / 2
bbox[1] -= bbox[3] * c / 2
bbox[2] += bbox[2] * c
bbox[3] += bbox[3] * c
if (bbox[2:] <= 1.0).any():
raise ValueError(
f"squashed image {impath}!! The bounding box contains no pixels."
)
bbox[2:] = torch.clamp(bbox[2:], 2)
bbox[2:] += bbox[0:2] + 1 # convert to [xmin, ymin, xmax, ymax]
# +1 because upper bound is not inclusive
return bbox
def _crop_around_box(tensor, bbox, impath: str = ""):
# bbox is xyxy, where the upper bound is corrected with +1
bbox[[0, 2]] = torch.clamp(bbox[[0, 2]], 0.0, tensor.shape[-1])
bbox[[1, 3]] = torch.clamp(bbox[[1, 3]], 0.0, tensor.shape[-2])
bbox = bbox.round().long()
tensor = tensor[..., bbox[1] : bbox[3], bbox[0] : bbox[2]]
assert all(c > 0 for c in tensor.shape), f"squashed image {impath}"
return tensor
def _rescale_bbox(bbox: torch.Tensor, orig_res, new_res) -> torch.Tensor:
assert bbox is not None
assert np.prod(orig_res) > 1e-8
# average ratio of dimensions
rel_size = (new_res[0] / orig_res[0] + new_res[1] / orig_res[1]) / 2.0
return bbox * rel_size
def _safe_as_tensor(data, dtype):
if data is None:
return None
return torch.tensor(data, dtype=dtype)
# NOTE this cache is per-worker; they are implemented as processes.
# each batch is loaded and collated by a single worker;
# since sequences tend to co-occur within batches, this is useful.
@functools.lru_cache(maxsize=256)
def _load_pointcloud(pcl_path: Union[str, Path], max_points: int = 0) -> Pointclouds:
pcl = IO().load_pointcloud(pcl_path)
if max_points > 0:
pcl = pcl.subsample(max_points)
return pcl
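# Hedged usage sketch -- the paths and category below are assumptions, not
# taken from this module; they follow the per-category CO3D layout that the
# dataclass fields describe:
#
#     dataset = JsonIndexDataset(
#         dataset_root="<CO3D_ROOT>",
#         frame_annotations_file="<CO3D_ROOT>/apple/frame_annotations.jgz",
#         sequence_annotations_file="<CO3D_ROOT>/apple/sequence_annotations.jgz",
#         subset_lists_file="<CO3D_ROOT>/apple/set_lists.json",
#         subsets=["train"],
#         image_height=256,
#         image_width=256,
#         box_crop=True,
#     )
#     frame_data = dataset[0]  # FrameData with image, mask, depth and camera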
|
[
"logging.getLogger",
"numpy.prod",
"gzip.open",
"dataclasses.dataclass",
"torch.from_numpy",
"numpy.array",
"numpy.isfinite",
"random.Random",
"numpy.flatnonzero",
"os.path.normpath",
"pytorch3d.io.IO",
"warnings.warn",
"dataclasses.field",
"torch.ones_like",
"torch.clamp",
"PIL.Image.open",
"os.path.join",
"torch.tensor",
"collections.defaultdict",
"json.load",
"functools.lru_cache",
"numpy.zeros_like",
"torch.zeros"
] |
[((908, 935), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (925, 935), False, 'import logging\n'), ((1046, 1065), 'dataclasses.dataclass', 'dataclass', ([], {'eq': '(False)'}), '(eq=False)\n', (1055, 1065), False, 'from dataclasses import dataclass, field\n'), ((30535, 30567), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(256)'}), '(maxsize=256)\n', (30554, 30567), False, 'import functools\n'), ((6340, 6357), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (6345, 6357), False, 'from dataclasses import dataclass, field\n'), ((6412, 6429), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (6417, 6429), False, 'from dataclasses import dataclass, field\n'), ((28144, 28163), 'numpy.flatnonzero', 'np.flatnonzero', (['arr'], {}), '(arr)\n', (28158, 28163), True, 'import numpy as np\n'), ((28331, 28350), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (28344, 28350), True, 'import numpy as np\n'), ((29348, 29372), 'torch.clamp', 'torch.clamp', (['bbox[2:]', '(2)'], {}), '(bbox[2:], 2)\n', (29359, 29372), False, 'import torch\n'), ((29643, 29691), 'torch.clamp', 'torch.clamp', (['bbox[[0, 2]]', '(0.0)', 'tensor.shape[-1]'], {}), '(bbox[[0, 2]], 0.0, tensor.shape[-1])\n', (29654, 29691), False, 'import torch\n'), ((29711, 29759), 'torch.clamp', 'torch.clamp', (['bbox[[1, 3]]', '(0.0)', 'tensor.shape[-2]'], {}), '(bbox[[1, 3]], 0.0, tensor.shape[-2])\n', (29722, 29759), False, 'import torch\n'), ((30309, 30340), 'torch.tensor', 'torch.tensor', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (30321, 30340), False, 'import torch\n'), ((12619, 12668), 'os.path.join', 'os.path.join', (['self.dataset_root', 'entry.image.path'], {}), '(self.dataset_root, entry.image.path)\n', (12631, 12668), False, 'import os\n'), ((13601, 13650), 'os.path.join', 'os.path.join', (['self.dataset_root', 'entry_depth.path'], {}), '(self.dataset_root, entry_depth.path)\n', (13613, 13650), False, 'import os\n'), ((15331, 15395), 'torch.tensor', 'torch.tensor', (['entry_viewpoint.principal_point'], {'dtype': 'torch.float'}), '(entry_viewpoint.principal_point, dtype=torch.float)\n', (15343, 15395), False, 'import torch\n'), ((15441, 15502), 'torch.tensor', 'torch.tensor', (['entry_viewpoint.focal_length'], {'dtype': 'torch.float'}), '(entry_viewpoint.focal_length, dtype=torch.float)\n', (15453, 15502), False, 'import torch\n'), ((24389, 24406), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (24400, 24406), False, 'from collections import defaultdict\n'), ((25437, 25501), 'torch.zeros', 'torch.zeros', (['image.shape[0]', 'self.image_height', 'self.image_width'], {}), '(image.shape[0], self.image_height, self.image_width)\n', (25448, 25501), False, 'import torch\n'), ((25579, 25630), 'torch.zeros', 'torch.zeros', (['(1)', 'self.image_height', 'self.image_width'], {}), '(1, self.image_height, self.image_width)\n', (25590, 25630), False, 'import torch\n'), ((26545, 26561), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (26555, 26561), False, 'from PIL import Image\n'), ((26767, 26788), 'PIL.Image.open', 'Image.open', (['depth_png'], {}), '(depth_png)\n', (26777, 26788), False, 'from PIL import Image\n'), ((27246, 27262), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (27256, 27262), False, 'from PIL import Image\n'), ((27940, 27956), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (27950, 27956), False, 'from PIL import Image\n'), ((27983, 
27999), 'numpy.array', 'np.array', (['pil_im'], {}), '(pil_im)\n', (27991, 27999), True, 'import numpy as np\n'), ((28502, 28573), 'warnings.warn', 'warnings.warn', (['f"""Empty masks_for_bbox (thr={thr}) => using full image."""'], {}), "(f'Empty masks_for_bbox (thr={thr}) => using full image.')\n", (28515, 28573), False, 'import warnings\n'), ((30060, 30077), 'numpy.prod', 'np.prod', (['orig_res'], {}), '(orig_res)\n', (30067, 30077), True, 'import numpy as np\n'), ((10859, 10908), 'os.path.join', 'os.path.join', (['self.dataset_root', 'point_cloud.path'], {}), '(self.dataset_root, point_cloud.path)\n', (10871, 10908), False, 'import os\n'), ((11582, 11630), 'os.path.join', 'os.path.join', (['self.dataset_root', 'entry.mask.path'], {}), '(self.dataset_root, entry.mask.path)\n', (11594, 11630), False, 'import os\n'), ((14318, 14372), 'os.path.join', 'os.path.join', (['self.dataset_root', 'entry_depth.mask_path'], {}), '(self.dataset_root, entry_depth.mask_path)\n', (14330, 14372), False, 'import os\n'), ((14922, 14948), 'torch.ones_like', 'torch.ones_like', (['depth_map'], {}), '(depth_map)\n', (14937, 14948), False, 'import torch\n'), ((16726, 16767), 'torch.tensor', 'torch.tensor', (['out_size'], {'dtype': 'torch.float'}), '(out_size, dtype=torch.float)\n', (16738, 16767), False, 'import torch\n'), ((17590, 17634), 'gzip.open', 'gzip.open', (['local_file', '"""rt"""'], {'encoding': '"""utf8"""'}), "(local_file, 'rt', encoding='utf8')\n", (17599, 17634), False, 'import gzip\n'), ((18193, 18237), 'gzip.open', 'gzip.open', (['local_file', '"""rt"""'], {'encoding': '"""utf8"""'}), "(local_file, 'rt', encoding='utf8')\n", (18202, 18237), False, 'import gzip\n'), ((18785, 18797), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18794, 18797), False, 'import json\n'), ((24869, 24892), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (24885, 24892), False, 'import torch\n'), ((27828, 27842), 'numpy.isfinite', 'np.isfinite', (['d'], {}), '(d)\n', (27839, 27842), True, 'import numpy as np\n'), ((30664, 30668), 'pytorch3d.io.IO', 'IO', ([], {}), '()\n', (30666, 30668), False, 'from pytorch3d.io import IO\n'), ((19192, 19294), 'warnings.warn', 'warnings.warn', (['("Subset lists are given but don\'t include " + frame[\'frame_annotation\'].\n image.path)'], {}), '("Subset lists are given but don\'t include " + frame[\n \'frame_annotation\'].image.path)\n', (19205, 19294), False, 'import warnings\n'), ((24924, 24950), 'torch.ones_like', 'torch.ones_like', (['imre_[:1]'], {}), '(imre_[:1])\n', (24939, 24950), False, 'import torch\n'), ((7842, 7913), 'os.path.normpath', 'os.path.normpath', (["self.frame_annots[idx]['frame_annotation'].image.path"], {}), "(self.frame_annots[idx]['frame_annotation'].image.path)\n", (7858, 7913), False, 'import os\n'), ((7955, 7977), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (7971, 7977), False, 'import os\n'), ((17253, 17303), 'torch.tensor', 'torch.tensor', (['entry_viewpoint.R'], {'dtype': 'torch.float'}), '(entry_viewpoint.R, dtype=torch.float)\n', (17265, 17303), False, 'import torch\n'), ((17325, 17375), 'torch.tensor', 'torch.tensor', (['entry_viewpoint.T'], {'dtype': 'torch.float'}), '(entry_viewpoint.T, dtype=torch.float)\n', (17337, 17375), False, 'import torch\n'), ((25184, 25207), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (25200, 25207), False, 'import torch\n'), ((23035, 23054), 'random.Random', 'random.Random', (['seed'], {}), '(seed)\n', (23048, 23054), False, 'import random\n'), 
((27009, 27045), 'numpy.array', 'np.array', (['depth_pil'], {'dtype': 'np.uint16'}), '(depth_pil, dtype=np.uint16)\n', (27017, 27045), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 13:10:41 2018
@author: crius
"""
import Hamiltonians as H
import numpy as np
import tools as t
import spintensor as st
import spinops as so
import time
import Expand as ex
from matplotlib import pyplot as plt
exp = np.exp
N = 8
nlegs = 4
S = 0.5
c = np.sqrt(2)
Jcurr=0
J1=1
J2=1
Htot = H.nlegHeisenberg.blockH(N,S,nlegs,c,Js = [1,1],gamma =[0.0, 4.0],full='True')
Hfull = sum(Htot).todense()
eigs, vecs = np.linalg.eigh(Hfull)
Eigvecs = np.asarray(vecs.T)
print(list(eigs))
Envecs = []
for vc in Eigvecs:
A = ex.Expand(vc,S,N,Jcurr)
Envecs.append(A)
statelist = t.Statelist(N,S)
Statevecs = []
for state in statelist:
vec = []
up = [1,0]
down = [0,1]
for i in state:
if i == 0:
vec.append(down)
elif i ==1:
vec.append(up)
Basestate = ''.join(map(str,state))
    B = [Basestate,vec[0]]  # initializes state so that tensor product can be performed
for i in range(len(vec)-1):
D = np.kron(B[1],vec[i+1])
B[1] = D
Statevecs.append((B[0],B[1]))
####
Basestates = dict(Statevecs)
#print(Basestates)
BS = Basestates['11001100']
s = so.sz(S)
print(t.exval(BS,BS,Hfull))
#####
k=1
Op = so.SziOp(N,S,k,full='True')
def getkey(m,n):
key = str((2**m)*(2*n + 1) -1)
return(key)
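# getkey encodes the pair (m, n) via the bijection (m, n) -> 2**m * (2*n + 1) - 1
# between pairs of non-negative integers and non-negative integers, so every
# (i, j) eigenvector pair maps to a distinct dictionary key, e.g.
# getkey(0, 0) == '0', getkey(1, 0) == '1', getkey(0, 1) == '2'.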
Coeff = []
for i,vec1 in enumerate(Eigvecs):
for j,vec2 in enumerate(Eigvecs):
co = (getkey(i,j),vec2@BS*t.exval(BS,vec1,Op))
Coeff.append(co)
Coeffs = dict(Coeff)
####
Coeff1 = []
for i1,vec1 in enumerate(Eigvecs) :
for j1,vec2 in enumerate(Eigvecs):
c1 = Coeffs[getkey(i1,j1)]
co1 = t.exval(vec1,vec2,Op)*c1
E1 = (getkey(i1,j1),co1)
Coeff1.append(E1)
Coeffs1 = dict(Coeff1)
#print(Coeffs)
######
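# With real eigenvectors and real coefficients, the time dependence of the
# precomputed terms reduces to a double sum over eigenpairs,
# sum_{i,j} c_ij * cos(t * (E_j - E_i)), which the loop below evaluates for
# each time point using the c_ij computed above.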
times = np.linspace(0.0,1,10)
#times = [0, 1/9, 2/9,3/9,4/9,5/9,6/9,7/9,8/9,1]
#print(times)
timeEx = []
for tim in times:
#print(tim)
start = time.time()
ex = []
for i,eig1 in enumerate(eigs):
for j,eig2 in enumerate(eigs):
C = Coeffs1[getkey(i,j)]
#F = t.timeEv(tim,eig1,eig2,C)
            F = C*np.cos(tim*(eig2-eig1))
ex.append(F)
timeEx.append(sum(ex))
stop = time.time()
print(stop-start)
print(timeEx)
plt.plot(times,timeEx)
|
[
"tools.Statelist",
"numpy.sqrt",
"Expand.append",
"spinops.SziOp",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.kron",
"numpy.linspace",
"Expand.Expand",
"tools.exval",
"Hamiltonians.nlegHeisenberg.blockH",
"numpy.linalg.eigh",
"numpy.cos",
"time.time",
"spinops.sz"
] |
[((306, 316), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (313, 316), True, 'import numpy as np\n'), ((343, 429), 'Hamiltonians.nlegHeisenberg.blockH', 'H.nlegHeisenberg.blockH', (['N', 'S', 'nlegs', 'c'], {'Js': '[1, 1]', 'gamma': '[0.0, 4.0]', 'full': '"""True"""'}), "(N, S, nlegs, c, Js=[1, 1], gamma=[0.0, 4.0], full=\n 'True')\n", (366, 429), True, 'import Hamiltonians as H\n'), ((463, 484), 'numpy.linalg.eigh', 'np.linalg.eigh', (['Hfull'], {}), '(Hfull)\n', (477, 484), True, 'import numpy as np\n'), ((495, 513), 'numpy.asarray', 'np.asarray', (['vecs.T'], {}), '(vecs.T)\n', (505, 513), True, 'import numpy as np\n'), ((643, 660), 'tools.Statelist', 't.Statelist', (['N', 'S'], {}), '(N, S)\n', (654, 660), True, 'import tools as t\n'), ((1225, 1233), 'spinops.sz', 'so.sz', (['S'], {}), '(S)\n', (1230, 1233), True, 'import spinops as so\n'), ((1278, 1308), 'spinops.SziOp', 'so.SziOp', (['N', 'S', 'k'], {'full': '"""True"""'}), "(N, S, k, full='True')\n", (1286, 1308), True, 'import spinops as so\n'), ((1890, 1913), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1)', '(10)'], {}), '(0.0, 1, 10)\n', (1901, 1913), True, 'import numpy as np\n'), ((2414, 2437), 'matplotlib.pyplot.plot', 'plt.plot', (['times', 'timeEx'], {}), '(times, timeEx)\n', (2422, 2437), True, 'from matplotlib import pyplot as plt\n'), ((571, 597), 'Expand.Expand', 'ex.Expand', (['vc', 'S', 'N', 'Jcurr'], {}), '(vc, S, N, Jcurr)\n', (580, 597), True, 'import Expand as ex\n'), ((1240, 1262), 'tools.exval', 't.exval', (['BS', 'BS', 'Hfull'], {}), '(BS, BS, Hfull)\n', (1247, 1262), True, 'import tools as t\n'), ((2034, 2045), 'time.time', 'time.time', ([], {}), '()\n', (2043, 2045), False, 'import time\n'), ((2360, 2371), 'time.time', 'time.time', ([], {}), '()\n', (2369, 2371), False, 'import time\n'), ((1051, 1076), 'numpy.kron', 'np.kron', (['B[1]', 'vec[i + 1]'], {}), '(B[1], vec[i + 1])\n', (1058, 1076), True, 'import numpy as np\n'), ((1735, 1758), 'tools.exval', 't.exval', (['vec1', 'vec2', 'Op'], {}), '(vec1, vec2, Op)\n', (1742, 1758), True, 'import tools as t\n'), ((2297, 2309), 'Expand.append', 'ex.append', (['F'], {}), '(F)\n', (2306, 2309), True, 'import Expand as ex\n'), ((1507, 1528), 'tools.exval', 't.exval', (['BS', 'vec1', 'Op'], {}), '(BS, vec1, Op)\n', (1514, 1528), True, 'import tools as t\n'), ((2260, 2287), 'numpy.cos', 'np.cos', (['(tim * (eig2 - eig1))'], {}), '(tim * (eig2 - eig1))\n', (2266, 2287), True, 'import numpy as np\n')]
|
import mnist
import numpy as np
from PIL import Image
from conv import Conv3x3
from maxpool import MaxPool2
from softmax import Softmax
train_images = mnist.train_images()[:100]
train_labels = mnist.train_labels()[:100]
test_images = mnist.test_images()[:1000]
test_labels = mnist.test_labels()[:1000]
conv = Conv3x3(8)
pool = MaxPool2()
softmax = Softmax(13 * 13 * 8, 10)
def forward(image, label):
out = conv.forward((image / 255) - 0.5)
out = pool.forward(out)
out = softmax.forward(out)
loss = -np.log(out[label])
acc = 1 if np.argmax(out) == label else 0
return out, loss, acc
def train(image, label, lr=.005):
out, loss, acc = forward(image, label)
gradient = np.zeros(10)
gradient[label] = -1 / out[label]
gradient = softmax.backprop(gradient, lr)
gradient = pool.backprop(gradient)
gradient = conv.backprop(gradient, lr)
return out, loss, acc
def start_training():
loss = 0
num_correct = 0
for i, (image, label) in enumerate(zip(train_images, train_labels)):
if i % 100 == 99:
print(
f'[Step {i + 1}] Past 100 steps: Average loss {loss / 100}. Accuracy: {num_correct}%'
)
loss = 0
num_correct = 0
_, l, acc = train(image, label)
loss += l
num_correct += acc
def test(image, label):
np_image = np.array(image)
    # run inference only; calling train() here would update the weights
    result, loss, acc = forward(np_image, label)
return result.tolist()
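# Minimal usage sketch (assumes the Conv3x3/MaxPool2/Softmax layers behave as
# imported above):
#
#     start_training()                               # one pass over 100 images
#     scores = test(test_images[0], test_labels[0])  # class scores, as a list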
|
[
"softmax.Softmax",
"mnist.test_labels",
"mnist.train_images",
"numpy.log",
"numpy.argmax",
"mnist.test_images",
"numpy.array",
"numpy.zeros",
"conv.Conv3x3",
"maxpool.MaxPool2",
"mnist.train_labels"
] |
[((311, 321), 'conv.Conv3x3', 'Conv3x3', (['(8)'], {}), '(8)\n', (318, 321), False, 'from conv import Conv3x3\n'), ((329, 339), 'maxpool.MaxPool2', 'MaxPool2', ([], {}), '()\n', (337, 339), False, 'from maxpool import MaxPool2\n'), ((350, 374), 'softmax.Softmax', 'Softmax', (['(13 * 13 * 8)', '(10)'], {}), '(13 * 13 * 8, 10)\n', (357, 374), False, 'from softmax import Softmax\n'), ((152, 172), 'mnist.train_images', 'mnist.train_images', ([], {}), '()\n', (170, 172), False, 'import mnist\n'), ((194, 214), 'mnist.train_labels', 'mnist.train_labels', ([], {}), '()\n', (212, 214), False, 'import mnist\n'), ((235, 254), 'mnist.test_images', 'mnist.test_images', ([], {}), '()\n', (252, 254), False, 'import mnist\n'), ((276, 295), 'mnist.test_labels', 'mnist.test_labels', ([], {}), '()\n', (293, 295), False, 'import mnist\n'), ((689, 701), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (697, 701), True, 'import numpy as np\n'), ((1294, 1309), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1302, 1309), True, 'import numpy as np\n'), ((511, 529), 'numpy.log', 'np.log', (['out[label]'], {}), '(out[label])\n', (517, 529), True, 'import numpy as np\n'), ((543, 557), 'numpy.argmax', 'np.argmax', (['out'], {}), '(out)\n', (552, 557), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Functions for plotting reliability diagrams: smooths of simulated vs observed
outcomes on the y-axis against predicted probabilities on the x-axis.
"""
from __future__ import absolute_import
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sbn
from .plot_utils import _label_despine_save_and_show_plot
from .smoothers import ContinuousSmoother, DiscreteSmoother, SmoothPlotter
from .utils import progress
try:
# in Python 3 range returns an iterator instead of list
# to maintain backwards compatibility use "old" version of range
from past.builtins import range
except ImportError:
pass
# Set the plotting style
sbn.set_style("darkgrid")
def _check_reliability_args(probs, choices, partitions, sim_y):
"""
Ensures `probs` is a 1D or 2D ndarray, that `choices` is a 1D ndarray, that
`partitions` is an int, and that `sim_y` is a ndarray of the same shape as
`probs` or None.
"""
if not isinstance(probs, np.ndarray):
msg = "`probs` MUST be an ndarray."
raise ValueError(msg)
if probs.ndim not in [1, 2]:
msg = "probs` MUST be a 1D or 2D ndarray."
raise ValueError(msg)
if not isinstance(choices, np.ndarray):
msg = "`choices` MUST be an ndarray."
raise ValueError(msg)
if choices.ndim != 1:
msg = "`choices` MUST be a 1D ndarray."
raise ValueError(msg)
if not isinstance(partitions, int):
msg = "`partitions` MUST be an int."
raise ValueError(msg)
if not isinstance(sim_y, np.ndarray) and sim_y is not None:
msg = "`sim_y` MUST be an ndarray or None."
raise ValueError(msg)
    sim_to_prob_conditions = (
        sim_y is not None and probs.ndim != 1 and sim_y.shape != probs.shape
    )
    if sim_to_prob_conditions:
msg = (
"`sim_y` MUST have the same shape as `probs` if "
"`probs.shape[1] != 1`."
)
raise ValueError(msg)
return None
def add_ref_line(ax, ref_label="Perfect Calibration"):
"""
Plots a diagonal line to show perfectly calibrated probabilities.
Parameters
----------
ax : matplotlib Axes instance
The Axes that the reference line should be plotted on.
ref_label : str, optional.
The label to be applied to the reference line that is drawn.
Returns
-------
None. `ax` is modified in place: the line is plotted and the label added.
"""
# Determine the maximum value of the x-axis or y-axis
max_ref_val = max(ax.get_xlim()[1], ax.get_ylim()[1])
min_ref_val = max(ax.get_xlim()[0], ax.get_ylim()[0])
# Determine the values to use to plot the reference line
ref_vals = np.linspace(min_ref_val, max_ref_val, num=100)
# Plot the reference line as a black dashed line
ax.plot(ref_vals, ref_vals, "k--", label=ref_label)
return None
def plot_smoothed_reliability(
probs,
choices,
discrete=True,
partitions=10,
n_estimators=50,
min_samples_leaf=10,
random_state=None,
line_color="#1f78b4",
line_label="Observed vs Predicted",
alpha=None,
sim_y=None,
sim_line_color="#a6cee3",
sim_label="Simulated vs Predicted",
sim_alpha=0.5,
x_label="Mean Predicted Probability",
y_label="Binned\nEmpirical\nProbability",
title=None,
fontsize=12,
ref_line=True,
figsize=(5, 3),
fig_and_ax=None,
legend=True,
progress_bar=True,
show=True,
output_file=None,
dpi=500,
):
"""
Creates a binned reliability plot based on the given probability
predictions and the given observed outcomes.
Parameters
----------
probs : 1D or 2D ndarray.
Each element should be in [0, 1]. There should be 1 column for each
set of predicted probabilities. These will be plotted on the x-axis.
choices : 1D ndarray.
Each element should be either a zero or a one. Elements should denote
whether the alternative corresponding to the given row was chosen or
not. A 'one' corresponds to a an outcome of 'success'.
discrete : bool, optional.
Determines whether discrete smoothing (i.e. binning) will be used or
whether continuous binning via Extremely Randomized Trees will be used.
Default is to use discrete binning, so `discrete == True`.
partitions : positive int, optional.
Denotes the number of partitions to split one's data into for binning.
Only used if `discrete is True`. Default == 10.
n_estimators : positive int, optional.
Determines the number of trees in the ensemble of Extremely Randomized
Trees that is used to do continuous smoothing. This parameter controls
how smooth one's resulting estimate is. The more estimators the
smoother one's estimated relationship and the lower the variance in
that estimated relationship. This kwarg is only used if `discrete is
False`. Default == 50.
min_samples_leaf : positive int, optional.
Determines the minimum number of observations allowed in a leaf node in
any tree in the ensemble. This parameter is conceptually equivalent to
the bandwidth parameter in a kernel density estimator. This kwarg is
only used if `discrete is False`. Default == 10.
random_state : positive int, or None, optional.
Denotes the random seed to be used when constructing the ensemble of
Extremely Randomized Trees. This kwarg is only used if `discrete is
False`. Default is None.
line_color : valid matplotlib color, optional.
Determines the color that is used to plot the predicted probabilities
versus the observed choices. Default is `'#1f78b4'`.
line_label : str or None, optional.
Denotes the label to be used for the lines relating the predicted
probabilities and the binned, empirical probabilities. Default is
'Observed vs Predicted'.
alpha : positive float in [0.0, 1.0], or `None`, optional.
Determines the opacity of the observed data drawn on the plot.
        0.0 == transparent and 1.0 == opaque. Default is None, which
        matplotlib renders as fully opaque.
sim_y : 2D ndarray or None, optional.
Denotes the choices that were simulated based on `probs`. If passed,
`sim_y.shape` MUST equal `probs.shape` in order to ensure that lines
are plotted for the predicted probabilities versus simulated choices.
This kwarg is useful because it shows one the reference distribution of
predicted probabilities versus choices that actually come from one's
postulated model.
sim_line_color : valid matplotlib color, optional.
Determines the color that is used to plot the predicted probabilities
versus the simulated choices. Default is `'#a6cee3'`.
    sim_label : str, or None, optional.
Denotes the label to be used for the lines relating the predicted
probabilities and the binned, empirical probabilities based on the
simulated choices. Default is 'Simulated vs Predicted'.
sim_alpha : positive float in [0.0, 1.0], or `None`, optional.
Determines the opacity of the simulated reliability curves.
0.0 == transparent and 1.0 == opaque. Default == 0.5.
x_label, y_label : str, optional.
Denotes the label for the x-axis and y-axis, respectively. Defaults are
'Mean Predicted Probability' and 'Binned\nEmpirical\nProbability' for
the x-axis and y-axis, respectively.
title : str, or None, optional.
Denotes the title to be displayed for the plot. Default is None.
fontsize : int or None, optional.
The fontsize to be used in the plot. Default is 12.
ref_line : bool, optional.
Determines whether a diagonal line, y = x, will be plotted to show the
expected relationship. Default is True.
figsize : 2-tuple of positive ints.
Determines the size of the created figure. Default == (5, 3).
fig_and_ax : list of matplotlib figure and axis, or `None`, optional.
Determines whether a new figure will be created for the plot or whether
the plot will be drawn on existing axes. If None, a new figure will be
created. Default is `None`.
legend : bool, optional.
Determines whether a legend is printed for the plot. Default == True.
progress_bar : bool, optional.
Determines whether a progress bar is displayed while making the plot.
Default == True.
show : bool, optional.
Determines whether the figure is shown after plotting is complete.
Default == True.
output_file : str, or None, optional.
Denotes the relative or absolute filepath (including the file format)
that is to be used to save the plot. If None, the plot will not be
saved to file. Default is None.
dpi : positive int, optional.
Denotes the number of 'dots per inch' for the saved figure. Will only
be used if `output_file is not None`. Default == 500.
Returns
-------
None.
"""
# Perform some basic argument checking
_check_reliability_args(probs, choices, partitions, sim_y)
# Make probs 2D if necessary
probs = probs[:, None] if probs.ndim == 1 else probs
# Create the figure and axes if need be
if fig_and_ax is None:
fig, ax = plt.subplots(1, figsize=figsize)
fig_and_ax = [fig, ax]
else:
fig, ax = fig_and_ax
# Create the progressbar iterator if desired
    if sim_y is not None:
        if progress_bar:
            sim_iterator = progress(
                range(sim_y.shape[1]), desc="Plotting Simulations"
            )
        else:
            sim_iterator = range(sim_y.shape[1])
# Create the desired smoother
if discrete:
smoother = DiscreteSmoother(
num_obs=probs.shape[0], partitions=partitions
)
else:
smoother = ContinuousSmoother(
n_estimators=n_estimators,
min_samples_leaf=min_samples_leaf,
random_state=random_state,
)
# Create the plotter that will plot single smooth curves
plotter = SmoothPlotter(smoother=smoother, ax=ax)
# Create helper functions
def get_current_probs(col):
"""
Fetches the current probabilities when plotting the reliability curves.
"""
current = probs[:, 0] if probs.shape[1] == 1 else probs[:, col]
return current
# Plot the simulated reliability curves, if desired
if sim_y is not None:
for i in sim_iterator:
current_label = sim_label if i == 0 else None
plotter.plot(
get_current_probs(i),
sim_y[:, i],
label=current_label,
color=sim_line_color,
alpha=sim_alpha,
sort=True,
)
# Create the progressbar iterator if desired
if progress_bar:
prob_iterator = progress(range(probs.shape[1]), desc="Plotting")
else:
prob_iterator = range(probs.shape[1])
# Make the 'true' reliability plots
for col in prob_iterator:
plotter.plot(
get_current_probs(col),
choices,
label=line_label,
color=line_color,
alpha=alpha,
sort=True,
)
# Create the reference line if desired
if ref_line:
add_ref_line(ax)
# Make the legend, if desired
if legend:
ax.legend(loc="best", fontsize=fontsize)
# Take care of boilerplate plotting necessities
_label_despine_save_and_show_plot(
x_label=x_label,
y_label=y_label,
fig_and_ax=fig_and_ax,
fontsize=fontsize,
y_rot=0,
y_pad=40,
title=title,
output_file=output_file,
show=show,
dpi=dpi,
)
return None
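# Hedged usage sketch with synthetic data (not taken from the package docs):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     probs = rng.uniform(size=1000)
#     # outcomes drawn from probs are well calibrated by construction
#     choices = (rng.uniform(size=1000) < probs).astype(int)
#     sim_y = (rng.uniform(size=(1000, 20)) < probs[:, None]).astype(int)
#     plot_smoothed_reliability(probs, choices, sim_y=sim_y, partitions=10)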
|
[
"seaborn.set_style",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"past.builtins.range"
] |
[((681, 706), 'seaborn.set_style', 'sbn.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (694, 706), True, 'import seaborn as sbn\n'), ((2708, 2754), 'numpy.linspace', 'np.linspace', (['min_ref_val', 'max_ref_val'], {'num': '(100)'}), '(min_ref_val, max_ref_val, num=100)\n', (2719, 2754), True, 'import numpy as np\n'), ((9365, 9397), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'figsize'}), '(1, figsize=figsize)\n', (9377, 9397), True, 'import matplotlib.pyplot as plt\n'), ((9745, 9766), 'past.builtins.range', 'range', (['probs.shape[1]'], {}), '(probs.shape[1])\n', (9750, 9766), False, 'from past.builtins import range\n'), ((11078, 11099), 'past.builtins.range', 'range', (['probs.shape[1]'], {}), '(probs.shape[1])\n', (11083, 11099), False, 'from past.builtins import range\n'), ((9671, 9692), 'past.builtins.range', 'range', (['sim_y.shape[1]'], {}), '(sim_y.shape[1])\n', (9676, 9692), False, 'from past.builtins import range\n'), ((11004, 11025), 'past.builtins.range', 'range', (['probs.shape[1]'], {}), '(probs.shape[1])\n', (11009, 11025), False, 'from past.builtins import range\n')]
|
"""A tool to convert annotation files created with CVAT into ground-truth style images
for machine learning. The initial code was copied from:
https://gist.github.com/cheind/9850e35bb08cfe12500942fb8b55531f
originally written for a similar purpose for the tool BeaverDam (which produces json),
and was then adapted for use with CVAT (which produces xml).
"""
import cv2
import xml.etree.ElementTree as ET
import numpy as np
from tqdm import tqdm
# Create a list of BGR colours stored as 3-tuples of uint_8s
colours = [
[255, 0, 0], # Blue
[0, 255, 0], # Green
[0, 0, 255], # Red
[0, 255, 255], # Yellow
[255, 255, 0], # Cyan
[255, 0, 255], # Magenta
[192, 192, 192], # Silver
[0, 0, 128], # Maroon
[0, 128, 128], # Olive
[0, 165, 255], # Orange
]
def draw_annotations(video, annotations, display=False):
    tree = ET.parse(annotations)
root = tree.getroot()
# Create a list of 'track' nodes that are children of the root
tracks = [child for child in root if child.tag == "track"]
# Read the video in as a video object
    cap = cv2.VideoCapture(video)
# Get a rough count of the number of frames in the video
rough_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Find the name/path of the video, without file type
name_index = -1
    while video[name_index] != ".":
name_index -= 1
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*"DIVX")
framerate = cap.get(cv2.CAP_PROP_FPS)
(width, height) = (
int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
)
out = cv2.VideoWriter(
args.video[:name_index] + "_annotated.avi", fourcc, framerate, (width, height)
)
for frame_count in tqdm(range(rough_frame_count)):
# Read the next frame of the video
ret, frame = cap.read()
if not ret:
# Video is done, so break out of the loop
break
# Loop over the track objects. For all that have an annotation for this frame,
# draw a corresponding rectangle with a colour from the colours list
for track in tracks:
# Check that this track has any box nodes left
if len(track) > 0:
# Since the nodes are sorted by frame number, we only have to check the first one
box = track[0]
if int(box.attrib["frame"]) == frame_count:
# Draw the rectangle described by this 'box' node on this frame
# Cast the coordinates to floats, then to ints,
# as the cv2.rectangle function cannot handle float pixel values
# And int(str) cannot handle float strings
x_tl = int(float(box.attrib["xtl"]))
y_tl = int(float(box.attrib["ytl"]))
x_br = int(float(box.attrib["xbr"]))
y_br = int(float(box.attrib["ybr"]))
cv2.rectangle(
frame,
(x_tl, y_tl),
(x_br, y_br),
colours[int(track.attrib["id"]) % len(colours)],
2,
-1,
)
                    # delete this box from the track, so we can keep
# only checking the first box in the future
track.remove(box)
# Write the frame with boxes
out.write(frame)
# Display the resulting frame
if display:
cv2.imshow("frame", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
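        # frame_count is the for-loop variable, so this increment only matters
        # on the final iteration, where it hands the next frame index to the
        # catch-up loop below.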
frame_count += 1
# Keep going, as the frame count is not necessarily accurate so we might not be done
while True:
# Read the next frame of the video
ret, frame = cap.read()
if not ret:
# Video is done, so break out of the loop
break
# Loop over the track objects. For all that have an annotation for this frame,
# draw a corresponding rectangle with a colour from the colours list
for track in tracks:
# Check that this track has any box nodes left
if len(track) > 0:
# Since the nodes are sorted by frame number,
# we only have to check the first one
box = track[0]
if int(box.attrib["frame"]) == frame_count:
# Draw the rectangle described by this 'box' node on this frame
# Cast the coordinates to floats, then to ints,
# as the cv2.rectangle function cannot handle float pixel values
# And int(str) cannot handle float strings
x_tl = int(float(box.attrib["xtl"]))
y_tl = int(float(box.attrib["ytl"]))
x_br = int(float(box.attrib["xbr"]))
y_br = int(float(box.attrib["ybr"]))
cv2.rectangle(
frame,
(x_tl, y_tl),
(x_br, y_br),
colours[int(track.attrib["id"]) % len(colours)],
2,
-1,
)
                    # delete this box from the track, so we can keep
# only checking the first box in the future
track.remove(box)
# Write the frame with boxes
out.write(frame)
# Display the resulting frame
if display:
cv2.imshow("frame", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
frame_count += 1
# Release everything
cap.release()
out.release()
cv2.destroyAllWindows()
def draw_groundtruth(video, annotations, display=False):
    tree = ET.parse(annotations)
root = tree.getroot()
# Create a list of 'track' nodes that are children of the root
tracks = [child for child in root if child.tag == "track"]
# Read the video in as a video object
    cap = cv2.VideoCapture(video)
# Get a rough count of the number of frames in the video
rough_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Find the name/path of the video, without file type
name_index = -1
    while video[name_index] != ".":
name_index -= 1
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*"DIVX")
framerate = cap.get(cv2.CAP_PROP_FPS)
(width, height) = (
int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
)
out = cv2.VideoWriter(
args.video[:name_index] + "_groundtruth.avi",
fourcc,
framerate,
(width, height),
0,
)
blank_frame = np.zeros((height, width), dtype=np.uint8)
for frame_count in tqdm(range(rough_frame_count)):
# Copy a new blank frame
frame = np.copy(blank_frame)
# Read the next frame but discard it, to check if the video is done yet
ret, _ = cap.read()
if not ret:
# Video is done, so break out of the loop
break
# Loop over the track objects. For all that have an annotation for this frame,
# draw a corresponding rectangle with a colour from the colours list
for track in tracks:
# Check that this track has any box nodes left
if len(track) > 0:
# Since the nodes are sorted by frame number,
# we only have to check the first one
box = track[0]
if int(box.attrib["frame"]) == frame_count:
# Draw the rectangle described by this 'box' node on this frame
# Cast the coordinates to floats, then to ints,
# as the cv2.rectangle function cannot handle float pixel values
# And int(str) cannot handle float strings
x_tl = int(float(box.attrib["xtl"]))
y_tl = int(float(box.attrib["ytl"]))
x_br = int(float(box.attrib["xbr"]))
y_br = int(float(box.attrib["ybr"]))
cv2.rectangle(frame, (x_tl, y_tl), (x_br, y_br), 255, cv2.FILLED)
# delete this box from the track, so we can keep
# only checking the first box in the future
track.remove(box)
# Write the frame with boxes
# Convert to BGR so video can be properly saved
# frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
out.write(frame)
# Display the resulting frame
if display:
cv2.imshow("frame", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
frame_count += 1
# Keep going, as the frame count is not necessarily accurate so we might not be done
while True:
# Copy a new blank frame
frame = np.copy(blank_frame)
# Read the next frame but discard it, to check if the video is done yet
        ret, _ = cap.read()
if not ret:
# Video is done, so break out of the loop
break
# Loop over the track objects. For all that have an annotation for this frame,
# draw a corresponding rectangle with a colour from the colours list
for track in tracks:
# Check that this track has any box nodes left
if len(track) > 0:
# Since the nodes are sorted by frame number, we only have to check the first one
box = track[0]
if int(box.attrib["frame"]) == frame_count:
# Draw the rectangle described by this 'box' node on this frame
# Cast the coordinates to floats, then to ints,
# as the cv2.rectangle function cannot handle float pixel values
# And int(str) cannot handle float strings
x_tl = int(float(box.attrib["xtl"]))
y_tl = int(float(box.attrib["ytl"]))
x_br = int(float(box.attrib["xbr"]))
y_br = int(float(box.attrib["ybr"]))
cv2.rectangle(frame, (x_tl, y_tl), (x_br, y_br), 255, cv2.FILLED)
# delete this box from the track, so we can keep
# only checking the first box in the future
track.remove(box)
# Write the frame with boxes
out.write(frame)
# Display the resulting frame
if display:
cv2.imshow("frame", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
frame_count += 1
# Release everything
cap.release()
out.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Draw annotations, either on original videos or as ground-truth saliency maps"
)
parser.add_argument(
"--folder",
"-f",
dest="folder",
help="Folder containing input files. Video and annotation file names must match exactly, with videos as .mp4 or .avi, and annotations as .xml",
required=False,
)
parser.add_argument(
"--video", "-vid", dest="video", help="Input video file", required=False
)
parser.add_argument(
"--annotation", "-ann", dest="ann", help="Dense annotation file", required=False
)
parser.add_argument(
"--bounding_boxes",
"-bb",
dest="drawing_function",
action="store_const",
const=draw_annotations,
default=draw_groundtruth,
)
parser.add_argument("--verbose", "-v", dest="verbose", action="store_true")
args = parser.parse_args()
# Check that either folder was given, or if not then both video and ann was given
    if args.folder is None and (args.video is None or args.ann is None):
print(
"Error: invalid inputs given. Either -folder, or both -video and -ann must be specified."
)
    if args.folder is not None:
# Read all files in the folder and call the appropriate function on each
# video/annotation pair found
# TODO: implement
pass
else:
# Draw bounding boxes on the original video, or ground-truth saliency maps,
# depending on if -bb was specified
args.drawing_function(args.video, args.ann, args.verbose)
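# Example invocations (the script file name is a placeholder; the flags match
# the argparse definitions above):
#
#   python draw_annotations.py -vid clip.mp4 -ann clip.xml         # ground-truth maps
#   python draw_annotations.py -vid clip.mp4 -ann clip.xml -bb -v  # boxes on the video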
|
[
"cv2.rectangle",
"numpy.copy",
"xml.etree.ElementTree.parse",
"argparse.ArgumentParser",
"cv2.VideoWriter",
"cv2.imshow",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.waitKey"
] |
[((873, 891), 'xml.etree.ElementTree.parse', 'ET.parse', (['args.ann'], {}), '(args.ann)\n', (881, 891), True, 'import xml.etree.ElementTree as ET\n'), ((1101, 1129), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.video'], {}), '(args.video)\n', (1117, 1129), False, 'import cv2\n'), ((1464, 1495), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (1486, 1495), False, 'import cv2\n'), ((1675, 1774), 'cv2.VideoWriter', 'cv2.VideoWriter', (["(args.video[:name_index] + '_annotated.avi')", 'fourcc', 'framerate', '(width, height)'], {}), "(args.video[:name_index] + '_annotated.avi', fourcc,\n framerate, (width, height))\n", (1690, 1774), False, 'import cv2\n'), ((5807, 5830), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5828, 5830), False, 'import cv2\n'), ((5901, 5919), 'xml.etree.ElementTree.parse', 'ET.parse', (['args.ann'], {}), '(args.ann)\n', (5909, 5919), True, 'import xml.etree.ElementTree as ET\n'), ((6129, 6157), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.video'], {}), '(args.video)\n', (6145, 6157), False, 'import cv2\n'), ((6492, 6523), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (6514, 6523), False, 'import cv2\n'), ((6703, 6807), 'cv2.VideoWriter', 'cv2.VideoWriter', (["(args.video[:name_index] + '_groundtruth.avi')", 'fourcc', 'framerate', '(width, height)', '(0)'], {}), "(args.video[:name_index] + '_groundtruth.avi', fourcc,\n framerate, (width, height), 0)\n", (6718, 6807), False, 'import cv2\n'), ((6870, 6911), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint8'}), '((height, width), dtype=np.uint8)\n', (6878, 6911), True, 'import numpy as np\n'), ((10851, 10874), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10872, 10874), False, 'import cv2\n'), ((10938, 11063), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Draw annotations, either on original videos or as ground-truth saliency maps"""'}), "(description=\n 'Draw annotations, either on original videos or as ground-truth saliency maps'\n )\n", (10961, 11063), False, 'import argparse\n'), ((7017, 7037), 'numpy.copy', 'np.copy', (['blank_frame'], {}), '(blank_frame)\n', (7024, 7037), True, 'import numpy as np\n'), ((9041, 9061), 'numpy.copy', 'np.copy', (['blank_frame'], {}), '(blank_frame)\n', (9048, 9061), True, 'import numpy as np\n'), ((3604, 3630), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (3614, 3630), False, 'import cv2\n'), ((5616, 5642), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (5626, 5642), False, 'import cv2\n'), ((8761, 8787), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (8771, 8787), False, 'import cv2\n'), ((10660, 10686), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (10670, 10686), False, 'import cv2\n'), ((3646, 3660), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3657, 3660), False, 'import cv2\n'), ((5658, 5672), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5669, 5672), False, 'import cv2\n'), ((8277, 8342), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x_tl, y_tl)', '(x_br, y_br)', '(255)', 'cv2.FILLED'], {}), '(frame, (x_tl, y_tl), (x_br, y_br), 255, cv2.FILLED)\n', (8290, 8342), False, 'import cv2\n'), ((8803, 8817), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8814, 8817), False, 'import cv2\n'), ((10290, 10355), 'cv2.rectangle', 'cv2.rectangle', 
(['frame', '(x_tl, y_tl)', '(x_br, y_br)', '(255)', 'cv2.FILLED'], {}), '(frame, (x_tl, y_tl), (x_br, y_br), 255, cv2.FILLED)\n', (10303, 10355), False, 'import cv2\n'), ((10702, 10716), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (10713, 10716), False, 'import cv2\n')]
|
import os
import tempfile
import pytest
import numpy as np
try:
import h5py
except ImportError:
h5py = None
from msl.io import read, HDF5Writer, JSONWriter
from msl.io.readers import HDF5Reader
from helper import read_sample, roots_equal
@pytest.mark.skipif(h5py is None, reason='h5py not installed')
def test_read_write_convert():
root1 = read_sample('hdf5_sample.h5')
# write as HDF5 then read
writer = HDF5Writer(tempfile.gettempdir() + '/msl-hdf5-writer-temp.h5')
writer.write(root=root1, mode='w')
root2 = read(writer.file)
assert root2.file == writer.file
assert roots_equal(root1, root2)
os.remove(writer.file)
# convert to JSON then back to HDF5
json_writer = JSONWriter(tempfile.gettempdir() + '/msl-json-writer-temp.json')
json_writer.write(root=root1, mode='w')
root_json = read(json_writer.file)
assert root_json.file == json_writer.file
assert roots_equal(root1, root_json)
os.remove(json_writer.file)
writer2 = HDF5Writer(tempfile.gettempdir() + '/msl-hdf5-writer-temp2.h5')
writer2.write(root=root_json, mode='w')
root3 = read(writer2.file)
assert root3.file == writer2.file
assert roots_equal(root1, root3)
os.remove(writer2.file)
for root in [root1, root2, root3]:
assert isinstance(root, HDF5Reader)
for key, value in root.items():
k, v = str(key), str(value)
k, v = repr(key), repr(value)
order = ['D0', 'G0', 'G1A', 'D1', 'G1B', 'D2', 'D3', 'G2']
for i, key in enumerate(root.keys()):
assert os.path.basename(key) == order[i]
assert len(root.metadata) == 3
assert root.metadata['version_h5py'] == '2.8.0'
assert root.metadata.version_hdf5 == '1.10.2'
assert root.metadata['date_created'] == '2018-08-28 15:16:43.904990'
assert 'D0' in root
assert 'G0' in root
d0 = root['D0']
assert root.is_dataset(d0)
assert d0.shape == (10, 4)
assert d0.dtype.str == '<f4'
assert len(d0.metadata) == 2
assert d0.metadata['temperature'] == 21.2
assert d0.metadata.temperature_units == 'deg C'
g0 = root.G0
assert root.is_group(g0)
assert len(g0.metadata) == 1
assert all(g0.metadata['count'] == [1, 2, 3, 4, 5])
assert 'G1A' in g0
assert 'G1B' in g0
g1a = g0['G1A']
assert root.is_group(g1a)
assert len(g1a.metadata) == 2
assert g1a.metadata['one'] == 1
assert g1a.metadata['a'] == 'A'
g1b = g0['G1B']
assert root.is_group(g1b)
assert len(g1b.metadata) == 2
assert g1b.metadata['one'] == 1
assert g1b.metadata['b'] == 'B'
assert 'D1' in g0['G1A']
d1 = root.G0.G1A.D1
assert root.is_dataset(d1)
assert len(d1.metadata) == 0
assert d1.shape == (3, 3)
assert d1.dtype.str == '<f8'
assert 'D2' in g1b
assert 'D3' in g0.G1B
assert 'G2' in root.G0.G1B
d2 = g1b['D2']
assert root.is_dataset(d2)
assert len(d2.metadata) == 2
assert d2.metadata['voltage'] == 132.4
assert d2.metadata['voltage_units'] == 'uV'
assert d2.shape == (10,)
assert d2.dtype.str == '<i4'
assert d2[3] == 90
d3 = g1b.D3
assert root.is_dataset(d3)
assert len(d3.metadata) == 0
assert d3.shape == (10,)
assert d3.dtype.str == '<i4'
assert d3[7] == 51
g2 = root.G0.G1B.G2
assert root.is_group(g2)
assert len(g2.metadata) == 1
assert g2.metadata['hello'] == 'world'
@pytest.mark.skipif(h5py is None, reason='h5py not installed')
def test_raises():
root = read_sample('hdf5_sample.h5')
writer = HDF5Writer()
assert writer.file is None
# no file was specified
with pytest.raises(ValueError, match=r'must specify a file'):
writer.write(root=root)
# root must be a Root object
with pytest.raises(TypeError, match=r'Root'):
writer.write(file='whatever', root=list(root.datasets())[0])
with pytest.raises(TypeError, match=r'Root'):
writer.write(file='whatever', root=list(root.groups())[0])
with pytest.raises(TypeError, match=r'Root'):
writer.write(file='whatever', root='Root')
# cannot overwrite a file by default
file = tempfile.gettempdir() + '/msl-hdf5-writer-temp.h5'
with open(file, mode='wt') as fp:
fp.write('Hi')
with pytest.raises(OSError, match=r'File exists'):
writer.write(file=file, root=root)
with pytest.raises(OSError, match=r'File exists'):
writer.write(file=file, root=root, mode='x')
with pytest.raises(OSError, match=r'File exists'):
writer.write(file=file, root=root, mode='w-')
# invalid mode
for m in ['r', 'b', 'w+b']:
with pytest.raises(ValueError, match=r'Invalid mode'):
writer.write(file=file, root=root, mode=m)
# r+ is a valid mode, but the file must already exist
with pytest.raises(OSError, match=r'File does not exist'):
writer.write(file='does_not.exist', root=root, mode='r+')
# by specifying the proper mode one can overwrite a file
writer.write(file=file, root=root, mode='w')
assert roots_equal(root, read(file))
writer.write(file=file, root=root, mode='a')
assert roots_equal(root, read(file))
writer.write(file=file, root=root, mode='r+')
assert roots_equal(root, read(file))
os.remove(file)
@pytest.mark.skipif(h5py is None, reason='h5py not installed')
def test_numpy_unicode_dtype():
writer = HDF5Writer()
writer.add_metadata(wide_chars=np.array(['1', '-4e+99', 'True'], dtype='<U6'))
writer.create_dataset('wide_chars', data=np.random.random(100).reshape(4, 25).astype('<U32'))
file = tempfile.gettempdir() + '/msl-hdf5-writer-temp.h5'
writer.save(file, mode='w')
root = read(file)
assert np.array_equal(root.metadata.wide_chars, writer.metadata.wide_chars)
# the following array_equal assertion fails so we iterate over all elements instead
# assert np.array_equal(root.wide_chars.astype('<U32'), writer.wide_chars)
for a, b in zip(root.wide_chars.astype('<U32').flatten(), writer.wide_chars.flatten()):
assert a == b
os.remove(file)
|
[
"helper.read_sample",
"numpy.random.random",
"msl.io.read",
"msl.io.HDF5Writer",
"numpy.array",
"numpy.array_equal",
"pytest.raises",
"tempfile.gettempdir",
"pytest.mark.skipif",
"os.path.basename",
"helper.roots_equal",
"os.remove"
] |
[((252, 313), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(h5py is None)'], {'reason': '"""h5py not installed"""'}), "(h5py is None, reason='h5py not installed')\n", (270, 313), False, 'import pytest\n'), ((3668, 3729), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(h5py is None)'], {'reason': '"""h5py not installed"""'}), "(h5py is None, reason='h5py not installed')\n", (3686, 3729), False, 'import pytest\n'), ((5540, 5601), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(h5py is None)'], {'reason': '"""h5py not installed"""'}), "(h5py is None, reason='h5py not installed')\n", (5558, 5601), False, 'import pytest\n'), ((357, 386), 'helper.read_sample', 'read_sample', (['"""hdf5_sample.h5"""'], {}), "('hdf5_sample.h5')\n", (368, 386), False, 'from helper import read_sample, roots_equal\n'), ((545, 562), 'msl.io.read', 'read', (['writer.file'], {}), '(writer.file)\n', (549, 562), False, 'from msl.io import read, HDF5Writer, JSONWriter\n'), ((611, 636), 'helper.roots_equal', 'roots_equal', (['root1', 'root2'], {}), '(root1, root2)\n', (622, 636), False, 'from helper import read_sample, roots_equal\n'), ((641, 663), 'os.remove', 'os.remove', (['writer.file'], {}), '(writer.file)\n', (650, 663), False, 'import os\n'), ((848, 870), 'msl.io.read', 'read', (['json_writer.file'], {}), '(json_writer.file)\n', (852, 870), False, 'from msl.io import read, HDF5Writer, JSONWriter\n'), ((928, 957), 'helper.roots_equal', 'roots_equal', (['root1', 'root_json'], {}), '(root1, root_json)\n', (939, 957), False, 'from helper import read_sample, roots_equal\n'), ((962, 989), 'os.remove', 'os.remove', (['json_writer.file'], {}), '(json_writer.file)\n', (971, 989), False, 'import os\n'), ((1124, 1142), 'msl.io.read', 'read', (['writer2.file'], {}), '(writer2.file)\n', (1128, 1142), False, 'from msl.io import read, HDF5Writer, JSONWriter\n'), ((1192, 1217), 'helper.roots_equal', 'roots_equal', (['root1', 'root3'], {}), '(root1, root3)\n', (1203, 1217), False, 'from helper import read_sample, roots_equal\n'), ((1222, 1245), 'os.remove', 'os.remove', (['writer2.file'], {}), '(writer2.file)\n', (1231, 1245), False, 'import os\n'), ((3760, 3789), 'helper.read_sample', 'read_sample', (['"""hdf5_sample.h5"""'], {}), "('hdf5_sample.h5')\n", (3771, 3789), False, 'from helper import read_sample, roots_equal\n'), ((3804, 3816), 'msl.io.HDF5Writer', 'HDF5Writer', ([], {}), '()\n', (3814, 3816), False, 'from msl.io import read, HDF5Writer, JSONWriter\n'), ((5521, 5536), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (5530, 5536), False, 'import os\n'), ((5647, 5659), 'msl.io.HDF5Writer', 'HDF5Writer', ([], {}), '()\n', (5657, 5659), False, 'from msl.io import read, HDF5Writer, JSONWriter\n'), ((5948, 5958), 'msl.io.read', 'read', (['file'], {}), '(file)\n', (5952, 5958), False, 'from msl.io import read, HDF5Writer, JSONWriter\n'), ((5970, 6038), 'numpy.array_equal', 'np.array_equal', (['root.metadata.wide_chars', 'writer.metadata.wide_chars'], {}), '(root.metadata.wide_chars, writer.metadata.wide_chars)\n', (5984, 6038), True, 'import numpy as np\n'), ((6326, 6341), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (6335, 6341), False, 'import os\n'), ((3886, 3940), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""must specify a file"""'}), "(ValueError, match='must specify a file')\n", (3899, 3940), False, 'import pytest\n'), ((4018, 4056), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Root"""'}), "(TypeError, match='Root')\n", (4031, 4056), False, 'import pytest\n'), ((4137, 
4175), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Root"""'}), "(TypeError, match='Root')\n", (4150, 4175), False, 'import pytest\n'), ((4254, 4292), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Root"""'}), "(TypeError, match='Root')\n", (4267, 4292), False, 'import pytest\n'), ((4399, 4420), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (4418, 4420), False, 'import tempfile\n'), ((4520, 4563), 'pytest.raises', 'pytest.raises', (['OSError'], {'match': '"""File exists"""'}), "(OSError, match='File exists')\n", (4533, 4563), False, 'import pytest\n'), ((4618, 4661), 'pytest.raises', 'pytest.raises', (['OSError'], {'match': '"""File exists"""'}), "(OSError, match='File exists')\n", (4631, 4661), False, 'import pytest\n'), ((4726, 4769), 'pytest.raises', 'pytest.raises', (['OSError'], {'match': '"""File exists"""'}), "(OSError, match='File exists')\n", (4739, 4769), False, 'import pytest\n'), ((5064, 5115), 'pytest.raises', 'pytest.raises', (['OSError'], {'match': '"""File does not exist"""'}), "(OSError, match='File does not exist')\n", (5077, 5115), False, 'import pytest\n'), ((5324, 5334), 'msl.io.read', 'read', (['file'], {}), '(file)\n', (5328, 5334), False, 'from msl.io import read, HDF5Writer, JSONWriter\n'), ((5414, 5424), 'msl.io.read', 'read', (['file'], {}), '(file)\n', (5418, 5424), False, 'from msl.io import read, HDF5Writer, JSONWriter\n'), ((5505, 5515), 'msl.io.read', 'read', (['file'], {}), '(file)\n', (5509, 5515), False, 'from msl.io import read, HDF5Writer, JSONWriter\n'), ((5853, 5874), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (5872, 5874), False, 'import tempfile\n'), ((442, 463), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (461, 463), False, 'import tempfile\n'), ((734, 755), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (753, 755), False, 'import tempfile\n'), ((1015, 1036), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1034, 1036), False, 'import tempfile\n'), ((4891, 4938), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Invalid mode"""'}), "(ValueError, match='Invalid mode')\n", (4904, 4938), False, 'import pytest\n'), ((5695, 5741), 'numpy.array', 'np.array', (["['1', '-4e+99', 'True']"], {'dtype': '"""<U6"""'}), "(['1', '-4e+99', 'True'], dtype='<U6')\n", (5703, 5741), True, 'import numpy as np\n'), ((1586, 1607), 'os.path.basename', 'os.path.basename', (['key'], {}), '(key)\n', (1602, 1607), False, 'import os\n'), ((5788, 5809), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (5804, 5809), True, 'import numpy as np\n')]
|
import numpy as np
from ringity.classes.diagram import PersistenceDiagram
def read_pdiagram(fname, **kwargs):
"""
Wrapper for numpy.genfromtxt.
"""
return PersistenceDiagram(np.genfromtxt(fname, **kwargs))
def write_pdiagram(dgm, fname, **kwargs):
"""
Wrapper for numpy.savetxt.
"""
array = np.array(dgm)
np.savetxt(fname, array, **kwargs)
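# Round-trip usage sketch (hypothetical file name, assuming the diagram is
# an iterable of (birth, death) pairs):
#   dgm = read_pdiagram("diagram.txt")
#   write_pdiagram(dgm, "diagram_copy.txt")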
|
[
"numpy.array",
"numpy.genfromtxt",
"numpy.savetxt"
] |
[((329, 342), 'numpy.array', 'np.array', (['dgm'], {}), '(dgm)\n', (337, 342), True, 'import numpy as np\n'), ((347, 381), 'numpy.savetxt', 'np.savetxt', (['fname', 'array'], {}), '(fname, array, **kwargs)\n', (357, 381), True, 'import numpy as np\n'), ((191, 221), 'numpy.genfromtxt', 'np.genfromtxt', (['fname'], {}), '(fname, **kwargs)\n', (204, 221), True, 'import numpy as np\n')]
|
import functools
import os
from argparse import ArgumentParser
import networkx
import numpy as np
from visualize import heatmap
class MatchingClustering(object):
def __init__(self, n_clusters):
self.n_clusters = n_clusters
def fit_predict(self, X):
total = len(X)
grouping = [{i} for i in range(total)]
while len(grouping) > self.n_clusters:
gx = networkx.Graph()
gx.add_nodes_from(list(range(len(grouping))))
for i in range(len(grouping)):
for j in range(i + 1, len(grouping)):
w = sum([X[x][y] + 1 for x in grouping[i] for y in grouping[j]])
gx.add_edge(i, j, weight=w + 1)
ret = networkx.algorithms.max_weight_matching(gx, maxcardinality=True)
ret = sorted(list(ret))
new_grouping = [grouping[a] | grouping[b] for a, b in ret]
grouping = new_grouping
if len(grouping) != self.n_clusters:
            raise ValueError("Cannot split into exactly {} clusters.".format(self.n_clusters))
ret = np.zeros(total, dtype=np.int)
for i, g in enumerate(grouping):
ret[list(g)] = i
return ret
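# Reading of fit_predict above: each round pairs groups via max-weight
# matching and merges the pairs, roughly halving the group count; with an
# odd number of groups the unmatched one is silently dropped, and a
# ValueError is raised if exactly n_clusters can never be reached.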
def main():
parser = ArgumentParser()
parser.add_argument("dir", type=str)
parser.add_argument("--num_classes", default=2, type=int)
args = parser.parse_args()
cor_matrix = np.loadtxt(os.path.join(args.dir, "corr_heatmap.txt"))
grouping = np.loadtxt(os.path.join(args.dir, "group_info.txt"), dtype=np.int)
group_number = np.max(grouping) + 1
result_grouping = np.zeros(len(grouping), dtype=np.int)
base = 0
for i in sorted(list(range(group_number)), key=lambda d: np.sum(grouping == d)):
cur_index = np.where(grouping == i)[0]
cor_matrix_grp = cor_matrix[cur_index][:, cur_index]
# print(cor_matrix_grp)
model = MatchingClustering(n_clusters=args.num_classes)
if len(cur_index) < args.num_classes:
result_grouping[cur_index] = np.arange(len(cur_index)) + base
print("Group {}: Too small")
base += len(cur_index)
else:
predict = model.fit_predict(1 - cor_matrix_grp)
print("Group {}: {}".format(i, predict.tolist()))
for j in range(args.num_classes):
result_grouping[cur_index[predict == j]] = base + j
base += args.num_classes
heatmap(cor_matrix_grp, filepath=os.path.join("debug", "heatmap_{}".format(i)))
print(result_grouping.tolist())
if __name__ == "__main__":
main()
|
[
"networkx.algorithms.max_weight_matching",
"argparse.ArgumentParser",
"numpy.where",
"os.path.join",
"networkx.Graph",
"numpy.max",
"numpy.sum",
"numpy.zeros"
] |
[((1247, 1263), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1261, 1263), False, 'from argparse import ArgumentParser\n'), ((1101, 1130), 'numpy.zeros', 'np.zeros', (['total'], {'dtype': 'np.int'}), '(total, dtype=np.int)\n', (1109, 1130), True, 'import numpy as np\n'), ((1428, 1470), 'os.path.join', 'os.path.join', (['args.dir', '"""corr_heatmap.txt"""'], {}), "(args.dir, 'corr_heatmap.txt')\n", (1440, 1470), False, 'import os\n'), ((1498, 1538), 'os.path.join', 'os.path.join', (['args.dir', '"""group_info.txt"""'], {}), "(args.dir, 'group_info.txt')\n", (1510, 1538), False, 'import os\n'), ((1573, 1589), 'numpy.max', 'np.max', (['grouping'], {}), '(grouping)\n', (1579, 1589), True, 'import numpy as np\n'), ((404, 420), 'networkx.Graph', 'networkx.Graph', ([], {}), '()\n', (418, 420), False, 'import networkx\n'), ((731, 795), 'networkx.algorithms.max_weight_matching', 'networkx.algorithms.max_weight_matching', (['gx'], {'maxcardinality': '(True)'}), '(gx, maxcardinality=True)\n', (770, 795), False, 'import networkx\n'), ((1773, 1796), 'numpy.where', 'np.where', (['(grouping == i)'], {}), '(grouping == i)\n', (1781, 1796), True, 'import numpy as np\n'), ((1729, 1750), 'numpy.sum', 'np.sum', (['(grouping == d)'], {}), '(grouping == d)\n', (1735, 1750), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 27 14:39:08 2020
@author: ravi
"""
import scipy.io as scio
import scipy.io.wavfile as scwav
import numpy as np
import joblib
import pyworld as pw
import os
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from glob import glob
from extract_fold_data_hparams import Hparams
from feat_utils import normalize_wav, preprocess_contour
dict_target_emo = {'neutral_angry':['angry', 'neu-ang'], \
'neutral_happy':['happy', 'neu-hap'], \
'neutral_sad':['sad', 'neu-sad']}
def _process_wavs(wav_src, wav_tar, args):
"""
Utterance level features for context expansion
"""
utt_f0_src = list()
utt_f0_tar = list()
utt_ec_src = list()
utt_ec_tar = list()
utt_mfc_src = list()
utt_mfc_tar = list()
try:
src_wav = scwav.read(wav_src)
src = np.asarray(src_wav[1], np.float64)
tar_wav = scwav.read(wav_tar)
tar = np.asarray(tar_wav[1], np.float64)
src = normalize_wav(src, floor=-1, ceil=1)
tar = normalize_wav(tar, floor=-1, ceil=1)
f0_src, t_src = pw.harvest(src, args.fs, frame_period=int(1000*args.win_len))
src_straight = pw.cheaptrick(src, f0_src, t_src, args.fs)
        f0_tar, t_tar = pw.harvest(tar, args.fs, frame_period=int(1000*args.win_len))
tar_straight = pw.cheaptrick(tar, f0_tar, t_tar, args.fs)
src_mfc = pw.code_spectral_envelope(src_straight, args.fs, args.n_mels)
tar_mfc = pw.code_spectral_envelope(tar_straight, args.fs, args.n_mels)
ec_src = np.sum(src_mfc, axis=1)
ec_tar = np.sum(tar_mfc, axis=1)
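        # Interpretation (not stated in the source): summing the coded
        # spectral envelope per frame yields a rough energy contour, which
        # preprocess_contour then smooths alongside the F0 track.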
f0_src = preprocess_contour(f0_src)
f0_tar = preprocess_contour(f0_tar)
ec_src = preprocess_contour(ec_src)
ec_tar = preprocess_contour(ec_tar)
f0_src = f0_src.reshape(-1,1)
f0_tar = f0_tar.reshape(-1,1)
ec_src = ec_src.reshape(-1,1)
ec_tar = ec_tar.reshape(-1,1)
min_length = min([len(f0_src), len(f0_tar)])
if min_length<args.frame_len:
return None, None, None, None, None, None, None
else:
for sample in range(args.n_samplings):
start = np.random.randint(0, min_length-args.frame_len+1)
end = start + args.frame_len
utt_f0_src.append(f0_src[start:end,:])
utt_f0_tar.append(f0_tar[start:end,:])
utt_ec_src.append(ec_src[start:end,:])
utt_ec_tar.append(ec_tar[start:end,:])
utt_mfc_src.append(src_mfc[start:end,:])
utt_mfc_tar.append(tar_mfc[start:end,:])
return utt_mfc_src, utt_mfc_tar, utt_f0_src, utt_f0_tar, \
utt_ec_src, utt_ec_tar, int(os.path.basename(wav_src)[:-4])
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
return None, None, None, None, None, None, None
def extract_features(args):
target = dict_target_emo[args.emo_pair][0]
speaker_file_info = joblib.load('/home/ravi/Downloads/Emo-Conv/speaker_file_info.pkl')
speaker_info = speaker_file_info[args.emo_pair]
test_speaker_female = speaker_info[args.fold - 1]
test_speaker_male = speaker_info[5 + args.fold - 1]
test_file_idx = [i for i in range(test_speaker_female[0], test_speaker_female[1]+1)]
test_file_idx += [i for i in range(test_speaker_male[0], test_speaker_male[1]+1)]
src_files = sorted(glob(os.path.join(args.data_folder, 'neutral', '*.wav')))
tar_files = sorted(glob(os.path.join(args.data_folder, target, '*.wav')))
train_f0_src = list()
train_f0_tar = list()
train_ec_src = list()
train_ec_tar = list()
train_mfc_src = list()
train_mfc_tar = list()
valid_f0_src = list()
valid_f0_tar = list()
valid_ec_src = list()
valid_ec_tar = list()
valid_mfc_src = list()
valid_mfc_tar = list()
test_f0_src = list()
test_f0_tar = list()
test_ec_src = list()
test_ec_tar = list()
test_mfc_src = list()
test_mfc_tar = list()
train_files = list()
valid_files = list()
test_files = list()
executor = ProcessPoolExecutor(max_workers=6)
futures = []
for (s,t) in zip(src_files, tar_files):
print("Processing: {0}".format(s))
futures.append(executor.submit(partial(_process_wavs, s, t,
args=args)))
results = [future.result() for future in tqdm(futures)]
for i in range(len(results)):
result = results[i]
mfc_src = result[0]
mfc_tar = result[1]
f0_src = result[2]
f0_tar = result[3]
ec_src = result[4]
ec_tar = result[5]
file_idx= result[6]
# mfc_src, mfc_tar, f0_src, \
# f0_tar, ec_src, ec_tar, file_idx = _process_wavs(s,t,args)
if mfc_src:
if file_idx in test_file_idx:
test_mfc_src.append(mfc_src)
test_mfc_tar.append(mfc_tar)
test_f0_src.append(f0_src)
test_f0_tar.append(f0_tar)
test_ec_src.append(ec_src)
test_ec_tar.append(ec_tar)
test_files.append(int(os.path.basename(s)[:-4]))
else:
if np.random.rand()<args.eval_size:
valid_mfc_src.append(mfc_src)
valid_mfc_tar.append(mfc_tar)
valid_f0_src.append(f0_src)
valid_f0_tar.append(f0_tar)
valid_ec_src.append(ec_src)
valid_ec_tar.append(ec_tar)
valid_files.append(int(os.path.basename(s)[:-4]))
else:
train_mfc_src.append(mfc_src)
train_mfc_tar.append(mfc_tar)
train_f0_src.append(f0_src)
train_f0_tar.append(f0_tar)
train_ec_src.append(ec_src)
train_ec_tar.append(ec_tar)
train_files.append(int(os.path.basename(s)[:-4]))
data_dict = {
'train_mfc_feat_src':np.asarray(train_mfc_src, np.float32),
'train_mfc_feat_tar':np.asarray(train_mfc_tar, np.float32),
'train_f0_feat_src':np.asarray(train_f0_src, np.float32),
'train_f0_feat_tar':np.asarray(train_f0_tar, np.float32),
'train_ec_feat_src':np.asarray(train_ec_src, np.float32),
'train_ec_feat_tar':np.asarray(train_ec_tar, np.float32),
'valid_mfc_feat_src':np.asarray(valid_mfc_src, np.float32),
'valid_mfc_feat_tar':np.asarray(valid_mfc_tar, np.float32),
'valid_f0_feat_src':np.asarray(valid_f0_src, np.float32),
'valid_f0_feat_tar':np.asarray(valid_f0_tar, np.float32),
'valid_ec_feat_src':np.asarray(valid_ec_src, np.float32),
'valid_ec_feat_tar':np.asarray(valid_ec_tar, np.float32),
'test_mfc_feat_src':np.asarray(test_mfc_src, np.float32),
'test_mfc_feat_tar':np.asarray(test_mfc_tar, np.float32),
'test_f0_feat_src':np.asarray(test_f0_src, np.float32),
'test_f0_feat_tar':np.asarray(test_f0_tar, np.float32),
'test_ec_feat_src':np.asarray(test_ec_src, np.float32),
'test_ec_feat_tar':np.asarray(test_ec_tar, np.float32),
'train_files':np.reshape(np.array(train_files), (-1,1)),
'valid_files':np.reshape(np.array(valid_files), (-1,1)),
'test_files':np.reshape(np.array(test_files), (-1,1))
}
return data_dict
if __name__ == '__main__':
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
hp.data_folder = '/home/ravi/Downloads/Emo-Conv/neutral-sad/all_above_0.5'
hp.emo_pair = 'neutral_sad'
for i in range(1, 6):
hp.fold = i
data_dict = extract_features(hp)
scio.savemat('/home/ravi/Desktop/neu-sad_fold_{0}.mat'.format(i), data_dict)
del data_dict
|
[
"feat_utils.preprocess_contour",
"numpy.random.rand",
"pyworld.code_spectral_envelope",
"pyworld.cheaptrick",
"tqdm.tqdm",
"numpy.asarray",
"extract_fold_data_hparams.Hparams",
"os.path.join",
"numpy.sum",
"numpy.array",
"numpy.random.randint",
"scipy.io.wavfile.read",
"functools.partial",
"concurrent.futures.ProcessPoolExecutor",
"joblib.load",
"os.path.basename",
"feat_utils.normalize_wav",
"warnings.filterwarnings"
] |
[((244, 277), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (267, 277), False, 'import warnings\n'), ((3408, 3474), 'joblib.load', 'joblib.load', (['"""/home/ravi/Downloads/Emo-Conv/speaker_file_info.pkl"""'], {}), "('/home/ravi/Downloads/Emo-Conv/speaker_file_info.pkl')\n", (3419, 3474), False, 'import joblib\n'), ((4563, 4597), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': '(6)'}), '(max_workers=6)\n', (4582, 4597), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((8058, 8067), 'extract_fold_data_hparams.Hparams', 'Hparams', ([], {}), '()\n', (8065, 8067), False, 'from extract_fold_data_hparams import Hparams\n'), ((1018, 1037), 'scipy.io.wavfile.read', 'scwav.read', (['wav_src'], {}), '(wav_src)\n', (1028, 1037), True, 'import scipy.io.wavfile as scwav\n'), ((1052, 1086), 'numpy.asarray', 'np.asarray', (['src_wav[1]', 'np.float64'], {}), '(src_wav[1], np.float64)\n', (1062, 1086), True, 'import numpy as np\n'), ((1106, 1125), 'scipy.io.wavfile.read', 'scwav.read', (['wav_tar'], {}), '(wav_tar)\n', (1116, 1125), True, 'import scipy.io.wavfile as scwav\n'), ((1140, 1174), 'numpy.asarray', 'np.asarray', (['tar_wav[1]', 'np.float64'], {}), '(tar_wav[1], np.float64)\n', (1150, 1174), True, 'import numpy as np\n'), ((1198, 1234), 'feat_utils.normalize_wav', 'normalize_wav', (['src'], {'floor': '(-1)', 'ceil': '(1)'}), '(src, floor=-1, ceil=1)\n', (1211, 1234), False, 'from feat_utils import normalize_wav, preprocess_contour\n'), ((1249, 1285), 'feat_utils.normalize_wav', 'normalize_wav', (['tar'], {'floor': '(-1)', 'ceil': '(1)'}), '(tar, floor=-1, ceil=1)\n', (1262, 1285), False, 'from feat_utils import normalize_wav, preprocess_contour\n'), ((1401, 1443), 'pyworld.cheaptrick', 'pw.cheaptrick', (['src', 'f0_src', 't_src', 'args.fs'], {}), '(src, f0_src, t_src, args.fs)\n', (1414, 1443), True, 'import pyworld as pw\n'), ((1558, 1600), 'pyworld.cheaptrick', 'pw.cheaptrick', (['tar', 'f0_tar', 't_tar', 'args.fs'], {}), '(tar, f0_tar, t_tar, args.fs)\n', (1571, 1600), True, 'import pyworld as pw\n'), ((1620, 1681), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['src_straight', 'args.fs', 'args.n_mels'], {}), '(src_straight, args.fs, args.n_mels)\n', (1645, 1681), True, 'import pyworld as pw\n'), ((1700, 1761), 'pyworld.code_spectral_envelope', 'pw.code_spectral_envelope', (['tar_straight', 'args.fs', 'args.n_mels'], {}), '(tar_straight, args.fs, args.n_mels)\n', (1725, 1761), True, 'import pyworld as pw\n'), ((1780, 1803), 'numpy.sum', 'np.sum', (['src_mfc'], {'axis': '(1)'}), '(src_mfc, axis=1)\n', (1786, 1803), True, 'import numpy as np\n'), ((1821, 1844), 'numpy.sum', 'np.sum', (['tar_mfc'], {'axis': '(1)'}), '(tar_mfc, axis=1)\n', (1827, 1844), True, 'import numpy as np\n'), ((1871, 1897), 'feat_utils.preprocess_contour', 'preprocess_contour', (['f0_src'], {}), '(f0_src)\n', (1889, 1897), False, 'from feat_utils import normalize_wav, preprocess_contour\n'), ((1915, 1941), 'feat_utils.preprocess_contour', 'preprocess_contour', (['f0_tar'], {}), '(f0_tar)\n', (1933, 1941), False, 'from feat_utils import normalize_wav, preprocess_contour\n'), ((1960, 1986), 'feat_utils.preprocess_contour', 'preprocess_contour', (['ec_src'], {}), '(ec_src)\n', (1978, 1986), False, 'from feat_utils import normalize_wav, preprocess_contour\n'), ((2004, 2030), 'feat_utils.preprocess_contour', 'preprocess_contour', (['ec_tar'], {}), '(ec_tar)\n', (2022, 2030), False, 'from feat_utils import 
normalize_wav, preprocess_contour\n'), ((6544, 6581), 'numpy.asarray', 'np.asarray', (['train_mfc_src', 'np.float32'], {}), '(train_mfc_src, np.float32)\n', (6554, 6581), True, 'import numpy as np\n'), ((6617, 6654), 'numpy.asarray', 'np.asarray', (['train_mfc_tar', 'np.float32'], {}), '(train_mfc_tar, np.float32)\n', (6627, 6654), True, 'import numpy as np\n'), ((6688, 6724), 'numpy.asarray', 'np.asarray', (['train_f0_src', 'np.float32'], {}), '(train_f0_src, np.float32)\n', (6698, 6724), True, 'import numpy as np\n'), ((6758, 6794), 'numpy.asarray', 'np.asarray', (['train_f0_tar', 'np.float32'], {}), '(train_f0_tar, np.float32)\n', (6768, 6794), True, 'import numpy as np\n'), ((6828, 6864), 'numpy.asarray', 'np.asarray', (['train_ec_src', 'np.float32'], {}), '(train_ec_src, np.float32)\n', (6838, 6864), True, 'import numpy as np\n'), ((6898, 6934), 'numpy.asarray', 'np.asarray', (['train_ec_tar', 'np.float32'], {}), '(train_ec_tar, np.float32)\n', (6908, 6934), True, 'import numpy as np\n'), ((6969, 7006), 'numpy.asarray', 'np.asarray', (['valid_mfc_src', 'np.float32'], {}), '(valid_mfc_src, np.float32)\n', (6979, 7006), True, 'import numpy as np\n'), ((7042, 7079), 'numpy.asarray', 'np.asarray', (['valid_mfc_tar', 'np.float32'], {}), '(valid_mfc_tar, np.float32)\n', (7052, 7079), True, 'import numpy as np\n'), ((7113, 7149), 'numpy.asarray', 'np.asarray', (['valid_f0_src', 'np.float32'], {}), '(valid_f0_src, np.float32)\n', (7123, 7149), True, 'import numpy as np\n'), ((7183, 7219), 'numpy.asarray', 'np.asarray', (['valid_f0_tar', 'np.float32'], {}), '(valid_f0_tar, np.float32)\n', (7193, 7219), True, 'import numpy as np\n'), ((7253, 7289), 'numpy.asarray', 'np.asarray', (['valid_ec_src', 'np.float32'], {}), '(valid_ec_src, np.float32)\n', (7263, 7289), True, 'import numpy as np\n'), ((7323, 7359), 'numpy.asarray', 'np.asarray', (['valid_ec_tar', 'np.float32'], {}), '(valid_ec_tar, np.float32)\n', (7333, 7359), True, 'import numpy as np\n'), ((7393, 7429), 'numpy.asarray', 'np.asarray', (['test_mfc_src', 'np.float32'], {}), '(test_mfc_src, np.float32)\n', (7403, 7429), True, 'import numpy as np\n'), ((7464, 7500), 'numpy.asarray', 'np.asarray', (['test_mfc_tar', 'np.float32'], {}), '(test_mfc_tar, np.float32)\n', (7474, 7500), True, 'import numpy as np\n'), ((7533, 7568), 'numpy.asarray', 'np.asarray', (['test_f0_src', 'np.float32'], {}), '(test_f0_src, np.float32)\n', (7543, 7568), True, 'import numpy as np\n'), ((7601, 7636), 'numpy.asarray', 'np.asarray', (['test_f0_tar', 'np.float32'], {}), '(test_f0_tar, np.float32)\n', (7611, 7636), True, 'import numpy as np\n'), ((7669, 7704), 'numpy.asarray', 'np.asarray', (['test_ec_src', 'np.float32'], {}), '(test_ec_src, np.float32)\n', (7679, 7704), True, 'import numpy as np\n'), ((7737, 7772), 'numpy.asarray', 'np.asarray', (['test_ec_tar', 'np.float32'], {}), '(test_ec_tar, np.float32)\n', (7747, 7772), True, 'import numpy as np\n'), ((3850, 3900), 'os.path.join', 'os.path.join', (['args.data_folder', '"""neutral"""', '"""*.wav"""'], {}), "(args.data_folder, 'neutral', '*.wav')\n", (3862, 3900), False, 'import os\n'), ((3931, 3978), 'os.path.join', 'os.path.join', (['args.data_folder', 'target', '"""*.wav"""'], {}), "(args.data_folder, target, '*.wav')\n", (3943, 3978), False, 'import os\n'), ((4886, 4899), 'tqdm.tqdm', 'tqdm', (['futures'], {}), '(futures)\n', (4890, 4899), False, 'from tqdm import tqdm\n'), ((7811, 7832), 'numpy.array', 'np.array', (['train_files'], {}), '(train_files)\n', (7819, 7832), True, 'import numpy as np\n'), 
((7881, 7902), 'numpy.array', 'np.array', (['valid_files'], {}), '(valid_files)\n', (7889, 7902), True, 'import numpy as np\n'), ((7950, 7970), 'numpy.array', 'np.array', (['test_files'], {}), '(test_files)\n', (7958, 7970), True, 'import numpy as np\n'), ((2434, 2487), 'numpy.random.randint', 'np.random.randint', (['(0)', '(min_length - args.frame_len + 1)'], {}), '(0, min_length - args.frame_len + 1)\n', (2451, 2487), True, 'import numpy as np\n'), ((4746, 4785), 'functools.partial', 'partial', (['_process_wavs', 's', 't'], {'args': 'args'}), '(_process_wavs, s, t, args=args)\n', (4753, 4785), False, 'from functools import partial\n'), ((3030, 3055), 'os.path.basename', 'os.path.basename', (['wav_src'], {}), '(wav_src)\n', (3046, 3055), False, 'import os\n'), ((5709, 5725), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5723, 5725), True, 'import numpy as np\n'), ((5645, 5664), 'os.path.basename', 'os.path.basename', (['s'], {}), '(s)\n', (5661, 5664), False, 'import os\n'), ((6077, 6096), 'os.path.basename', 'os.path.basename', (['s'], {}), '(s)\n', (6093, 6096), False, 'import os\n'), ((6461, 6480), 'os.path.basename', 'os.path.basename', (['s'], {}), '(s)\n', (6477, 6480), False, 'import os\n')]
|
import numpy as np
import cv2
import errno
# set environment variable
import os
os.environ['OPENCV_IO_ENABLE_JASPER'] = 'TRUE'  # allows JPEG2000 format
# path of this file
det_path = os.path.split(os.path.abspath(__file__))[0] + '/'
class DimensionError(Exception):
"""
    raised when the image exceeds the maximum allowed
    dimensions of 1024 x 1024.
"""
def __init__(self, h, w):
message = "Image is too big " + str((h, w))
message += "; max allowed size is (1024, 1024)"
super(DimensionError, self).__init__(message)
def detect_largest_face(in_path, out_path=None, min_conf=0.8):
"""
Detects largest face using the DNN face detection algorithm
from cv2 library.
Args:
in_path (str): path of the input image file
out_path (str, optional): path of the cropped image file. Defaults to None
min_conf (float, optional): threshold confidence to detect face. Defaults to 0.8
Returns:
bounding_box: an 2x2 array of two (x,y) coordinates
for top left and bottom right of the bounding box
"""
# check input file path
if not os.path.isfile(in_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), in_path)
# check output file path
if out_path:
try:
with open(out_path, 'w') as f:
pass
except OSError:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), out_path)
# read file
img = cv2.imread(in_path)
# check image dimensions
h, w = img.shape[:2]
if h > 1024 or w > 1024:
raise DimensionError(h, w)
# detect faces using DNN algorithm from cv2
net = cv2.dnn.readNetFromCaffe(
det_path + "models/deploy.prototxt",
det_path + "models/res10_300x300_ssd_iter_140000.caffemodel"
)
rgb_mean = np.mean(img, axis=(0, 1)) # mean rgb values to remove effects of illumination
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300,300)), 1.0, (300, 300), rgb_mean)
net.setInput(blob)
faces = net.forward()
# get the most confident faces
conf_faces = np.array(list(filter(lambda x: x[2] > min_conf, faces[0, 0])))
# check if any faces exist
assert len(conf_faces) > 0, "No faces found!"
# get the largest face
first_face = 0 # let first face be biggest face
first_box = conf_faces[first_face, 3:7] * np.array([w, h, w, h])
sx, sy, ex, ey = first_box.astype("int")
for i in range(1, conf_faces.shape[0]):
box = conf_faces[i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
if (endX - startX)*(endY - startY) > (ex - sx)*(ey - sy):
sx, sy, ex, ey = startX, startY, endX, endY
# save the crop
if out_path:
largest_crop = img[sy:ey, sx:ex]
saved_file = cv2.imwrite(out_path, largest_crop)
# return the largest bounding box
bounding_box = [(sx, sy), (ex, ey)]
return bounding_box
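# Minimal usage sketch (hypothetical file names, not part of the source):
if __name__ == "__main__":
    largest_box = detect_largest_face("portrait.jpg", out_path="portrait_crop.jpg")
    print("Largest face bounding box:", largest_box)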
|
[
"numpy.mean",
"cv2.imwrite",
"os.strerror",
"cv2.dnn.readNetFromCaffe",
"os.path.isfile",
"numpy.array",
"os.path.abspath",
"cv2.resize",
"cv2.imread"
] |
[((1585, 1604), 'cv2.imread', 'cv2.imread', (['in_path'], {}), '(in_path)\n', (1595, 1604), False, 'import cv2\n'), ((1783, 1910), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (["(det_path + 'models/deploy.prototxt')", "(det_path + 'models/res10_300x300_ssd_iter_140000.caffemodel')"], {}), "(det_path + 'models/deploy.prototxt', det_path +\n 'models/res10_300x300_ssd_iter_140000.caffemodel')\n", (1807, 1910), False, 'import cv2\n'), ((1949, 1974), 'numpy.mean', 'np.mean', (['img'], {'axis': '(0, 1)'}), '(img, axis=(0, 1))\n', (1956, 1974), True, 'import numpy as np\n'), ((1203, 1226), 'os.path.isfile', 'os.path.isfile', (['in_path'], {}), '(in_path)\n', (1217, 1226), False, 'import os\n'), ((2060, 2087), 'cv2.resize', 'cv2.resize', (['img', '(300, 300)'], {}), '(img, (300, 300))\n', (2070, 2087), False, 'import cv2\n'), ((2488, 2510), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (2496, 2510), True, 'import numpy as np\n'), ((2947, 2982), 'cv2.imwrite', 'cv2.imwrite', (['out_path', 'largest_crop'], {}), '(out_path, largest_crop)\n', (2958, 2982), False, 'import cv2\n'), ((200, 225), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (215, 225), False, 'import os\n'), ((1274, 1299), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (1285, 1299), False, 'import os\n'), ((2636, 2658), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (2644, 2658), True, 'import numpy as np\n'), ((1521, 1546), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (1532, 1546), False, 'import os\n')]
|
"""
NCL_coneff_16.py
================
This script illustrates the following concepts:
- Showing features of the new color display model
- Using a NCL colormap with levels to assign a color palette to contours
- Drawing partially transparent filled contours
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/coneff_16.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/coneff_16_1_lg.png
"""
###############################################################################
# Import packages:
import numpy as np
import xarray as xr
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import geocat.datafiles as gdf
from geocat.viz import cmaps as gvcmaps
from geocat.viz import util as gvutil
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
ds = xr.open_dataset(gdf.get('netcdf_files/uv300.nc'))
U = ds.U[1, :, :]
###############################################################################
# Plot:
# Generate figure (set its size (width, height) in inches)
plt.figure(figsize=(14, 7))
# Generate axes, using Cartopy
projection = ccrs.PlateCarree()
ax = plt.axes(projection=projection)
# Use global map and draw coastlines
ax.set_global()
ax.coastlines()
# Import an NCL colormap
newcmp = gvcmaps.BlueYellowRed
# Contourf-plot data (for filled contours)
# Note: min/max contour levels are hard-coded; contourf's automatic level selection produces fractional values.
p = U.plot.contourf(ax=ax,
vmin=-16.0,
vmax=44,
levels=16,
cmap=newcmp,
add_colorbar=False,
transform=projection,
extend='neither')
# Add horizontal colorbar
cbar = plt.colorbar(p, orientation='horizontal', shrink=0.5)
cbar.ax.tick_params(labelsize=14)
cbar.set_ticks(np.linspace(-12, 40, 14))
# Use geocat.viz.util convenience function to set axes tick values
gvutil.set_axes_limits_and_ticks(ax,
xticks=np.linspace(-180, 180, 13),
yticks=np.linspace(-90, 90, 7))
# Use geocat.viz.util convenience function to make plots look like NCL plots by using latitude, longitude tick labels
gvutil.add_lat_lon_ticklabels(ax)
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax, labelsize=12)
# Use geocat.viz.util convenience function to add titles to left and right of the plot axis.
gvutil.set_titles_and_labels(ax,
maintitle="Color contours mask filled land",
lefttitle=U.long_name,
lefttitlefontsize=16,
righttitle=U.units,
righttitlefontsize=16,
xlabel="",
ylabel="")
# Show the plot
plt.show()
|
[
"geocat.datafiles.get",
"geocat.viz.util.add_major_minor_ticks",
"matplotlib.pyplot.colorbar",
"cartopy.crs.PlateCarree",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.axes",
"geocat.viz.util.add_lat_lon_ticklabels",
"geocat.viz.util.set_titles_and_labels",
"matplotlib.pyplot.show"
] |
[((1221, 1248), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (1231, 1248), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1312), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1310, 1312), True, 'import cartopy.crs as ccrs\n'), ((1318, 1349), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'projection'}), '(projection=projection)\n', (1326, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1946, 1999), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['p'], {'orientation': '"""horizontal"""', 'shrink': '(0.5)'}), "(p, orientation='horizontal', shrink=0.5)\n", (1958, 1999), True, 'import matplotlib.pyplot as plt\n'), ((2432, 2465), 'geocat.viz.util.add_lat_lon_ticklabels', 'gvutil.add_lat_lon_ticklabels', (['ax'], {}), '(ax)\n', (2461, 2465), True, 'from geocat.viz import util as gvutil\n'), ((2544, 2590), 'geocat.viz.util.add_major_minor_ticks', 'gvutil.add_major_minor_ticks', (['ax'], {'labelsize': '(12)'}), '(ax, labelsize=12)\n', (2572, 2590), True, 'from geocat.viz import util as gvutil\n'), ((2685, 2886), 'geocat.viz.util.set_titles_and_labels', 'gvutil.set_titles_and_labels', (['ax'], {'maintitle': '"""Color contours mask filled land"""', 'lefttitle': 'U.long_name', 'lefttitlefontsize': '(16)', 'righttitle': 'U.units', 'righttitlefontsize': '(16)', 'xlabel': '""""""', 'ylabel': '""""""'}), "(ax, maintitle=\n 'Color contours mask filled land', lefttitle=U.long_name,\n lefttitlefontsize=16, righttitle=U.units, righttitlefontsize=16, xlabel\n ='', ylabel='')\n", (2713, 2886), True, 'from geocat.viz import util as gvutil\n'), ((3093, 3103), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3101, 3103), True, 'import matplotlib.pyplot as plt\n'), ((1020, 1052), 'geocat.datafiles.get', 'gdf.get', (['"""netcdf_files/uv300.nc"""'], {}), "('netcdf_files/uv300.nc')\n", (1027, 1052), True, 'import geocat.datafiles as gdf\n'), ((2049, 2073), 'numpy.linspace', 'np.linspace', (['(-12)', '(40)', '(14)'], {}), '(-12, 40, 14)\n', (2060, 2073), True, 'import numpy as np\n'), ((2220, 2246), 'numpy.linspace', 'np.linspace', (['(-180)', '(180)', '(13)'], {}), '(-180, 180, 13)\n', (2231, 2246), True, 'import numpy as np\n'), ((2288, 2311), 'numpy.linspace', 'np.linspace', (['(-90)', '(90)', '(7)'], {}), '(-90, 90, 7)\n', (2299, 2311), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FuncFormatter
filepath = '/Users/huangjiaming/Documents/developer/ETreeLearning/res/losses/delay_etree.txt'
x = []
num = 0
with open(filepath) as fp:
for line in fp:
c = list(map(int, line.split()))
x = c
print(np.mean(x), np.std(x))
fig,(ax0,ax1) = plt.subplots(nrows=2,figsize=(9,6))
# PDF: probability density histogram
ax0.hist(x, 100, density=True, histtype='bar', facecolor='blue', edgecolor="black", alpha=0.9)
ax0.set_title('')
ax0.set_xlabel('Delay / ms')
ax0.set_ylabel('Percent')
# CDF: cumulative distribution histogram
ax1.hist(x, 100, density=True, histtype='bar', facecolor='red', edgecolor="black", alpha=0.9, cumulative=True, rwidth=0.8)
ax1.set_title("cdf")
ax1.set_xlabel('Delay / ms')
ax1.set_ylabel('Percent')
fig.subplots_adjust(hspace=0.4)
plt.savefig('./reports/20200301/delay_etree_100_nodes', dpi=600)
|
[
"numpy.mean",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"numpy.std"
] |
[((360, 397), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'figsize': '(9, 6)'}), '(nrows=2, figsize=(9, 6))\n', (372, 397), True, 'import matplotlib.pyplot as plt\n'), ((853, 917), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./reports/20200301/delay_etree_100_nodes"""'], {'dpi': '(600)'}), "('./reports/20200301/delay_etree_100_nodes', dpi=600)\n", (864, 917), True, 'import matplotlib.pyplot as plt\n'), ((317, 327), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (324, 327), True, 'import numpy as np\n'), ((329, 338), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (335, 338), True, 'import numpy as np\n')]
|
import numpy as np
from typing import List, Tuple
class InterfaceSolver():
"""
    Informal interface for a solver class that interacts with the Rubik's
    cube environment.
"""
def __init__(self, depth:int, possible_moves: List[str]) -> None:
"""
Will be passed depth, i.e. number of backwards turns cube has been
randomly shuffled from solved.
Additionally will be passed the list of acceptable moves that should
be returned by the get_action method. Can be upper or lower case
characters but must be in possible_moves. Each character corresponds to
a face of the cube, upper case is a clockwise turn, lower case is
counter clockwise, will be passed as all upper case.
"""
pass
def get_name(self) -> str:
"""
Each solver will have an associated button in the GUI, this text will
be what is displayed on the button.
"""
def clear(self) -> None:
"""
Will be called before every fresh solve. Can use to reset any necessary
parameters.
"""
pass
def get_action(self, cube_state:np.array) -> Tuple[str,bool]:
"""
Will be passed cube state as a 6x3x3 np.array, where the first index
represents the 6 sides of the cube, and the 2nd and 3rd index form a
3x3 table representing each of the 9 faces on one side of the cube. Each
entry has an integer value {0,5} representing a color. A solved cube is
when for each side, each 3x3 matrix only contains one value. Can assume
cube_state results from taking previous action on previous cube_state.
Must return character from possible_moves passed in init, can be upper
or lower case as described above.
Also must return boolean value indicating if solver is terminating
(True), or not (False). If terminating can either provide action and it
will be executed and then terminate, or can pass action as None, and
solving will be terminated without any action.
"""
pass
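# Minimal sketch (not part of the original interface) of the solved check
# implied by the get_action docstring: a cube is solved when every side's
# 3x3 face holds a single value.
def _is_solved(cube_state):
    return all((cube_state[i] == cube_state[i, 1, 1]).all() for i in range(6))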
def find_shortest_path(node_1,node_2):
"""
Given two nodes indicated by a string of their move sequence from the start
node (forward moves only), this method returns the shortest move sequence
from node_1 to node_2.
"""
# Change to lists
node_1 = list(node_1)
node_1_common = node_1.copy()
node_2 = list(node_2)
node_2_common = node_2.copy()
# Get length of smaller
small_length = min(len(node_1),len(node_2))
# get to smallest common parent node
for i in range(small_length):
if (node_1[i] == node_2[i]):
# then pop because they are the same
node_1_common.pop(0)
node_2_common.pop(0)
else:
# as soon as this isn't true cant get any closer parent node
break
# Now generate path by reversing path to node_1, and follow path to node_2
shortest_path = [x.lower() for x in node_1_common[::-1]]
shortest_path.extend(node_2_common)
return shortest_path
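# Worked example of the logic above: find_shortest_path("UF", "UR") backs out
# of the non-shared suffix of node_1 ('F' -> 'f') and replays node_2's
# suffix ('R'), returning ['f', 'R'].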
class DepthFirstSearch(InterfaceSolver):
"""
    Implements a depth-first search algorithm bounded by the provided depth.
"""
def __init__(self, depth, possible_moves):
self.depth = depth
self.possible_moves = possible_moves
def get_name(self):
return "DFS"
def depth_first_search(self,current_move,depth):
# If past max depth just retun
if depth < 0:
return
# Otherwise append move to list (or if empty/starting do nothing)
if not len(current_move) == 0:
self.moves_to_make.append(current_move)
# Now go through each possible move/node from here recursively
for m in self.possible_moves:
self.depth_first_search(m,depth-1)
# Now before return, undo current move
if not len(current_move) == 0:
self.moves_to_make.append(current_move.lower())
return
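    # Illustration (derived from the recursion above): with possible_moves
    # ['U', 'F'] and depth 1, the tape built is ['U', 'u', 'F', 'f'] -- each
    # branch is taken and then immediately undone (clear() stores it reversed
    # so that popping from the end replays it in order).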
def clear(self):
"""
        Because the search is depth-bounded and the possible moves do not
        change, all actions can be precomputed; solving terminates via the
        caller if solved, or here once the precomputed moves run out.
"""
# Make list of all moves using recursive depth first search
self.moves_to_make = []
self.depth_first_search("",self.depth)
# Reverse string so that popping is constant time
self.moves_to_make.reverse()
def get_action(self, cube_state):
"""
        If only one move is left, provide that last action and signal
        termination; otherwise pop and return the next precomputed move.
"""
terminating = False
if len(self.moves_to_make) == 1:
terminating = True
return self.moves_to_make.pop(), terminating
class BreadthFirstSearch(InterfaceSolver):
"""
    Implements a breadth-first search algorithm bounded by the provided depth.
"""
def __init__(self, depth, possible_moves):
self.depth = depth
self.possible_moves = possible_moves
def get_name(self):
return "BFS"
def clear(self):
"""
        Because the search is depth-bounded and the possible moves do not
        change, all actions can be precomputed; solving terminates via the
        caller if solved, or here once the precomputed moves run out.
"""
        # Simulate BFS by popping from a tracking list; every visited node is
        # also appended to a saved list that get_action later consumes.
save_moves_to_make = []
track_moves_to_make = []
track_moves_to_make.extend(self.possible_moves)
save_moves_to_make.extend(self.possible_moves)
while len(track_moves_to_make) > 0:
# Get next move
next_move = track_moves_to_make.pop(0)
# Now go through neighbors of this next_move/node
for m in self.possible_moves:
to_append = next_move+m
if len(to_append) > self.depth:
continue
else:
track_moves_to_make.append(to_append)
save_moves_to_make.append(to_append)
# Now make completed move list using shortest path between each
self.moves_to_make = []
for i in range(len(save_moves_to_make)):
if i ==0:
self.moves_to_make.append(save_moves_to_make[0])
else:
self.moves_to_make.extend(find_shortest_path(save_moves_to_make[i-1],save_moves_to_make[i]))
# Reverse string so that popping is constant time
self.moves_to_make.reverse()
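    # Illustration (derived from the loop above): with possible_moves
    # ['U', 'F'] and depth 2, save_moves_to_make enumerates
    # ['U', 'F', 'UU', 'UF', 'FU', 'FF'] in breadth-first order;
    # find_shortest_path then stitches consecutive nodes into one move tape.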
def get_action(self, cube_state):
"""
        If only one move is left, provide that last action and signal
        termination; otherwise pop and return the next precomputed move.
"""
terminating = False
if len(self.moves_to_make) == 1:
terminating = True
return self.moves_to_make.pop(), terminating
class BestFirstSearch(InterfaceSolver):
"""
    Implements a best-first search algorithm bounded by the provided depth.
    Here the metric used is percentage complete.
    For each side the number of squares with the same color as the center is
    computed. This count is summed over all sides and divided by the total
    number of cube faces.
"""
def __init__(self, depth, possible_moves):
self.depth = depth
self.possible_moves = possible_moves
def get_name(self):
return "BestFS"
def clear(self):
self.cube_state_move = []
self.cube_state_values = []
self.actions = []
self.possible_moves_for_node = []
self.last_action = ""
self.move_queue = []
self.previous_node = ""
def get_value(self,cube_state):
total_equal = 0
total = cube_state.size
for i in range(6):
center_val = cube_state[i,1,1]
total_equal += np.sum(cube_state[i,:,:]==center_val)
return float(total_equal)/total
def get_action_nodes(self, cube_state):
"""
Method to actually select next nodes given known values.
"""
# 1. Get value of current cube_state, and append state and value, and nodes_moved_to
self.cube_state_values.append(self.get_value(cube_state))
self.cube_state_move.append(self.last_action)
self.possible_moves_for_node.append(self.possible_moves.copy())
if len(self.actions) == 0:
self.actions.append("")
# 2. Now find best current state
ranked_idx = np.argsort(np.array(self.cube_state_values))[::-1]
next_move = None
for idx in ranked_idx:
# If no more possible moves, just continue, if have move make it and break
if len(self.possible_moves_for_node[idx]) == 0 or len(self.cube_state_move[idx])==self.depth:
continue
else:
next_move = self.cube_state_move[idx] + self.possible_moves_for_node[idx].pop()
break
# 3. If next_move is still none, terminate, no more moves to make
if next_move is None:
return None
# 4. Otherwise, save action and return
self.last_action = next_move
return next_move
def get_action(self,cube_state):
"""
Need wrapper method for node selection to process transitions between
nodes.
"""
terminating = False
# 1. If move queue empty, calculate next node using method, find path and append
if len(self.move_queue) == 0:
next_node = self.get_action_nodes(cube_state)
if next_node is None:
self.move_queue.append(None)
terminating = True
else:
node_path = find_shortest_path(self.previous_node,next_node)
self.move_queue.extend(node_path)
self.previous_node = next_node
# 2. Set previous node, pop and take action
return self.move_queue.pop(0), terminating
|
[
"numpy.sum",
"numpy.array"
] |
[((8033, 8074), 'numpy.sum', 'np.sum', (['(cube_state[i, :, :] == center_val)'], {}), '(cube_state[i, :, :] == center_val)\n', (8039, 8074), True, 'import numpy as np\n'), ((8674, 8706), 'numpy.array', 'np.array', (['self.cube_state_values'], {}), '(self.cube_state_values)\n', (8682, 8706), True, 'import numpy as np\n')]
|
import sys
from glob import glob
from serial import Serial, SerialException
import numpy as np
BAUD_RATE = 9600
PORT = 'COM5'
READ_TIMEOUT = 1
LOWER_BOUND = 0.01
UPPER_BOUND = 0.4
class SerialCommunication():
""" Manages the communication and sends the data to the Arduino """
def __init__(self):
self._serial_channel = Serial()
self._serial_channel.port = PORT
self._serial_channel.baudrate = BAUD_RATE
@property
def baudrate(self):
return self._serial_channel.baudrate
@baudrate.setter
def baudrate(self, new_baudrate):
if not self._serial_channel.is_open:
self._serial_channel.baudrate = new_baudrate
else:
raise Exception("Close connection before changing baudrate")
@property
def port(self):
return self._serial_channel.port
@port.setter
    def port(self, new_port):
if not self._serial_channel.is_open:
self._serial_channel.port = new_port
else:
raise Exception("Close connection before changing port")
def get_available_serial_ports(self):
""" Returns a list of all ports that can be opened """
if self._serial_channel.is_open:
raise Exception("Close connection before")
result = []
for port in self._list_all_possibles_ports():
try:
Serial(port).close()
result.append(port)
except (OSError, SerialException):
pass
return result
def establish_communication(self):
"""
Enables the communication with the arduino with the latest parameters
Throws a SerialException is it cannot connect to port
"""
try:
self._serial_channel.open()
        except SerialException as error:
            print("Error when connecting to serial port %s: %s" % (self._serial_channel.port, error))
            raise
def send_data(self, data):
""" prints feedback data from the arduino and sends the new data """
if self._is_data_available():
print(("Reading : ", self._read_bytes(len(data))))
data = [x[1] for x in data]
if self._is_data_valid(data):
value_to_send = self._get_clipped_signals(data)
print(('Sending', value_to_send))
try:
self._serial_channel.write(bytearray(value_to_send))
            except SerialException as e:  # also covers write timeouts (SerialTimeoutException is a subclass)
print('Error when sending data to microcontroller:' + str(e))
def close_communication(self):
self._serial_channel.close()
def _list_all_possibles_ports(self):
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
return ports
def _read_bytes(self, nb_bytes=1):
bytes_received = []
for _ in range(nb_bytes):
bytes_received.append(self._serial_channel.read(1))
return [ord(byte) for byte in bytes_received if byte]
def _is_data_available(self):
return self._serial_channel is not None and self._serial_channel.is_open and self._serial_channel.in_waiting
def _is_data_valid(self, data):
return self._serial_channel is not None and self._serial_channel.is_open and not np.any(np.isnan(data))
def _get_clipped_signals(self, signals):
clipped_list = np.clip(signals, LOWER_BOUND, UPPER_BOUND)
return [int(255 * (x - LOWER_BOUND)/(UPPER_BOUND - LOWER_BOUND)) for x in clipped_list]
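# Worked example of the scaling above: with LOWER_BOUND=0.01 and
# UPPER_BOUND=0.4, a reading of 0.205 maps to int(255 * 0.195 / 0.39) == 127;
# out-of-range readings saturate at 0 or 255 after np.clip.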
|
[
"numpy.clip",
"sys.platform.startswith",
"serial.Serial",
"numpy.isnan",
"glob.glob"
] |
[((342, 350), 'serial.Serial', 'Serial', ([], {}), '()\n', (348, 350), False, 'from serial import Serial, SerialException\n'), ((2715, 2745), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (2738, 2745), False, 'import sys\n'), ((3799, 3841), 'numpy.clip', 'np.clip', (['signals', 'LOWER_BOUND', 'UPPER_BOUND'], {}), '(signals, LOWER_BOUND, UPPER_BOUND)\n', (3806, 3841), True, 'import numpy as np\n'), ((2820, 2852), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (2843, 2852), False, 'import sys\n'), ((2856, 2889), 'sys.platform.startswith', 'sys.platform.startswith', (['"""cygwin"""'], {}), "('cygwin')\n", (2879, 2889), False, 'import sys\n'), ((2972, 2997), 'glob.glob', 'glob', (['"""/dev/tty[A-Za-z]*"""'], {}), "('/dev/tty[A-Za-z]*')\n", (2976, 2997), False, 'from glob import glob\n'), ((3011, 3044), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (3034, 3044), False, 'import sys\n'), ((3066, 3084), 'glob.glob', 'glob', (['"""/dev/tty.*"""'], {}), "('/dev/tty.*')\n", (3070, 3084), False, 'from glob import glob\n'), ((3713, 3727), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (3721, 3727), True, 'import numpy as np\n'), ((1391, 1403), 'serial.Serial', 'Serial', (['port'], {}), '(port)\n', (1397, 1403), False, 'from serial import Serial, SerialException\n')]
|
import sys
import time
import threading
import grpc
import numpy
import soundfile as sf
import tensorflow as tf
import _init_paths
import audioset.vggish_input as vggish_input
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
tf.app.flags.DEFINE_integer('concurrency', 1, 'concurrent inference requests limit')
tf.app.flags.DEFINE_integer('num_tests', 100, 'Number of test samples')
tf.app.flags.DEFINE_string('server', '0.0.0.0:8500', 'PredictionService host:port')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory')
FLAGS = tf.app.flags.FLAGS
class _ResultCounter(object):
def __init__(self, num_tests, concurrency):
self._num_tests = num_tests
self._concurrency = concurrency
self._error = 0
self._done = 0
self._active = 0
self._condition = threading.Condition()
self._start_time = -1
self._end_time = 0
def inc_done(self):
with self._condition:
self._done += 1
if self._done == self._num_tests:
self.set_end_time(time.time())
self._condition.notify()
def dec_active(self):
with self._condition:
self._active -= 1
self._condition.notify()
def throttle(self):
with self._condition:
if self._start_time == -1:
self._start_time = time.time()
while self._active == self._concurrency:
self._condition.wait()
self._active += 1
def set_start_time(self, start_time):
self._start_time = start_time
def set_end_time(self, end_time):
self._end_time = end_time
def get_throughput(self):
if self._end_time == 0:
self.set_end_time(time.time())
print(self._end_time - self._start_time)
return self._num_tests / (self._end_time - self._start_time)
def time_to_sample(t, sr, factor):
return round(sr * t / factor)
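# Worked example: with t given in milliseconds (factor = 1000.0) and a 16 kHz
# sample rate, time_to_sample(500, 16000, 1000.0) == round(16000 * 500 / 1000.0)
# == 8000, i.e. 500 ms corresponds to sample index 8000.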
def _create_rpc_callback(label, result_counter):
def _callback(result_future):
exception = result_future.exception()
if exception:
# result_counter.inc_error()
print(exception)
else:
print('normal')
sys.stdout.write('.')
sys.stdout.flush()
response = numpy.array(result_future.result().outputs['output'].float_val)
result_counter.inc_done()
result_counter.dec_active()
return _callback
def inference(hostport, work_dir, concurrency, num_tests):
audio_path = 'test_DB/test_airport.wav'
num_secs = 1
sc_start = 0
sc_end = 2000
wav_data, sr = sf.read(audio_path, dtype='int16')
assert wav_data.dtype == numpy.int16, 'Bad sample type: %r' % wav_data.dtype
samples = wav_data / 32768.0 # Convert to [-1.0, +1.0]
sc_center = time_to_sample((sc_start + sc_end) / 2, sr, 1000.0)
# print('Center is {} when sample_rate is {}'.format(sc_center, sr))
data_length = len(samples)
data_width = time_to_sample(num_secs, sr, 1.0)
half_input_width = int(data_width / 2)
if sc_center < half_input_width:
pad_width = half_input_width - sc_center
samples = numpy.pad(samples, [(pad_width, 0), (0, 0)], mode='constant', constant_values=0)
sc_center += pad_width
elif sc_center + half_input_width > data_length:
pad_width = sc_center + half_input_width - data_length
samples = numpy.pad(samples, [(0, pad_width), (0, 0)], mode='constant', constant_values=0)
samples = samples[sc_center - half_input_width: sc_center + half_input_width]
audio_input = vggish_input.waveform_to_examples(samples, sr)
print(audio_input.dtype)
audio_input = audio_input.astype(numpy.float32)
channel = grpc.insecure_channel(hostport)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
result_counter = _ResultCounter(num_tests, concurrency)
for _ in range(num_tests):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'vgg'
request.model_spec.signature_name = 'prediction'
print(audio_input.shape)
request.inputs['input'].CopyFrom(tf.contrib.util.make_tensor_proto(audio_input, shape=audio_input.shape))
result_counter.throttle()
result_future = stub.Predict.future(request, 5.0)
result_future.add_done_callback(_create_rpc_callback(None, result_counter))
return result_counter.get_throughput()
def main(_):
if FLAGS.num_tests > 10000:
print('num_tests should not be greater than 10k')
return
if not FLAGS.server:
print('please specify server host:port')
return
tfs_throughput = inference(FLAGS.server, FLAGS.work_dir, FLAGS.concurrency, FLAGS.num_tests)
    print('\n TFS Throughput: %s requests/sec' % (tfs_throughput))
if __name__ == '__main__':
tf.app.run()
|
[
"sys.stdout.flush",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow_serving.apis.predict_pb2.PredictRequest",
"tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub",
"grpc.insecure_channel",
"tensorflow.app.flags.DEFINE_string",
"sys.stdout.write",
"numpy.pad",
"audioset.vggish_input.waveform_to_examples",
"time.time",
"tensorflow.contrib.util.make_tensor_proto",
"soundfile.read",
"threading.Condition",
"tensorflow.app.run"
] |
[((293, 381), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""concurrency"""', '(1)', '"""concurrent inference requests limit"""'], {}), "('concurrency', 1,\n 'concurrent inference requests limit')\n", (320, 381), True, 'import tensorflow as tf\n'), ((378, 448), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_tests"""', '(100)', '"""Number of test sample"""'], {}), "('num_tests', 100, 'Number of test sample')\n", (405, 448), True, 'import tensorflow as tf\n'), ((449, 536), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""server"""', '"""0.0.0.0:8500"""', '"""PredictionService host:port"""'], {}), "('server', '0.0.0.0:8500',\n 'PredictionService host:port')\n", (475, 536), True, 'import tensorflow as tf\n'), ((533, 600), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""work_dir"""', '"""/tmp"""', '"""Working directory"""'], {}), "('work_dir', '/tmp', 'Working directory')\n", (559, 600), True, 'import tensorflow as tf\n'), ((2715, 2749), 'soundfile.read', 'sf.read', (['audio_path'], {'dtype': '"""int16"""'}), "(audio_path, dtype='int16')\n", (2722, 2749), True, 'import soundfile as sf\n'), ((3689, 3735), 'audioset.vggish_input.waveform_to_examples', 'vggish_input.waveform_to_examples', (['samples', 'sr'], {}), '(samples, sr)\n', (3722, 3735), True, 'import audioset.vggish_input as vggish_input\n'), ((3831, 3862), 'grpc.insecure_channel', 'grpc.insecure_channel', (['hostport'], {}), '(hostport)\n', (3852, 3862), False, 'import grpc\n'), ((3874, 3932), 'tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub', 'prediction_service_pb2_grpc.PredictionServiceStub', (['channel'], {}), '(channel)\n', (3923, 3932), False, 'from tensorflow_serving.apis import prediction_service_pb2_grpc\n'), ((4942, 4954), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (4952, 4954), True, 'import tensorflow as tf\n'), ((883, 904), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (902, 904), False, 'import threading\n'), ((3262, 3347), 'numpy.pad', 'numpy.pad', (['samples', '[(pad_width, 0), (0, 0)]'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(samples, [(pad_width, 0), (0, 0)], mode='constant', constant_values=0\n )\n", (3271, 3347), False, 'import numpy\n'), ((4042, 4070), 'tensorflow_serving.apis.predict_pb2.PredictRequest', 'predict_pb2.PredictRequest', ([], {}), '()\n', (4068, 4070), False, 'from tensorflow_serving.apis import predict_pb2\n'), ((2298, 2319), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (2314, 2319), False, 'import sys\n'), ((2332, 2350), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2348, 2350), False, 'import sys\n'), ((3508, 3593), 'numpy.pad', 'numpy.pad', (['samples', '[(0, pad_width), (0, 0)]'], {'mode': '"""constant"""', 'constant_values': '(0)'}), "(samples, [(0, pad_width), (0, 0)], mode='constant', constant_values=0\n )\n", (3517, 3593), False, 'import numpy\n'), ((4242, 4313), 'tensorflow.contrib.util.make_tensor_proto', 'tf.contrib.util.make_tensor_proto', (['audio_input'], {'shape': 'audio_input.shape'}), '(audio_input, shape=audio_input.shape)\n', (4275, 4313), True, 'import tensorflow as tf\n'), ((1428, 1439), 'time.time', 'time.time', ([], {}), '()\n', (1437, 1439), False, 'import time\n'), ((1813, 1824), 'time.time', 'time.time', ([], {}), '()\n', (1822, 1824), False, 'import time\n'), ((1125, 1136), 'time.time', 'time.time', ([], {}), '()\n', (1134, 1136), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 21:46:56 2020
@author: adwait
"""
import numpy as np
import cv2
import pims
from tkinter import messagebox, Tk
from PIL import ImageFont, ImageDraw, Image
from PyQt5.QtGui import QIcon
import logging
class MainRecordFunctions:
def recordVideo(self, frame1, frame2):
logging.debug("recordvideo")
if self.forceData.force_filepath == "":
start_framenum = -1
end_framenum = -1
else:
# self.forceData.getArea(self.frameTime, self.dataDict)
start_framenum = self.forceData.plot_slice2.start
end_framenum = self.forceData.plot_slice2.stop + 1
if self.recordStatus == True:
if int(self.framePos) >= start_framenum:
h , w = 512, 512 #TODO: include this gui
# dim = (w, h)
if frame2.ndim == 2:
frame2 = cv2.cvtColor(frame2, cv2.COLOR_GRAY2BGR)
## if self.showContours.isChecked() == False:
## roi = self.roiBound
## self.merged_frame[:h, :w] = self.image_resize(frame1[roi[1]:roi[3],
## roi[0]:roi[2]],
## w, h, inter = cv2.INTER_AREA)
## else:
self.merged_frame[:h, :w], scaleFactor = self.image_resize(frame1, w, h,
inter = cv2.INTER_AREA)
if self.configRecWindow.fourRec.isChecked() == True:
if self.forceData.force_filepath == "" or self.cap2 == None:
root = Tk()
root.withdraw()
messagebox.showinfo("Error!", "Check 2nd video file or force data file. Not found!")
root.destroy()
self.record_frame() #finish recording
self.playStatus = False #pause video
return
## frame2 = cv2.cvtColor(self.frame_contours, cv2.COLOR_GRAY2BGR)
# frame2 = self.frame_contour.copy()
# ret, frame3 = self.cap2.read()
# self.forceData.getArea(self.frameTime, self.dataDict)
# self.forceData.plotData(self.lengthUnit.currentText()) #prepare plot
# frame4 = cv2.resize(cv2.cvtColor(self.forceData.convertPlot(), cv2.COLOR_RGB2BGR),
# (w, h), interpolation = cv2.INTER_AREA)
#only record till plot range. continue playing to get all data
if int(self.framePos) == end_framenum:
# if ret == False: #video at end
logging.debug("2nd video end")
# self.cap2.release()
self.cap2 = None
self.record_frame() #finish recording
self.playStatus = True
return
else:
frame2 = self.frame_contour.copy()
frame3 = self.cap2[self.framePos-1]#self.cap2.read()
self.forceData.getArea(self.frameTime, self.dataDict)
# self.forceData.plotData(self.imageDataUnitDict) #prepare plot
self.forceData.plotImageAnimate(int(self.framePos))
frame4 = cv2.resize(cv2.cvtColor(self.forceData.convertPlot(), cv2.COLOR_RGB2BGR),
(w, h), interpolation = cv2.INTER_AREA)
# framenumber1 = self.cap.get(cv2.CAP_PROP_POS_FRAMES)
# framenumber2 = self.cap2.get(cv2.CAP_PROP_POS_FRAMES)
# logging.debug('%s, %s, %s', "position", framenumber1, framenumber2)
# if framenumber1 != framenumber2: #check both videos are in sync
# root = Tk()
# root.withdraw()
# messagebox.showinfo("Error!", "Video frame numbers dont match!\n" +
# "Video-1 frame:\t" + str(framenumber1) + "\n" +
# "Video-2 frame:\t" + str(framenumber2))
# root.destroy()
# self.record_frame() #finish recording
# self.playStatus = False #pause video
# return
# logging.debug('%s, %s, %s', "position", self.cap.get(cv2.CAP_PROP_POS_FRAMES),
# self.cap2.get(cv2.CAP_PROP_POS_FRAMES))
self.merged_frame[:h, w:], r = self.image_resize(frame3, w, h,
inter = cv2.INTER_AREA)
self.merged_frame[h:, :w], r = self.image_resize(frame2, w, h,
inter = cv2.INTER_AREA)
self.merged_frame[h:, w:], r = self.image_resize(frame4, w, h,
inter = cv2.INTER_AREA)
# Write video2 title
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (int(1.0*w), int(0.05*h))
fontScale = 1.5
fontColor = (0,0,250)
thickness = 3
lineType = 1
cv2.putText(self.merged_frame,
self.configRecWindow.video2Title.text(),
bottomLeftCornerOfText, font,fontScale,
fontColor,thickness, lineType)
else:
#only record till plot range. continue playing to get all data
if int(self.framePos) == end_framenum:
self.record_frame() #finish recording
self.playStatus = True
return
else:
self.merged_frame[:h, w:], r = self.image_resize(frame2, w, h,
inter = cv2.INTER_AREA)
# Write video1 title
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (int(0.0*w), int(0.05*h))
fontScale = 1.5
fontColor = (0,0,250)
thickness = 3
lineType = 1
cv2.putText(self.merged_frame,
self.configRecWindow.video1Title.text(),
bottomLeftCornerOfText, font,fontScale,
fontColor,thickness, lineType)
# Write time
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (int(1.55*w), int(0.1*h))
fontScale = 2
fontColor = (0,200,200)
thickness = 10
lineType = 2
logging.debug('%s, %s', self.frameTime.item(int(self.framePos-1)), bottomLeftCornerOfText)
text = 'Time: ' + "{0:.3f}".format(self.frameTime.item(int(self.framePos-1))) + ' s'
cv2.putText(self.merged_frame, text,
bottomLeftCornerOfText, font,fontScale,
fontColor,thickness, lineType)
#Draw scale bar
logging.debug('%s, %s', scaleFactor, "scalef")
pixLength = scaleFactor * self.pixelValue.value()
scalepos1 = (int(0.8*w), int(0.95*h))
scalepos2 = (int(scalepos1[0] + pixLength), scalepos1[1])
scalelabelpos = (int(scalepos1[0] + 0.5 * (pixLength - 100)),
scalepos1[1] + 10) #length of label is 51 pixels
cv2.line(self.merged_frame, scalepos1, scalepos2,
fontColor, thickness)
fontScale = 1
thickness = 5
color = (0,200,200)
text = str(int(self.lengthValue.value())) + ' ' + self.lengthUnit.currentText()
font = ImageFont.truetype("arial.ttf", 28, encoding="unic")
img_pil = Image.fromarray(self.merged_frame)
draw = ImageDraw.Draw(img_pil)
draw.text(scalelabelpos, text, font = font, fill = color)
self.merged_frame = np.array(img_pil)
logging.debug('%s, %s, %s', self.merged_frame.shape, w, h)
self.out.write(self.merged_frame)
cv2.namedWindow("Recording Preview", cv2.WINDOW_KEEPRATIO)
cv2.imshow("Recording Preview", self.merged_frame)
cv2.resizeWindow("Recording Preview", 800, 400)
# elif self.configRecWindow.fourRec.isChecked() == True:
# ret, frame3 = self.cap2.read()
def record_frame(self):
logging.debug("record_frame")
if self.recordStatus == True:
self.out.release()
self.recordBtn.setIcon(QIcon('images/record.png'))
self.recordBtn.setEnabled(False)
self.middleleftGroupBox.setEnabled(True)
self.bcGroupBox.setEnabled(True)
self.dftGroupBox.setEnabled(True)
self.threshGroupBox.setEnabled(True)
self.threshROIGroupBox.setEnabled(True)
self.dataGroupBox.setEnabled(True)
self.roiBtn.setEnabled(True)
self.analyzeVideo.setEnabled(True)
self.recordStatus = False
self.playStatus = False
else:
self.recordBtn.setIcon(QIcon('images/recording.png'))
self.middleleftGroupBox.setEnabled(False)
self.bcGroupBox.setEnabled(False)
self.dftGroupBox.setEnabled(False)
self.threshGroupBox.setEnabled(False)
self.threshROIGroupBox.setEnabled(False)
self.dataGroupBox.setEnabled(False)
self.roiBtn.setEnabled(False)
self.analyzeVideo.setEnabled(False)
self.recordStatus = True
self.playback()
## self.recordStatus = not self.recordStatus
#function to resize frame for recording
def image_resize(self, image, width = None, height = None, inter = cv2.INTER_AREA):
dim = None
(h, w) = image.shape[:2]
resized = np.zeros([height, width, 3], dtype = np.uint8)
if width is None and height is None:
return image, 0
if width is None:
r = height / float(h)
dim = (int(w * r), height)
elif height is None:
r = width / float(w)
dim = (width, int(h * r))
else:
rh = height / float(h)
rw = width / float(w)
if rh < rw:
r = rh
dim = (int(w * r), height)
else:
r = rw
dim = (width, int(h * r))
hdiff = int((height - dim[1])/2)
wdiff = int((width - dim[0])/2)
resized[hdiff:(hdiff + dim[1]),
wdiff:(wdiff + dim[0])] = cv2.resize(image, dim,
interpolation = inter)
logging.debug('%s, %s', dim, resized.shape)
return resized, r
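    # Minimal usage sketch (the frame below is hypothetical, not part of the
    # original code): a 1080x1920 BGR frame resized into a 512x512 canvas keeps
    # its aspect ratio (r = 512/1920, giving a 512x288 image) and is centered
    # vertically with black padding above and below:
    #   frame = np.zeros([1080, 1920, 3], dtype=np.uint8)
    #   resized, r = self.image_resize(frame, width=512, height=512)
    #   resized.shape  # -> (512, 512, 3)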
def showRecWindow(self): #open recording configuration window
self.configRecWindow.showWindow(self.recordingPath)
def configureRecord(self):
if self.videoPath != "":
self.w = self.roiBound[2] - self.roiBound[0]
self.h = self.roiBound[3] - self.roiBound[1]
logging.debug('%s, %s, %s', "configurerecord", self.w, self.h)
self.codecChoices = {'DIVX': cv2.VideoWriter_fourcc(*'DIVX'),
'MJPG': cv2.VideoWriter_fourcc('M','J','P','G'),
'FFV1': cv2.VideoWriter_fourcc('F','F','V','1')}
fourcc = self.codecChoices.get(self.configRecWindow.codec.currentText())
self.recordingPath = self.configRecWindow.textbox.toPlainText()
if self.configRecWindow.fourRec.isChecked() == True:
i = 2
else:
i = 1
w = 2 * 512 #TODO: include this in gui (check above)
h = i * 512
size = (w, h)
## fps = self.frameRate
fps = self.configRecWindow.fps.value() #fixed playback fps
self.out = cv2.VideoWriter(self.recordingPath, fourcc, fps, size)
self.merged_frame = np.empty([h, w, 3], dtype = np.uint8)
logging.debug('%s, %s', self.recordingPath, self.merged_frame.shape)
self.recordBtn.setEnabled(True)
videofile2 = self.configRecWindow.videoTextbox.toPlainText() #second video
logging.debug(videofile2)
if videofile2 != "":
# self.cap2 = cv2.VideoCapture(videofile2)
self.cap2 = pims.Video(videofile2)
self.configRecWindow.close()
self.seekSlider.setValue(1) #reset to beginning
# self.showContours.setChecked(False) #uncheck show contours
self.clear_data() #clear data
|
[
"PyQt5.QtGui.QIcon",
"logging.debug",
"cv2.imshow",
"pims.Video",
"PIL.ImageDraw.Draw",
"numpy.array",
"cv2.resizeWindow",
"cv2.line",
"PIL.ImageFont.truetype",
"cv2.VideoWriter",
"numpy.empty",
"cv2.VideoWriter_fourcc",
"tkinter.messagebox.showinfo",
"cv2.putText",
"cv2.cvtColor",
"cv2.resize",
"cv2.namedWindow",
"PIL.Image.fromarray",
"numpy.zeros",
"tkinter.Tk"
] |
[((352, 380), 'logging.debug', 'logging.debug', (['"""recordvideo"""'], {}), "('recordvideo')\n", (365, 380), False, 'import logging\n'), ((9583, 9612), 'logging.debug', 'logging.debug', (['"""record_frame"""'], {}), "('record_frame')\n", (9596, 9612), False, 'import logging\n'), ((11078, 11122), 'numpy.zeros', 'np.zeros', (['[height, width, 3]'], {'dtype': 'np.uint8'}), '([height, width, 3], dtype=np.uint8)\n', (11086, 11122), True, 'import numpy as np\n'), ((11882, 11925), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'inter'}), '(image, dim, interpolation=inter)\n', (11892, 11925), False, 'import cv2\n'), ((11991, 12034), 'logging.debug', 'logging.debug', (['"""%s, %s"""', 'dim', 'resized.shape'], {}), "('%s, %s', dim, resized.shape)\n", (12004, 12034), False, 'import logging\n'), ((13615, 13640), 'logging.debug', 'logging.debug', (['videofile2'], {}), '(videofile2)\n', (13628, 13640), False, 'import logging\n'), ((12412, 12474), 'logging.debug', 'logging.debug', (['"""%s, %s, %s"""', '"""configurerecord"""', 'self.w', 'self.h'], {}), "('%s, %s, %s', 'configurerecord', self.w, self.h)\n", (12425, 12474), False, 'import logging\n'), ((13265, 13319), 'cv2.VideoWriter', 'cv2.VideoWriter', (['self.recordingPath', 'fourcc', 'fps', 'size'], {}), '(self.recordingPath, fourcc, fps, size)\n', (13280, 13319), False, 'import cv2\n'), ((13355, 13390), 'numpy.empty', 'np.empty', (['[h, w, 3]'], {'dtype': 'np.uint8'}), '([h, w, 3], dtype=np.uint8)\n', (13363, 13390), True, 'import numpy as np\n'), ((13406, 13474), 'logging.debug', 'logging.debug', (['"""%s, %s"""', 'self.recordingPath', 'self.merged_frame.shape'], {}), "('%s, %s', self.recordingPath, self.merged_frame.shape)\n", (13419, 13474), False, 'import logging\n'), ((13752, 13774), 'pims.Video', 'pims.Video', (['videofile2'], {}), '(videofile2)\n', (13762, 13774), False, 'import pims\n'), ((7806, 7919), 'cv2.putText', 'cv2.putText', (['self.merged_frame', 'text', 'bottomLeftCornerOfText', 'font', 'fontScale', 'fontColor', 'thickness', 'lineType'], {}), '(self.merged_frame, text, bottomLeftCornerOfText, font,\n fontScale, fontColor, thickness, lineType)\n', (7817, 7919), False, 'import cv2\n'), ((8041, 8087), 'logging.debug', 'logging.debug', (['"""%s, %s"""', 'scaleFactor', '"""scalef"""'], {}), "('%s, %s', scaleFactor, 'scalef')\n", (8054, 8087), False, 'import logging\n'), ((8464, 8535), 'cv2.line', 'cv2.line', (['self.merged_frame', 'scalepos1', 'scalepos2', 'fontColor', 'thickness'], {}), '(self.merged_frame, scalepos1, scalepos2, fontColor, thickness)\n', (8472, 8535), False, 'import cv2\n'), ((8788, 8840), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', '(28)'], {'encoding': '"""unic"""'}), "('arial.ttf', 28, encoding='unic')\n", (8806, 8840), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((8868, 8902), 'PIL.Image.fromarray', 'Image.fromarray', (['self.merged_frame'], {}), '(self.merged_frame)\n', (8883, 8902), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((8927, 8950), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_pil'], {}), '(img_pil)\n', (8941, 8950), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((9063, 9080), 'numpy.array', 'np.array', (['img_pil'], {}), '(img_pil)\n', (9071, 9080), True, 'import numpy as np\n'), ((9104, 9162), 'logging.debug', 'logging.debug', (['"""%s, %s, %s"""', 'self.merged_frame.shape', 'w', 'h'], {}), "('%s, %s, %s', self.merged_frame.shape, w, h)\n", (9117, 9162), False, 'import logging\n'), ((9231, 9289), 'cv2.namedWindow', 
'cv2.namedWindow', (['"""Recording Preview"""', 'cv2.WINDOW_KEEPRATIO'], {}), "('Recording Preview', cv2.WINDOW_KEEPRATIO)\n", (9246, 9289), False, 'import cv2\n'), ((9307, 9357), 'cv2.imshow', 'cv2.imshow', (['"""Recording Preview"""', 'self.merged_frame'], {}), "('Recording Preview', self.merged_frame)\n", (9317, 9357), False, 'import cv2\n'), ((9375, 9422), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""Recording Preview"""', '(800)', '(400)'], {}), "('Recording Preview', 800, 400)\n", (9391, 9422), False, 'import cv2\n'), ((9720, 9746), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""images/record.png"""'], {}), "('images/record.png')\n", (9725, 9746), False, 'from PyQt5.QtGui import QIcon\n'), ((10309, 10338), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""images/recording.png"""'], {}), "('images/recording.png')\n", (10314, 10338), False, 'from PyQt5.QtGui import QIcon\n'), ((12517, 12548), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (12539, 12548), False, 'import cv2\n'), ((12592, 12634), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (12614, 12634), False, 'import cv2\n'), ((12680, 12722), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""F"""', '"""F"""', '"""V"""', '"""1"""'], {}), "('F', 'F', 'V', '1')\n", (12702, 12722), False, 'import cv2\n'), ((966, 1006), 'cv2.cvtColor', 'cv2.cvtColor', (['frame2', 'cv2.COLOR_GRAY2BGR'], {}), '(frame2, cv2.COLOR_GRAY2BGR)\n', (978, 1006), False, 'import cv2\n'), ((1823, 1827), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (1825, 1827), False, 'from tkinter import messagebox, Tk\n'), ((1894, 1982), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Error!"""', '"""Check 2nd video file or force data file. Not found!"""'], {}), "('Error!',\n 'Check 2nd video file or force data file. Not found!')\n", (1913, 1982), False, 'from tkinter import messagebox, Tk\n'), ((2995, 3025), 'logging.debug', 'logging.debug', (['"""2nd video end"""'], {}), "('2nd video end')\n", (3008, 3025), False, 'import logging\n')]
|
from abc import ABC, abstractmethod
import collections
import statistics
import numpy as np
import sklearn.metrics
import torch
class Evaluator(ABC):
"""Class to evaluate model outputs and report the result.
"""
def __init__(self):
self.reset()
@abstractmethod
def add_predictions(self, predictions, targets):
pass
@abstractmethod
def get_report(self):
pass
@abstractmethod
def reset(self):
pass
class MulticlassClassificationEvaluator(Evaluator):
def add_predictions(self, predictions, targets):
""" Evaluate a batch of predictions.
Args:
predictions: the model output tensor. Shape (N, num_class)
targets: the golden truths. Shape (N,)
"""
assert len(predictions) == len(targets)
assert len(targets.shape) == 1
# top-1 accuracy
_, indices = torch.topk(predictions, 1)
correct = indices.view(-1).eq(targets)
correct_num = int(correct.long().sum(0))
self.top1_correct_num += correct_num
# top-5 accuracy
k = min(5, predictions.shape[1])
_, indices = torch.topk(predictions, k)
correct = indices == targets.view(-1, 1).long().expand(-1, k)
self.top5_correct_num += int(correct.long().sum())
# Average precision
target_vec = torch.zeros_like(predictions, dtype=torch.uint8)
for i, t in enumerate(targets):
target_vec[i, t] = 1
ap = sklearn.metrics.average_precision_score(target_vec.view(-1).cpu().numpy(), predictions.view(-1).cpu().numpy(), average='macro')
self.ap += ap * len(predictions)
self.total_num += len(predictions)
def get_report(self):
return {'top1_accuracy': float(self.top1_correct_num) / self.total_num if self.total_num else 0.0,
'top5_accuracy': float(self.top5_correct_num) / self.total_num if self.total_num else 0.0,
'average_precision': self.ap / self.total_num if self.total_num else 0.0}
def reset(self):
self.top1_correct_num = 0
self.top5_correct_num = 0
self.ap = 0
self.total_num = 0
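# Minimal usage sketch (not part of the original file): scores for 4 samples
# over 3 classes with integer class targets.
#   evaluator = MulticlassClassificationEvaluator()
#   evaluator.add_predictions(torch.randn(4, 3), torch.tensor([0, 2, 1, 0]))
#   evaluator.get_report()  # top1/top5 accuracy and average precision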
class MultilabelClassificationEvaluator(Evaluator):
def add_predictions(self, predictions, targets):
""" Evaluate a batch of predictions.
Args:
predictions: the model output tensor. Shape (N, num_class)
targets: the golden truths. Shape (N, num_class)
"""
assert len(predictions) == len(targets)
targets = targets.to(torch.uint8)
num = torch.mul(predictions > 0.5, targets).long().sum(1) # shape (N,)
den = torch.add(predictions > 0.5, targets).ge(1).long().sum(1) # shape (N,)
den[den == 0] = 1 # To avoid zero-division. If den==0, num should be zero as well.
self.correct_num += torch.sum(num.to(torch.float32) / den.to(torch.float32))
ap = sklearn.metrics.average_precision_score(targets.view(-1).cpu().numpy(), predictions.view(-1).cpu().numpy(), average='macro')
self.ap += ap * len(predictions)
self.total_num += len(predictions)
def get_report(self):
return {'accuracy_50': float(self.correct_num) / self.total_num if self.total_num else 0.0,
'average_precision': self.ap / self.total_num if self.total_num else 0.0}
def reset(self):
self.correct_num = 0
self.ap = 0
self.total_num = 0
class ObjectDetectionSingleIOUEvaluator(Evaluator):
def __init__(self, iou):
super(ObjectDetectionSingleIOUEvaluator, self).__init__()
self.iou = iou
def add_predictions(self, predictions, targets):
""" Evaluate list of image with object detection results using single IOU evaluation.
Args:
predictions: list of predictions [[[label_idx, probability, L, T, R, B], ...], [...], ...]
targets: list of image targets [[[label_idx, L, T, R, B], ...], ...]
"""
assert len(predictions) == len(targets)
eval_predictions = collections.defaultdict(list)
eval_ground_truths = collections.defaultdict(dict)
for img_idx, prediction in enumerate(predictions):
for bbox in prediction:
label = int(bbox[0])
eval_predictions[label].append([img_idx, float(bbox[1]), float(bbox[2]), float(bbox[3]), float(bbox[4]), float(bbox[5])])
for img_idx, target in enumerate(targets):
for bbox in target:
label = int(bbox[0])
if img_idx not in eval_ground_truths[label]:
eval_ground_truths[label][img_idx] = []
eval_ground_truths[label][img_idx].append([float(bbox[1]), float(bbox[2]), float(bbox[3]), float(bbox[4])])
class_indices = set(list(eval_predictions.keys()) + list(eval_ground_truths.keys()))
for class_index in class_indices:
is_correct, probabilities = self._evaluate_predictions(eval_ground_truths[class_index], eval_predictions[class_index], self.iou)
true_num = sum([len(l) for l in eval_ground_truths[class_index].values()])
self.is_correct[class_index].extend(is_correct)
self.probabilities[class_index].extend(probabilities)
self.true_num[class_index] += true_num
def _calculate_area(self, rect):
        w = rect[2] - rect[0] + 1e-5
        h = rect[3] - rect[1] + 1e-5
return float(w * h) if w > 0 and h > 0 else 0.0
def _calculate_iou(self, rect0, rect1):
rect_intersect = [max(rect0[0], rect1[0]),
max(rect0[1], rect1[1]),
min(rect0[2], rect1[2]),
min(rect0[3], rect1[3])]
area_intersect = self._calculate_area(rect_intersect)
return area_intersect / (self._calculate_area(rect0) + self._calculate_area(rect1) - area_intersect)
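    # Worked example: for rect0 = [0, 0, 10, 10] and rect1 = [5, 5, 15, 15] the
    # intersection rectangle is [5, 5, 10, 10] with area ~25, the union is
    # ~100 + 100 - 25 = 175, so the IOU is roughly 25 / 175 ~= 0.143 (the 1e-5
    # terms in _calculate_area only give degenerate boxes a tiny positive extent).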
def _is_true_positive(self, prediction, ground_truth, already_detected, iou_threshold):
image_id = prediction[0]
prediction_rect = prediction[2:6]
if image_id not in ground_truth:
return False, already_detected
ious = np.array([self._calculate_iou(prediction_rect, g) for g in ground_truth[image_id]])
best_bb = np.argmax(ious)
best_iou = ious[best_bb]
if best_iou < iou_threshold or (image_id, best_bb) in already_detected:
return False, already_detected
already_detected.add((image_id, best_bb))
return True, already_detected
def _evaluate_predictions(self, ground_truths, predictions, iou_threshold):
""" Evaluate the correctness of the given predictions.
Args:
ground_truths: List of ground truths for the class. {image_id: [[left, top, right, bottom], [...]], ...}
predictions: List of predictions for the class. [[image_id, probability, left, top, right, bottom], [...], ...]
            iou_threshold: Minimum IOU threshold for a detection to be considered a match to a ground-truth box.
"""
# Sort the predictions by the probability
sorted_predictions = sorted(predictions, key=lambda x: -x[1])
already_detected = set()
is_correct = []
for prediction in sorted_predictions:
correct, already_detected = self._is_true_positive(prediction, ground_truths, already_detected,
iou_threshold)
is_correct.append(correct)
is_correct = np.array(is_correct)
probabilities = np.array([p[1] for p in sorted_predictions])
return is_correct, probabilities
def _calculate_average_precision(self, is_correct, probabilities, true_num):
if true_num == 0:
return 0
if not is_correct or not any(is_correct):
return 0
recall = float(np.sum(is_correct)) / true_num
return sklearn.metrics.average_precision_score(is_correct, probabilities) * recall
def get_report(self):
all_aps = []
for class_index in self.is_correct:
ap = self._calculate_average_precision(self.is_correct[class_index],
self.probabilities[class_index],
self.true_num[class_index])
all_aps.append(ap)
mean_ap = statistics.mean(all_aps) if all_aps else 0
return {'mAP_{}'.format(int(self.iou*100)): mean_ap}
def reset(self):
self.is_correct = collections.defaultdict(list)
self.probabilities = collections.defaultdict(list)
self.true_num = collections.defaultdict(int)
class ObjectDetectionEvaluator(Evaluator):
def __init__(self, iou_values=[0.3, 0.5, 0.75, 0.9]):
self.evaluators = [ObjectDetectionSingleIOUEvaluator(iou) for iou in iou_values]
super(ObjectDetectionEvaluator, self).__init__()
def add_predictions(self, predictions, targets):
for evaluator in self.evaluators:
evaluator.add_predictions(predictions, targets)
def get_report(self):
report = {}
for evaluator in self.evaluators:
report.update(evaluator.get_report())
return report
def reset(self):
for evaluator in self.evaluators:
evaluator.reset()
|
[
"statistics.mean",
"torch.mul",
"torch.topk",
"numpy.argmax",
"numpy.array",
"numpy.sum",
"torch.add",
"collections.defaultdict",
"torch.zeros_like"
] |
[((904, 930), 'torch.topk', 'torch.topk', (['predictions', '(1)'], {}), '(predictions, 1)\n', (914, 930), False, 'import torch\n'), ((1160, 1186), 'torch.topk', 'torch.topk', (['predictions', 'k'], {}), '(predictions, k)\n', (1170, 1186), False, 'import torch\n'), ((1366, 1414), 'torch.zeros_like', 'torch.zeros_like', (['predictions'], {'dtype': 'torch.uint8'}), '(predictions, dtype=torch.uint8)\n', (1382, 1414), False, 'import torch\n'), ((4069, 4098), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (4092, 4098), False, 'import collections\n'), ((4128, 4157), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (4151, 4157), False, 'import collections\n'), ((6290, 6305), 'numpy.argmax', 'np.argmax', (['ious'], {}), '(ious)\n', (6299, 6305), True, 'import numpy as np\n'), ((7523, 7543), 'numpy.array', 'np.array', (['is_correct'], {}), '(is_correct)\n', (7531, 7543), True, 'import numpy as np\n'), ((7568, 7612), 'numpy.array', 'np.array', (['[p[1] for p in sorted_predictions]'], {}), '([p[1] for p in sorted_predictions])\n', (7576, 7612), True, 'import numpy as np\n'), ((8538, 8567), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (8561, 8567), False, 'import collections\n'), ((8597, 8626), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (8620, 8626), False, 'import collections\n'), ((8651, 8679), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (8674, 8679), False, 'import collections\n'), ((8386, 8410), 'statistics.mean', 'statistics.mean', (['all_aps'], {}), '(all_aps)\n', (8401, 8410), False, 'import statistics\n'), ((7878, 7896), 'numpy.sum', 'np.sum', (['is_correct'], {}), '(is_correct)\n', (7884, 7896), True, 'import numpy as np\n'), ((2595, 2632), 'torch.mul', 'torch.mul', (['(predictions > 0.5)', 'targets'], {}), '(predictions > 0.5, targets)\n', (2604, 2632), False, 'import torch\n'), ((2675, 2712), 'torch.add', 'torch.add', (['(predictions > 0.5)', 'targets'], {}), '(predictions > 0.5, targets)\n', (2684, 2712), False, 'import torch\n')]
|
"""
This module extracts features from the data, saves the features
from all measurements to a global results file, and creates
one file for every sensor with all measurements.
:copyright: (c) 2022 by <NAME>, Hochschule-Bonn-Rhein-Sieg
:license: see LICENSE for more details.
"""
import pandas as pd
from scipy.signal import chirp, find_peaks, peak_widths
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
######################################################################################################################
## this class creates objects for every sensor and stores all measurment for this sensor ##
class Sensor:
"""This is for creating objects for every sensor and stores the data from all measurements
in this object. Sensor names are picked up in properties. One DataFrame for every sensor is created
Args:
properties (dictionary): properties is a dictionary with all parameters for evaluating the data
"""
def __init__(self, properties):
"""
constructor method
"""
        df_dict = {}  # dict with a DataFrame for each sensor, one for all measurements
self.properties = properties
for sensor in self.properties['sensors']:
df_dict[sensor] = pd.DataFrame()
self.df_dict = df_dict
def add_item(self,df, name): # append data from measurement in global sensor df
"""This function sorts the passed DataFrame into those of the sensor
object and names the respective columns with the name of the measurement.
Args:
df (pandas.DataFrame): The columns of the DataFrame should match those in the properties.json file.
name (string): Measurement name
"""
        sensors_to_delete = []
        for sensor in self.df_dict:
            try:
                self.df_dict[sensor][name] = df[sensor]
            except:
                print('no data for {0} in {1}'.format(sensor, name))
                sensors_to_delete.append(sensor)
        # deleting dataframes for sensors not included in measurements
        for del_sensor in sensors_to_delete:
            del self.df_dict[del_sensor]
            print('deleting {} from results'.format(del_sensor))
def save_items(self, path): # save one file for every sensor with all measurements
"""This function saves all DataFrames contained in the sensor object, one file
is saved per sensor. A folder "results" is created in the root folder where
the files are stored.
Args:
path (string): Path to the folder in which the measurement folders are stored
"""
for sensor in self.df_dict:
name = sensor + '_gesamt'
save_df(self.df_dict[sensor], path, name)
######################################################################################################################
## this class creates plots with all sensors for every measurement ##
class Plot:
"""
This class creates plot objects. For each one, an image with all sensors of a measurement is created.
Args:
name (string): Name of the measurement
size (int): Number of sensors to be plotted
properties (dictionary): properties is a dictionary with all parameters for evaluating the data
"""
def __init__(self,name, size, properties):
"""
constructor method
"""
self.plot_properties = properties['plot_properties']['measurement_plot']
self.properties = properties
self.fig, self.axs = plt.subplots(size, sharex=True, dpi=self.plot_properties['dpi'], figsize=self.plot_properties['size'])
self.name = name
self.i = 0
def add_subplot(self, sensor, df_corr, peak_properties, results_half, results_full, peaks):
"""This function assigns a subplot for the corresponding sensor to the plot object.
Args:
sensor (string): Name of the sensor
df_corr (pandas.DataFrame): Dataframe with prepared data from measurement
peak_properties (dictionary): peak_properties is a dictionary with data about extracted peaks
results_half (numpy.array): Array with from measurement extracted feauters for the half peak
results_full (numpy.array): Array with from measurement extracted feauters for the full peak
peaks (numpy.array): Array with from measurement extracted feauters for detected peaks
"""
self.axs[self.i].plot(df_corr[sensor], color=self.properties['sensors'][sensor]['color'])
## print peaks in plot
if peaks.size != 0:
self.axs[self.i].plot(df_corr.index[peaks], df_corr[sensor][df_corr.index[peaks]], "x")
self.axs[self.i].vlines(x=df_corr.index[peaks][0], ymin=df_corr[sensor][df_corr.index[peaks][0]] - peak_properties["prominences"],
ymax=df_corr[sensor][df_corr.index[peaks][0]], color="C1")
self.axs[self.i].hlines(y=peak_properties["width_heights"], xmin=df_corr.index[int(peak_properties["left_ips"])],
xmax=df_corr.index[int(peak_properties["right_ips"])], color="C1")
self.axs[self.i].hlines(y=results_full[1], xmin=df_corr.index[int(results_full[2])],
xmax=df_corr.index[int(results_full[3])],
color="C2")
self.axs[self.i].hlines(y=results_half[1], xmin=df_corr.index[int(results_half[2])],
xmax=df_corr.index[int(results_half[3])],
color="C2")
label = sensor + ' [V]'
self.axs[self.i].set_ylabel(label, rotation=0, loc='top', fontsize = self.plot_properties['label_size'])
self.axs[self.i].tick_params(axis='y', labelsize= self.plot_properties['font_size'])
self.axs[self.i].grid()
try:
self.axs[self.i].set_yticks(np.arange(0,np.max(df_corr[sensor]),round(np.max(df_corr[sensor])/3, 2)))
except:
self.axs[self.i].set_yticks(np.arange(0,5,5/3))
self.i = self.i +1
def show_fig(self, path):
"""This function saves the created plot object in the folder "results\\plots\\single_measurements".
Args:
path (string): Path to the folder in which the measurement folders are stored
"""
self.axs[-1].set_xlabel("time [s]" , fontsize = self.plot_properties['label_size'])
plt.xticks(fontsize=self.plot_properties['font_size'])
self.axs[-1].get_shared_x_axes().join(*self.axs)
self.fig.tight_layout()
path = path + '\\results\\plots\\single_measurements'
Path(path).mkdir(parents=True, exist_ok=True)
path = path + '\\' + self.name + '.jpeg'
self.fig.tight_layout()
# plt.show()
self.fig.savefig(path)
plt.close(self.fig)
def width_clip(x, threshold):
"""This function extracts the feauter "width clip", which calculates the length at which a signal is too large for the measuring range.
Args:
x (list): Time series from which the feature is to be extracted
threshold (float): Value from which an exceeding of the measuring range is
Return:
width clip (float): Returns the length in which the signal is greater than the measuring range.
"""
x = x.tolist()
flag = False
list_peaks = []
start = 0
end = 0
for i in range(len(x)):
if flag == False and x[i] > threshold:
flag = True
start = i
elif flag == True and x[i] < threshold:
flag = False
end = i
list_peaks.append(end-start)
if len(list_peaks) == 0 or np.max(list_peaks) <= 4:
return 0
else:
return np.max(list_peaks)
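# Worked example: with threshold = 4.9, a trace that stays above the threshold
# for 8 consecutive samples yields 8, while runs of 4 samples or fewer are
# treated as noise and return 0:
#   width_clip(np.array([0, 5, 5, 5, 5, 5, 5, 5, 5, 0]), 4.9)  # -> 8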
def running_mean(x):
"""This function calculates a moving average of a time series of data. Here N is the sample interval over which the smoothing takes place.
Args:
x (list): Time series to be smoothed
Returns:
smoothed data (list): Returns the smoothed data
"""
    N = 20  # number of values to average over (smoothing window)
return np.convolve(x, np.ones((N,))/N)[(N-1):]
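# Worked example: np.convolve in its default 'full' mode returns
# len(x) + N - 1 values; slicing off the first N - 1 leaves len(x) trailing
# window averages, so running_mean(np.ones(100)).shape == (100,). Note the last
# N - 1 values are damped by the zero padding at the edge.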
def get_slope(x,t):
"""This function calculates the slope of a peak from exceeding the threshold to the maximum.
Args:
x (list): x Values from which the slope is to be determined
t (list): time section from which the slope is to be determined
Returns:
slope (float): slope of the section
"""
end = 0
flag = False
for i in range(len(x)-1):
if flag == False:
if x[i+1] > x[i]:
pass
else:
end = i
flag = True
slope = (x[end]-x[0])/(t[end]-t[0])
return slope
def evaluate_sensor(df, sensor, threshold):
"""This function calculates the slope of a peak from exceeding the threshold to the maximum.
Args:
df (pandas.DataFrame): DateFrame with all sensors from one measurement
sensor (string): sensor to evaluate
threshold (float): Value from which an exceeding of the measuring range is determined
Return:
peaks (numpy.array): extracted peaks
properties (dictionary): properties of measurement
results_half (numpy.array): extracted feauters from peak half
results_full (numpy.array): extracted feauters from peak full
result_dict (dictionary): dictionary with extracted feauters
"""
peaks, peak_properties = find_peaks(df[sensor], prominence=0, width=1, distance=20000, height=threshold)
results_half = peak_widths(df[sensor], peaks, rel_height=0.5)
results_full = peak_widths(df[sensor], peaks, rel_height=0.99)
try:
df_peak = pd.DataFrame(df[sensor].iloc[int(results_full[2]):int(results_full[3])])
except:
pass
# functions for feature extraction
def get_peak():
return df.index[int(peaks[0])]
def get_start():
return df.index[int(results_full[2])]
def get_stop():
return df.index[int(results_full[3])]
def get_width():
return df.index[int(results_full[3])] - df.index[int(results_full[2])]
def get_width_half():
return df.index[int(results_half[3])] - df.index[int(results_half[2])]
def get_height():
return df[sensor][df.index[int(peaks[0])]]
def get_integral():
        return np.trapz(df_peak[sensor], x=df_peak.index)
def get_slope_2():
return get_slope(df_peak[sensor].tolist(), df_peak.index.tolist())
def get_width_clip():
return width_clip(df[sensor], 4.9)
    def get_width_height():
        return (df.index[int(results_full[3])] - df.index[int(results_full[2])])/(df[sensor][df.index[int(peaks[0])]])
    values = [get_peak, get_start, get_stop, get_width, get_width_half, get_height, get_integral, get_slope_2, get_width_clip, get_width_height]
    features = "peak[s] start[s] stop[s] width[s] width_half[s] height[V] integral[Vs] slope[V/s] width_clip[s] width_height[s/V]".split()
#build the json result for this measurement
result_dict = {}
for feature, value in zip(features,values):
name = "{0}_{1} {2}".format(sensor, feature[:feature.find('[')], feature[feature.find('['):])
try:
result_dict[name] = value()
except:
result_dict[name] = 0
return (peaks, peak_properties, results_half, results_full, result_dict)
def cut_peakarea(df, sensor_to_cut,sensors_norm):
"""This function cuts out the sigbificant range of a measurement.
A part from the maximum of the "sensor_to_cut" - "place_before_peak"
to the maximum of the "sensor_to_cut" + "place_after_peak" is cut out.
In addition, columns with the smoothed data of the corresponding sensors are added.
Args:
df (pandas.DataFrame): DateFrame with all sensors from one measurement
sensor_to_cut (string): Sensor with which the time period is to be determined
sensors_norm (list): List of sensors to be normalised
Returns:
df_corr (pandas.DataFrame): DataFrame with significant range of measurement with smoothed values
"""
place_before_peak = 1000
place_after_peak = 10000
step = 0.00001
len_signal = step * (place_after_peak + place_before_peak)
    # cut the important part of the measurement, take absolute values and add running-mean columns
try:
# error = 1/0
index_sensor_to_cut_max = df[sensor_to_cut].idxmax(axis = 0)
if index_sensor_to_cut_max <= place_before_peak:
index_sensor_to_cut_max = place_before_peak
elif index_sensor_to_cut_max >= (len(df.index)- place_after_peak):
index_sensor_to_cut_max = len(df.index)- place_after_peak
except:
print('no maximum found')
index_sensor_to_cut_max = len(df.index)//2
df_corr = df.iloc[index_sensor_to_cut_max - place_before_peak:index_sensor_to_cut_max + place_after_peak].apply(np.abs)
df_corr['time [s]'] = np.arange(0, 0.11, 0.00001)
for sensor in sensors_norm:
df_corr[[sensor + '_nm']] = df_corr[[sensor]].apply(running_mean)
df_corr.set_index('time [s]', inplace=True)
return df_corr
## saving the result df ##
def save_df(df, path, name):
"""This function saves a DataFrame to csv in the results folder.
    Args:
df (pandas.DataFrame): DataFrame to save
path (string): path to root directory of data
name (string): Name under which the file is to be saved
"""
path = path + '\\results'
Path(path).mkdir(parents=True, exist_ok=True)
path = path + '\\' + name + '.csv'
    print(name + ' saved as ' + path)
df.to_csv(path, sep=';', decimal=',', index = True)
def read_file(path,decimal,name, path_out, object_raw, properties):
"""This function reads files of the raw data. The data is evaluated
and features are extracted. A plot is created for each file.
The function returns a dict with all extracted features
Args:
path (string): path to measurements file
decimal (string): decimal of stored data
name (string): name of the measurement
path_out (string): path to save the figures
object_raw (object): figure object for plotting measurement
properties (dictionary): properties from properties json
Returns:
        dict_result (dictionary): dictionary with all extracted features for a measurement
"""
sensors = properties['sensors']
path = path + path[path.rfind('\\'):] + '.txt'
dict_result = {}
df_measurement = pd.read_csv(path, delimiter='\t', decimal=decimal, dtype=float)
df_corr = cut_peakarea(df_measurement, properties['sensor_to_cut'], properties['sensors_norm'])
object_raw.add_item(df_corr, name) # adding data from measurement to df for each sensor including all measurements
fig = Plot(name,len(df_corr.columns), properties)
df_corr = df_corr.reindex(sorted(df_corr.columns), axis=1)
for this_sensor in df_corr.columns:
peaks, peak_properties, results_half, results_full, this_dict_result = evaluate_sensor(df_corr, this_sensor, sensors[this_sensor]['threshold'])
dict_result.update(this_dict_result)
fig.add_subplot(this_sensor, df_corr, peak_properties, results_half, results_full, peaks)
fig.show_fig(path_out)
return dict_result
|
[
"numpy.trapz",
"numpy.ones",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"pathlib.Path",
"numpy.max",
"matplotlib.pyplot.close",
"scipy.signal.peak_widths",
"scipy.signal.find_peaks",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"numpy.arange"
] |
[((9673, 9752), 'scipy.signal.find_peaks', 'find_peaks', (['df[sensor]'], {'prominence': '(0)', 'width': '(1)', 'distance': '(20000)', 'height': 'threshold'}), '(df[sensor], prominence=0, width=1, distance=20000, height=threshold)\n', (9683, 9752), False, 'from scipy.signal import chirp, find_peaks, peak_widths\n'), ((9772, 9818), 'scipy.signal.peak_widths', 'peak_widths', (['df[sensor]', 'peaks'], {'rel_height': '(0.5)'}), '(df[sensor], peaks, rel_height=0.5)\n', (9783, 9818), False, 'from scipy.signal import chirp, find_peaks, peak_widths\n'), ((9838, 9885), 'scipy.signal.peak_widths', 'peak_widths', (['df[sensor]', 'peaks'], {'rel_height': '(0.99)'}), '(df[sensor], peaks, rel_height=0.99)\n', (9849, 9885), False, 'from scipy.signal import chirp, find_peaks, peak_widths\n'), ((13172, 13197), 'numpy.arange', 'np.arange', (['(0)', '(0.11)', '(1e-05)'], {}), '(0, 0.11, 1e-05)\n', (13181, 13197), True, 'import numpy as np\n'), ((14769, 14832), 'pandas.read_csv', 'pd.read_csv', (['path'], {'delimiter': '"""\t"""', 'decimal': 'decimal', 'dtype': 'float'}), "(path, delimiter='\\t', decimal=decimal, dtype=float)\n", (14780, 14832), True, 'import pandas as pd\n'), ((3601, 3708), 'matplotlib.pyplot.subplots', 'plt.subplots', (['size'], {'sharex': '(True)', 'dpi': "self.plot_properties['dpi']", 'figsize': "self.plot_properties['size']"}), "(size, sharex=True, dpi=self.plot_properties['dpi'], figsize=\n self.plot_properties['size'])\n", (3613, 3708), True, 'import matplotlib.pyplot as plt\n'), ((6463, 6517), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': "self.plot_properties['font_size']"}), "(fontsize=self.plot_properties['font_size'])\n", (6473, 6517), True, 'import matplotlib.pyplot as plt\n'), ((6873, 6892), 'matplotlib.pyplot.close', 'plt.close', (['self.fig'], {}), '(self.fig)\n', (6882, 6892), True, 'import matplotlib.pyplot as plt\n'), ((7825, 7843), 'numpy.max', 'np.max', (['list_peaks'], {}), '(list_peaks)\n', (7831, 7843), True, 'import numpy as np\n'), ((10559, 10601), 'numpy.trapz', 'np.trapz', (['df_peak[sensor]'], {'x': 'df_peak.index'}), '(df_peak[sensor], x=df_peak.index)\n', (10567, 10601), True, 'import numpy as np\n'), ((1296, 1310), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1308, 1310), True, 'import pandas as pd\n'), ((7758, 7776), 'numpy.max', 'np.max', (['list_peaks'], {}), '(list_peaks)\n', (7764, 7776), True, 'import numpy as np\n'), ((13741, 13751), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (13745, 13751), False, 'from pathlib import Path\n'), ((6677, 6687), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6681, 6687), False, 'from pathlib import Path\n'), ((8247, 8260), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (8254, 8260), True, 'import numpy as np\n'), ((5942, 5965), 'numpy.max', 'np.max', (['df_corr[sensor]'], {}), '(df_corr[sensor])\n', (5948, 5965), True, 'import numpy as np\n'), ((6060, 6082), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(5 / 3)'], {}), '(0, 5, 5 / 3)\n', (6069, 6082), True, 'import numpy as np\n'), ((5972, 5995), 'numpy.max', 'np.max', (['df_corr[sensor]'], {}), '(df_corr[sensor])\n', (5978, 5995), True, 'import numpy as np\n')]
|
import krippendorff
import pandas as pd
import numpy as np
from . import utils
def r_to_z(r):
return np.arctanh(r)
def z_to_r(z):
return np.tanh(z)
def confidence_interval(r, conf_level=95, stat=np.mean):
z = r_to_z(r)
ci = utils.bootstrap_ci(z, stat=stat, conf_level=conf_level)
ci = z_to_r(ci)
return pd.Series({'lo': ci[0], 'hi': ci[1]})
def average(r):
return z_to_r(np.mean(r_to_z(r)))
def krippendorffs_alpha(data, level_of_measurement='ratio'):
return krippendorff.alpha(reliability_data=data,
level_of_measurement=level_of_measurement)
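# Minimal usage sketch (not part of the original module): reliability_data is a
# (coders x units) matrix, with np.nan marking missing ratings; two coders who
# agree perfectly give alpha == 1.0.
#   data = np.array([[1, 2, 3, 3], [1, 2, 3, 3]])
#   krippendorffs_alpha(data, level_of_measurement='ratio')  # -> 1.0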
|
[
"numpy.arctanh",
"pandas.Series",
"numpy.tanh",
"krippendorff.alpha"
] |
[((107, 120), 'numpy.arctanh', 'np.arctanh', (['r'], {}), '(r)\n', (117, 120), True, 'import numpy as np\n'), ((149, 159), 'numpy.tanh', 'np.tanh', (['z'], {}), '(z)\n', (156, 159), True, 'import numpy as np\n'), ((334, 371), 'pandas.Series', 'pd.Series', (["{'lo': ci[0], 'hi': ci[1]}"], {}), "({'lo': ci[0], 'hi': ci[1]})\n", (343, 371), True, 'import pandas as pd\n'), ((504, 593), 'krippendorff.alpha', 'krippendorff.alpha', ([], {'reliability_data': 'data', 'level_of_measurement': 'level_of_measurement'}), '(reliability_data=data, level_of_measurement=\n level_of_measurement)\n', (522, 593), False, 'import krippendorff\n')]
|
import numpy as np  # we use numpy a lot
def main():
i = 0 #declare i = 0
n = 10 #declare n = 10
    x = 119.0  # float x; floats are written with a decimal point
#we can use numpy to quickly make arrays
y = np.zeros(n, dtype=float) #declares 10 zeros
#we can use for loops to iterate through a variable
for i in range(n): #i in range [0, n-1]
y[i] = 2.0 * float(i) + 1.0 #set y = 2i+1 as floats
for y_element in y:
print(y_element)
if __name__ == "__main__":
main()
|
[
"numpy.zeros"
] |
[((189, 213), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (197, 213), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib as plt
from collections import Counter
from math import log
import sys
import time
class ListQueue:
def __init__(self, capacity):
self.__capacity = capacity
self.__data = [None] * self.__capacity
self.__size = 0
self.__front = 0
self.__end = 0
def __len__(self):
return self.__size
def is_empty(self):
return self.__size == 0
def first(self):
if self.is_empty():
print('Queue is empty.')
else:
return self.__data[self.__front]
def dequeue(self):
if self.is_empty():
print('Queue is empty.')
return None
answer = self.__data[self.__front]
self.__data[self.__front] = None
self.__front = (self.__front + 1) % self.__capacity
self.__size -= 1
return answer
def enqueue(self, e):
if self.__size == self.__capacity:
print('The queue is full.')
return None
self.__data[self.__end] = e
self.__end = (self.__end + 1) % self.__capacity
self.__size += 1
def __str__(self):
return str(self.__data)
def __repr__(self):
return str(self)
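# Minimal usage sketch (not part of the original file): the queue wraps around
# its fixed-size backing list.
#   q = ListQueue(3)
#   q.enqueue(1); q.enqueue(2)
#   q.dequeue()   # -> 1, front index advances
#   q.enqueue(3); q.enqueue(4)  # the 4 is stored at index 0 of the backing list
#   len(q)        # -> 3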
class TreeNode():
"""树结点"""
def __init__(self, feature_idx=None, feature_val=None, feature_name=None, node_val=None, child=None):
"""
feature_idx:
该结点对应的划分特征索引
feature_val:
划分特征对应的值 二叉树 所以这里的value应该是一个特定的值
feature_name:
划分特征名
node_val:
该结点存储的值,**只有叶结点才存储类别**
child:
子树
"""
self._feature_idx = feature_idx
self._feature_val = feature_val
self._feature_name = feature_name
        # leaf nodes store the class label
self._node_val = node_val
        # internal nodes store the split information
self._child = child
def DFSearch(self):
if self._child != None:
print(self._feature_name)
print(self._feature_val)
if self._child is None:
print(self._node_val)
return
else:
if self._child[0] is not None:
self._child[0].DFSearch()
if self._child[1] is not None:
self._child[1].DFSearch()
def BFSearch(self):
q = ListQueue(2000)
q.enqueue(self)
while q.is_empty() is False:
cNode = q.dequeue()
if cNode._child is not None:
q.enqueue(cNode._child[0])
q.enqueue(cNode._child[1])
if cNode._feature_name is not None and cNode._feature_val is not None:
print(cNode._feature_name)
print(cNode._feature_val)
elif cNode._node_val is not None:
print(cNode._node_val)
class DecisionTreeScratch():
"""决策树算法Scratch实现"""
def __init__(self, feature_name, etype="gain"):
"""
feature_name:
每列特征名
etype:
可取值有
gain: 使用信息增益
ratio: 使用信息增益比
gini: 使用基尼系数
"""
self._root = None
self._fea_name = feature_name
self._etype = etype
def _build_tree(self, X, y):
"""
构建树
X:
用于构建子树的数据集
y:
X对应的标签
"""
# 子树只剩下一个类别时直接置为叶结点
if np.unique(y).shape[0] == 1:
return TreeNode(node_val=y[0])
max_gain, max_fea_idx, fea_val = self.try_split(
X, y, choice=self._etype)
feature_name = self._fea_name[max_fea_idx]
child_tree = dict()
        # split on the selected feature value and build a subtree for each side
        # dataset and labels for each subtree
child_X_l = X[X[:, max_fea_idx] <= fea_val]
child_y_l = y[X[:, max_fea_idx] <= fea_val]
child_X_r = X[X[:, max_fea_idx] > fea_val]
child_y_r = y[X[:, max_fea_idx] > fea_val]
        # build the subtrees
child_tree[0] = self._build_tree(child_X_l, child_y_l)
child_tree[1] = self._build_tree(child_X_r, child_y_r)
return TreeNode(max_fea_idx, feature_name=feature_name, child=child_tree, feature_val=fea_val)
def get_entropy(self, y):
"""
计算熵
y:
数据集标签
"""
entropy = 0
        # count the number of samples in each class
num_ck = np.unique(y, return_counts=True)
for i in range(len(num_ck[0])):
p = num_ck[1][i] / len(y)
entropy -= p * np.log2(p)
return entropy
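    # Worked example: for y = np.array([0, 0, 1, 1]) both classes have p = 0.5,
    # so the entropy is -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit.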
def get_conditional_entropy(self, x, y, value):
"""
计算条件熵
x:
数据集的某个特征对应的列向量,训练集中一行表示一个样本,列表示特征
y:
数据集标签
"""
cond_entropy = 0
X_l, X_r, y_l, y_r = self.split(
x.reshape(x.shape[0], 1), y, 0, value=value)
sub_entropy1 = self.get_entropy(y_l)
sub_entropy2 = self.get_entropy(y_r)
p1 = len(y_l) / len(y)
p2 = len(y_r) / len(y)
cond_entropy = p1*sub_entropy1 + p2*sub_entropy2
return cond_entropy
def get_gain(self, x, y, value):
"""
计算信息增益
x:
数据集的某个特征对应的列向量,训练集中一行表示一个样本,列表示特征
y:
数据集标签
"""
return self.get_entropy(y) - self.get_conditional_entropy(x, y, value=value)
    def get_gain_ratio(self, x, y, value):
        """
        Compute the information gain ratio.
        x:
            column vector of one feature of the dataset; rows are samples and columns are features
        y:
            dataset labels
        """
m = self.get_gain(x, y, value=value)
n = self.get_entropy(x)
if n != 0:
return m/n
else:
return 0
def split(self, X, y, d, value):
index_a = (X[:, d] <= value)
index_b = (X[:, d] > value)
return X[index_a], X[index_b], y[index_a], y[index_b]
def gini(self, y):
counter = Counter(y)
res = 1.0
for num in counter.values():
p = num/len(y)
res -= p**2
return res
def try_split(self, X, y, choice=None):
best_g = -np.inf
if choice == 'gini':
best_g = np.inf
best_d, best_v = -1, -1
for d in range(X.shape[1]):
sorted_index = np.argsort(X[:, d])
for i in range(1, len(X)):
if X[sorted_index[i-1], d] != X[sorted_index[i], d]:
v = (X[sorted_index[i-1], d]+X[sorted_index[i], d])/2
x_l, x_r, y_l, y_r = self.split(X, y, d, v)
length = len(y_l) + len(y_r)
if choice == 'gini':
g = len(y_l)/length*self.gini(y_l) + \
len(y_r)/length*self.gini(y_r)
if g < best_g:
best_g, best_d, best_v = g, d, v
elif choice == 'gain':
g = self.get_gain(X[:, d], y, value=v)
if g > best_g:
best_g, best_v, best_d = g, v, d
else:
                        g = self.get_gain_ratio(X[:, d], y, value=v)
if g > best_g:
best_g, best_v, best_d = g, v, d
return best_g, best_d, best_v
if __name__ == '__main__':
data = np.genfromtxt('train.csv', dtype=np.float64,
encoding='utf-8-sig', delimiter=',')
X = data[1:51, :-2]
y = data[1:51, -1]
tree = DecisionTreeScratch(etype='ratio', feature_name=np.array(['fixed acidity', ' volatile acidity', ' citric acid', ' residual sugar',
' chlorides', ' free sulfur dioxide', ' total sulfur dioxide', ' density', ' pH', ' sulphates', ' alcohol', ' quality', 'result']))
root = tree._build_tree(X, y)
root.DFSearch()
root.BFSearch()
|
[
"numpy.unique",
"collections.Counter",
"numpy.argsort",
"numpy.array",
"numpy.log2",
"numpy.genfromtxt"
] |
[((7410, 7495), 'numpy.genfromtxt', 'np.genfromtxt', (['"""train.csv"""'], {'dtype': 'np.float64', 'encoding': '"""utf-8-sig"""', 'delimiter': '""","""'}), "('train.csv', dtype=np.float64, encoding='utf-8-sig',\n delimiter=',')\n", (7423, 7495), True, 'import numpy as np\n'), ((4406, 4438), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (4415, 4438), True, 'import numpy as np\n'), ((5958, 5968), 'collections.Counter', 'Counter', (['y'], {}), '(y)\n', (5965, 5968), False, 'from collections import Counter\n'), ((6329, 6348), 'numpy.argsort', 'np.argsort', (['X[:, d]'], {}), '(X[:, d])\n', (6339, 6348), True, 'import numpy as np\n'), ((7627, 7852), 'numpy.array', 'np.array', (["['fixed acidity', ' volatile acidity', ' citric acid', ' residual sugar',\n ' chlorides', ' free sulfur dioxide', ' total sulfur dioxide',\n ' density', ' pH', ' sulphates', ' alcohol', ' quality', 'result']"], {}), "(['fixed acidity', ' volatile acidity', ' citric acid',\n ' residual sugar', ' chlorides', ' free sulfur dioxide',\n ' total sulfur dioxide', ' density', ' pH', ' sulphates', ' alcohol',\n ' quality', 'result'])\n", (7635, 7852), True, 'import numpy as np\n'), ((4547, 4557), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (4554, 4557), True, 'import numpy as np\n'), ((3476, 3488), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3485, 3488), True, 'import numpy as np\n')]
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/always-newbie161/probml-notebooks/blob/jax_vdvae/notebooks/vdvae_jax_cifar_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="cTSe7I6g45v8"
# This notebook shows a demo of working with VDVAE in JAX; the code used is from [vdvae-jax](https://github.com/j-towns/vdvae-jax) by [<NAME>](https://j-towns.github.io/)
# + [markdown] id="PxtpxTPEMS4C"
# ## Setup
# + id="ipHVirxUHTDJ"
from google.colab import auth
auth.authenticate_user()
# + colab={"base_uri": "https://localhost:8080/"} id="Z6gM2ytSHnO0" outputId="3e63de9d-6808-4cd9-eb1f-08996a6a7fed"
project_id = 'probml'
# !gcloud config set project {project_id}
# + id="a3__DVx74sso" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="579bc832-9028-49f3-c164-c426d32f66a6"
'''
this should be the format of the checkpoint filetree:
checkpoint_path >> model(optimizer)_checkpoint_file.
checkpoint_path_ema >> ema_checkpoint_file
'''
checkpoint_path='/content/vdvae_cifar10_2.86/latest_cifar10'
# checkpoints are downloaded at these paths.
# vdvae_cifar10_2.86/latest_cifar10 - optimizer+mode
# vdvae_cifar10_2.86/latest_cifar10_ema - ema_params'
# + id="4_RnWXhwIV85" colab={"base_uri": "https://localhost:8080/"} cellView="form" outputId="de8dedaf-bdd3-4fb7-99ee-7cfe96229d1c"
#@title Download checkpoints
# !gsutil cp -r gs://gsoc_bucket/vdvae_cifar10_2.86 ./
# !ls -l /content/vdvae_cifar10_2.86/latest_cifar10
# !ls -l /content/vdvae_cifar10_2.86/latest_cifar10_ema
# + colab={"base_uri": "https://localhost:8080/"} id="z3fThb8PIYHG" outputId="8406f5b2-cb50-42f5-aa78-4dc4f85afb02"
# !git clone https://github.com/j-towns/vdvae-jax.git
# + colab={"base_uri": "https://localhost:8080/"} id="053XPypoMobJ" outputId="0e415f07-00a4-4815-c2c5-288236ac2c98"
# %cd vdvae-jax
# + colab={"base_uri": "https://localhost:8080/"} id="X1hY6VqmNApP" outputId="41014f01-32bf-4377-85e5-e18328d2161a"
# !pip install --quiet flax
# + id="y013geSvWQUg"
import os
try:
os.environ['COLAB_TPU_ADDR']
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
except:
pass
# + colab={"base_uri": "https://localhost:8080/"} id="XDzBF1uZXOlu" outputId="929c368c-4610-49b0-bc94-76b891bc9b0e"
import jax
jax.local_devices()
# + [markdown] id="KrFas8alNwJ0"
# ## Model
# (for cifar10)
# + [markdown] id="4Mr89HhnTbaF"
# ### Setting up hyperparams
# + id="B0QZ6aKoP08z"
from hps import HPARAMS_REGISTRY, Hyperparams, add_vae_arguments
from train_helpers import setup_save_dirs
import argparse
import dataclasses
H = Hyperparams()
parser = argparse.ArgumentParser()
parser = add_vae_arguments(parser)
parser.set_defaults(hps= 'cifar10',conv_precision='highest')
H = dataclasses.replace(H, **vars(parser.parse_args([])))
hparam_sets = [x for x in H.hps.split(',') if x]
for hp_set in hparam_sets:
hps = HPARAMS_REGISTRY[hp_set]
parser.set_defaults(**hps)
H = dataclasses.replace(H, **vars(parser.parse_args([])))
H = setup_save_dirs(H)
# + [markdown] id="NisrtOPlfmef"
# This is a hierarchical model with multiple stochastic blocks, each built from multiple deterministic layers. You can understand the model skeleton by inspecting the encoder and decoder "strings"; a small parsing sketch follows below.
#
# **How to understand the string:**
# * blocks are comma separated
# * `axb` means there are `b` res blocks (sets of Conv layers) at spatial dimensions `axa`
# * `amb` means a mixin block which increases the internal image dims from `a` to `b` using **nearest neighbour upsampling** (used in the decoder)
# * `adb` means a block with an avg-pooling layer which reduces the dims from `a` to `b` (used in the encoder)
#
# for more understanding refer to this [paper](https://arxiv.org/abs/2011.10650)
#
#
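# + [markdown]
# As a quick illustration (not part of vdvae-jax), a block string such as the
# hypothetical `'32x11,32d16,16x6'` can be parsed into `(dims, op, arg)` tuples:
# +
def parse_blocks(spec):
    """Parse a vdvae-style block string into (dims, op, arg) tuples."""
    parsed = []
    for token in spec.split(','):
        for op in ('x', 'd', 'm'):  # res blocks / avg-pool down / mixin upsample
            if op in token:
                a, b = token.split(op)
                parsed.append((int(a), op, int(b)))
                break
    return parsed

parse_blocks('32x11,32d16,16x6')  # [(32, 'x', 11), (32, 'd', 16), (16, 'x', 6)]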
# + colab={"base_uri": "https://localhost:8080/"} id="-OyvG1KbP2qT" outputId="bc0a16e1-0cbb-4951-c5ef-e8310bc9deb4"
hparams = dataclasses.asdict(H)
for k in ['enc_blocks','dec_blocks','zdim','n_batch','device_count']:
print(f'{k}:{hparams[k]}')
# + id="FGD3wwRxvF3Y"
from utils import logger
from jax.interpreters.xla import DeviceArray
log = logger(H.logdir)
if H.log_wandb:
import wandb
def logprint(*args, pprint=False, **kwargs):
if len(args) > 0: log(*args)
wandb.log({k: np.array(x) if type(x) is DeviceArray else x for k, x in kwargs.items()})
wandb.init(config=dataclasses.asdict(H))
else:
logprint = log
# + colab={"base_uri": "https://localhost:8080/"} id="cABtXQvqSG2Z" outputId="2c43dea8-4c53-44cc-dd91-0c7577d07a7e"
import numpy as np
from jax import lax
import torch
import imageio
from PIL import Image
import glob
from torch.utils.data import DataLoader
from torchvision import transforms
np.random.seed(H.seed)
torch.manual_seed(H.seed)
H = dataclasses.replace(
H,
conv_precision = {'default': lax.Precision.DEFAULT,
'high': lax.Precision.HIGH,
'highest': lax.Precision.HIGHEST}[H.conv_precision],
seed_init =H.seed,
seed_sample=H.seed + 1,
seed_train =H.seed + 2 + H.host_id,
seed_eval =H.seed + 2 + H.host_count + H.host_id,
)
print('training model on ', H.dataset)
# + [markdown] id="Gs8bNNXpTMxZ"
# ### Downloading cifar10 dataset
# + colab={"base_uri": "https://localhost:8080/"} id="4An20_C-SvCT" outputId="023f5c9a-87fd-4ad8-abc3-0945b9fe4374"
# !./setup_cifar10.sh
# + [markdown] id="Js-LK-vojdSw"
# ### Setting up the model, data and the preprocess fn.
# + id="AylLXttfTSca"
from data import set_up_data
H, data_train, data_valid_or_test, preprocess_fn = set_up_data(H)
# + colab={"base_uri": "https://localhost:8080/"} id="GWsr1xszZ_te" outputId="a5ba8d4e-b088-46ec-ac31-b4fbd250618d"
from train_helpers import load_vaes
H = dataclasses.replace(H, restore_path=checkpoint_path)
optimizer, ema_params, start_epoch = load_vaes(H, logprint)
# + colab={"base_uri": "https://localhost:8080/"} id="PEH8BtbmaK4O" outputId="f32e3fa2-746e-404b-bbae-aaca80078568"
start_epoch # no.of.epochs trained
# + colab={"base_uri": "https://localhost:8080/"} id="9nAJ3EGLICEh" outputId="6a47c0b6-aaf0-45a3-8a1c-b0c6bb6b3d40"
# Hparams for the current model
hparams = dataclasses.asdict(H)
for i, k in enumerate(sorted(hparams)):
logprint(f'type=hparam, key={k}, value={getattr(H, k)}')
# + [markdown] id="HS2o9uFqjgyv"
# ### Evaluation
# + colab={"base_uri": "https://localhost:8080/"} id="jhiF_NjEuWQv" outputId="b0d88a47-5af0-4452-d1c0-88d90ef1a71e"
from train import run_test_eval
run_test_eval(H, ema_params, data_valid_or_test, preprocess_fn, logprint)
# + [markdown] id="tppWoc_hypdn"
# ### Function to save and show a batch of images given as a numpy array.
#
#
# + id="AJbKzeuzzGcS"
def zoom_in(fname, shape):
im = Image.open(fname)
resized_im = im.resize(shape)
resized_im.save(fname)
def save_n_show(images, order, image_shape, fname, zoom=True, show=False):
n_rows, n_images = order
im = images.reshape((n_rows, n_images, *image_shape))\
.transpose([0, 2, 1, 3, 4])\
.reshape([n_rows * image_shape[0],
n_images * image_shape[1], 3])
print(f'printing samples to {fname}')
imageio.imwrite(fname, im)
if zoom:
zoom_in(fname, (640, 64)) # w=640, h=64
if show:
display(Image.open(fname))
# + [markdown] id="9TlNptkdd5ME"
# ## Generations
# + id="EcnvaTn3iJfo"
n_images = 10
num_temperatures = 3
image_shape = [H.image_size,H.image_size,H.image_channels]
H = dataclasses.replace(H, num_images_visualize=n_images, num_temperatures_visualize=num_temperatures)
# + [markdown] id="LDHUzIgBbjuX"
# Images will be saved in the following dir
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="EhJ17q1dfSNu" outputId="fb923dee-dc4d-4e68-e2c5-20f3f41874c1"
H.save_dir
# + [markdown] id="Xm_BYJYjiuzt"
# As the model params are replicated over multiple devices, an unreplicated copy of them is made for sampling and generation.
# + id="VJbqZRxWilR9"
from jax import random
from vae import VAE
from flax import jax_utils
from functools import partial
rng = random.PRNGKey(H.seed_sample)
ema_apply = partial(VAE(H).apply,{'params': jax_utils.unreplicate(ema_params)})
forward_uncond_samples = partial(ema_apply, method=VAE(H).forward_uncond_samples)
# + colab={"base_uri": "https://localhost:8080/"} id="XF5dvNqeRcIC" outputId="477884a0-d016-43c3-96ac-26b3cfd65d55"
temperatures = [1.0, 0.9, 0.8, 0.7]
for t in temperatures[:H.num_temperatures_visualize]:
im = forward_uncond_samples(n_images, rng, t=t)
im = np.asarray(im)
save_n_show(im, [1,n_images], image_shape, f'{H.save_dir}/generations-tem-{t}.png')
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="RdypV3PJfyfN" outputId="bc5042cf-54c7-4380-e2f2-d36ab4951d65"
for t in temperatures[:H.num_temperatures_visualize]:
print("="*25)
print(f"Generation of {n_images} new images for t={t}")
print("="*25)
fname = f'{H.save_dir}/generations-tem-{t}.png'
display(Image.open(fname))
# + [markdown] id="89M1-l8Ogd2k"
# ## Reconstructions
# + id="014yXaJfgfhq"
n_images = 10
image_shape = [H.image_size,H.image_size,H.image_channels]
# + [markdown] id="z5xtClDEYTI-"
# Preprocessing images before getting the latents
# + id="81EExYe0glPu"
from train import get_sample_for_visualization
viz_batch_original, viz_batch_processed = get_sample_for_visualization(
data_valid_or_test, preprocess_fn, n_images, H.dataset)
# + [markdown] id="eDENCERSiMm6"
# Getting the partial functions from the model methods
# + id="vPpzIoM_hQHK"
forward_get_latents = partial(ema_apply, method=VAE(H).forward_get_latents)
forward_samples_set_latents = partial(
ema_apply, method=VAE(H).forward_samples_set_latents)
# + [markdown] id="AnNFN7S7YZe1"
# Getting latents of different levels.
# + id="nt2_Zjqlha1U"
zs = [s['z'] for s in forward_get_latents(viz_batch_processed, rng)]
# + [markdown] id="7RA8e6qJYcqF"
# The number of latent variables used depends on `H.num_variables_visualize`; altering it yields reconstructions at different resolutions.
# + id="ThgwoF6ihe9e"
recons = []
lv_points = np.floor(np.linspace(0, 1, H.num_variables_visualize + 2) * len(zs)).astype(int)[1:-1]
for i in lv_points:
recons.append(forward_samples_set_latents(n_images, zs[:i], rng, t=0.1))
# + [markdown] id="iawVwy7XYp9Z"
# Original Images
# + colab={"base_uri": "https://localhost:8080/", "height": 115} id="ih0D1sfRhy6F" outputId="8696bbaf-2a7c-4d89-9d7d-ebea19d37e7a"
orig_im = np.array(viz_batch_original)
print("Original test images")
save_n_show(orig_im, [1, n_images], image_shape, f'{H.save_dir}/orig_test.png', show=True)
# + [markdown] id="vbFgprJuYr7R"
# Reconstructions.
# + colab={"base_uri": "https://localhost:8080/", "height": 809} id="Ol7rNCgfh57R" outputId="e8d562cf-206e-42ae-a84b-5a5fd02489e8"
for i,r in enumerate(recons):
r = np.array(r)
print("="*25)
print(f"Generation of {n_images} new images for {i+1}x resolution")
print("="*25)
fname = f'{H.save_dir}/recon_test-res-{i+1}x.png'
save_n_show(r, [1, n_images], image_shape, fname, show=True)
|
[
"hps.Hyperparams",
"jax.local_devices",
"google.colab.auth.authenticate_user",
"numpy.array",
"data.set_up_data",
"train_helpers.setup_save_dirs",
"train.get_sample_for_visualization",
"jax.random.PRNGKey",
"dataclasses.asdict",
"argparse.ArgumentParser",
"numpy.asarray",
"flax.jax_utils.unreplicate",
"numpy.linspace",
"numpy.random.seed",
"train.run_test_eval",
"jax.tools.colab_tpu.setup_tpu",
"vae.VAE",
"utils.logger",
"dataclasses.replace",
"train_helpers.load_vaes",
"torch.manual_seed",
"PIL.Image.open",
"imageio.imwrite",
"hps.add_vae_arguments"
] |
[((849, 873), 'google.colab.auth.authenticate_user', 'auth.authenticate_user', ([], {}), '()\n', (871, 873), False, 'from google.colab import auth\n'), ((2613, 2632), 'jax.local_devices', 'jax.local_devices', ([], {}), '()\n', (2630, 2632), False, 'import jax\n'), ((2927, 2940), 'hps.Hyperparams', 'Hyperparams', ([], {}), '()\n', (2938, 2940), False, 'from hps import HPARAMS_REGISTRY, Hyperparams, add_vae_arguments\n'), ((2950, 2975), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2973, 2975), False, 'import argparse\n'), ((2985, 3010), 'hps.add_vae_arguments', 'add_vae_arguments', (['parser'], {}), '(parser)\n', (3002, 3010), False, 'from hps import HPARAMS_REGISTRY, Hyperparams, add_vae_arguments\n'), ((3337, 3355), 'train_helpers.setup_save_dirs', 'setup_save_dirs', (['H'], {}), '(H)\n', (3352, 3355), False, 'from train_helpers import setup_save_dirs\n'), ((4207, 4228), 'dataclasses.asdict', 'dataclasses.asdict', (['H'], {}), '(H)\n', (4225, 4228), False, 'import dataclasses\n'), ((4427, 4443), 'utils.logger', 'logger', (['H.logdir'], {}), '(H.logdir)\n', (4433, 4443), False, 'from utils import logger\n'), ((5020, 5042), 'numpy.random.seed', 'np.random.seed', (['H.seed'], {}), '(H.seed)\n', (5034, 5042), True, 'import numpy as np\n'), ((5043, 5068), 'torch.manual_seed', 'torch.manual_seed', (['H.seed'], {}), '(H.seed)\n', (5060, 5068), False, 'import torch\n'), ((5073, 5366), 'dataclasses.replace', 'dataclasses.replace', (['H'], {'conv_precision': "{'default': lax.Precision.DEFAULT, 'high': lax.Precision.HIGH, 'highest':\n lax.Precision.HIGHEST}[H.conv_precision]", 'seed_init': 'H.seed', 'seed_sample': '(H.seed + 1)', 'seed_train': '(H.seed + 2 + H.host_id)', 'seed_eval': '(H.seed + 2 + H.host_count + H.host_id)'}), "(H, conv_precision={'default': lax.Precision.DEFAULT,\n 'high': lax.Precision.HIGH, 'highest': lax.Precision.HIGHEST}[H.\n conv_precision], seed_init=H.seed, seed_sample=H.seed + 1, seed_train=H\n .seed + 2 + H.host_id, seed_eval=H.seed + 2 + H.host_count + H.host_id)\n", (5092, 5366), False, 'import dataclasses\n'), ((5902, 5916), 'data.set_up_data', 'set_up_data', (['H'], {}), '(H)\n', (5913, 5916), False, 'from data import set_up_data\n'), ((6074, 6126), 'dataclasses.replace', 'dataclasses.replace', (['H'], {'restore_path': 'checkpoint_path'}), '(H, restore_path=checkpoint_path)\n', (6093, 6126), False, 'import dataclasses\n'), ((6164, 6186), 'train_helpers.load_vaes', 'load_vaes', (['H', 'logprint'], {}), '(H, logprint)\n', (6173, 6186), False, 'from train_helpers import load_vaes\n'), ((6498, 6519), 'dataclasses.asdict', 'dataclasses.asdict', (['H'], {}), '(H)\n', (6516, 6519), False, 'import dataclasses\n'), ((6821, 6894), 'train.run_test_eval', 'run_test_eval', (['H', 'ema_params', 'data_valid_or_test', 'preprocess_fn', 'logprint'], {}), '(H, ema_params, data_valid_or_test, preprocess_fn, logprint)\n', (6834, 6894), False, 'from train import run_test_eval\n'), ((7779, 7881), 'dataclasses.replace', 'dataclasses.replace', (['H'], {'num_images_visualize': 'n_images', 'num_temperatures_visualize': 'num_temperatures'}), '(H, num_images_visualize=n_images,\n num_temperatures_visualize=num_temperatures)\n', (7798, 7881), False, 'import dataclasses\n'), ((8395, 8424), 'jax.random.PRNGKey', 'random.PRNGKey', (['H.seed_sample'], {}), '(H.seed_sample)\n', (8409, 8424), False, 'from jax import random\n'), ((9661, 9750), 'train.get_sample_for_visualization', 'get_sample_for_visualization', (['data_valid_or_test', 'preprocess_fn', 'n_images', 'H.dataset'], {}), '(data_valid_or_test, preprocess_fn, n_images, H\n .dataset)\n', (9689, 9750), False, 'from train import get_sample_for_visualization\n'), ((10804, 10832), 'numpy.array', 'np.array', (['viz_batch_original'], {}), '(viz_batch_original)\n', (10812, 10832), True, 'import numpy as np\n'), ((2438, 2469), 'jax.tools.colab_tpu.setup_tpu', 'jax.tools.colab_tpu.setup_tpu', ([], {}), '()\n', (2467, 2469), False, 'import jax\n'), ((7066, 7083), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (7076, 7083), False, 'from PIL import Image\n'), ((7479, 7505), 'imageio.imwrite', 'imageio.imwrite', (['fname', 'im'], {}), '(fname, im)\n', (7494, 7505), False, 'import imageio\n'), ((8857, 8871), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (8867, 8871), True, 'import numpy as np\n'), ((11175, 11186), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (11183, 11186), True, 'import numpy as np\n'), ((8446, 8452), 'vae.VAE', 'VAE', (['H'], {}), '(H)\n', (8449, 8452), False, 'from vae import VAE\n'), ((8470, 8503), 'flax.jax_utils.unreplicate', 'jax_utils.unreplicate', (['ema_params'], {}), '(ema_params)\n', (8491, 8503), False, 'from flax import jax_utils\n'), ((9294, 9311), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (9304, 9311), False, 'from PIL import Image\n'), ((4677, 4698), 'dataclasses.asdict', 'dataclasses.asdict', (['H'], {}), '(H)\n', (4695, 4698), False, 'import dataclasses\n'), ((7586, 7603), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (7596, 7603), False, 'from PIL import Image\n'), ((8558, 8564), 'vae.VAE', 'VAE', (['H'], {}), '(H)\n', (8561, 8564), False, 'from vae import VAE\n'), ((9915, 9921), 'vae.VAE', 'VAE', (['H'], {}), '(H)\n', (9918, 9921), False, 'from vae import VAE\n'), ((10008, 10014), 'vae.VAE', 'VAE', (['H'], {}), '(H)\n', (10011, 10014), False, 'from vae import VAE\n'), ((4581, 4592), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4589, 4592), True, 'import numpy as np\n'), ((10437, 10485), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(H.num_variables_visualize + 2)'], {}), '(0, 1, H.num_variables_visualize + 2)\n', (10448, 10485), True, 'import numpy as np\n')]
|
"""Performs face alignment and stores face thumbnails in the output directory."""
# MIT License
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
import align.detect_face
import random
from time import sleep
def main(curr_dir, pnet, rnet, onet, image_path, margin=32, image_size=160, detect_multiple_faces=False):
minsize = 20 # minimum size of face
    threshold = [ 0.6, 0.7, 0.7 ]  # three steps' threshold
factor = 0.709 # scale factor
files = []
filename = os.path.splitext(os.path.split(image_path)[1])[0]
output_filename = os.path.join(curr_dir, filename+'_face.jpg')
print(image_path)
if not os.path.exists(output_filename):
try:
img = misc.imread(image_path)
except (IOError, ValueError, IndexError) as e:
errorMessage = '{}: {}'.format(image_path, e)
print(errorMessage)
else:
if img.ndim<2:
print('Unable to align "%s"' % image_path)
return 0
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:,:,0:3]
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
if nrof_faces>0:
det = bounding_boxes[:,0:4]
det_arr = []
img_size = np.asarray(img.shape)[0:2]
if nrof_faces>1:
if detect_multiple_faces:
for i in range(nrof_faces):
det_arr.append(np.squeeze(det[i]))
else:
bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
img_center = img_size / 2
offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
det_arr.append(det[index,:])
else:
det_arr.append(np.squeeze(det))
for i, det in enumerate(det_arr):
det = np.squeeze(det)
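                # expand the detected box by margin/2 on each side, clipped to the image bounds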
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0]-margin/2, 0)
bb[1] = np.maximum(det[1]-margin/2, 0)
bb[2] = np.minimum(det[2]+margin/2, img_size[1])
bb[3] = np.minimum(det[3]+margin/2, img_size[0])
cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
scaled = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
filename_base, file_extension = os.path.splitext(output_filename)
if detect_multiple_faces:
output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
else:
output_filename_n = "{}{}".format(filename_base, file_extension)
files.append(output_filename_n)
misc.imsave(output_filename_n, scaled)
else:
print('Unable to align "%s"' % image_path)
return 0
return files
|
[
"os.path.exists",
"numpy.minimum",
"numpy.power",
"scipy.misc.imsave",
"os.path.join",
"numpy.asarray",
"os.path.splitext",
"os.path.split",
"numpy.squeeze",
"numpy.argmax",
"scipy.misc.imread",
"numpy.zeros",
"numpy.vstack",
"scipy.misc.imresize",
"numpy.maximum",
"facenet.to_rgb"
] |
[((1824, 1870), 'os.path.join', 'os.path.join', (['curr_dir', "(filename + '_face.jpg')"], {}), "(curr_dir, filename + '_face.jpg')\n", (1836, 1870), False, 'import os\n'), ((1902, 1933), 'os.path.exists', 'os.path.exists', (['output_filename'], {}), '(output_filename)\n', (1916, 1933), False, 'import os\n'), ((1966, 1989), 'scipy.misc.imread', 'misc.imread', (['image_path'], {}), '(image_path)\n', (1977, 1989), False, 'from scipy import misc\n'), ((1769, 1794), 'os.path.split', 'os.path.split', (['image_path'], {}), '(image_path)\n', (1782, 1794), False, 'import os\n'), ((2312, 2331), 'facenet.to_rgb', 'facenet.to_rgb', (['img'], {}), '(img)\n', (2326, 2331), False, 'import facenet\n'), ((2655, 2676), 'numpy.asarray', 'np.asarray', (['img.shape'], {}), '(img.shape)\n', (2665, 2676), True, 'import numpy as np\n'), ((3562, 3577), 'numpy.squeeze', 'np.squeeze', (['det'], {}), '(det)\n', (3572, 3577), True, 'import numpy as np\n'), ((3603, 3630), 'numpy.zeros', 'np.zeros', (['(4)'], {'dtype': 'np.int32'}), '(4, dtype=np.int32)\n', (3611, 3630), True, 'import numpy as np\n'), ((3659, 3693), 'numpy.maximum', 'np.maximum', (['(det[0] - margin / 2)', '(0)'], {}), '(det[0] - margin / 2, 0)\n', (3669, 3693), True, 'import numpy as np\n'), ((3718, 3752), 'numpy.maximum', 'np.maximum', (['(det[1] - margin / 2)', '(0)'], {}), '(det[1] - margin / 2, 0)\n', (3728, 3752), True, 'import numpy as np\n'), ((3777, 3821), 'numpy.minimum', 'np.minimum', (['(det[2] + margin / 2)', 'img_size[1]'], {}), '(det[2] + margin / 2, img_size[1])\n', (3787, 3821), True, 'import numpy as np\n'), ((3846, 3890), 'numpy.minimum', 'np.minimum', (['(det[3] + margin / 2)', 'img_size[0]'], {}), '(det[3] + margin / 2, img_size[0])\n', (3856, 3890), True, 'import numpy as np\n'), ((3977, 4044), 'scipy.misc.imresize', 'misc.imresize', (['cropped', '(image_size, image_size)'], {'interp': '"""bilinear"""'}), "(cropped, (image_size, image_size), interp='bilinear')\n", (3990, 4044), False, 'from scipy import misc\n'), ((4118, 4151), 'os.path.splitext', 'os.path.splitext', (['output_filename'], {}), '(output_filename)\n', (4134, 4151), False, 'import os\n'), ((4480, 4518), 'scipy.misc.imsave', 'misc.imsave', (['output_filename_n', 'scaled'], {}), '(output_filename_n, scaled)\n', (4491, 4518), False, 'from scipy import misc\n'), ((3070, 3175), 'numpy.vstack', 'np.vstack', (['[(det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:, 3]) / 2 -\n img_center[0]]'], {}), '([(det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:,\n 3]) / 2 - img_center[0]])\n', (3079, 3175), True, 'import numpy as np\n'), ((3268, 3324), 'numpy.argmax', 'np.argmax', (['(bounding_box_size - offset_dist_squared * 2.0)'], {}), '(bounding_box_size - offset_dist_squared * 2.0)\n', (3277, 3324), True, 'import numpy as np\n'), ((3468, 3483), 'numpy.squeeze', 'np.squeeze', (['det'], {}), '(det)\n', (3478, 3483), True, 'import numpy as np\n'), ((3211, 3233), 'numpy.power', 'np.power', (['offsets', '(2.0)'], {}), '(offsets, 2.0)\n', (3219, 3233), True, 'import numpy as np\n'), ((2856, 2874), 'numpy.squeeze', 'np.squeeze', (['det[i]'], {}), '(det[i])\n', (2866, 2874), True, 'import numpy as np\n')]
|
from tester.ni_usb_6211 import NiUsb6211
import numpy as np
OUTPUT_READ_CHANNEL = "ai0"
VCC_READ_CHANNEL = "ai1"
TOLERANCE = 0.001
def test_find_devices():
devices = NiUsb6211.find_devices()
assert type(devices) == list, "Not a list!"
if len(devices) > 0:
assert type(devices[0]) == str, "An element is not a string!"
def test_integration():
niUsb6211 = NiUsb6211(output_read_channel=OUTPUT_READ_CHANNEL, vcc_read_channel=VCC_READ_CHANNEL)
assert niUsb6211, "The object does not exist!"
assert niUsb6211.init() == None, "Initialization not successful!"
# Read samples.
# Read only one channel.
samples = niUsb6211.read_samples(1, channels=0)
assert samples, "No sample returned!"
assert samples.shape == (1,), "Wrong shape!"
# Read the other channel.
samples = niUsb6211.read_samples(1, channels=1)
assert samples, "No sample returned!"
assert samples.shape == (1,), "Wrong shape!"
# Read both channels, 1 sample.
samples = niUsb6211.read_samples(1, channels=[0, 1])
assert np.all(samples), "No samples returned!"
assert samples.shape == (2, 1), "Wrong shape!"
# Read both channels, many samples.
samples = niUsb6211.read_samples(10, channels=[0, 1])
assert np.all(samples), "No samples returned!"
assert samples.shape == (2, 10), "Wrong shape!"
# Writing
sample = 1.0
niUsb6211.write_sample(sample)
assert abs(niUsb6211.get_measured_output_voltage() - sample) <= TOLERANCE, f"Output reading ({niUsb6211.get_measured_output_voltage()}) does not match the written value ({sample})."
measurement = niUsb6211.get_measured_output_voltage()
vref = niUsb6211.get_reference_voltage()
def is_number(x):
try:
float(x)
return True
except:
return False
    assert is_number(measurement), f"get_measured_output_voltage() output ({measurement}) not a number!"
    assert is_number(vref), f"get_reference_voltage() output ({vref}) not a number!"
# Test different argument types.
sample = 1
niUsb6211.write_sample(sample)
sample = "1"
niUsb6211.write_sample(sample)
# niUsb6211.read_samples(10, channels=[0, 1])
niUsb6211.deinit()
|
[
"tester.ni_usb_6211.NiUsb6211",
"numpy.all",
"tester.ni_usb_6211.NiUsb6211.find_devices"
] |
[((172, 196), 'tester.ni_usb_6211.NiUsb6211.find_devices', 'NiUsb6211.find_devices', ([], {}), '()\n', (194, 196), False, 'from tester.ni_usb_6211 import NiUsb6211\n'), ((382, 472), 'tester.ni_usb_6211.NiUsb6211', 'NiUsb6211', ([], {'output_read_channel': 'OUTPUT_READ_CHANNEL', 'vcc_read_channel': 'VCC_READ_CHANNEL'}), '(output_read_channel=OUTPUT_READ_CHANNEL, vcc_read_channel=\n VCC_READ_CHANNEL)\n', (391, 472), False, 'from tester.ni_usb_6211 import NiUsb6211\n'), ((1067, 1082), 'numpy.all', 'np.all', (['samples'], {}), '(samples)\n', (1073, 1082), True, 'import numpy as np\n'), ((1268, 1283), 'numpy.all', 'np.all', (['samples'], {}), '(samples)\n', (1274, 1283), True, 'import numpy as np\n')]
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Defines a class for the COMPAS dataset."""
import pandas as pd
import numpy as np
from .base_wrapper import BasePerformanceDatasetWrapper
from tempeh.constants import FeatureType, Tasks, DataTypes, ClassVars, CompasDatasets # noqa
def compas_data_loader():
""" Downloads COMPAS data from the propublica GitHub repository.
:return: pandas.DataFrame with columns 'sex', 'age', 'juv_fel_count', 'juv_misd_count',
'juv_other_count', 'priors_count', 'two_year_recid', 'age_cat_25 - 45',
'age_cat_Greater than 45', 'age_cat_Less than 25', 'race_African-American',
'race_Caucasian', 'c_charge_degree_F', 'c_charge_degree_M'
"""
data = pd.read_csv("https://raw.githubusercontent.com/propublica/compas-analysis/master/compas-scores-two-years.csv") # noqa: E501
# filter similar to
# https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
data = data[(data['days_b_screening_arrest'] <= 30) &
(data['days_b_screening_arrest'] >= -30) &
(data['is_recid'] != -1) &
(data['c_charge_degree'] != "O") &
(data['score_text'] != "N/A")]
# filter out all records except the ones with the most common two races
data = data[(data['race'] == 'African-American') | (data['race'] == 'Caucasian')]
# Select relevant columns for machine learning.
# We explicitly leave in age_cat to allow linear classifiers to be non-linear in age
data = data[["sex", "age", "age_cat", "race", "juv_fel_count", "juv_misd_count",
"juv_other_count", "priors_count", "c_charge_degree", "two_year_recid"]]
# map string representation of feature "sex" to 0 for Female and 1 for Male
data = data.assign(sex=(data["sex"] == "Male") * 1)
data = pd.get_dummies(data)
return data
def recover_categorical_encoding_for_compas_race(data):
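    # columns 9 and 10 hold the one-hot race indicators; join them back into a single string label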
return np.array(list(map(lambda tuple: "".join(list(tuple)), zip(
[
"African-American" if is_column_true else "" for is_column_true in data[:, 9]
],
[
"Caucasian" if is_column_true else "" for is_column_true in data[:, 10]
]))))
class CompasPerformanceDatasetWrapper(BasePerformanceDatasetWrapper):
"""COMPAS Datasets"""
dataset_map = {
CompasDatasets.COMPAS: (compas_data_loader, "two_year_recid",
[FeatureType.NOMINAL] + [FeatureType.CONTINUOUS] * 5 +
[FeatureType.NOMINAL] * 8)
}
metadata_map = {
CompasDatasets.COMPAS: (Tasks.BINARY, DataTypes.TABULAR, (6172, 14))
}
load_function = None
feature_type = None
target_col = None
def __init__(self, drop_race=True, drop_sex=True):
"""Initializes the COMPAS dataset """
bunch = type(self).load_function()
target = bunch[self._target_col].astype(int)
bunch.drop(self._target_col, axis=1, inplace=True)
bunch = bunch.astype(float)
super().__init__(bunch, target, nrows=self._size[0], data_t=self._feature_type)
self._features = list(bunch)
if drop_race:
self._race_train = recover_categorical_encoding_for_compas_race(self._X_train)
self._race_test = recover_categorical_encoding_for_compas_race(self._X_test)
# race is in columns 9-10 because the super class constructor removes the target
self._X_train = np.delete(self._X_train, np.s_[9:11], axis=1)
self._X_test = np.delete(self._X_test, np.s_[9:11], axis=1)
            del self._features[9:11]
if drop_sex:
self._sex_train = self._X_train[:, 0]
self._sex_test = self._X_test[:, 0]
self._X_train = np.delete(self._X_train, 0, axis=1)
self._X_test = np.delete(self._X_test, 0, axis=1)
            del self._features[0]
self._target_names = np.unique(target)
@classmethod
def generate_dataset_class(cls, name, nrows=None):
"""Generate a dataset class.
:param name: the name of the dataset
:type name: str
:param nrows: number of rows to resize the dataset to
:type nrows: int
:rtype: cls
"""
load_function, target_col, feature_type = cls.dataset_map[name]
task, data_type, size = cls.metadata_map[name]
if nrows is not None:
size = (nrows, size[1])
class_name = name.title() + "PerformanceDatasetWrapper"
return type(class_name, (cls, ), {ClassVars.LOAD_FUNCTION: load_function,
ClassVars.FEATURE_TYPE: feature_type,
ClassVars.TASK: task,
ClassVars.DATA_TYPE: data_type,
ClassVars.SIZE: size,
ClassVars.TARGET_COL: target_col})
|
[
"pandas.get_dummies",
"numpy.delete",
"numpy.unique",
"pandas.read_csv"
] |
[((768, 888), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/propublica/compas-analysis/master/compas-scores-two-years.csv"""'], {}), "(\n 'https://raw.githubusercontent.com/propublica/compas-analysis/master/compas-scores-two-years.csv'\n )\n", (779, 888), True, 'import pandas as pd\n'), ((1888, 1908), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {}), '(data)\n', (1902, 1908), True, 'import pandas as pd\n'), ((4004, 4021), 'numpy.unique', 'np.unique', (['target'], {}), '(target)\n', (4013, 4021), True, 'import numpy as np\n'), ((3536, 3581), 'numpy.delete', 'np.delete', (['self._X_train', 'np.s_[9:11]'], {'axis': '(1)'}), '(self._X_train, np.s_[9:11], axis=1)\n', (3545, 3581), True, 'import numpy as np\n'), ((3609, 3653), 'numpy.delete', 'np.delete', (['self._X_test', 'np.s_[9:11]'], {'axis': '(1)'}), '(self._X_test, np.s_[9:11], axis=1)\n', (3618, 3653), True, 'import numpy as np\n'), ((3841, 3876), 'numpy.delete', 'np.delete', (['self._X_train', '(0)'], {'axis': '(1)'}), '(self._X_train, 0, axis=1)\n', (3850, 3876), True, 'import numpy as np\n'), ((3904, 3938), 'numpy.delete', 'np.delete', (['self._X_test', '(0)'], {'axis': '(1)'}), '(self._X_test, 0, axis=1)\n', (3913, 3938), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.interpolate import CubicSpline
class WaypointTraj(object):
"""
"""
def __init__(self, points):
"""
This is the constructor for the Trajectory object. A fresh trajectory
object will be constructed before each mission. For a waypoint
trajectory, the input argument is an array of 3D destination
coordinates. You are free to choose the times of arrival and the path
taken between the points in any way you like.
You should initialize parameters and pre-compute values such as
polynomial coefficients here.
Inputs:
points, (N, 3) array of N waypoint coordinates in 3D
"""
self.v = 2 #m/s
self.points = points
self.t = np.zeros(len(points),)
        # a single waypoint needs no timing; otherwise assign each segment a duration proportional to its length
        if np.shape(self.points) == (3,) or np.shape(self.points) == (1, 3):
            pass
        else:
for i in range(len(self.t)-1):
self.t[(i+1)] = np.linalg.norm((points[(i+1)]-points[i]))/self.v
self.point_t = np.zeros(len(points),)
for i in range(int(len(self.t)-1)):
self.point_t[(i+1)] = self.point_t[i] + self.t[i+1]
self.f = CubicSpline(self.point_t,self.points,axis = 0)
def update(self, t):
"""
Given the present time, return the desired flat output and derivatives.
Inputs
t, time, s
Outputs
flat_output, a dict describing the present desired flat outputs with keys
x, position, m
x_dot, velocity, m/s
x_ddot, acceleration, m/s**2
x_dddot, jerk, m/s**3
x_ddddot, snap, m/s**4
yaw, yaw angle, rad
yaw_dot, yaw rate, rad/s
"""
x = np.zeros((3,))
x_dot = np.zeros((3,))
x_ddot = np.zeros((3,))
x_dddot = np.zeros((3,))
x_ddddot = np.zeros((3,))
yaw = 0
yaw_dot = 0
        if np.shape(self.points) == (3,) or np.shape(self.points) == (1, 3):
            x = np.reshape(self.points, (3,))
        else:
if t > self.point_t[-1]:
x = self.points[-1]
else:
x = self.f(t)
flat_output = { 'x':x, 'x_dot':x_dot, 'x_ddot':x_ddot, 'x_dddot':x_dddot, 'x_ddddot':x_ddddot,
'yaw':yaw, 'yaw_dot':yaw_dot}
return flat_output
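
# A minimal usage sketch (not part of the original module): build a trajectory
# through three hypothetical waypoints and print the commanded position at t = 1 s.
if __name__ == '__main__':
    waypoints = np.array([[0.0, 0.0, 0.0],
                          [1.0, 0.0, 0.0],
                          [1.0, 1.0, 0.0]])
    traj = WaypointTraj(waypoints)
    print(traj.update(1.0)['x'])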
|
[
"numpy.reshape",
"scipy.interpolate.CubicSpline",
"numpy.zeros",
"numpy.linalg.norm",
"numpy.shape"
] |
[((1920, 1934), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (1928, 1934), True, 'import numpy as np\n'), ((1954, 1968), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (1962, 1968), True, 'import numpy as np\n'), ((1988, 2002), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (1996, 2002), True, 'import numpy as np\n'), ((2022, 2036), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (2030, 2036), True, 'import numpy as np\n'), ((2056, 2070), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (2064, 2070), True, 'import numpy as np\n'), ((2209, 2238), 'numpy.reshape', 'np.reshape', (['self.points', '(3,)'], {}), '(self.points, (3,))\n', (2219, 2238), True, 'import numpy as np\n'), ((821, 842), 'numpy.shape', 'np.shape', (['self.points'], {}), '(self.points)\n', (829, 842), True, 'import numpy as np\n'), ((854, 875), 'numpy.shape', 'np.shape', (['self.points'], {}), '(self.points)\n', (862, 875), True, 'import numpy as np\n'), ((1294, 1340), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.point_t', 'self.points'], {'axis': '(0)'}), '(self.point_t, self.points, axis=0)\n', (1305, 1340), False, 'from scipy.interpolate import CubicSpline\n'), ((2128, 2149), 'numpy.shape', 'np.shape', (['self.points'], {}), '(self.points)\n', (2136, 2149), True, 'import numpy as np\n'), ((2161, 2182), 'numpy.shape', 'np.shape', (['self.points'], {}), '(self.points)\n', (2169, 2182), True, 'import numpy as np\n'), ((916, 937), 'numpy.shape', 'np.shape', (['self.points'], {}), '(self.points)\n', (924, 937), True, 'import numpy as np\n'), ((949, 970), 'numpy.shape', 'np.shape', (['self.points'], {}), '(self.points)\n', (957, 970), True, 'import numpy as np\n'), ((2251, 2272), 'numpy.shape', 'np.shape', (['self.points'], {}), '(self.points)\n', (2259, 2272), True, 'import numpy as np\n'), ((2284, 2305), 'numpy.shape', 'np.shape', (['self.points'], {}), '(self.points)\n', (2292, 2305), True, 'import numpy as np\n'), ((1056, 1097), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[i + 1] - points[i])'], {}), '(points[i + 1] - points[i])\n', (1070, 1097), True, 'import numpy as np\n')]
|
# Question 07, Lab 07
# AB Satyaprakash, 180123062
# imports
import pandas as pd
import numpy as np
# functions
def f(t, y):
return y - t**2 + 1
def F(t):
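    # exact solution of y' = y - t**2 + 1 with y(0) = 0.5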
return (t+1)**2 - 0.5*np.exp(t)
def RungeKutta4(t, y, h):
k1 = f(t, y)
k2 = f(t+h/2, y+h*k1/2)
k3 = f(t+h/2, y+h*k2/2)
k4 = f(t+h, y+h*k3)
return y + h*(k1 + 2*k2 + 2*k3 + k4)/6
def AdamsBashforth(t, y, h):
return y[-1] + h*(55*f(t[-1], y[-1]) - 59*f(t[-2], y[-2]) + 37*f(t[-3], y[-3]) - 9*f(t[-4], y[-4]))/24
def AdamsMoulton(t, y, h):
t1 = t[-1]+h
y1 = AdamsBashforth(t, y, h)
return y[-1] + h*(9*f(t1, y1) + 19*f(t[-1], y[-1]) - 5*f(t[-2], y[-2]) + f(t[-3], y[-3]))/24
# program body
t = [0]
y = [0.5]
h = 0.2
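# bootstrap the 4-step Adams methods with three RK4 starting values from t = 0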
y.append(RungeKutta4(t[-1], y[-1], h))
t.append(round(t[-1]+h, 1))
y.append(RungeKutta4(t[-1], y[-1], h))
t.append(round(t[-1]+h, 1))
y.append(RungeKutta4(t[-1], y[-1], h))
t.append(round(t[-1]+h, 1))
yact = []
while t[-1] < 2:
    y.append(AdamsMoulton(t, y, h))
t.append(round(t[-1]+h, 1))
for T in t:
yact.append(F(T))
df = pd.DataFrame()
df["Adam's Predictor-Corrector Method"] = pd.Series(y)
df['Actual Value'] = pd.Series(yact)
df.set_index(pd.Series(t), inplace=True)
print(df)
|
[
"pandas.DataFrame",
"numpy.exp",
"pandas.Series"
] |
[((1063, 1077), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1075, 1077), True, 'import pandas as pd\n'), ((1120, 1132), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (1129, 1132), True, 'import pandas as pd\n'), ((1154, 1169), 'pandas.Series', 'pd.Series', (['yact'], {}), '(yact)\n', (1163, 1169), True, 'import pandas as pd\n'), ((1183, 1195), 'pandas.Series', 'pd.Series', (['t'], {}), '(t)\n', (1192, 1195), True, 'import pandas as pd\n'), ((191, 200), 'numpy.exp', 'np.exp', (['t'], {}), '(t)\n', (197, 200), True, 'import numpy as np\n')]
|
import numpy as np
import cwrapping
GurobiEnv = cwrapping.gurobicpy.GurobiEnv
def make_float64(lists):
newlists = []
for e in lists:
newlists.append(np.float64(e))
return newlists
def check_feasibility(A, b, solution):
RHS = np.dot(A, solution)
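    # the solution is feasible iff A @ x <= b holds within a small numerical tolerance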
if np.sum(RHS - (1.0 - 1e-10) * b > 1e-5) >= 1:
return False
else:
return True
class GurobiOriginalEnv(object):
def __init__(self, A, b, c, solution=None, reward_type='obj'):
A, b, c = make_float64([A, b, c])
self.baseenv = GurobiEnv()
self.baseenv.reset(A, b, c)
self.A0 = A.copy()
self.b0 = b.copy()
self.c0 = c.copy()
self.IPsolution = solution
self.reward_type = reward_type
assert reward_type in ['simple', 'obj']
# upon init, check if the ip problem can be solved by lp
try:
_, done = self._reset()
assert done is False
except NotImplementedError:
print('the env needs to be initialized with nontrivial ip')
def _reset(self):
A,b,cutsa,cutsb,done,objval,xfull,tab = self.baseenv.reset(self.A0, self.b0, self.c0)
self.cutsa = cutsa
self.cutsb = cutsb
self.objval = objval
self.x = xfull
self.tab = tab
return (A, b, self.c0, cutsa, cutsb), done
def reset(self):
s, d = self._reset()
return s
def step(self, action):
if isinstance(action, list):
if len(action) >= 1:
for a in action:
cuta = self.cutsa[a,:]
cutb = self.cutsb[a]
A,b,cutsa,cutsb,done,objval,xfull,tab = self.baseenv.step(cuta, cutb)
if len(action) == 0:
                return (self.A0, self.b0, self.c0, [], []), 0.0, True, {}
elif isinstance(action, int):
cuta = self.cutsa[action,:]
cutb = self.cutsb[action]
A,b,cutsa,cutsb,done,objval,xfull,tab = self.baseenv.step(cuta, cutb)
else:
raise NotImplementedError
# compute reward
if self.reward_type == 'obj':
reward = abs(objval - self.objval)
elif self.reward_type == 'simple':
reward = -1
else:
raise NotImplementedError
self.cutsa = cutsa
self.cutsb = cutsb
self.objval = objval
self.done = done
self.x = xfull
self.tab = tab
return (A, b, self.c0, cutsa, cutsb), reward, done, {}
class timelimit_wrapper(object):
def __init__(self, env, timelimit):
self.env = env
self.timelimit = timelimit
self.counter = 0
def reset(self):
self.counter = 0
return self.env.reset()
def step(self, action):
self.counter += 1
obs, reward, done, info = self.env.step(action)
if self.counter >= self.timelimit:
done = True
return obs, reward, done, info
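
# A minimal usage sketch (requires the compiled cwrapping Gurobi bindings;
# the tiny two-variable LP data below is made up for illustration):
#   A = np.array([[2.0, 3.0], [4.0, 1.0]])
#   b = np.array([6.0, 8.0])
#   c = np.array([-1.0, -1.0])
#   env = timelimit_wrapper(GurobiOriginalEnv(A, b, c), timelimit=10)
#   obs = env.reset()
#   obs, reward, done, info = env.step(0)  # apply the first candidate cut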
|
[
"numpy.sum",
"numpy.dot",
"numpy.float64"
] |
[((235, 254), 'numpy.dot', 'np.dot', (['A', 'solution'], {}), '(A, solution)\n', (241, 254), True, 'import numpy as np\n'), ((259, 298), 'numpy.sum', 'np.sum', (['(RHS - (1.0 - 1e-10) * b > 1e-05)'], {}), '(RHS - (1.0 - 1e-10) * b > 1e-05)\n', (265, 298), True, 'import numpy as np\n'), ((155, 168), 'numpy.float64', 'np.float64', (['e'], {}), '(e)\n', (165, 168), True, 'import numpy as np\n')]
|
import numpy as np
import torch
from torch.utils.data import Dataset
class DSpritesDataset(Dataset):
"""dSprites dataset."""
def __init__(self, npz_file:str, transform=None):
"""
Args:
npz_file: Path to the npz file.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
dataset_zip = np.load(npz_file, allow_pickle=True, encoding='latin1')
self.dataset = self.preprocess_zip(dataset_zip)
del dataset_zip
self.transform = transform
def __len__(self):
return self.dataset['images'].shape[0]
def __getitem__(self, idx):
image = self.dataset['images'][idx]
latents_class = self.dataset['latents_classes'][idx]
latents_value = self.dataset['latents_values'][idx]
sample = (image, latents_class, latents_value)
if self.transform:
sample = self.transform(sample)
return sample
@staticmethod
def preprocess_zip(data_zip):
# TODO: filter out the data we do not need in the future
return {
'images': data_zip['imgs'],
            'latents_classes': data_zip['latents_classes'],
'latents_values': data_zip['latents_values']
}
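
# A minimal usage sketch (assumes the standard dSprites npz release is
# available locally under the name below):
#   dataset = DSpritesDataset('dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz')
#   image, latents_class, latents_value = dataset[0]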
|
[
"numpy.load"
] |
[((449, 504), 'numpy.load', 'np.load', (['npz_file'], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(npz_file, allow_pickle=True, encoding='latin1')\n", (456, 504), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from tflearn.data_utils import *
import os
from os.path import join
import numpy as np
from skimage import io, transform
from keras.models import load_model
from skimage.color import rgb2lab, lab2rgb
import time
from functools import wraps
import warnings
from tensorflow.python.ops.image_ops import rgb_to_hsv
import tensorflow as tf
from keras import backend as K
warnings.filterwarnings("ignore")
def time_this_function(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print(func.__name__, end - start)
return result
return wrapper
class DeHaze:
def __init__(self):
self.inputImagsPath, self.outputImagsPath = './Test', './TestOutput'
self.Models = {'model_L': './Model/dehaze_rough_l.h5', 'model_A': './Model/dehaze_rough_a.h5',
'model_B': './Model/dehaze_rough_b.h5', 'model_refine': './Model/dehaze_refine_mse_30.h5'}
self.first_StepResize = (228, 304, 3)
self.extensionCoef_x, self.extensionCoef_y = 6, 8
def SSIM_Loss(self, y_true, y_pred):
y_pred_hsv = rgb_to_hsv(y_pred)
# mae_loss
mae = K.mean(K.abs(y_pred - y_true), axis=-1)
# tv_loss
shape = tf.shape(y_pred)
height, width = shape[1], shape[2]
y = tf.slice(y_pred, [0, 0, 0, 0], tf.stack([-1, height - 1, -1, -1])) - tf.slice(y_pred, [0, 1, 0, 0],
[-1, -1, -1, -1])
x = tf.slice(y_pred, [0, 0, 0, 0], tf.stack([-1, -1, width - 1, -1])) - tf.slice(y_pred, [0, 0, 1, 0],
[-1, -1, -1, -1])
tv_loss = tf.nn.l2_loss(x) / tf.to_float(tf.size(x)) + tf.nn.l2_loss(y) / tf.to_float(tf.size(y))
# ssim_loss
c1 = 0.01 ** 2
c2 = 0.03 ** 2
y_true = tf.transpose(y_true, [0, 2, 3, 1])
y_pred = tf.transpose(y_pred, [0, 2, 3, 1])
patches_true = tf.extract_image_patches(y_true, [1, 8, 8, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
patches_pred = tf.extract_image_patches(y_pred, [1, 8, 8, 1], [1, 2, 2, 1], [1, 1, 1, 1], "SAME")
# Get mean
u_true = K.mean(patches_true, axis=-1)
u_pred = K.mean(patches_pred, axis=-1)
# Get variance
var_true = K.var(patches_true, axis=-1)
var_pred = K.var(patches_pred, axis=-1)
# Get std dev
std_true = K.sqrt(var_true)
std_pred = K.sqrt(var_pred)
covar_true_pred = std_pred * std_true
ssim = (2 * u_true * u_pred + c1) * (2 * covar_true_pred + c2)
denom = (K.square(u_true) + K.square(u_pred) + c1) * (var_pred + var_true + c2)
ssim /= denom
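        # note: the SSIM map computed above does not enter total_loss below; only the lightness, MAE, and TV terms do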
size = tf.size(y_pred_hsv)
light_loss = tf.nn.l2_loss(y_pred_hsv[:, :, :, 2]) / tf.to_float(size)
total_loss = -0.07 * light_loss + 1.0 * mae - 0.0005 * tv_loss
return total_loss
''' Loading dataset '''
@time_this_function
def loadImages(self):
self.firstStepInputImages = []
self.inputImagesList = os.listdir(self.inputImagsPath)
for pk, pil in enumerate(self.inputImagesList):
prou_img = transform.resize(io.imread(join(self.inputImagsPath, pil)), self.first_StepResize)
self.firstStepInputImages.append(rgb2lab(np.uint8(prou_img * 255.0)))
print("Loading...Done")
''' Haze Removal Part '''
@time_this_function
def first_step(self):
print('#testing images: %s' % (len(self.firstStepInputImages)))
self.firstStepInputImages = np.reshape(self.firstStepInputImages, [-1]+list(self.first_StepResize))
l_pres = load_model(self.Models['model_L']).predict(self.firstStepInputImages)
a_pres = load_model(self.Models['model_A']).predict(self.firstStepInputImages)
b_pres = load_model(self.Models['model_B']).predict(self.firstStepInputImages)
predicts = [[l[0], l[1], a[0], a[1], b[0], b[1]] for l, a, b in zip(l_pres, a_pres, b_pres)]
self.firstOutputImages = [self.restoreCImg(iv, predicts[ik]) for ik, iv in enumerate(self.firstStepInputImages)]
''' Texture Refinement Part '''
@time_this_function
def second_step(self):
self.secondInputImages = []
for pk, pil in enumerate(self.firstOutputImages):
imgc = np.pad(np.reshape(pil, newshape=[-1]+list(pil.shape)), [[0, 0], [self.extensionCoef_x, self.extensionCoef_x], [self.extensionCoef_y, self.extensionCoef_y], [0, 0]], mode='reflect')
self.secondInputImages.append(np.reshape(imgc, newshape=list(imgc.shape[1:4])) / 255.0)
model = load_model(self.Models['model_refine'], custom_objects={'SSIM_Loss': self.SSIM_Loss})
prou_imgs = np.reshape(self.secondInputImages, [-1]+(list(self.secondInputImages[0].shape)))
self.secondOutputImages = np.clip(model.predict(prou_imgs), 0, 1)
[io.imsave(join(self.outputImagsPath, self.inputImagesList[ik]), iv[self.extensionCoef_x:iv.shape[0] - self.extensionCoef_x, self.extensionCoef_y:iv.shape[1] - self.extensionCoef_y, :]) for ik, iv in enumerate(self.secondOutputImages)]
''' Color Transfer '''
def restoreCImg(self, haze_img_lab=None, avg_stds=None):
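        # per-channel mean/std matching in Lab space: shift and rescale each haze channel to the predicted clean-image statistics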
pre_img = np.zeros(haze_img_lab.shape)
avg_clean, std_clean = np.zeros([3]), np.zeros([3])
avg_haze, std_haze = np.zeros([3]), np.zeros([3])
for channel in range(3):
avg_clean[channel], std_clean[channel] = avg_stds[channel * 2], avg_stds[channel * 2 + 1]
avg_haze[channel], std_haze[channel] = np.mean(haze_img_lab[:, :, channel]), np.std(haze_img_lab[:, :, channel])
pre_img[:, :, channel] = (haze_img_lab[:, :, channel] - avg_haze[channel]) * (std_clean[channel] / std_haze[channel]) + avg_clean[channel]
return np.clip(np.uint8(lab2rgb(pre_img) * 255.0), np.uint8(0), np.uint8(255))
def __del__(self):
print("Done")
def run(self):
self.loadImages()
self.first_step()
self.second_step()
if __name__ == '__main__':
deHaze = DeHaze()
deHaze.run()
|
[
"numpy.uint8",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.slice",
"numpy.mean",
"skimage.color.lab2rgb",
"keras.backend.square",
"functools.wraps",
"keras.backend.var",
"tensorflow.extract_image_patches",
"tensorflow.size",
"keras.backend.abs",
"tensorflow.stack",
"keras.backend.sqrt",
"tensorflow.nn.l2_loss",
"numpy.std",
"time.time",
"warnings.filterwarnings",
"keras.models.load_model",
"tensorflow.to_float",
"keras.backend.mean",
"os.path.join",
"tensorflow.python.ops.image_ops.rgb_to_hsv",
"numpy.zeros"
] |
[((380, 413), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (403, 413), False, 'import warnings\n'), ((451, 462), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (456, 462), False, 'from functools import wraps\n'), ((513, 524), 'time.time', 'time.time', ([], {}), '()\n', (522, 524), False, 'import time\n'), ((578, 589), 'time.time', 'time.time', ([], {}), '()\n', (587, 589), False, 'import time\n'), ((1174, 1192), 'tensorflow.python.ops.image_ops.rgb_to_hsv', 'rgb_to_hsv', (['y_pred'], {}), '(y_pred)\n', (1184, 1192), False, 'from tensorflow.python.ops.image_ops import rgb_to_hsv\n'), ((1302, 1318), 'tensorflow.shape', 'tf.shape', (['y_pred'], {}), '(y_pred)\n', (1310, 1318), True, 'import tensorflow as tf\n'), ((1990, 2024), 'tensorflow.transpose', 'tf.transpose', (['y_true', '[0, 2, 3, 1]'], {}), '(y_true, [0, 2, 3, 1])\n', (2002, 2024), True, 'import tensorflow as tf\n'), ((2042, 2076), 'tensorflow.transpose', 'tf.transpose', (['y_pred', '[0, 2, 3, 1]'], {}), '(y_pred, [0, 2, 3, 1])\n', (2054, 2076), True, 'import tensorflow as tf\n'), ((2100, 2186), 'tensorflow.extract_image_patches', 'tf.extract_image_patches', (['y_true', '[1, 8, 8, 1]', '[1, 2, 2, 1]', '[1, 1, 1, 1]', '"""SAME"""'], {}), "(y_true, [1, 8, 8, 1], [1, 2, 2, 1], [1, 1, 1, 1],\n 'SAME')\n", (2124, 2186), True, 'import tensorflow as tf\n'), ((2206, 2292), 'tensorflow.extract_image_patches', 'tf.extract_image_patches', (['y_pred', '[1, 8, 8, 1]', '[1, 2, 2, 1]', '[1, 1, 1, 1]', '"""SAME"""'], {}), "(y_pred, [1, 8, 8, 1], [1, 2, 2, 1], [1, 1, 1, 1],\n 'SAME')\n", (2230, 2292), True, 'import tensorflow as tf\n'), ((2325, 2354), 'keras.backend.mean', 'K.mean', (['patches_true'], {'axis': '(-1)'}), '(patches_true, axis=-1)\n', (2331, 2354), True, 'from keras import backend as K\n'), ((2372, 2401), 'keras.backend.mean', 'K.mean', (['patches_pred'], {'axis': '(-1)'}), '(patches_pred, axis=-1)\n', (2378, 2401), True, 'from keras import backend as K\n'), ((2444, 2472), 'keras.backend.var', 'K.var', (['patches_true'], {'axis': '(-1)'}), '(patches_true, axis=-1)\n', (2449, 2472), True, 'from keras import backend as K\n'), ((2492, 2520), 'keras.backend.var', 'K.var', (['patches_pred'], {'axis': '(-1)'}), '(patches_pred, axis=-1)\n', (2497, 2520), True, 'from keras import backend as K\n'), ((2562, 2578), 'keras.backend.sqrt', 'K.sqrt', (['var_true'], {}), '(var_true)\n', (2568, 2578), True, 'from keras import backend as K\n'), ((2598, 2614), 'keras.backend.sqrt', 'K.sqrt', (['var_pred'], {}), '(var_pred)\n', (2604, 2614), True, 'from keras import backend as K\n'), ((2858, 2877), 'tensorflow.size', 'tf.size', (['y_pred_hsv'], {}), '(y_pred_hsv)\n', (2865, 2877), True, 'import tensorflow as tf\n'), ((4757, 4847), 'keras.models.load_model', 'load_model', (["self.Models['model_refine']"], {'custom_objects': "{'SSIM_Loss': self.SSIM_Loss}"}), "(self.Models['model_refine'], custom_objects={'SSIM_Loss': self.\n SSIM_Loss})\n", (4767, 4847), False, 'from keras.models import load_model\n'), ((5369, 5397), 'numpy.zeros', 'np.zeros', (['haze_img_lab.shape'], {}), '(haze_img_lab.shape)\n', (5377, 5397), True, 'import numpy as np\n'), ((1234, 1256), 'keras.backend.abs', 'K.abs', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (1239, 1256), True, 'from keras import backend as K\n'), ((1443, 1491), 'tensorflow.slice', 'tf.slice', (['y_pred', '[0, 1, 0, 0]', '[-1, -1, -1, -1]'], {}), '(y_pred, [0, 1, 0, 0], [-1, -1, -1, -1])\n', (1451, 1491), True, 'import tensorflow as tf\n'), ((1662, 1710), 'tensorflow.slice', 'tf.slice', (['y_pred', '[0, 0, 1, 0]', '[-1, -1, -1, -1]'], {}), '(y_pred, [0, 0, 1, 0], [-1, -1, -1, -1])\n', (1670, 1710), True, 'import tensorflow as tf\n'), ((2899, 2936), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['y_pred_hsv[:, :, :, 2]'], {}), '(y_pred_hsv[:, :, :, 2])\n', (2912, 2936), True, 'import tensorflow as tf\n'), ((2939, 2956), 'tensorflow.to_float', 'tf.to_float', (['size'], {}), '(size)\n', (2950, 2956), True, 'import tensorflow as tf\n'), ((5429, 5442), 'numpy.zeros', 'np.zeros', (['[3]'], {}), '([3])\n', (5437, 5442), True, 'import numpy as np\n'), ((5444, 5457), 'numpy.zeros', 'np.zeros', (['[3]'], {}), '([3])\n', (5452, 5457), True, 'import numpy as np\n'), ((5487, 5500), 'numpy.zeros', 'np.zeros', (['[3]'], {}), '([3])\n', (5495, 5500), True, 'import numpy as np\n'), ((5502, 5515), 'numpy.zeros', 'np.zeros', (['[3]'], {}), '([3])\n', (5510, 5515), True, 'import numpy as np\n'), ((5986, 5997), 'numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (5994, 5997), True, 'import numpy as np\n'), ((5999, 6012), 'numpy.uint8', 'np.uint8', (['(255)'], {}), '(255)\n', (6007, 6012), True, 'import numpy as np\n'), ((1405, 1439), 'tensorflow.stack', 'tf.stack', (['[-1, height - 1, -1, -1]'], {}), '([-1, height - 1, -1, -1])\n', (1413, 1439), True, 'import tensorflow as tf\n'), ((1625, 1658), 'tensorflow.stack', 'tf.stack', (['[-1, -1, width - 1, -1]'], {}), '([-1, -1, width - 1, -1])\n', (1633, 1658), True, 'import tensorflow as tf\n'), ((1818, 1834), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['x'], {}), '(x)\n', (1831, 1834), True, 'import tensorflow as tf\n'), ((1863, 1879), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['y'], {}), '(y)\n', (1876, 1879), True, 'import tensorflow as tf\n'), ((3791, 3825), 'keras.models.load_model', 'load_model', (["self.Models['model_L']"], {}), "(self.Models['model_L'])\n", (3801, 3825), False, 'from keras.models import load_model\n'), ((3878, 3912), 'keras.models.load_model', 'load_model', (["self.Models['model_A']"], {}), "(self.Models['model_A'])\n", (3888, 3912), False, 'from keras.models import load_model\n'), ((3965, 3999), 'keras.models.load_model', 'load_model', (["self.Models['model_B']"], {}), "(self.Models['model_B'])\n", (3975, 3999), False, 'from keras.models import load_model\n'), ((5037, 5089), 'os.path.join', 'join', (['self.outputImagsPath', 'self.inputImagesList[ik]'], {}), '(self.outputImagsPath, self.inputImagesList[ik])\n', (5041, 5089), False, 'from os.path import join\n'), ((5702, 5738), 'numpy.mean', 'np.mean', (['haze_img_lab[:, :, channel]'], {}), '(haze_img_lab[:, :, channel])\n', (5709, 5738), True, 'import numpy as np\n'), ((5740, 5775), 'numpy.std', 'np.std', (['haze_img_lab[:, :, channel]'], {}), '(haze_img_lab[:, :, channel])\n', (5746, 5775), True, 'import numpy as np\n'), ((1849, 1859), 'tensorflow.size', 'tf.size', (['x'], {}), '(x)\n', (1856, 1859), True, 'import tensorflow as tf\n'), ((1894, 1904), 'tensorflow.size', 'tf.size', (['y'], {}), '(y)\n', (1901, 1904), True, 'import tensorflow as tf\n'), ((2749, 2765), 'keras.backend.square', 'K.square', (['u_true'], {}), '(u_true)\n', (2757, 2765), True, 'from keras import backend as K\n'), ((2768, 2784), 'keras.backend.square', 'K.square', (['u_pred'], {}), '(u_pred)\n', (2776, 2784), True, 'from keras import backend as K\n'), ((3342, 3372), 'os.path.join', 'join', (['self.inputImagsPath', 'pil'], {}), '(self.inputImagsPath, pil)\n', (3346, 3372), False, 'from os.path import join\n'), ((3451, 3477), 'numpy.uint8', 'np.uint8', (['(prou_img * 255.0)'], {}), '(prou_img * 255.0)\n', (3459, 3477), True, 'import numpy as np\n'), ((5959, 5975), 'skimage.color.lab2rgb', 'lab2rgb', (['pre_img'], {}), '(pre_img)\n', (5966, 5975), False, 'from skimage.color import rgb2lab, lab2rgb\n')]
|
## The funcSize noise masks are not binarized yet; this script binarizes them.
import os, json
import nibabel as nib
import numpy as np
from scipy import ndimage
# initialize data
work_dir = '/mindhive/saxelab3/anzellotti/forrest/output_denoise/'
all_subjects = ['sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05', 'sub-09', 'sub-10', 'sub-14', 'sub-15', 'sub-16', 'sub-17', 'sub-18', 'sub-19', 'sub-20']
out_dir = '/mindhive/saxelab3/anzellotti/forrest/derivatives/fmriprep/'
### work_dir = '/Users/chloe/Documents/output_denoise/'
### all_subjects = ['sub-02']
### out_dir = '/Users/chloe/Documents/'
mask = '_CSF_WM_mask_union_bin_shrinked_funcSize.nii.gz'
mask_thr = 0.5
# iterate through all subjects
for sub in all_subjects:
# generate union mask
# initialize info
sub_dir = work_dir + sub + '_denoise/'
mask_dir = sub_dir + sub + mask
sub_out_dir = out_dir + sub + '_complete/' + sub + '_ROIs/'
# load data
mask_union = nib.load(mask_dir)
mask_union_affine = mask_union.affine
mask_union_header = mask_union.header
mask_union = mask_union.get_data()
new_mask_union = np.zeros(mask_union.shape)
	# binarize the union mask: set voxels at or above the threshold to 1
for x in range(0, mask_union.shape[0]):
for y in range(0, mask_union.shape[1]):
for z in range(0, mask_union.shape[2]):
if mask_union[x, y, z] >= mask_thr:
new_mask_union[x, y, z] = 1
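	# note: the nested loops above are equivalent to the vectorized one-liner
	#   new_mask_union = (mask_union >= mask_thr).astype(new_mask_union.dtype)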
	# save the binarized mask to the subject's ROI directory
mask_union_img = nib.Nifti1Image(new_mask_union, mask_union_affine, mask_union_header)
nib.save(mask_union_img, sub_out_dir + sub + '_CSF_WM_mask_union_bin_shrinked_funcSize.nii.gz')
|
[
"nibabel.Nifti1Image",
"numpy.zeros",
"nibabel.save",
"nibabel.load"
] |
[((938, 956), 'nibabel.load', 'nib.load', (['mask_dir'], {}), '(mask_dir)\n', (946, 956), True, 'import nibabel as nib\n'), ((1089, 1115), 'numpy.zeros', 'np.zeros', (['mask_union.shape'], {}), '(mask_union.shape)\n', (1097, 1115), True, 'import numpy as np\n'), ((1427, 1496), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['new_mask_union', 'mask_union_affine', 'mask_union_header'], {}), '(new_mask_union, mask_union_affine, mask_union_header)\n', (1442, 1496), True, 'import nibabel as nib\n'), ((1498, 1597), 'nibabel.save', 'nib.save', (['mask_union_img', "(sub_out_dir + sub + '_CSF_WM_mask_union_bin_shrinked_funcSize.nii.gz')"], {}), "(mask_union_img, sub_out_dir + sub +\n '_CSF_WM_mask_union_bin_shrinked_funcSize.nii.gz')\n", (1506, 1597), True, 'import nibabel as nib\n')]
|
# Comparison of the CSSP algorithms on real datasets.
# Tests the following subsampling functions:
## * Projection DPPs
## * Volume sampling
## * Pivoted QR
## * Double Phase
## * Largest leverage scores
##
import sys
sys.path.insert(0, '..')
from CSSPy.dataset_tools import *
from CSSPy.volume_sampler import *
from CSSPy.optimized_projection_dpp_sampler import *
from CSSPy.projection_dpp_sampler import *
from CSSPy.uniform_sampler import *
from CSSPy.evaluation_functions import *
from CSSPy.experiments_tools import *
from CSSPy.visualization_tools import *
import numpy as np
import timeit
import pandas as pd
from matplotlib import pyplot as plt
# Import the dataset
dataset_name = "colon"
dataset_file = dataset_name+str("_X")
clustering_labels_file = dataset_name +str("_Y")
t = timeit.Timer('char in text', setup='text = "sample string"; char = "g"')
X_df = pd.read_csv('datasets/'+dataset_file+'.csv', sep=",", header=None)
X_matrix = X_df.values
# The dimensions of the matrix
d = np.shape(X_matrix)[1]
N = np.shape(X_matrix)[0] -1
# The singular value decomposition of the matrix
k = 10
_,D,V = np.linalg.svd(X_matrix)
V_k = calculate_right_eigenvectors_k_svd(X_matrix,k)
rank_X = np.shape(D)[0]
# Calculate and sort the k-leverage scores
klv_test_1 = np.asarray(list(reversed(np.sort((np.diag(np.dot(np.transpose(V_k),V_k)))))))
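# The k-leverage score of column j of X is the squared norm of the j-th column
# of V_k (assuming V_k holds the top-k right singular vectors as rows), i.e.
# the j-th diagonal entry of V_k^T V_k computed above; it measures how much
# that column contributes to the top-k right singular subspace.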
# Plot of the k-leverage scores and the cumulative k-leverage scores
plot_cumul_leverage_scores(klv_test_1,dataset_name,k)
def random_error_list_to_min_error_list(error_list):
    # Return the best (minimum) approximation error over the repeated random runs;
    # this is the value recorded for each randomized algorithm at every boosting step.
    return min(error_list)
# These lists contain the approximation errors after boosting
boosting_error_fro_volume_sampling_list = []
boosting_error_fro_projection_dpp_list = []
boosting_error_fro_largest_lvs_list = []
boosting_error_fro_pivoted_qr_list = []
boosting_error_fro_double_phase_list = []
boosting_error_fro_aggregated_list = []
# This is the aggregated list for the results of all the algorithms
error_fro_aggregated_list = []
# This flag is 0 on the first boosting step and 1 afterwards, so the deterministic algorithms are only run once
deterministic_algos_flag = 0
# Initialization of the deterministic algorithms
error_fro_list_pivoted_qr_sampling = 0
error_fro_list_largest_lvs_sampling = 0
# Launch the simulations with the following batch sizes
exp_number = 50
boosting_batch = 1
for T_B in list(range(boosting_batch)):
print ("Boosting step")
print(T_B)
error_fro_aggregated_list = []
error_fro_list_double_phase_sampling = launch_exp_double_phase_sampler(X_matrix,dataset_name,k,exp_number,V,D,V_k,"fro")
error_fro_list_optimized_projection_DPP = launch_exp_optimized_projection_dpp(X_matrix,dataset_name,k,exp_number,V,D,V_k,"fro")
error_fro_list_volume_sampling = launch_exp_volume_sampling(X_matrix,dataset_name,k,exp_number,V,D,V_k,"fro")
if deterministic_algos_flag == 0:
error_fro_list_pivoted_qr_sampling = launch_exp_pivoted_qr_sampling(X_matrix,dataset_name,k,exp_number,V,D,V_k,"fro")
error_fro_list_largest_lvs_sampling = launch_exp_largest_leveragescores_sampling(X_matrix,dataset_name,k,exp_number,V,D,V_k,"fro")
min_error_fro_list_pivoted_qr_sampling = error_fro_list_pivoted_qr_sampling[0]
min_error_fro_list_largest_lvs_sampling = error_fro_list_largest_lvs_sampling[0]
deterministic_algos_flag = deterministic_algos_flag +1
min_error_fro_list_volume_sampling = random_error_list_to_min_error_list(error_fro_list_volume_sampling)
min_error_fro_list_optimized_projection_DPP = random_error_list_to_min_error_list(error_fro_list_optimized_projection_DPP)
min_error_fro_list_double_phase_sampling = random_error_list_to_min_error_list(error_fro_list_double_phase_sampling)
min_error_fro_list_largest_lvs_sampling = error_fro_list_largest_lvs_sampling[0]
min_error_fro_list_pivoted_qr_sampling = error_fro_list_pivoted_qr_sampling[0]
boosting_error_fro_volume_sampling_list.append(min_error_fro_list_volume_sampling)
boosting_error_fro_projection_dpp_list.append(min_error_fro_list_optimized_projection_DPP)
boosting_error_fro_largest_lvs_list.append(min_error_fro_list_largest_lvs_sampling)
boosting_error_fro_pivoted_qr_list.append(min_error_fro_list_pivoted_qr_sampling)
boosting_error_fro_double_phase_list.append(min_error_fro_list_double_phase_sampling)
error_fro_aggregated_list.append(error_fro_list_volume_sampling)
error_fro_aggregated_list.append(error_fro_list_optimized_projection_DPP)
error_fro_aggregated_list.append(error_fro_list_largest_lvs_sampling)
error_fro_aggregated_list.append(error_fro_list_pivoted_qr_sampling)
error_fro_aggregated_list.append(error_fro_list_double_phase_sampling)
boosting_error_fro_aggregated_list.append(boosting_error_fro_volume_sampling_list)
boosting_error_fro_aggregated_list.append(boosting_error_fro_projection_dpp_list)
boosting_error_fro_aggregated_list.append(boosting_error_fro_largest_lvs_list)
boosting_error_fro_aggregated_list.append(boosting_error_fro_pivoted_qr_list)
boosting_error_fro_aggregated_list.append(boosting_error_fro_double_phase_list)
# Plot the comparison of the algorithms
plt.figure(figsize=(10, 6))
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
ax = plt.subplot(111)
box1 =plt.boxplot(error_fro_aggregated_list, showfliers=False)
plt.setp(box1['medians'], color='red', linewidth=3)
plt.ylabel(r'$\mathrm{\|\| X- \pi_{C} X \|\| _{Fr}}$', fontsize=16)
plt.gca().xaxis.set_ticklabels(["Volume S.","Projection DPP","Largest lvs","Pivoted QR","Double Phase"])
# Save the results on a txt file
savefile_name = "results/test_2/"+dataset_name+"_allalgos_"+str(exp_number)+"samples_k_"+str(k)+".txt"
np.savetxt(savefile_name, error_fro_aggregated_list, fmt='%f')
# Save the figure on a pdf file
figfile_name= "results/test_2/"+dataset_name+"_allalgos_"+str(exp_number)+"samples_k_"+str(k)+".pdf"
plt.savefig(figfile_name)
# Show the figure
plt.show()
# Plot the comparison of the algorithms after boosting
plt.figure(figsize=(10, 6))
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
ax = plt.subplot(111)
box_2 = plt.boxplot(boosting_error_fro_aggregated_list, showfliers=False)
plt.setp(box_2['medians'], color='red', linewidth=3)
plt.ylabel(r'$\mathrm{\|\| X- \pi_{C} X \|\| _{Fr}}$', fontsize=16)
plt.gca().xaxis.set_ticklabels(["Volume S.","Projection DPP","Largest lvs","Pivoted QR","Double Phase"])
# Save the results on a txt file
savefile_name = "results/test_2/"+dataset_name+"_boosting_allalgos_"+str(exp_number)+"samples_k_"+str(k)+".txt"
np.savetxt(savefile_name, boosting_error_fro_aggregated_list, fmt='%f')
# Save the figure on a pdf file
figfile_name= "results/test_2/"+dataset_name+"_boosting_allalgos_"+str(exp_number)+"samples_k_"+str(k)+".pdf"
plt.savefig(figfile_name)
# Show the figure
plt.show()
|
[
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.setp",
"sys.path.insert",
"matplotlib.pyplot.savefig",
"timeit.Timer",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"numpy.savetxt",
"numpy.linalg.svd",
"numpy.shape",
"numpy.transpose",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] |
[((230, 254), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (245, 254), False, 'import sys\n'), ((803, 875), 'timeit.Timer', 'timeit.Timer', (['"""char in text"""'], {'setup': '"""text = "sample string"; char = "g\\""""'}), '(\'char in text\', setup=\'text = "sample string"; char = "g"\')\n', (815, 875), False, 'import timeit\n'), ((883, 953), 'pandas.read_csv', 'pd.read_csv', (["('datasets/' + dataset_file + '.csv')"], {'sep': '""","""', 'header': 'None'}), "('datasets/' + dataset_file + '.csv', sep=',', header=None)\n", (894, 953), True, 'import pandas as pd\n'), ((1125, 1148), 'numpy.linalg.svd', 'np.linalg.svd', (['X_matrix'], {}), '(X_matrix)\n', (1138, 1148), True, 'import numpy as np\n'), ((5336, 5363), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (5346, 5363), True, 'from matplotlib import pyplot as plt\n'), ((5365, 5388), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (5375, 5388), True, 'from matplotlib import pyplot as plt\n'), ((5389, 5412), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (5399, 5412), True, 'from matplotlib import pyplot as plt\n'), ((5418, 5434), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (5429, 5434), True, 'from matplotlib import pyplot as plt\n'), ((5442, 5498), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['error_fro_aggregated_list'], {'showfliers': '(False)'}), '(error_fro_aggregated_list, showfliers=False)\n', (5453, 5498), True, 'from matplotlib import pyplot as plt\n'), ((5499, 5550), 'matplotlib.pyplot.setp', 'plt.setp', (["box1['medians']"], {'color': '"""red"""', 'linewidth': '(3)'}), "(box1['medians'], color='red', linewidth=3)\n", (5507, 5550), True, 'from matplotlib import pyplot as plt\n'), ((5552, 5624), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mathrm{\\\\|\\\\| X- \\\\pi_{C} X \\\\|\\\\| _{Fr}}$"""'], {'fontsize': '(16)'}), "('$\\\\mathrm{\\\\|\\\\| X- \\\\pi_{C} X \\\\|\\\\| _{Fr}}$', fontsize=16)\n", (5562, 5624), True, 'from matplotlib import pyplot as plt\n'), ((5862, 5924), 'numpy.savetxt', 'np.savetxt', (['savefile_name', 'error_fro_aggregated_list'], {'fmt': '"""%f"""'}), "(savefile_name, error_fro_aggregated_list, fmt='%f')\n", (5872, 5924), True, 'import numpy as np\n'), ((6059, 6084), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figfile_name'], {}), '(figfile_name)\n', (6070, 6084), True, 'from matplotlib import pyplot as plt\n'), ((6104, 6114), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6112, 6114), True, 'from matplotlib import pyplot as plt\n'), ((6172, 6199), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (6182, 6199), True, 'from matplotlib import pyplot as plt\n'), ((6201, 6224), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (6211, 6224), True, 'from matplotlib import pyplot as plt\n'), ((6225, 6248), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(16)'}), '(fontsize=16)\n', (6235, 6248), True, 'from matplotlib import pyplot as plt\n'), ((6254, 6270), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (6265, 6270), True, 'from matplotlib import pyplot as plt\n'), ((6280, 6345), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['boosting_error_fro_aggregated_list'], {'showfliers': '(False)'}), '(boosting_error_fro_aggregated_list, showfliers=False)\n', (6291, 6345), True, 'from matplotlib import pyplot as plt\n'), ((6346, 6398), 'matplotlib.pyplot.setp', 'plt.setp', (["box_2['medians']"], {'color': '"""red"""', 'linewidth': '(3)'}), "(box_2['medians'], color='red', linewidth=3)\n", (6354, 6398), True, 'from matplotlib import pyplot as plt\n'), ((6400, 6472), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mathrm{\\\\|\\\\| X- \\\\pi_{C} X \\\\|\\\\| _{Fr}}$"""'], {'fontsize': '(16)'}), "('$\\\\mathrm{\\\\|\\\\| X- \\\\pi_{C} X \\\\|\\\\| _{Fr}}$', fontsize=16)\n", (6410, 6472), True, 'from matplotlib import pyplot as plt\n'), ((6722, 6793), 'numpy.savetxt', 'np.savetxt', (['savefile_name', 'boosting_error_fro_aggregated_list'], {'fmt': '"""%f"""'}), "(savefile_name, boosting_error_fro_aggregated_list, fmt='%f')\n", (6732, 6793), True, 'import numpy as np\n'), ((6937, 6962), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figfile_name'], {}), '(figfile_name)\n', (6948, 6962), True, 'from matplotlib import pyplot as plt\n'), ((6982, 6992), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6990, 6992), True, 'from matplotlib import pyplot as plt\n'), ((1009, 1027), 'numpy.shape', 'np.shape', (['X_matrix'], {}), '(X_matrix)\n', (1017, 1027), True, 'import numpy as np\n'), ((1211, 1222), 'numpy.shape', 'np.shape', (['D'], {}), '(D)\n', (1219, 1222), True, 'import numpy as np\n'), ((1035, 1053), 'numpy.shape', 'np.shape', (['X_matrix'], {}), '(X_matrix)\n', (1043, 1053), True, 'import numpy as np\n'), ((5620, 5629), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5627, 5629), True, 'from matplotlib import pyplot as plt\n'), ((6471, 6480), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6478, 6480), True, 'from matplotlib import pyplot as plt\n'), ((1334, 1351), 'numpy.transpose', 'np.transpose', (['V_k'], {}), '(V_k)\n', (1346, 1351), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Remove transcription sites in the FISH image.
"""
import os
import argparse
import time
import datetime
import sys
import bigfish.stack as stack
import numpy as np
from utils import Logger
from loader import (get_metadata_directory, generate_filename_base,
images_generator)
if __name__ == "__main__":
print()
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("experience_directory",
help="Name of the experience directory.",
type=str)
parser.add_argument("--base_directory",
help="Path of the data directory.",
type=str,
default="/Users/arthur/data/2019_racha")
parser.add_argument("--output_directory",
help="Path of the output directory.",
type=str,
default="/Users/arthur/output/2019_racha")
parser.add_argument("--log_directory",
help="Path of the log directory.",
type=str,
default="/Users/arthur/output/2019_racha/log")
# initialize parameters
args = parser.parse_args()
experience_directory = args.experience_directory
base_directory = args.base_directory
output_directory = args.output_directory
# input-output directories
nuc_mask_directory = os.path.join(output_directory, "nuc_mask")
foci_directory = os.path.join(output_directory, "foci_detection")
transcription_site_directory = os.path.join(output_directory,
"transcription_site_removal")
# check directories exist
log_directory = args.log_directory
if not os.path.isdir(base_directory):
raise ValueError("Directory does not exist: {0}"
.format(base_directory))
if not os.path.isdir(output_directory):
raise ValueError("Directory does not exist: {0}"
.format(output_directory))
if not os.path.isdir(nuc_mask_directory):
raise ValueError("Directory does not exist: {0}"
.format(nuc_mask_directory))
if not os.path.isdir(foci_directory):
raise ValueError("Directory does not exist: {0}"
.format(foci_directory))
if not os.path.isdir(transcription_site_directory):
raise ValueError("Directory does not exist: {0}"
.format(transcription_site_directory))
if not os.path.isdir(log_directory):
raise ValueError("Directory does not exist: {0}"
.format(log_directory))
# initialize logging
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M:%S")
log_file = os.path.join(
log_directory, "log" + "_" + experience_directory)
sys.stdout = Logger(log_file)
print("Running {0} file...".format(os.path.basename(__file__)), "\n")
start_time = time.time()
print("Data directory: {0}".format(base_directory))
print("Experience directory name: {0}".format(experience_directory))
print("Output directory: {0}".format(output_directory))
print("Nuclei mask directory: {0}".format(nuc_mask_directory))
print("Foci directory: {0}".format(foci_directory))
print("Transcription site removal directory: {0}"
.format(transcription_site_directory))
print("Log directory: {0}".format(log_directory))
print("Log file: {0}".format(log_file.split("/")[-1]))
print("Date: {0}".format(date), "\n")
print("Files are saved with the pattern "
"'gene_author_puromycin_paper_drug_batch_fov' \n")
print("Removing transcription sites...")
# start analysis
experience = get_metadata_directory(experience_directory)
filename_base = generate_filename_base(experience)
generator = images_generator(base_directory, experience_directory,
return_image=False)
nb_images = 0
for i, _ in enumerate(generator):
filename = filename_base + "_" + str(i)
print("\t", filename)
# spots
path = os.path.join(foci_directory, filename + ".npz")
data = np.load(path)
clustered_spots = data["clustered_spots"]
foci = data["foci"]
# nuclei masks
path = os.path.join(nuc_mask_directory, filename + ".png")
mask_nuc = stack.read_image(path)
nuc = mask_nuc > 0
# spots out of foci and inside foci
spots_out_foci = clustered_spots.copy()
spots_out_foci = spots_out_foci[spots_out_foci[:, 3] == -1, :]
spots_in_foci = clustered_spots.copy()
spots_in_foci = spots_in_foci[spots_in_foci[:, 3] != -1, :]
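        # column 3 of the clustered spots array holds the focus index assigned
        # by the clustering step; -1 marks spots that belong to no focus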
# remove foci inside nuclei
spots_in_foci_cleaned, foci_cleaned = stack.remove_transcription_site(
mask_nuc=nuc,
spots_in_foci=spots_in_foci,
foci=foci)
# save transcription site-free coordinates
path = os.path.join(transcription_site_directory, filename)
np.savez(path,
spots_out_foci=spots_out_foci,
spots_in_foci=spots_in_foci_cleaned,
foci=foci_cleaned)
nb_images += 1
print()
print("Done ({0} images)!".format(nb_images), "\n")
end_time = time.time()
duration = int(round((end_time - start_time) / 60))
print("Duration: {0} minutes.".format(duration))
|
[
"numpy.savez",
"bigfish.stack.remove_transcription_site",
"argparse.ArgumentParser",
"os.path.join",
"bigfish.stack.read_image",
"utils.Logger",
"datetime.datetime.now",
"os.path.isdir",
"loader.generate_filename_base",
"os.path.basename",
"loader.get_metadata_directory",
"numpy.load",
"time.time",
"loader.images_generator"
] |
[((404, 429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (427, 429), False, 'import argparse\n'), ((1453, 1495), 'os.path.join', 'os.path.join', (['output_directory', '"""nuc_mask"""'], {}), "(output_directory, 'nuc_mask')\n", (1465, 1495), False, 'import os\n'), ((1517, 1565), 'os.path.join', 'os.path.join', (['output_directory', '"""foci_detection"""'], {}), "(output_directory, 'foci_detection')\n", (1529, 1565), False, 'import os\n'), ((1601, 1661), 'os.path.join', 'os.path.join', (['output_directory', '"""transcription_site_removal"""'], {}), "(output_directory, 'transcription_site_removal')\n", (1613, 1661), False, 'import os\n'), ((2748, 2771), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2769, 2771), False, 'import datetime\n'), ((2832, 2895), 'os.path.join', 'os.path.join', (['log_directory', "('log' + '_' + experience_directory)"], {}), "(log_directory, 'log' + '_' + experience_directory)\n", (2844, 2895), False, 'import os\n'), ((2922, 2938), 'utils.Logger', 'Logger', (['log_file'], {}), '(log_file)\n', (2928, 2938), False, 'from utils import Logger\n'), ((3031, 3042), 'time.time', 'time.time', ([], {}), '()\n', (3040, 3042), False, 'import time\n'), ((3807, 3851), 'loader.get_metadata_directory', 'get_metadata_directory', (['experience_directory'], {}), '(experience_directory)\n', (3829, 3851), False, 'from loader import get_metadata_directory, generate_filename_base, images_generator\n'), ((3872, 3906), 'loader.generate_filename_base', 'generate_filename_base', (['experience'], {}), '(experience)\n', (3894, 3906), False, 'from loader import get_metadata_directory, generate_filename_base, images_generator\n'), ((3923, 3997), 'loader.images_generator', 'images_generator', (['base_directory', 'experience_directory'], {'return_image': '(False)'}), '(base_directory, experience_directory, return_image=False)\n', (3939, 3997), False, 'from loader import get_metadata_directory, generate_filename_base, images_generator\n'), ((5388, 5399), 'time.time', 'time.time', ([], {}), '()\n', (5397, 5399), False, 'import time\n'), ((1791, 1820), 'os.path.isdir', 'os.path.isdir', (['base_directory'], {}), '(base_directory)\n', (1804, 1820), False, 'import os\n'), ((1940, 1971), 'os.path.isdir', 'os.path.isdir', (['output_directory'], {}), '(output_directory)\n', (1953, 1971), False, 'import os\n'), ((2093, 2126), 'os.path.isdir', 'os.path.isdir', (['nuc_mask_directory'], {}), '(nuc_mask_directory)\n', (2106, 2126), False, 'import os\n'), ((2250, 2279), 'os.path.isdir', 'os.path.isdir', (['foci_directory'], {}), '(foci_directory)\n', (2263, 2279), False, 'import os\n'), ((2399, 2442), 'os.path.isdir', 'os.path.isdir', (['transcription_site_directory'], {}), '(transcription_site_directory)\n', (2412, 2442), False, 'import os\n'), ((2576, 2604), 'os.path.isdir', 'os.path.isdir', (['log_directory'], {}), '(log_directory)\n', (2589, 2604), False, 'import os\n'), ((4198, 4245), 'os.path.join', 'os.path.join', (['foci_directory', "(filename + '.npz')"], {}), "(foci_directory, filename + '.npz')\n", (4210, 4245), False, 'import os\n'), ((4261, 4274), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (4268, 4274), True, 'import numpy as np\n'), ((4392, 4443), 'os.path.join', 'os.path.join', (['nuc_mask_directory', "(filename + '.png')"], {}), "(nuc_mask_directory, filename + '.png')\n", (4404, 4443), False, 'import os\n'), ((4463, 4485), 'bigfish.stack.read_image', 'stack.read_image', (['path'], {}), '(path)\n', (4479, 4485), True, 'import bigfish.stack as stack\n'), ((4875, 4964), 'bigfish.stack.remove_transcription_site', 'stack.remove_transcription_site', ([], {'mask_nuc': 'nuc', 'spots_in_foci': 'spots_in_foci', 'foci': 'foci'}), '(mask_nuc=nuc, spots_in_foci=spots_in_foci,\n    foci=foci)\n', (4906, 4964), True, 'import bigfish.stack as stack\n'), ((5065, 5117), 'os.path.join', 'os.path.join', (['transcription_site_directory', 'filename'], {}), '(transcription_site_directory, filename)\n', (5077, 5117), False, 'import os\n'), ((5126, 5232), 'numpy.savez', 'np.savez', (['path'], {'spots_out_foci': 'spots_out_foci', 'spots_in_foci': 'spots_in_foci_cleaned', 'foci': 'foci_cleaned'}), '(path, spots_out_foci=spots_out_foci, spots_in_foci=\n    spots_in_foci_cleaned, foci=foci_cleaned)\n', (5134, 5232), True, 'import numpy as np\n'), ((2979, 3005), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (2995, 3005), False, 'import os\n')]
|
"""
Methods to search an ImageCollection, either exhaustively (brute force) or via approximate nearest-neighbor indexes (FLANN, cKDTree).
"""
import cgi
import abc
import cPickle
import numpy as np
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import \
manhattan_distances, euclidean_distances, additive_chi2_kernel
import pyflann
from scipy.spatial import cKDTree
import util
from image import Image
from rayleigh.util import TicToc
tt = TicToc()
class SearchableImageCollection(object):
"""
Initialize with a rayleigh.ImageCollection, a distance_metric, and the
number of dimensions to reduce the histograms to.
Parameters
----------
image_collection : rayleigh.ImageCollection
dist_metric : string
must be in self.DISTANCE_METRICS
sigma : nonnegative float
Amount of smoothing applied to histograms.
If 0, none.
num_dimensions : int
number of dimensions to reduce the histograms to, using PCA.
If 0, do not reduce dimensions.
"""
def __init__(self, image_collection, dist_metric, sigma, num_dimensions):
self.ic = image_collection
self.id_ind_map = self.ic.get_id_ind_map()
self.distance_metric = dist_metric
if self.distance_metric not in self.DISTANCE_METRICS:
raise Exception("Unsupported distance metric.")
self.num_dimensions = num_dimensions
self.hists_reduced = self.ic.get_hists()
self.sigma = sigma
if self.sigma > 0:
self.smooth_histograms()
if self.num_dimensions > 0:
self.reduce_dimensionality()
@staticmethod
def load(filename):
"""
        Load a pickled SearchableImageCollection from filename.
"""
return cPickle.load(open(filename))
def save(self, filename):
"""
Save self to filename.
"""
cPickle.dump(self, open(filename, 'w'), 2)
def smooth_histograms(self):
"""
Smooth histograms with a Gaussian.
"""
for i in range(self.hists_reduced.shape[0]):
color_hist = self.hists_reduced[i, :]
self.hists_reduced[i, :] = util.smooth_histogram(
color_hist, self.ic.palette, self.sigma)
def reduce_dimensionality(self):
"""
Compute and store PCA dimensionality-reduced histograms.
"""
tt.tic('reduce_dimensionality')
self.pca = PCA(n_components=self.num_dimensions, whiten=True)
self.pca.fit(self.hists_reduced)
self.hists_reduced = self.pca.transform(self.hists_reduced)
tt.toc('reduce_dimensionality')
def get_image_hist(self, img_id):
"""
Return the smoothed image histogram of the image with the given id.
Parameters
----------
img_id : string
Returns
-------
color_hist : ndarray
"""
img_ind = self.id_ind_map[img_id]
color_hist = self.hists_reduced[img_ind, :]
return color_hist
def search_by_image_in_dataset(self, img_id, num=20):
"""
Search images in database for similarity to the image with img_id in
the database.
See search_by_color_hist() for implementation.
Parameters
----------
img_id : string
num : int, optional
Returns
-------
query_img_data : dict
results : list
list of dicts of nearest neighbors to query
"""
query_img_data = self.ic.get_image(img_id, no_hist=True)
color_hist = self.get_image_hist(img_id)
results, time_elapsed = self.search_by_color_hist(color_hist, num, reduced=True)
return query_img_data, results, time_elapsed
def search_by_image(self, image_filename, num=20):
"""
Search images in database by color similarity to image.
See search_by_color_hist().
"""
query_img = Image(image_filename)
color_hist = util.histogram_colors_smoothed(
query_img.lab_array, self.ic.palette,
sigma=self.sigma, direct=False)
results, time_elapsed = self.search_by_color_hist(color_hist)
return query_img.as_dict(), results, time_elapsed
def search_by_color_hist(self, color_hist, num=20, reduced=False):
"""
Search images in database by color similarity to the given histogram.
Parameters
----------
color_hist : (K,) ndarray
histogram over the color palette
num : int, optional
            number of nearest neighbors to return
reduced : boolean, optional
is the given color_hist already reduced in dimensionality?
Returns
-------
query_img : dict
info about the query image
results : list
list of dicts of nearest neighbors to query
"""
if self.num_dimensions > 0 and not reduced:
color_hist = self.pca.transform(color_hist)
tt.tic('nn_ind')
nn_ind, nn_dists = self.nn_ind(color_hist, num)
time_elapsed = tt.qtoc('nn_ind')
results = []
        # TODO: trim the amount of data returned: the resized size, _id, and
        # perhaps other fields are not needed
for ind, dist in zip(nn_ind, nn_dists):
img_id = self.id_ind_map[ind]
img = self.ic.get_image(img_id, no_hist=True)
img['url'] = cgi.escape(img['url'])
img['distance'] = dist
results.append(img)
return results, time_elapsed
@abc.abstractmethod
def nn_ind(self, color_hist, num):
"""
Return num closest nearest neighbors (potentially approximate) to the
query color_hist, and the distances to them.
Override this search method in extending classes.
Parameters
----------
color_hist : (K,) ndarray
histogram over the color palette
num : int
number of nearest neighbors to return.
Returns
-------
nn_ind : (num,) ndarray
Indices of the neighbors in the dataset.
nn_dists (num,) ndarray
Distances to the neighbors returned.
"""
pass
class SearchableImageCollectionExact(SearchableImageCollection):
"""
Search the image collection exhaustively (mainly through np.dot).
"""
DISTANCE_METRICS = ['manhattan', 'euclidean', 'chi_square']
def nn_ind(self, color_hist, num):
"""
        Exact nearest neighbor search through exhaustive comparison.
"""
if self.distance_metric == 'manhattan':
dists = manhattan_distances(color_hist, self.hists_reduced)
elif self.distance_metric == 'euclidean':
dists = euclidean_distances(color_hist, self.hists_reduced, squared=True)
elif self.distance_metric == 'chi_square':
dists = -additive_chi2_kernel(color_hist, self.hists_reduced)
dists = dists.flatten()
nn_ind = np.argsort(dists).flatten()[:num]
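        # note: np.argpartition(dists, num) would avoid the full sort above on
        # large collections, at the cost of returning the neighbors unordered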
nn_dists = dists[nn_ind]
return nn_ind, nn_dists
class SearchableImageCollectionFLANN(SearchableImageCollection):
"""
Search the image collection using the FLANN library for aNN indexing.
The FLANN index is built with automatic tuning of the search algorithm,
which can take a while (~90s on 25K images).
"""
DISTANCE_METRICS = ['manhattan', 'euclidean', 'chi_square']
@staticmethod
def load(filename):
# Saving the flann object results in memory errors, so we use its own
# method to save its index in a separate file.
sic = cPickle.load(open(filename))
return sic.build_index(filename + '_flann_index')
def save(self, filename):
# See comment in load().
flann = self.flann
self.flann = None
cPickle.dump(self, open(filename, 'w'), 2)
flann.save_index(filename + '_flann_index')
self.flann = flann
def __init__(self, image_collection, distance_metric, sigma, dimensions):
super(SearchableImageCollectionFLANN, self).__init__(
image_collection, distance_metric, sigma, dimensions)
self.build_index()
def build_index(self, index_filename=None):
tt.tic('build_index')
pyflann.set_distance_type(self.distance_metric)
self.flann = pyflann.FLANN()
if index_filename:
self.flann.load_index(index_filename, self.hists_reduced)
else:
self.params = self.flann.build_index(
self.hists_reduced, algorithm='autotuned',
sample_fraction=0.3, target_precision=.8,
build_weight=0.01, memory_weight=0.)
print(self.params)
tt.toc('build_index')
return self
def nn_ind(self, color_hist, num):
nn_ind, nn_dists = self.flann.nn_index(
color_hist, num, checks=self.params['checks'])
return nn_ind.flatten(), nn_dists.flatten()
class SearchableImageCollectionCKDTree(SearchableImageCollection):
"""
Use the cKDTree data structure from scipy.spatial for the index.
Parameters:
- LEAF_SIZE (int): The number of points at which the algorithm switches
over to brute-force.
- EPS (non-negative float): Parameter for query(), such that the
k-th returned value is guaranteed to be no further than (1 + eps)
times the distance to the real k-th nearest neighbor.
NOTE: These parameters have not been tuned.
"""
DISTANCE_METRICS = ['manhattan', 'euclidean']
Ps = {'manhattan': 1, 'euclidean': 2}
LEAF_SIZE = 5
EPSILON = 1
@staticmethod
def load(filename):
return cPickle.load(open(filename)).build_index()
def __init__(self, image_collection, distance_metric, sigma, dimensions):
super(SearchableImageCollectionCKDTree, self).__init__(
image_collection, distance_metric, sigma, dimensions)
self.build_index()
def build_index(self):
tt.tic('build_index_ckdtree')
self.ckdtree = cKDTree(self.hists_reduced, self.LEAF_SIZE)
self.p = self.Ps[self.distance_metric]
tt.toc('build_index_ckdtree')
return self
def nn_ind(self, color_hist, num):
nn_dists, nn_ind = self.ckdtree.query(
color_hist, num, eps=self.EPSILON, p=self.p)
return nn_ind.flatten(), nn_dists.flatten()
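# Example usage (a minimal sketch; `ic` is assumed to be an already-built
# rayleigh.ImageCollection, 'query.jpg' is a hypothetical filename, and the
# sigma / num_dimensions values are illustrative):
#
#   sic = SearchableImageCollectionExact(ic, 'euclidean', sigma=16,
#                                        num_dimensions=0)
#   query, results, elapsed = sic.search_by_image('query.jpg', num=10)
#   print(results[0]['url'], results[0]['distance'])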
|
[
"scipy.spatial.cKDTree",
"sklearn.metrics.pairwise.manhattan_distances",
"sklearn.decomposition.PCA",
"sklearn.metrics.pairwise.euclidean_distances",
"util.histogram_colors_smoothed",
"pyflann.set_distance_type",
"sklearn.metrics.pairwise.additive_chi2_kernel",
"numpy.argsort",
"pyflann.FLANN",
"rayleigh.util.TicToc",
"cgi.escape",
"util.smooth_histogram",
"image.Image"
] |
[((408, 416), 'rayleigh.util.TicToc', 'TicToc', ([], {}), '()\n', (414, 416), False, 'from rayleigh.util import TicToc\n'), ((2377, 2427), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'self.num_dimensions', 'whiten': '(True)'}), '(n_components=self.num_dimensions, whiten=True)\n', (2380, 2427), False, 'from sklearn.decomposition import PCA\n'), ((3897, 3918), 'image.Image', 'Image', (['image_filename'], {}), '(image_filename)\n', (3902, 3918), False, 'from image import Image\n'), ((3940, 4045), 'util.histogram_colors_smoothed', 'util.histogram_colors_smoothed', (['query_img.lab_array', 'self.ic.palette'], {'sigma': 'self.sigma', 'direct': '(False)'}), '(query_img.lab_array, self.ic.palette, sigma=\n self.sigma, direct=False)\n', (3970, 4045), False, 'import util\n'), ((8282, 8329), 'pyflann.set_distance_type', 'pyflann.set_distance_type', (['self.distance_metric'], {}), '(self.distance_metric)\n', (8307, 8329), False, 'import pyflann\n'), ((8351, 8366), 'pyflann.FLANN', 'pyflann.FLANN', ([], {}), '()\n', (8364, 8366), False, 'import pyflann\n'), ((10077, 10120), 'scipy.spatial.cKDTree', 'cKDTree', (['self.hists_reduced', 'self.LEAF_SIZE'], {}), '(self.hists_reduced, self.LEAF_SIZE)\n', (10084, 10120), False, 'from scipy.spatial import cKDTree\n'), ((2111, 2173), 'util.smooth_histogram', 'util.smooth_histogram', (['color_hist', 'self.ic.palette', 'self.sigma'], {}), '(color_hist, self.ic.palette, self.sigma)\n', (2132, 2173), False, 'import util\n'), ((5383, 5405), 'cgi.escape', 'cgi.escape', (["img['url']"], {}), "(img['url'])\n", (5393, 5405), False, 'import cgi\n'), ((6606, 6657), 'sklearn.metrics.pairwise.manhattan_distances', 'manhattan_distances', (['color_hist', 'self.hists_reduced'], {}), '(color_hist, self.hists_reduced)\n', (6625, 6657), False, 'from sklearn.metrics.pairwise import manhattan_distances, euclidean_distances, additive_chi2_kernel\n'), ((6728, 6793), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['color_hist', 'self.hists_reduced'], {'squared': '(True)'}), '(color_hist, self.hists_reduced, squared=True)\n', (6747, 6793), False, 'from sklearn.metrics.pairwise import manhattan_distances, euclidean_distances, additive_chi2_kernel\n'), ((6977, 6994), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (6987, 6994), True, 'import numpy as np\n'), ((6866, 6918), 'sklearn.metrics.pairwise.additive_chi2_kernel', 'additive_chi2_kernel', (['color_hist', 'self.hists_reduced'], {}), '(color_hist, self.hists_reduced)\n', (6886, 6918), False, 'from sklearn.metrics.pairwise import manhattan_distances, euclidean_distances, additive_chi2_kernel\n')]
|
#!/usr/bin/env python3
import os
import argparse
import numpy as np
from sklearn import preprocessing
from sklearn import datasets
from tqdm import tqdm
class Network(object):
def __init__(self):
self.linear1 = Linear(64, 128)
self.relu1 = ReLU()
self.linear2 = Linear(128, 64)
self.relu2 = ReLU()
self.linear3 = Linear(64, 10)
def forward(self, x):
out = self.relu1(self.linear1(x))
out = self.relu2(self.linear2(out))
out = self.linear3(out)
return out
def __call__(self, x):
return self.forward(x)
class Linear(object):
def __init__(self, input_size, output_size):
self.W = np.zeros((input_size, output_size))
self.cache = None
self.reset_parameters()
def forward(self, x):
self.cache = x
return x @ self.W
def backward(self, grad):
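        # stub: a complete implementation would use the cached input to compute
        # dW = self.cache.T @ grad and return dx = grad @ self.W.T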
pass
def reset_parameters(self):
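        # note: np.random.normal's `scale` argument is the standard deviation,
        # so passing the variance 1/fan_in below gives smaller weights than a
        # textbook LeCun initialization (which would use scale=np.sqrt(var))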
var = 1 / self.W.shape[0]
self.W = np.random.normal(loc=0, scale=var, size=self.W.shape)
def __call__(self, x):
return self.forward(x)
class ReLU(object):
def __init__(self):
self.cache = None
def forward(self, x):
self.cache = x
return np.clip(x, a_min=0, a_max=None)
def __call__(self, x):
return self.forward(x)
def softmax(X):
"""https://deepnotes.io/softmax-crossentropy"""
    # row-wise softmax; subtracting each row's max keeps np.exp from overflowing
    exps = np.exp(X - np.max(X, axis=-1, keepdims=True))
    return exps / np.sum(exps, axis=-1, keepdims=True)
def cross_entropy(X, y):
"""https://deepnotes.io/softmax-crossentropy"""
m = y.shape[0]
p = softmax(X)
log_likelihood = -np.log(p[range(m),y])
loss = np.sum(log_likelihood) / m
return loss
def main(args):
data, target = datasets.load_digits(return_X_y=True)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data, target = data[indices], target[indices]
splits = (int(0.7 * data.shape[0]), int(0.9 * data.shape[0]))
scaler = preprocessing.StandardScaler().fit(data[:splits[0]])
data = scaler.transform(data)
train, val, test = zip(np.split(data, splits), np.split(target, splits))
net = Network()
for epoch in range(args.epochs):
pred = net(train[0])
loss = cross_entropy(pred, train[1])
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=100)
args = parser.parse_args()
main(args)
|
[
"numpy.random.normal",
"numpy.clip",
"argparse.ArgumentParser",
"ipdb.set_trace",
"sklearn.datasets.load_digits",
"numpy.max",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.zeros",
"numpy.split",
"numpy.arange",
"numpy.random.shuffle"
] |
[((1714, 1751), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (1734, 1751), False, 'from sklearn import datasets\n'), ((1766, 1790), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (1775, 1790), True, 'import numpy as np\n'), ((1795, 1821), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1812, 1821), True, 'import numpy as np\n'), ((2329, 2354), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2352, 2354), False, 'import argparse\n'), ((689, 724), 'numpy.zeros', 'np.zeros', (['(input_size, output_size)'], {}), '((input_size, output_size))\n', (697, 724), True, 'import numpy as np\n'), ((987, 1040), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': 'var', 'size': 'self.W.shape'}), '(loc=0, scale=var, size=self.W.shape)\n', (1003, 1040), True, 'import numpy as np\n'), ((1237, 1268), 'numpy.clip', 'np.clip', (['x'], {'a_min': '(0)', 'a_max': 'None'}), '(x, a_min=0, a_max=None)\n', (1244, 1268), True, 'import numpy as np\n'), ((1449, 1461), 'numpy.sum', 'np.sum', (['exps'], {}), '(exps)\n', (1455, 1461), True, 'import numpy as np\n'), ((1634, 1656), 'numpy.sum', 'np.sum', (['log_likelihood'], {}), '(log_likelihood)\n', (1640, 1656), True, 'import numpy as np\n'), ((2066, 2088), 'numpy.split', 'np.split', (['data', 'splits'], {}), '(data, splits)\n', (2074, 2088), True, 'import numpy as np\n'), ((2090, 2114), 'numpy.split', 'np.split', (['target', 'splits'], {}), '(target, splits)\n', (2098, 2114), True, 'import numpy as np\n'), ((2270, 2286), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (2284, 2286), False, 'import ipdb\n'), ((1420, 1429), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (1426, 1429), True, 'import numpy as np\n'), ((1952, 1982), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (1980, 1982), False, 'from sklearn import preprocessing\n')]
|
"""
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019 Regents of the University of Minnesota
See: LICENSE.md for complete license details
Author: <NAME>
Analysis for Huginn (mAEWing2) FLT05 and FLT06
"""
#%%
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
# Hack to allow loading the Core package
if __name__ == "__main__" and __package__ is None:
from sys import path, argv
from os.path import dirname, abspath, join
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join
from Core import Loader
from Core import OpenData
# Constants
hz2rps = 2 * np.pi
rps2hz = 1 / hz2rps
#%% File Lists
import os.path as path
pathBase = path.join('/home', 'rega0051', 'FlightArchive', 'Huginn')
#pathBase = path.join('G:', 'Shared drives', 'UAVLab', 'Flight Data', 'Huginn')
#pathBase = path.join('D:/', 'Huginn')
fileList = {}
flt = 'FLT05'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Huginn' + flt, 'Huginn' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Huginn' + flt, 'huginn.json')
fileList[flt]['def'] = path.join(pathBase, 'Huginn' + flt, 'huginn_def.json')
flt = 'FLT06'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Huginn' + flt, 'Huginn' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Huginn' + flt, 'huginn.json')
fileList[flt]['def'] = path.join(pathBase, 'Huginn' + flt, 'huginn_def.json')
#%% Wind/Air Cal
windSegList = [
{'flt': 'FLT05', 'seg': ('time_us', [566483686, 582408497])},
{'flt': 'FLT05', 'seg': ('time_us', [602534178, 622279236])},
{'flt': 'FLT05', 'seg': ('time_us', [637362791, 654286351])},
{'flt': 'FLT05', 'seg': ('time_us', [666668777, 687832534])},
{'flt': 'FLT05', 'seg': ('time_us', [703115100, 766364351])}, # Long!!
{'flt': 'FLT05', 'seg': ('time_us', [788467105, 799488311])},
{'flt': 'FLT05', 'seg': ('time_us', [811669552, 831211361])},
{'flt': 'FLT05', 'seg': ('time_us', [844412511, 861513899])},
{'flt': 'FLT05', 'seg': ('time_us', [873694795, 887575754])},
{'flt': 'FLT05', 'seg': ('time_us', [899096534, 909897237])},
{'flt': 'FLT05', 'seg': ('time_us', [927000000, 950000000])}, # Landing Approach
{'flt': 'FLT06', 'seg': ('time_us', [940358346, 955822061])},
{'flt': 'FLT06', 'seg': ('time_us', [982747328, 1000069848])},
{'flt': 'FLT06', 'seg': ('time_us', [1010491142, 1026492809])},
{'flt': 'FLT06', 'seg': ('time_us', [1036733749, 1054855133])},
{'flt': 'FLT06', 'seg': ('time_us', [1065295790, 1087597269])}, # Slowing Turn
{'flt': 'FLT06', 'seg': ('time_us', [1103958408, 1122539650])},
{'flt': 'FLT06', 'seg': ('time_us', [1140000000, 1165401057])},
{'flt': 'FLT06', 'seg': ('time_us', [1165401057, 1189143263])},
{'flt': 'FLT06', 'seg': ('time_us', [1189143263, 1225000000])}, # Landing Approach
{'flt': 'FLT06', 'seg': ('time_us', [1225000000, 1260000000])}, # Landing Approach
]
oDataWindList = []
for windSeg in windSegList:
fltNum = windSeg['flt']
fileLog = fileList[fltNum]['log']
fileConfig = fileList[fltNum]['config']
oData, h5Data = Loader.Log_RAPTRS(fileLog, fileConfig)
for key in h5Data['Sensor-Processing']['PostProcess']['INS'].keys():
oData[key] = h5Data['Sensor-Processing']['PostProcess']['INS'][key]
oData = OpenData.Decimate(oData, 10)
oDataWindList.append(OpenData.Segment(oData, windSeg['seg']))
fig, ax = plt.subplots(nrows=2)
for oDataWind in oDataWindList:
latGps_deg = oDataWind['rGps_D_ddm'][0]
lonGps_deg = oDataWind['rGps_D_ddm'][1]
latB_deg = oDataWind['rB_D_ddm'][0]
lonB_deg = oDataWind['rB_D_ddm'][1]
ax[0].plot(lonGps_deg, latGps_deg, '.', label='GPS')
ax[0].plot(lonB_deg, latB_deg, label='Ekf')
ax[0].grid()
ax[1].plot(oDataWind['time_s'], oDataWind['vIas_mps'])
ax[1].plot(oDataWind['time_s'], oDataWind['sB_L_rad'][0]*180.0/np.pi)
ax[1].grid()
#%%
## Pre-Optimization, Initial Guess for the Wind
# Over-ride Default Error Model, Optional
pData = {}
pData['5Hole'] = {}
pData['5Hole']['r_B_m'] = np.array([1.0, 0.0, 0.0])
pData['5Hole']['s_B_rad'] = np.array([0.0, 0.0, 0.0]) * 180.0/np.pi
pData['5Hole']['v'] = {}
pData['5Hole']['v']['errorType'] = 'ScaleBias+'
pData['5Hole']['v']['K'] = 1.0
pData['5Hole']['v']['bias'] = 0.0
pData['5Hole']['alt'] = pData['5Hole']['v'].copy()
pData['5Hole']['alpha'] = pData['5Hole']['v'].copy()
pData['5Hole']['beta'] = pData['5Hole']['v'].copy()
pData['5Hole']['v']['K'] = 0.95
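# The '5Hole' block models the air-data probe: r_B_m / s_B_rad are its position
# and orientation in the body frame (the 180/pi factor on s_B_rad looks like a
# rad->deg conversion despite the _rad suffix, but it is moot since the angles
# are zero), and each channel ('v', 'alt', 'alpha', 'beta') carries a
# 'ScaleBias+' error model with gain K and additive bias, presumably applied as
# a gain-and-bias correction by AirData.ApplyCalibration / SensorErrorModel below.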
#%% Optimize
from Core import AirData
from Core import AirDataCalibration
rad2deg = 180.0 / np.pi
deg2rad = 1 / rad2deg
oDataList = oDataWindList
# Compute the optimal parameters
#opt = {'Method': 'BFGS', 'Options': {'disp': True}}
opt = {'Method': 'L-BFGS-B', 'Options': {'disp': True}}
#opt = {'Method': 'L-BFGS-B', 'Options': {'maxiter': 10, 'disp': True}}
#opt = {'Method': 'CG', 'Options': {'disp': True}}
#%% First Phase - Airspeed and Wind only
opt['wind'] = []
for seg in oDataList:
seg['vMean_AE_L_mps'] = np.asarray([-2.0, 0.0, 0.0])
opt['wind'].append({'val': seg['vMean_AE_L_mps'], 'lb': np.asarray([-10, -10, -3]), 'ub': np.asarray([10, 10, 3])})
opt['param'] = []
opt['param'].append({'val': pData['5Hole']['v']['K'], 'lb': 0.80, 'ub': 1.20})
opt['param'].append({'val': pData['5Hole']['v']['bias'], 'lb': -3.0, 'ub': 3.0})
opt['param'].append({'val': pData['5Hole']['alpha']['K'], 'lb': 1.00, 'ub': 1.00})
opt['param'].append({'val': pData['5Hole']['alpha']['bias'], 'lb': -0.0 * deg2rad, 'ub': 0.0 * deg2rad})
opt['param'].append({'val': pData['5Hole']['beta']['K'], 'lb': 1.00, 'ub': 1.00})
opt['param'].append({'val': pData['5Hole']['beta']['bias'], 'lb': -0.0 * deg2rad, 'ub': 0.0 * deg2rad})
#AirDataCalibration.CostFunc(xOpt, optInfo, oDataList, param)
opt['Result'] = AirDataCalibration.EstCalib(opt, oDataList, pData['5Hole'])
nSegs = len(oDataWindList)
nWinds = nSegs * 3
vWind = opt['Result']['x'][0:nWinds].reshape((nSegs, 3))
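# Layout of the optimizer solution vector: the first 3*nSegs entries are the
# per-segment mean wind vectors (NED components), reshaped above; the remaining
# entries presumably hold the air-data calibration parameters in the order they
# were appended to opt['param'].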
#%% Second Phase - add alpha and beta
if False:
for iSeg, seg in enumerate(oDataList):
seg['vMean_AE_L_mps'] = vWind[iSeg]
opt['wind'][iSeg]['val'] = seg['vMean_AE_L_mps']
opt['wind'][iSeg]['lb'] = seg['vMean_AE_L_mps'] - np.asarray([0.0, 0.0, 0.0])
opt['wind'][iSeg]['ub'] = seg['vMean_AE_L_mps'] + np.asarray([0.0, 0.0, 0.0])
opt['param'][0] = {'val': pData['5Hole']['v']['K'], 'lb': 1.0 * pData['5Hole']['v']['K'], 'ub': 1.0 * pData['5Hole']['v']['K']}
opt['param'][1] = {'val': pData['5Hole']['v']['bias'], 'lb': pData['5Hole']['v']['bias']-0.0, 'ub': pData['5Hole']['v']['bias']+0.0}
opt['param'][2] = {'val': pData['5Hole']['alpha']['K'], 'lb': 0.8, 'ub': 1.2}
opt['param'][3] = {'val': pData['5Hole']['alpha']['bias'], 'lb': -4.0 * deg2rad, 'ub': 4.0 * deg2rad}
opt['param'][4] = {'val': pData['5Hole']['beta']['K'], 'lb': 0.8, 'ub': 1.2}
opt['param'][5] = {'val': pData['5Hole']['beta']['bias'], 'lb': -4.0 * deg2rad, 'ub': 4.0 * deg2rad}
#AirDataCalibration.CostFunc(xOpt, optInfo, oDataList, param)
opt['Result'] = AirDataCalibration.EstCalib(opt, oDataList, pData['5Hole'])
nSegs = len(oDataWindList)
nWinds = nSegs * 3
vWind = opt['Result']['x'][0:nWinds].reshape((nSegs, 3))
#%% Plot the Solution
from SensorModel import SensorErrorModel
for iSeg, oDataWind in enumerate(oDataWindList):
# Update the Flight data with the new calibrations
calib = AirData.ApplyCalibration(oDataWind, pData['5Hole'])
oDataWind.update(calib)
v_BA_B_mps, v_BA_L_mps = AirData.Airspeed2NED(oDataWind['v_PA_P_mps'], oDataWind['sB_L_rad'], pData['5Hole'])
oDataWind['vMean_AE_L_mps'] = vWind[iSeg]
if True:
plt.figure()
plt.subplot(3,1,1)
plt.plot(oDataWind['time_s'], oDataWind['vB_L_mps'][0], label = 'Inertial')
plt.plot(oDataWind['time_s'], v_BA_L_mps[0] + oDataWind['vMean_AE_L_mps'][0], label = 'AirData + Wind')
plt.grid()
plt.legend()
plt.subplot(3,1,2)
plt.plot(oDataWind['time_s'], oDataWind['vB_L_mps'][1])
plt.plot(oDataWind['time_s'], v_BA_L_mps[1] + oDataWind['vMean_AE_L_mps'][1])
plt.grid()
plt.subplot(3,1,3)
plt.plot(oDataWind['time_s'], oDataWind['vB_L_mps'][2])
plt.plot(oDataWind['time_s'], v_BA_L_mps[2] + oDataWind['vMean_AE_L_mps'][2])
plt.grid()
v_AE_L_mps = np.repeat([oDataWind['vMean_AE_L_mps']], oDataWind['vB_L_mps'].shape[-1], axis=0).T
vError_mps = (v_BA_L_mps + v_AE_L_mps) - oDataWind['vB_L_mps']
vErrorMag_mps = np.linalg.norm(vError_mps, axis=0)
vAirUncalMag_mps = oDataWind['vIas_mps']
vAirMag_mps = np.linalg.norm((v_BA_L_mps + v_AE_L_mps), axis=0)
vInertMag_mps = np.linalg.norm(oDataWind['vB_L_mps'], axis=0)
plt.figure(0)
# plt.plot(vAirMag_mps, vInertMag_mps - vAirMag_mps, '.')
plt.plot(vAirMag_mps, vInertMag_mps, '.')
plt.grid()
print('Wind (m/s): ', oDataWind['vMean_AE_L_mps'])
plt.figure(0)
vA_mps = np.linspace(15, 35, 5)
vAcal_mps = SensorErrorModel(vA_mps, pData['5Hole']['v'])
plt.plot(vA_mps, vA_mps, 'k:')
#plt.plot(vA_mps, vA_mps - vG_mps, 'k:')
print('Velocity Gain: ', pData['5Hole']['v']['K'])
print('Velocity Bias: ', pData['5Hole']['v']['bias'])
print('Alpha Gain: ', pData['5Hole']['alpha']['K'])
print('Alpha Bias: ', pData['5Hole']['alpha']['bias'])
print('Beta Gain: ', pData['5Hole']['beta']['K'])
print('Beta Bias: ', pData['5Hole']['beta']['bias'])
|
[
"Core.AirData.ApplyCalibration",
"matplotlib.pyplot.grid",
"Core.AirData.Airspeed2NED",
"Core.AirDataCalibration.EstCalib",
"numpy.array",
"Core.OpenData.Decimate",
"numpy.linalg.norm",
"numpy.repeat",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.linspace",
"Core.Loader.Log_RAPTRS",
"sys.path.join",
"os.path.dirname",
"matplotlib.pyplot.legend",
"Core.OpenData.Segment",
"matplotlib.pyplot.figure",
"SensorModel.SensorErrorModel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots"
] |
[((811, 868), 'sys.path.join', 'path.join', (['"""/home"""', '"""rega0051"""', '"""FlightArchive"""', '"""Huginn"""'], {}), "('/home', 'rega0051', 'FlightArchive', 'Huginn')\n", (820, 868), False, 'from sys import path, argv\n'), ((1059, 1118), 'sys.path.join', 'path.join', (['pathBase', "('Huginn' + flt)", "('Huginn' + flt + '.h5')"], {}), "(pathBase, 'Huginn' + flt, 'Huginn' + flt + '.h5')\n", (1068, 1118), False, 'from sys import path, argv\n'), ((1145, 1195), 'sys.path.join', 'path.join', (['pathBase', "('Huginn' + flt)", '"""huginn.json"""'], {}), "(pathBase, 'Huginn' + flt, 'huginn.json')\n", (1154, 1195), False, 'from sys import path, argv\n'), ((1219, 1273), 'sys.path.join', 'path.join', (['pathBase', "('Huginn' + flt)", '"""huginn_def.json"""'], {}), "(pathBase, 'Huginn' + flt, 'huginn_def.json')\n", (1228, 1273), False, 'from sys import path, argv\n'), ((1331, 1390), 'sys.path.join', 'path.join', (['pathBase', "('Huginn' + flt)", "('Huginn' + flt + '.h5')"], {}), "(pathBase, 'Huginn' + flt, 'Huginn' + flt + '.h5')\n", (1340, 1390), False, 'from sys import path, argv\n'), ((1417, 1467), 'sys.path.join', 'path.join', (['pathBase', "('Huginn' + flt)", '"""huginn.json"""'], {}), "(pathBase, 'Huginn' + flt, 'huginn.json')\n", (1426, 1467), False, 'from sys import path, argv\n'), ((1491, 1545), 'sys.path.join', 'path.join', (['pathBase', "('Huginn' + flt)", '"""huginn_def.json"""'], {}), "(pathBase, 'Huginn' + flt, 'huginn_def.json')\n", (1500, 1545), False, 'from sys import path, argv\n'), ((3667, 3688), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)'}), '(nrows=2)\n', (3679, 3688), True, 'import matplotlib.pyplot as plt\n'), ((4315, 4340), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (4323, 4340), True, 'import numpy as np\n'), ((6044, 6103), 'Core.AirDataCalibration.EstCalib', 'AirDataCalibration.EstCalib', (['opt', 'oDataList', "pData['5Hole']"], {}), "(opt, oDataList, pData['5Hole'])\n", (6071, 6103), False, 'from Core import AirDataCalibration\n'), ((9199, 9212), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (9209, 9212), True, 'import matplotlib.pyplot as plt\n'), ((9222, 9244), 'numpy.linspace', 'np.linspace', (['(15)', '(35)', '(5)'], {}), '(15, 35, 5)\n', (9233, 9244), True, 'import numpy as np\n'), ((9257, 9302), 'SensorModel.SensorErrorModel', 'SensorErrorModel', (['vA_mps', "pData['5Hole']['v']"], {}), "(vA_mps, pData['5Hole']['v'])\n", (9273, 9302), False, 'from SensorModel import SensorErrorModel\n'), ((9303, 9333), 'matplotlib.pyplot.plot', 'plt.plot', (['vA_mps', 'vA_mps', '"""k:"""'], {}), "(vA_mps, vA_mps, 'k:')\n", (9311, 9333), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3388), 'Core.Loader.Log_RAPTRS', 'Loader.Log_RAPTRS', (['fileLog', 'fileConfig'], {}), '(fileLog, fileConfig)\n', (3367, 3388), False, 'from Core import Loader\n'), ((3560, 3588), 'Core.OpenData.Decimate', 'OpenData.Decimate', (['oData', '(10)'], {}), '(oData, 10)\n', (3577, 3588), False, 'from Core import OpenData\n'), ((5262, 5290), 'numpy.asarray', 'np.asarray', (['[-2.0, 0.0, 0.0]'], {}), '([-2.0, 0.0, 0.0])\n', (5272, 5290), True, 'import numpy as np\n'), ((7305, 7364), 'Core.AirDataCalibration.EstCalib', 'AirDataCalibration.EstCalib', (['opt', 'oDataList', "pData['5Hole']"], {}), "(opt, oDataList, pData['5Hole'])\n", (7332, 7364), False, 'from Core import AirDataCalibration\n'), ((7664, 7715), 'Core.AirData.ApplyCalibration', 'AirData.ApplyCalibration', (['oDataWind', "pData['5Hole']"], {}), "(oDataWind, pData['5Hole'])\n", (7688, 7715), False, 'from Core import AirData\n'), ((7774, 7863), 'Core.AirData.Airspeed2NED', 'AirData.Airspeed2NED', (["oDataWind['v_PA_P_mps']", "oDataWind['sB_L_rad']", "pData['5Hole']"], {}), "(oDataWind['v_PA_P_mps'], oDataWind['sB_L_rad'], pData[\n '5Hole'])\n", (7794, 7863), False, 'from Core import AirData\n'), ((8786, 8820), 'numpy.linalg.norm', 'np.linalg.norm', (['vError_mps'], {'axis': '(0)'}), '(vError_mps, axis=0)\n', (8800, 8820), True, 'import numpy as np\n'), ((8884, 8931), 'numpy.linalg.norm', 'np.linalg.norm', (['(v_BA_L_mps + v_AE_L_mps)'], {'axis': '(0)'}), '(v_BA_L_mps + v_AE_L_mps, axis=0)\n', (8898, 8931), True, 'import numpy as np\n'), ((8954, 8999), 'numpy.linalg.norm', 'np.linalg.norm', (["oDataWind['vB_L_mps']"], {'axis': '(0)'}), "(oDataWind['vB_L_mps'], axis=0)\n", (8968, 8999), True, 'import numpy as np\n'), ((9005, 9018), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (9015, 9018), True, 'import matplotlib.pyplot as plt\n'), ((9084, 9125), 'matplotlib.pyplot.plot', 'plt.plot', (['vAirMag_mps', 'vInertMag_mps', '"""."""'], {}), "(vAirMag_mps, vInertMag_mps, '.')\n", (9092, 9125), True, 'import matplotlib.pyplot as plt\n'), ((9130, 9140), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9138, 9140), True, 'import matplotlib.pyplot as plt\n'), ((3614, 3653), 'Core.OpenData.Segment', 'OpenData.Segment', (['oData', "windSeg['seg']"], {}), "(oData, windSeg['seg'])\n", (3630, 3653), False, 'from Core import OpenData\n'), ((4369, 4394), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4377, 4394), True, 'import numpy as np\n'), ((7928, 7940), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7938, 7940), True, 'import matplotlib.pyplot as plt\n'), ((7949, 7969), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (7960, 7969), True, 'import matplotlib.pyplot as plt\n'), ((7976, 8049), 'matplotlib.pyplot.plot', 'plt.plot', (["oDataWind['time_s']", "oDataWind['vB_L_mps'][0]"], {'label': '"""Inertial"""'}), "(oDataWind['time_s'], oDataWind['vB_L_mps'][0], label='Inertial')\n", (7984, 8049), True, 'import matplotlib.pyplot as plt\n'), ((8060, 8166), 'matplotlib.pyplot.plot', 'plt.plot', (["oDataWind['time_s']", "(v_BA_L_mps[0] + oDataWind['vMean_AE_L_mps'][0])"], {'label': '"""AirData + Wind"""'}), "(oDataWind['time_s'], v_BA_L_mps[0] + oDataWind['vMean_AE_L_mps'][0\n ], label='AirData + Wind')\n", (8068, 8166), True, 'import matplotlib.pyplot as plt\n'), ((8172, 8182), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8180, 8182), True, 'import matplotlib.pyplot as plt\n'), ((8191, 8203), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8201, 8203), True, 'import matplotlib.pyplot as plt\n'), ((8212, 8232), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (8223, 8232), True, 'import matplotlib.pyplot as plt\n'), ((8239, 8294), 'matplotlib.pyplot.plot', 'plt.plot', (["oDataWind['time_s']", "oDataWind['vB_L_mps'][1]"], {}), "(oDataWind['time_s'], oDataWind['vB_L_mps'][1])\n", (8247, 8294), True, 'import matplotlib.pyplot as plt\n'), ((8303, 8380), 'matplotlib.pyplot.plot', 'plt.plot', (["oDataWind['time_s']", "(v_BA_L_mps[1] + oDataWind['vMean_AE_L_mps'][1])"], {}), "(oDataWind['time_s'], v_BA_L_mps[1] + oDataWind['vMean_AE_L_mps'][1])\n", (8311, 8380), True, 'import matplotlib.pyplot as plt\n'), ((8389, 8399), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8397, 8399), True, 'import matplotlib.pyplot as plt\n'), ((8408, 8428), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (8419, 8428), True, 'import matplotlib.pyplot as plt\n'), ((8435, 8490), 'matplotlib.pyplot.plot', 'plt.plot', (["oDataWind['time_s']", "oDataWind['vB_L_mps'][2]"], {}), "(oDataWind['time_s'], oDataWind['vB_L_mps'][2])\n", (8443, 8490), True, 'import matplotlib.pyplot as plt\n'), ((8499, 8576), 'matplotlib.pyplot.plot', 'plt.plot', (["oDataWind['time_s']", "(v_BA_L_mps[2] + oDataWind['vMean_AE_L_mps'][2])"], {}), "(oDataWind['time_s'], v_BA_L_mps[2] + oDataWind['vMean_AE_L_mps'][2])\n", (8507, 8576), True, 'import matplotlib.pyplot as plt\n'), ((8585, 8595), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8593, 8595), True, 'import matplotlib.pyplot as plt\n'), ((8614, 8699), 'numpy.repeat', 'np.repeat', (["[oDataWind['vMean_AE_L_mps']]", "oDataWind['vB_L_mps'].shape[-1]"], {'axis': '(0)'}), "([oDataWind['vMean_AE_L_mps']], oDataWind['vB_L_mps'].shape[-1],\n axis=0)\n", (8623, 8699), True, 'import numpy as np\n'), ((5351, 5377), 'numpy.asarray', 'np.asarray', (['[-10, -10, -3]'], {}), '([-10, -10, -3])\n', (5361, 5377), True, 'import numpy as np\n'), ((5385, 5408), 'numpy.asarray', 'np.asarray', (['[10, 10, 3]'], {}), '([10, 10, 3])\n', (5395, 5408), True, 'import numpy as np\n'), ((6459, 6486), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6469, 6486), True, 'import numpy as np\n'), ((6545, 6572), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (6555, 6572), True, 'import numpy as np\n'), ((520, 536), 'os.path.dirname', 'dirname', (['argv[0]'], {}), '(argv[0])\n', (527, 536), False, 'from os.path import dirname, abspath, join\n'), ((578, 594), 'os.path.dirname', 'dirname', (['argv[0]'], {}), '(argv[0])\n', (585, 594), False, 'from os.path import dirname, abspath, join\n')]
|
#! /usr/bin/env python
########################################################################
# #
# Resums the non-global logarithms, needs ngl_resum.py #
# #
# If using ngl_resum, please cite #
# doi:10.1007/JHEP09(2020)029 #
# https://inspirehep.net/literature/1798660 #
# #
########################################################################
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = 'October 19, 2020'
import time
import numpy as np
import argparse
import ngl_resum as ngl
parser = argparse.ArgumentParser(description='This code shows how to '\
'use ngl_resum to shower a single dipole aligned with the '\
'z-axis, both legs with velocity b. The outside region is '\
'defined by the symmetric rapidity gap from -y to y. '\
'This code was used to produce some of the results in '\
'Section 4 of arXiv:2006.00014')
parser.add_argument('-b','--beta', help='beta of dipole legs', \
default=1, type=float)
parser.add_argument('-y','--ymax', help='ymax of outside region', \
default=0.8, type=float)
parser.add_argument('-n','--nsh', help='number of showerings', \
default=100, type=int)
parser.add_argument('-t','--tmax', help='maximal shower time tmax', \
default=0.1, type=float)
parser.add_argument('-m','--nbins', help='number of bins in hists', \
default=100, type=int)
parser.add_argument('-c','--cutoff', help='cutoff of shower', \
default=6, type=float)
parser.add_argument('-s','--seed', help='random seed', \
default=None, type=int)
args = vars(parser.parse_args())
nbins=int(args['nbins'])
tmax=float(args['tmax'])
nsh=int(args['nsh'])
showerCutoff=float(args['cutoff'])
b=float(args['beta'])
if args['seed'] is not None: np.random.seed(args['seed'])
dipole=[ngl.FourVector(1,0,0,b),ngl.FourVector(1,0,0,-b)]
ev=ngl.Event(feedDipole=dipole)
def _outside(self,vec):
rapRangeMax=float(args['ymax'])
rapRangeMin=0.0
return (abs(vec.rap)<rapRangeMax) and (abs(vec.rap)>=rapRangeMin)
outsideRegion=ngl.OutsideRegion()
outsideRegion.outside = _outside.__get__(outsideRegion,\
ngl.OutsideRegion)
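# Note: __get__ binds the plain function _outside to this OutsideRegion instance
# (Python's descriptor protocol), so it behaves like an overridden method.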
shower=ngl.Shower(ev,outsideRegion,nsh,nbins,tmax,showerCutoff)
timeStart = time.time()
shower.shower()
print('runtime=', time.time()-timeStart,' sec')
print('*************************************')
print('* t LL(t) dS(t) * ')
print('*************************************\n')
print('*** Binned Result ***\n\n')
for i in range(0,shower.resLL.nbins):
print( round(shower.resLL.centerBinValue[i],4),' ', \
shower.resLL.entries[i],' ', \
np.sqrt(shower.resLL.squaredError[i]))
print('\n\n' )
snlo=shower.ngl1Loop
snloError=np.sqrt((shower.ngl1LoopSq-shower.ngl1Loop**2)/(nsh))
print('snlo=',snlo)
print('snloError=',snloError)
print('\n')
snnlo=shower.ngl2Loop+0.5*snlo**2
#Error(snnlo)=|d(snnlo)/d(fullNGL2Loop)*Error(fullNGL2Loop)|
# + |d(snnlo)/d(snlo)*Error(snlo)|
snnloError=abs(np.sqrt((shower.ngl2LoopSq-shower.ngl2Loop**2)/(nsh)))\
+abs(snlo*snloError)
print('snnlo=',snnlo)
print('snnloError=',snnloError)
print('\n')
print('\n')
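# Example invocation (a sketch; the script name and flag values are illustrative):
#   python shower_dipole.py -b 1.0 -y 0.8 -n 1000 -t 0.1 -m 100 -c 6 -s 42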
|
[
"numpy.sqrt",
"argparse.ArgumentParser",
"ngl_resum.FourVector",
"ngl_resum.Shower",
"ngl_resum.Event",
"ngl_resum.OutsideRegion",
"numpy.random.seed",
"time.time"
] |
[((838, 1158), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This code shows how to use ngl_resum to shower a single dipole aligned with the z-axis, both legs with velocity b. The outside region is defined by the symmetric rapidity gap from -y to y. This code was used to produce some of the results in Section 4 of arXiv:2006.00014"""'}), "(description=\n 'This code shows how to use ngl_resum to shower a single dipole aligned with the z-axis, both legs with velocity b. The outside region is defined by the symmetric rapidity gap from -y to y. This code was used to produce some of the results in Section 4 of arXiv:2006.00014'\n )\n", (861, 1158), False, 'import argparse\n'), ((2180, 2208), 'ngl_resum.Event', 'ngl.Event', ([], {'feedDipole': 'dipole'}), '(feedDipole=dipole)\n', (2189, 2208), True, 'import ngl_resum as ngl\n'), ((2375, 2394), 'ngl_resum.OutsideRegion', 'ngl.OutsideRegion', ([], {}), '()\n', (2392, 2394), True, 'import ngl_resum as ngl\n'), ((2507, 2568), 'ngl_resum.Shower', 'ngl.Shower', (['ev', 'outsideRegion', 'nsh', 'nbins', 'tmax', 'showerCutoff'], {}), '(ev, outsideRegion, nsh, nbins, tmax, showerCutoff)\n', (2517, 2568), True, 'import ngl_resum as ngl\n'), ((2577, 2588), 'time.time', 'time.time', ([], {}), '()\n', (2586, 2588), False, 'import time\n'), ((3090, 3147), 'numpy.sqrt', 'np.sqrt', (['((shower.ngl1LoopSq - shower.ngl1Loop ** 2) / nsh)'], {}), '((shower.ngl1LoopSq - shower.ngl1Loop ** 2) / nsh)\n', (3097, 3147), True, 'import numpy as np\n'), ((2085, 2113), 'numpy.random.seed', 'np.random.seed', (["args['seed']"], {}), "(args['seed'])\n", (2099, 2113), True, 'import numpy as np\n'), ((2126, 2152), 'ngl_resum.FourVector', 'ngl.FourVector', (['(1)', '(0)', '(0)', 'b'], {}), '(1, 0, 0, b)\n', (2140, 2152), True, 'import ngl_resum as ngl\n'), ((2150, 2177), 'ngl_resum.FourVector', 'ngl.FourVector', (['(1)', '(0)', '(0)', '(-b)'], {}), '(1, 0, 0, -b)\n', (2164, 2177), True, 'import ngl_resum as ngl\n'), ((2630, 2641), 'time.time', 'time.time', ([], {}), '()\n', (2639, 2641), False, 'import time\n'), ((3001, 3038), 'numpy.sqrt', 'np.sqrt', (['shower.resLL.squaredError[i]'], {}), '(shower.resLL.squaredError[i])\n', (3008, 3038), True, 'import numpy as np\n'), ((3367, 3424), 'numpy.sqrt', 'np.sqrt', (['((shower.ngl2LoopSq - shower.ngl2Loop ** 2) / nsh)'], {}), '((shower.ngl2LoopSq - shower.ngl2Loop ** 2) / nsh)\n', (3374, 3424), True, 'import numpy as np\n')]
|
import time
import picamera
import numpy as np
import cv2
with picamera.PiCamera() as camera:
camera.resolution = (3280, 2464)
    camera.start_preview()
    time.sleep(2)
camera.capture('image.data', 'yuv')
##################################################
fd = open('image.data', 'rb')
f = np.fromfile(fd, dtype=np.uint8, count=3280*2464)  # read only the Y (luma) plane of the YUV420 capture
# numpy reshape takes (rows, cols) = (height, width); note that some firmware
# pads rows to a multiple of 32 pixels, which would require adjusting the count
im = f.reshape((2464, 3280))
fd.close()
cv2.imwrite('rawconverted.jpg', im)
|
[
"cv2.imwrite",
"numpy.fromfile",
"picamera.PiCamera",
"time.sleep"
] |
[((301, 351), 'numpy.fromfile', 'np.fromfile', (['fd'], {'dtype': 'np.uint8', 'count': '(3280 * 2464)'}), '(fd, dtype=np.uint8, count=3280 * 2464)\n', (312, 351), True, 'import numpy as np\n'), ((390, 425), 'cv2.imwrite', 'cv2.imwrite', (['"""rawconverted.jpg"""', 'im'], {}), "('rawconverted.jpg', im)\n", (401, 425), False, 'import cv2\n'), ((63, 82), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (80, 82), False, 'import picamera\n'), ((163, 176), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (173, 176), False, 'import time\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import scipy.io.wavfile as wav
import random
import tables
import pickle
def feed_to_hdf5(feature_vector, subject_num, utterance_train_storage, utterance_test_storage, label_train_storage,
label_test_storage):
"""
    :param feature_vector: The feature vector for each sound file, of shape (num_frames, num_features_per_frame, num_channels).
    :param subject_num: The subject class in 'int' format.
    :param utterance_train_storage: The HDF5 object for storing the training utterance feature maps.
    :param utterance_test_storage: The HDF5 object for storing the test utterance feature maps.
    :param label_train_storage: The HDF5 object for storing train labels.
    :param label_test_storage: The HDF5 object for storing test labels.
:return: Each utterance will be stored in HDF5 file.
"""
num_utterances_per_speaker = 20
stride_step = 20
utterance_length = 80
num_frames = feature_vector.shape[0]
num_samples = int(np.floor((num_frames - utterance_length - num_utterances_per_speaker) / float(stride_step))) + 1
# Half of the samples will be fed for training.
range_training = range(int(4 * num_samples / 5))
range_training = range(1)
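    # NOTE: the assignment above overrides the 4/5 split and keeps only the
    # first utterance window for training.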
for sample_index in range_training:
# initial index of each utterance
init = sample_index * stride_step
utterance = np.zeros((1, 80, 40, 20), dtype=np.float32)
for utterance_speaker in range(num_utterances_per_speaker):
utterance[:, :, :, utterance_speaker] = feature_vector[None,
init + utterance_speaker:init + utterance_speaker + utterance_length,
:, 0]
utterance_train_storage.append(utterance)
label_train_storage.append((np.array([subject_num + 1], dtype=np.int32)))
# The second half of each sound file will be used for testing on the same subject.
range_testing = range(int(4 * num_samples / 5), int(num_samples))
range_testing = range(1,2)
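    # NOTE: as above, this override keeps only the second utterance window for testing.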
for sample_index in range_testing:
# initial index of each utterance
init = sample_index * stride_step
utterance = np.zeros((1, 80, 40, 20), dtype=np.float32)
for utterance_speaker in range(num_utterances_per_speaker):
utterance[:, :, :, utterance_speaker] = feature_vector[None,
init + utterance_speaker:init + utterance_speaker + utterance_length,
:, 0]
utterance_test_storage.append(utterance)
label_test_storage.append((np.array([subject_num + 1], dtype=np.int32)))
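# A minimal sketch of how the four storage objects expected above could be
# created with PyTables; the file name and node names are assumptions, not
# part of the original pipeline:
def create_hdf5_storages(path='development.hdf5'):
    hdf5_file = tables.open_file(path, mode='w')
    # extendable arrays, grown along the first axis by append() calls above
    utterance_train = hdf5_file.create_earray(hdf5_file.root, 'utterance_train',
                                              tables.Float32Atom(), shape=(0, 80, 40, 20))
    utterance_test = hdf5_file.create_earray(hdf5_file.root, 'utterance_test',
                                             tables.Float32Atom(), shape=(0, 80, 40, 20))
    label_train = hdf5_file.create_earray(hdf5_file.root, 'label_train',
                                          tables.Int32Atom(), shape=(0,))
    label_test = hdf5_file.create_earray(hdf5_file.root, 'label_test',
                                         tables.Int32Atom(), shape=(0,))
    return hdf5_file, utterance_train, utterance_test, label_train, label_test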
|
[
"numpy.array",
"numpy.zeros"
] |
[((1387, 1430), 'numpy.zeros', 'np.zeros', (['(1, 80, 40, 20)'], {'dtype': 'np.float32'}), '((1, 80, 40, 20), dtype=np.float32)\n', (1395, 1430), True, 'import numpy as np\n'), ((2216, 2259), 'numpy.zeros', 'np.zeros', (['(1, 80, 40, 20)'], {'dtype': 'np.float32'}), '((1, 80, 40, 20), dtype=np.float32)\n', (2224, 2259), True, 'import numpy as np\n'), ((1838, 1881), 'numpy.array', 'np.array', (['[subject_num + 1]'], {'dtype': 'np.int32'}), '([subject_num + 1], dtype=np.int32)\n', (1846, 1881), True, 'import numpy as np\n'), ((2665, 2708), 'numpy.array', 'np.array', (['[subject_num + 1]'], {'dtype': 'np.int32'}), '([subject_num + 1], dtype=np.int32)\n', (2673, 2708), True, 'import numpy as np\n')]
|
import numpy as np
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import Cividis256 as Pallete
from bokeh.plotting import Figure, figure
from bokeh.transform import factor_cmap
def draw_interactive_scatter_plot(
texts: np.ndarray,
xs: np.ndarray,
ys: np.ndarray,
values: np.ndarray,
labels: np.ndarray,
text_column: str,
label_column: str,
) -> Figure:
    # Smooth the values used for coloring by taking the entropy = log10(perplexity) and multiplying it by 10000
values = ((np.log10(values)) * 10000).round().astype(int)
# Normalize values to range between 0-255, to assign a color for each value
max_value = values.max()
min_value = values.min()
    if max_value - min_value == 0:
        values_color = np.ones(len(values), dtype=int)  # integer indices are needed for the palette lookup below
else:
values_color = (
((values - min_value) / (max_value - min_value) * 255).round().astype(int)
)
values_color_sorted = sorted(values_color)
values_list = values.astype(str).tolist()
values_sorted = sorted(values_list)
labels_list = labels.astype(str).tolist()
source = ColumnDataSource(
data=dict(x=xs, y=ys, text=texts, label=values_list, original_label=labels_list)
)
hover = HoverTool(
tooltips=[(text_column, "@text{safe}"), (label_column, "@original_label")]
)
p = figure(plot_width=800, plot_height=800, tools=[hover])
p.circle(
"x",
"y",
size=10,
source=source,
fill_color=factor_cmap(
"label",
palette=[Pallete[id_] for id_ in values_color_sorted],
factors=values_sorted,
),
)
p.axis.visible = False
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.toolbar.logo = None
return p
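# A minimal usage sketch (the input arrays are illustrative, not from this module):
#   from bokeh.io import show
#   p = draw_interactive_scatter_plot(texts, xs, ys, perplexities, labels, "text", "label")
#   show(p)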
|
[
"bokeh.transform.factor_cmap",
"numpy.log10",
"bokeh.plotting.figure",
"bokeh.models.HoverTool"
] |
[((1245, 1334), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[(text_column, '@text{safe}'), (label_column, '@original_label')]"}), "(tooltips=[(text_column, '@text{safe}'), (label_column,\n '@original_label')])\n", (1254, 1334), False, 'from bokeh.models import ColumnDataSource, HoverTool\n'), ((1353, 1407), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(800)', 'plot_height': '(800)', 'tools': '[hover]'}), '(plot_width=800, plot_height=800, tools=[hover])\n', (1359, 1407), False, 'from bokeh.plotting import Figure, figure\n'), ((1507, 1609), 'bokeh.transform.factor_cmap', 'factor_cmap', (['"""label"""'], {'palette': '[Pallete[id_] for id_ in values_color_sorted]', 'factors': 'values_sorted'}), "('label', palette=[Pallete[id_] for id_ in values_color_sorted],\n factors=values_sorted)\n", (1518, 1609), False, 'from bokeh.transform import factor_cmap\n'), ((530, 546), 'numpy.log10', 'np.log10', (['values'], {}), '(values)\n', (538, 546), True, 'import numpy as np\n')]
|
def plot_power_spectra(kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf, ax=None):
'''
Plot density and velocity power spectra and compare with CAMB
'''
import numpy as np
import matplotlib.pylab as plt
from seren3.cosmology.transfer_function import TF
if ax is None:
ax = plt.gca()
k, pkb = tf.TF_Pk(TF.B)
k, pkc = tf.TF_Pk(TF.C)
ax.loglog(kbins, deltac_2, label="CDM", color="royalblue", linewidth=2.)
ax.loglog(kbins, deltac_2_nodeconv, color="navy", linestyle='--')
ax.loglog(kbins, deltab_2, label="Baryons", color="darkorange", linewidth=2.)
# CAMB
deltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.)
deltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.)
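    # The two lines above form the dimensionless power spectrum: Delta^2(k) = k^3 P(k) / (2 pi^2)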
# direc = '/lustre/scratch/astro/ds381/simulations/baryon_drift/100Mpc/z200/zoom/lvl14/'
# fname = "%s/input_powerspec_baryon.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# k = ps_data[0]
# P_bar = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# fname = "%s/input_powerspec_cdm.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# P_cdm = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# deltac_2_CAMB = P_cdm * (k ** 3.)
# deltab_2_CAMB = P_bar * (k ** 3.)
ax.loglog(k, deltac_2_CAMB, color="royalblue", linestyle=":")
ax.loglog(k, deltab_2_CAMB, color="darkorange", linestyle=":")
ax.set_xlabel(r"k [Mpc$^{-1}$ h a$^{-1}$]", fontsize=20)
# ax.set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
ax.legend(loc="upper left", frameon=False, prop={"size" : 18})
# plt.xlim(0.001, 100)
ax.set_xlim(0.01, 1e4)
ax.set_ylim(1e-12, 2)
def plot_velocity_power_spectra(kbins, vdeltab_2, vdeltac_2, tf, ax=None):
'''
Plot density and velocity power spectra and compare with CAMB
'''
import numpy as np
import matplotlib.pylab as plt
from seren3.cosmology import linear_velocity_ps
from seren3.cosmology.transfer_function import TF
if ax is None:
ax = plt.gca()
k, pkb = tf.TF_Pk(TF.B)
k, pkc = tf.TF_Pk(TF.C)
ix = np.where(~np.isnan(vdeltab_2))
ax.loglog(kbins[ix][3:], vdeltac_2[ix][3:], label="CDM", color="royalblue", linewidth=2.)
ax.loglog(kbins[ix][3:], vdeltab_2[ix][3:], label="Baryons", color="darkorange", linewidth=2.)
# CAMB
deltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.)
deltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.)
cosmo = tf.cosmo
vdeltab_2_CAMB = linear_velocity_ps(k, np.sqrt(deltab_2_CAMB), **cosmo)**2
vdeltac_2_CAMB = linear_velocity_ps(k, np.sqrt(deltac_2_CAMB), **cosmo)**2
vnorm = vdeltab_2_CAMB/deltab_2_CAMB
k, pkb = tf.TF_Pk(TF.VBARYON)
k, pkc = tf.TF_Pk(TF.VCDM)
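    # NOTE: the 0.702 factor below is presumably the reduced Hubble parameter h
    # (an assumption from context; it is not documented in the original code).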
vdeltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.) * vnorm * 0.702
vdeltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.) * vnorm * 0.702
# direc = '/lustre/scratch/astro/ds381/simulations/baryon_drift/100Mpc/z200/zoom/lvl14/'
# fname = "%s/input_powerspec_baryon.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# k = ps_data[0]
# P_bar = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# fname = "%s/input_powerspec_cdm.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# P_cdm = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# deltac_2_CAMB = P_cdm * (k ** 3.)
# deltab_2_CAMB = P_bar * (k ** 3.)
ax.loglog(k, vdeltac_2_CAMB, color="royalblue", linestyle=":")
ax.loglog(k, vdeltab_2_CAMB, color="darkorange", linestyle=":")
ax.set_xlabel(r"k [Mpc$^{-1}$ h a$^{-1}$]", fontsize=20)
ax.set_ylabel(r"$\mathcal{P}_{v}(k)$ [km s$^{-1}$]", fontsize=20)
ax.legend(loc="lower left", frameon=False, prop={"size" : 18})
# plt.xlim(0.001, 100)
ax.set_xlim(0.01, 1e4)
# ax.set_ylim(1e-12, 2)
def plot_velocity(data_9, data_14):
import matplotlib.pylab as plt
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
for ax, data in zip(axs.flatten(), [data_9, data_14]):
kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf = data
kbins = kbins[3:]
deltab_2 = deltab_2[3:]
deltac_2 = deltac_2[3:]
deltac_2_nodeconv = deltac_2_nodeconv[3:]
plot_velocity_power_spectra(kbins, deltab_2, deltac_2, tf, ax=ax)
# axs[0].set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
axs[0].set_ylabel(r"$\mathcal{P}_{v}(k)$ [km s$^{-1}$]", fontsize=20)
fig.tight_layout()
plt.show()
def plot(data_9, data_14):
import matplotlib.pylab as plt
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
for ax, data in zip(axs.flatten(), [data_9, data_14]):
kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf = data
        kbins = kbins[3:]  # no ix mask exists in this function; indexing with it raised a NameError
deltab_2 = deltab_2[3:]
deltac_2 = deltac_2[3:]
deltac_2_nodeconv = deltac_2_nodeconv[3:]
plot_power_spectra(kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf, ax=ax)
# kbins, vdeltab_2, vdeltac_2, tf = data
# plot_velocity_power_spectra(kbins, deltab_2, deltac_2, tf, ax=ax)
axs[0].set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
# axs[0].set_ylabel(r"$\mathcal{P}_{v}(k)$ [km s$^{-1}$]", fontsize=20)
fig.tight_layout()
plt.show()
def plot_power_spectra_bias(kbins_bias, deltab_2_bias, deltac_2_bias, kbins, deltab_2, deltac_2, tf, ax=None):
'''
Plot density and velocity power spectra and compare with CAMB
'''
import numpy as np
import matplotlib.pylab as plt
from seren3.cosmology.transfer_function import TF
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(8,6))
gs = gridspec.GridSpec(5,4,wspace=0.,hspace=0.)
ax = fig.add_subplot(gs[2:,:])
ax2 = fig.add_subplot(gs[:2,:], sharex=ax)
k, pkb = tf.TF_Pk(TF.B)
k, pkc = tf.TF_Pk(TF.C)
ix = np.where(~np.isnan(deltab_2_bias))
ax.loglog(kbins_bias[ix], deltac_2_bias[ix], label="CDM", color="royalblue", linewidth=2.)
ax.loglog(kbins_bias[ix], deltab_2_bias[ix], label="Baryons", color="darkorange", linewidth=2.)
ix = np.where(~np.isnan(deltab_2))
ax.loglog(kbins[ix], deltac_2[ix], color="royalblue", linewidth=2., linestyle="--")
ax.loglog(kbins[ix], deltab_2[ix], color="darkorange", linewidth=2., linestyle="--")
ax.loglog([0.0001, 0.0001], [100, 100], color="k", linewidth=2., linestyle="-", label="Biased")
ax.loglog([0.0001, 0.0001], [100, 100], color="k", linewidth=2., linestyle="--", label="Unbiased")
ax2.plot(kbins_bias[ix], deltac_2_bias[ix]/deltac_2[ix], color="royalblue", linewidth=2.)
ax2.plot(kbins_bias[ix], deltab_2_bias[ix]/deltab_2[ix], color="darkorange", linewidth=2.)
ax2.plot(np.linspace(0.1, 3000), np.ones(50), linestyle=":", color="k", label="Unity")
# CAMB
deltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.)
deltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.)
# direc = '/lustre/scratch/astro/ds381/simulations/baryon_drift/100Mpc/z200/zoom/lvl14/'
# fname = "%s/input_powerspec_baryon.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# k = ps_data[0]
# P_bar = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# fname = "%s/input_powerspec_cdm.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# P_cdm = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# deltac_2_CAMB = P_cdm * (k ** 3.)
# deltab_2_CAMB = P_bar * (k ** 3.)
ax.loglog(k, deltac_2_CAMB, color="royalblue", linestyle=":", alpha=0.5)
ax.loglog(k, deltab_2_CAMB, color="darkorange", linestyle=":", alpha=0.5)
ax.set_xlabel(r"k [Mpc$^{-1}$ h a$^{-1}$]", fontsize=20)
ax.set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
ax.legend(loc="lower left", ncol=2, frameon=False, prop={"size" : 18})
# plt.xlim(0.001, 100)
ax.set_xlim(1, 2000)
ax.set_ylim(1e-8, 2)
ax2.set_ylim(-0.2, 1.2)
ax2.set_ylabel(r"$b(k,v_{bc})$", fontsize=20)
ax2.set_title(r"$|v_{bc,\mathrm{rec}}|$ = 19.06 km s$^{-1}$", fontsize=20)
ax2.legend(loc="lower left", frameon=False, prop={"size" : 20})
plt.setp(ax2.get_xticklabels(), visible=False)
|
[
"matplotlib.pylab.gca",
"matplotlib.pylab.subplots",
"numpy.sqrt",
"numpy.ones",
"matplotlib.pylab.figure",
"matplotlib.gridspec.GridSpec",
"numpy.linspace",
"numpy.isnan",
"matplotlib.pylab.show"
] |
[((3864, 3911), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(12, 6)'}), '(nrows=1, ncols=2, figsize=(12, 6))\n', (3876, 3911), True, 'import matplotlib.pylab as plt\n'), ((4410, 4420), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (4418, 4420), True, 'import matplotlib.pylab as plt\n'), ((4501, 4548), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(12, 6)'}), '(nrows=1, ncols=2, figsize=(12, 6))\n', (4513, 4548), True, 'import matplotlib.pylab as plt\n'), ((5186, 5196), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (5194, 5196), True, 'import matplotlib.pylab as plt\n'), ((5559, 5585), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (5569, 5585), True, 'import matplotlib.pylab as plt\n'), ((5594, 5641), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(5)', '(4)'], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(5, 4, wspace=0.0, hspace=0.0)\n', (5611, 5641), True, 'import matplotlib.gridspec as gridspec\n'), ((310, 319), 'matplotlib.pylab.gca', 'plt.gca', ([], {}), '()\n', (317, 319), True, 'import matplotlib.pylab as plt\n'), ((1990, 1999), 'matplotlib.pylab.gca', 'plt.gca', ([], {}), '()\n', (1997, 1999), True, 'import matplotlib.pylab as plt\n'), ((6643, 6665), 'numpy.linspace', 'np.linspace', (['(0.1)', '(3000)'], {}), '(0.1, 3000)\n', (6654, 6665), True, 'import numpy as np\n'), ((6667, 6678), 'numpy.ones', 'np.ones', (['(50)'], {}), '(50)\n', (6674, 6678), True, 'import numpy as np\n'), ((2077, 2096), 'numpy.isnan', 'np.isnan', (['vdeltab_2'], {}), '(vdeltab_2)\n', (2085, 2096), True, 'import numpy as np\n'), ((2483, 2505), 'numpy.sqrt', 'np.sqrt', (['deltab_2_CAMB'], {}), '(deltab_2_CAMB)\n', (2490, 2505), True, 'import numpy as np\n'), ((2562, 2584), 'numpy.sqrt', 'np.sqrt', (['deltac_2_CAMB'], {}), '(deltac_2_CAMB)\n', (2569, 2584), True, 'import numpy as np\n'), ((5797, 5820), 'numpy.isnan', 'np.isnan', (['deltab_2_bias'], {}), '(deltab_2_bias)\n', (5805, 5820), True, 'import numpy as np\n'), ((6038, 6056), 'numpy.isnan', 'np.isnan', (['deltab_2'], {}), '(deltab_2)\n', (6046, 6056), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from unittest.mock import patch
import numpy as np
from ...common import testing
from . import core
@testing.parametrized(
bragg=("bragg", [2.93, 2.18, 2.35, 2.12, 31.53, 15.98, 226.69, 193.11]),
morpho=("morpho", [280.36, 52.96, 208.16, 72.69, 89.92, 60.37, 226.69, 193.11]),
chirped=("chirped", [280.36, 52.96, 104.08, 36.34, 31.53, 15.98, 226.69, 193.11]),
)
def test_photonics_transforms(pb: str, expected: List[float]) -> None:
np.random.seed(24)
with patch("shutil.which", return_value="here"):
func = core.Photonics(pb, 16) # should be 8... but it is actually not allowed. Nevermind here, HACK IT NEXT LINE
func.instrumentation.args[0]._dimension = 8 # type: ignore
x = np.random.normal(0, 1, size=8)
(output,), _ = func.instrumentation.data_to_arguments(x)
np.testing.assert_almost_equal(output, expected, decimal=2)
np.random.seed(24)
x2 = np.random.normal(0, 1, size=8)
np.testing.assert_almost_equal(x, x2, decimal=2, err_msg="x was modified in the process")
def test_morpho_transform_constraints() -> None:
with patch("shutil.which", return_value="here"):
func = core.Photonics("morpho", 60)
x = np.random.normal(0, 5, size=60) # std 5 to play with boundaries
(output,), _ = func.instrumentation.data_to_arguments(x)
assert np.all(output >= 0)
q = len(x) // 4
assert np.all(output[:q] <= 300)
assert np.all(output[q: 3 * q] <= 600)
assert np.all(output[2 * q: 3 * q] >= 30)
assert np.all(output[3 * q:] <= 300)
def test_photonics() -> None:
with patch("shutil.which", return_value="here"):
photo = core.Photonics("bragg", 16)
with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n12\n"):
with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n12\n"):
output = photo(np.zeros(16))
np.testing.assert_equal(output, 12)
# check error
with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n"):
np.testing.assert_raises(RuntimeError, photo, np.zeros(16).tolist())
np.testing.assert_raises(AssertionError, photo, np.zeros(12).tolist())
|
[
"numpy.random.normal",
"numpy.testing.assert_equal",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.random.seed",
"numpy.all",
"unittest.mock.patch"
] |
[((674, 692), 'numpy.random.seed', 'np.random.seed', (['(24)'], {}), '(24)\n', (688, 692), True, 'import numpy as np\n'), ((940, 970), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(8)'}), '(0, 1, size=8)\n', (956, 970), True, 'import numpy as np\n'), ((1036, 1095), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output', 'expected'], {'decimal': '(2)'}), '(output, expected, decimal=2)\n', (1066, 1095), True, 'import numpy as np\n'), ((1100, 1118), 'numpy.random.seed', 'np.random.seed', (['(24)'], {}), '(24)\n', (1114, 1118), True, 'import numpy as np\n'), ((1128, 1158), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(8)'}), '(0, 1, size=8)\n', (1144, 1158), True, 'import numpy as np\n'), ((1163, 1257), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x', 'x2'], {'decimal': '(2)', 'err_msg': '"""x was modified in the process"""'}), "(x, x2, decimal=2, err_msg=\n 'x was modified in the process')\n", (1193, 1257), True, 'import numpy as np\n'), ((1409, 1440), 'numpy.random.normal', 'np.random.normal', (['(0)', '(5)'], {'size': '(60)'}), '(0, 5, size=60)\n', (1425, 1440), True, 'import numpy as np\n'), ((1546, 1565), 'numpy.all', 'np.all', (['(output >= 0)'], {}), '(output >= 0)\n', (1552, 1565), True, 'import numpy as np\n'), ((1597, 1622), 'numpy.all', 'np.all', (['(output[:q] <= 300)'], {}), '(output[:q] <= 300)\n', (1603, 1622), True, 'import numpy as np\n'), ((1634, 1664), 'numpy.all', 'np.all', (['(output[q:3 * q] <= 600)'], {}), '(output[q:3 * q] <= 600)\n', (1640, 1664), True, 'import numpy as np\n'), ((1677, 1710), 'numpy.all', 'np.all', (['(output[2 * q:3 * q] >= 30)'], {}), '(output[2 * q:3 * q] >= 30)\n', (1683, 1710), True, 'import numpy as np\n'), ((1723, 1752), 'numpy.all', 'np.all', (['(output[3 * q:] <= 300)'], {}), '(output[3 * q:] <= 300)\n', (1729, 1752), True, 'import numpy as np\n'), ((2139, 2174), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['output', '(12)'], {}), '(output, 12)\n', (2162, 2174), True, 'import numpy as np\n'), ((702, 744), 'unittest.mock.patch', 'patch', (['"""shutil.which"""'], {'return_value': '"""here"""'}), "('shutil.which', return_value='here')\n", (707, 744), False, 'from unittest.mock import patch\n'), ((1313, 1355), 'unittest.mock.patch', 'patch', (['"""shutil.which"""'], {'return_value': '"""here"""'}), "('shutil.which', return_value='here')\n", (1318, 1355), False, 'from unittest.mock import patch\n'), ((1794, 1836), 'unittest.mock.patch', 'patch', (['"""shutil.which"""'], {'return_value': '"""here"""'}), "('shutil.which', return_value='here')\n", (1799, 1836), False, 'from unittest.mock import patch\n'), ((1891, 1988), 'unittest.mock.patch', 'patch', (['"""nevergrad.instrumentation.utils.CommandFunction.__call__"""'], {'return_value': '"""line1\n12\n"""'}), "('nevergrad.instrumentation.utils.CommandFunction.__call__',\n return_value='line1\\n12\\n')\n", (1896, 1988), False, 'from unittest.mock import patch\n'), ((2202, 2295), 'unittest.mock.patch', 'patch', (['"""nevergrad.instrumentation.utils.CommandFunction.__call__"""'], {'return_value': '"""line1\n"""'}), "('nevergrad.instrumentation.utils.CommandFunction.__call__',\n return_value='line1\\n')\n", (2207, 2295), False, 'from unittest.mock import patch\n'), ((1999, 2096), 'unittest.mock.patch', 'patch', (['"""nevergrad.instrumentation.utils.CommandFunction.__call__"""'], {'return_value': '"""line1\n12\n"""'}), "('nevergrad.instrumentation.utils.CommandFunction.__call__',\n return_value='line1\\n12\\n')\n", (2004, 2096), False, 'from unittest.mock import patch\n'), ((2121, 2133), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (2129, 2133), True, 'import numpy as np\n'), ((2422, 2434), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (2430, 2434), True, 'import numpy as np\n'), ((2347, 2359), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (2355, 2359), True, 'import numpy as np\n')]
|
#!env python
import collections
import queue
import logging
import enum
import functools
import json
import time
import os
import gzip
import shutil
import random # ONLY USED FOR RANDOM DELAY AT BEGINNING.
import numpy as np
import argparse
import sys
sys.path.append("../src-testbed")
import events
import common
import placement_controller
class LocalController(object):
def __init__(self, simulation):
self.simulation = simulation
def requestInference(self, curr_time, request):
new_events = []
if len(self.simulation.model_placements.getWorkersFromModel(request.model)) > 0:
# There is a placement of the model already
worker = self.selectWorker(self.simulation.model_placements.getWorkersFromModel(request.model))
new_events.extend(worker.assignRequest(curr_time, request, model_miss=False))
elif self.simulation.flags.do_reactive:
new_events.extend(self.simulation.placement_controller.requestPlacement(curr_time, request))
else:
logging.error("No available workers found")
request.markRejected()
new_events.append( (curr_time, events.RequestCompletionEvent(self.simulation, request)) )
return new_events
def selectWorker(self, possible_workers):
return self.simulation.rng.choice(possible_workers)
class PlacementController(object):
def __init__(self, simulation, flags):
self.simulation = simulation
self.flags = flags
self.model_placements = self.simulation.model_placements
self.placement_controller = placement_controller.PlacementController(flags)
self.placement_controller.model_placements = self.model_placements # Overwrite with our model_placements
def requestPlacement(self, curr_time, request):
new_events = []
model_info = {
model: {
"open_requests" : (self.simulation.metrics.per_model_requests[model] - self.simulation.metrics.per_model_responses[model]),
"last_used" : model.last_used,
"requests_submitted": self.simulation.metrics.per_model_requests[model],
"placement_count" : len(self.model_placements.getWorkersFromModel(model)),
"load_latency" : model.getLoadLatency(),
"exec_latency" : model.getExecLatency(),
"loaded_size" : model.getSize(),
}
for model in self.model_placements.getModels()
}
self.placement_controller.setModelInfo(model_info)
self.placement_controller.requestToAddModels([request.model], request.id)
# TODO: Figure out the proper logic on these. Specifically, this should be negotiated through the local controller
while not self.placement_controller.model_placements.removals.empty():
# First we schedule all removals
worker, model = self.model_placements.removals.get()
new_events.extend(worker.removeModel(curr_time, model))
self.simulation.mark_as_saturated()
while not self.placement_controller.model_placements.additions.empty():
# Next we schedule all additions
worker, model = self.model_placements.additions.get()
new_events.extend(worker.addModel(curr_time, model))
# Next we schedule the model on the chosen worker (or see what worker can now take it and assign it)
if len(self.simulation.model_placements.getWorkersFromModel(request.model)) > 0:
worker = self.simulation.local_controller.selectWorker(self.simulation.model_placements.getWorkersFromModel(request.model))
new_events.extend(worker.assignRequest(curr_time, request, model_miss=True))
else:
request.markRejected()
new_events.append( (curr_time, events.RequestCompletionEvent(self.simulation, request)) )
return new_events
@functools.total_ordering
class Worker(object):
class QueueItem(object):
def __init__(self, item, latency):
self.item = item
self.latency = latency
def getLatency(self):
return self.latency
def __init__(self, simulation, worker_name, *args, **kwargs):
self.simulation = simulation
self.name = worker_name
self.executing = False
self.queue = queue.Queue()
self.models_loaded = set()
def __str__(self):
return self.name
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.name == other.name
else:
return self.name == other
def assignRequest(self, curr_time, request, model_miss):
new_events = []
request.assignToWorker(curr_time, model_miss)
self.queue.put(self.__class__.QueueItem(request, request.model.getExecLatency()))
if not self.executing:
new_events.extend(self.startExecuting(curr_time))
return new_events
def removeModel(self, curr_time, model):
new_events = []
event_to_add = events.ModelRemovalEvent(self.simulation, self, model)
self.queue.put(self.QueueItem(event_to_add, model.getUnloadLatency()))
if not self.executing:
new_events.extend(self.startExecuting(curr_time))
return new_events
def addModel(self, curr_time, model):
new_events = []
self.queue.put(self.QueueItem(events.ModelAdditionEvent(self.simulation, self, model), model.getLoadLatency()))
if not self.executing:
new_events.extend(self.startExecuting(curr_time))
return new_events
def _removeModel(self, curr_time, model):
new_events = []
print(f"({curr_time:0.3f}) Removing {model} from {self}")
self.models_loaded.remove(model)
return new_events
def _addModel(self, curr_time, model):
new_events = []
print(f"({curr_time:0.3f}) Adding {model} to {self}")
self.models_loaded.add(model)
return new_events
def startExecuting(self, curr_time):
new_events = []
if self.executing:
return new_events
if self.queue.empty():
return new_events
self.executing = True
next_queue_item = self.queue.get()
if isinstance(next_queue_item.item, self.simulation.Request):
new_events.extend(next_queue_item.item.model.executeRequest(curr_time))
next_queue_item.item.startExecution(curr_time)
completion_event = events.WorkerQueueCompletionEvent(self.simulation, self, next_queue_item)
new_events.append((curr_time + next_queue_item.getLatency(), completion_event))
return new_events
@functools.total_ordering
class Model(common.ModelPlacements.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def executeRequest(self, curr_time):
new_events = []
self.last_used = curr_time
return new_events
class Simulation(object):
class Metrics(object):
def __init__(self, simulation):
self.simulation = simulation
self.general_metrics = {
"requests_in" : 0,
"requests_out" : 0,
}
self.per_model_requests = collections.defaultdict(int)
self.per_model_responses = collections.defaultdict(int)
self.per_model_latency = collections.defaultdict(list)
def markRequestIn(self, model_name):
self.general_metrics["requests_in"] += 1
self.per_model_requests[model_name] += 1
def markRequestOut(self, model_name, latency):
self.general_metrics["requests_out"] += 1
self.per_model_responses[model_name] += 1
self.per_model_latency[model_name].append(latency)
def reportMetrics(self):
print(f"Requests: {self.general_metrics['requests_out']} / {self.general_metrics['requests_in']} completed")
for model in sorted(self.per_model_latency.keys()):
print(f"{model} : {np.average(self.per_model_latency[model]):0.3f} : {np.average(self.per_model_latency[model]) / self.simulation.models_by_name[model].load_latency:%}")
class Request(object):
class Status(enum.Enum):
INIT = 1
ACCEPTED = 2
REJECTED = 3
EXECUTING = 4
COMPLETED = 5
def __init__(self, simulation, request_id, arrival_time, model_requested, *args, **kwargs):
self.simulation = simulation
self.status = self.__class__.Status.INIT
self.id = int(request_id)
self.model_requested = model_requested
self.model = self.simulation.models_by_name[model_requested]
self.arrival_time = float(arrival_time)
self.assignment_time = float('inf')
self.execution_time = float('inf')
self.completion_time = float('inf')
self.model_miss = False
self.is_saturated = False
def __str__(self):
return f"R({self.id}, {self.arrival_time}, {self.model_requested}, {self.status})"
def markRejected(self):
self.status = self.__class__.Status.REJECTED
def markComplete(self, curr_time):
self.completion_time = curr_time
self.status = self.__class__.Status.COMPLETED
self.simulation.metrics.markRequestOut(self.model_requested, (curr_time-self.arrival_time))
def assignToWorker(self, curr_time, model_miss):
self.assignment_time = curr_time
self.model_miss = model_miss
def startExecution(self, curr_time):
self.execution_time = curr_time
def getResponse(self):
response_dict = {
"request_id" : self.id,
"model" : self.model_requested,
"response" : f"{self.status}",
"placement_delay" : self.assignment_time - self.arrival_time,
"queue_delay" : self.execution_time - self.assignment_time,
"execution_delay" : self.completion_time - self.execution_time,
"overall_latency" : self.completion_time - self.arrival_time,
"model_miss" : self.model_miss,
"saturated" : self.is_saturated,
}
return json.dumps(response_dict)
@classmethod
def fromLine(cls, simulation, line):
return cls(simulation, *(line.split()))
def __init__(self, flags, models_to_be_requested, rng_seed=None, *args, **kwargs):
self.flags = flags
self.rng = np.random.default_rng(rng_seed)
self.results_fid = gzip.open(os.path.join(flags.results_dir, f"{flags.run_identifier}.log.gz"), 'wt')
self.cache_size = flags.max_concurrent_models
self.is_saturated = False
model_descriptions = common.getModelInfo(json_file=flags.model_description_file)
time.sleep(10*random.random())
if not os.path.exists(os.path.join(flags.results_dir, os.path.basename(flags.model_description_file))):
shutil.copy(flags.model_description_file, flags.results_dir)
shutil.copy(flags.workload_file, flags.results_dir)
# Internally important data
self.models_by_name = {
model_name : Model(model_name, model_descriptions[model_name])
for model_name in models_to_be_requested
}
self.workers_by_name = {
worker_name : Worker(self, worker_name)
for worker_name in [f"worker_{i:02d}" for i in range(flags.num_workers_to_add)]
}
self.model_placements = common.ModelPlacements()
for model in self.models_by_name.values():
self.model_placements.addModel(model)
for worker in self.workers_by_name.values():
self.model_placements.addWorker(worker)
self.metrics = self.Metrics(self)
# Components
self.local_controller = LocalController(self)
self.placement_controller = PlacementController(self, self.flags)
# Event Queue
self.event_queue = queue.PriorityQueue()
# Setup some models in cache, because why not
#for worker in sorted(self.workers_by_name.values()):
# for model in self.rng.choice(sorted(self.models_by_name.values()), size=self.cache_size, replace=False):
# self.model_placements.addModelToWorker(worker, model)
#self.model_placements.sync()
def run(self):
logging.info("Starting simulation")
while not self.event_queue.empty():
curr_time, next_event = self.event_queue.get()
logging.debug(f"NextEvent -> ({curr_time} : {next_event}")
events_to_add = next_event.run(curr_time)
for event_tuple in events_to_add:
self.event_queue.put(event_tuple)
logging.info("Simulation complete")
self.metrics.reportMetrics()
self.results_fid.close()
def mark_as_saturated(self):
self.is_saturated = True
def recordExit(self, request):
self.results_fid.write(f"{request.getResponse()}\n")
def getFlags():
parser = argparse.ArgumentParser(
parents=[
common.getParser(add_help=False),
placement_controller.getParser(add_help=False, include_parents=False)
],
conflict_handler='resolve'
)
parser.add_argument("--cache_size", default=3)
parser.add_argument('--workload_file', default="../workload/workload.txt")
parser.add_argument('--model_description_file', default="../workload/models.json")
parser.add_argument('--stop_after', default=float('inf'), type=float)
parser.add_argument('--run_identifier', default=None,
help="Identifier for saving data logs")
parser.add_argument('--results_dir', default="results/")
parser.add_argument('--show_debug', action='store_true')
parser.add_argument('--base_logging_dir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../../logs/simulation')) )
parser.add_argument('--run_series', default=None)
flags = parser.parse_args()
if flags.run_identifier is None:
flags.run_identifier = flags.model_eviction_algorithm
flags.run_identifier = f"{flags.run_identifier}.{int(time.time())}"
if flags.run_series is not None:
flags.base_logging_dir = os.path.join(flags.base_logging_dir, flags.run_series)
else:
flags.base_logging_dir = os.path.join(flags.base_logging_dir, flags.run_identifier)
flags.results_dir = flags.base_logging_dir
if not os.path.exists(flags.results_dir):
os.makedirs(flags.results_dir)
return flags
def main():
flags = getFlags()
common.getLogger(hide_debug=(not flags.show_debug))
with open(flags.workload_file) as workload_fid:
models_to_be_requested = set([l.split(' ')[2].strip() for l in workload_fid.readlines()])
simulation = Simulation(flags, models_to_be_requested, cache_size=flags.cache_size, rng_seed=flags.rng_seed)
workload_fid = open(flags.workload_file)
line = workload_fid.readline()
first_request = simulation.Request.fromLine(simulation, line)
simulation.event_queue.put( (first_request.arrival_time, events.RequestArrival(simulation, first_request, workload_fid)) )
simulation.run()
workload_fid.close()
if __name__ == '__main__':
main()
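# Example invocation (a sketch; the script name and paths are illustrative):
#   python simulation.py --workload_file ../workload/workload.txt \
#       --model_description_file ../workload/models.json --run_series test_series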
|
[
"numpy.random.default_rng",
"events.WorkerQueueCompletionEvent",
"common.getLogger",
"logging.debug",
"events.ModelAdditionEvent",
"logging.info",
"sys.path.append",
"logging.error",
"os.path.exists",
"common.getParser",
"json.dumps",
"events.ModelRemovalEvent",
"events.RequestCompletionEvent",
"random.random",
"common.getModelInfo",
"events.RequestArrival",
"queue.PriorityQueue",
"placement_controller.getParser",
"common.ModelPlacements",
"numpy.average",
"placement_controller.PlacementController",
"os.path.dirname",
"shutil.copy",
"time.time",
"os.makedirs",
"os.path.join",
"collections.defaultdict",
"os.path.basename",
"queue.Queue"
] |
[((254, 287), 'sys.path.append', 'sys.path.append', (['"""../src-testbed"""'], {}), "('../src-testbed')\n", (269, 287), False, 'import sys\n'), ((14014, 14063), 'common.getLogger', 'common.getLogger', ([], {'hide_debug': '(not flags.show_debug)'}), '(hide_debug=not flags.show_debug)\n', (14030, 14063), False, 'import common\n'), ((1527, 1574), 'placement_controller.PlacementController', 'placement_controller.PlacementController', (['flags'], {}), '(flags)\n', (1567, 1574), False, 'import placement_controller\n'), ((4208, 4221), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (4219, 4221), False, 'import queue\n'), ((4973, 5027), 'events.ModelRemovalEvent', 'events.ModelRemovalEvent', (['self.simulation', 'self', 'model'], {}), '(self.simulation, self, model)\n', (4997, 5027), False, 'import events\n'), ((6303, 6376), 'events.WorkerQueueCompletionEvent', 'events.WorkerQueueCompletionEvent', (['self.simulation', 'self', 'next_queue_item'], {}), '(self.simulation, self, next_queue_item)\n', (6336, 6376), False, 'import events\n'), ((10047, 10078), 'numpy.random.default_rng', 'np.random.default_rng', (['rng_seed'], {}), '(rng_seed)\n', (10068, 10078), True, 'import numpy as np\n'), ((10302, 10361), 'common.getModelInfo', 'common.getModelInfo', ([], {'json_file': 'flags.model_description_file'}), '(json_file=flags.model_description_file)\n', (10321, 10361), False, 'import common\n'), ((11031, 11055), 'common.ModelPlacements', 'common.ModelPlacements', ([], {}), '()\n', (11053, 11055), False, 'import common\n'), ((11473, 11494), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (11492, 11494), False, 'import queue\n'), ((11848, 11883), 'logging.info', 'logging.info', (['"""Starting simulation"""'], {}), "('Starting simulation')\n", (11860, 11883), False, 'import logging\n'), ((12176, 12211), 'logging.info', 'logging.info', (['"""Simulation complete"""'], {}), "('Simulation complete')\n", (12188, 12211), False, 'import logging\n'), ((13676, 13730), 'os.path.join', 'os.path.join', (['flags.base_logging_dir', 'flags.run_series'], {}), '(flags.base_logging_dir, flags.run_series)\n', (13688, 13730), False, 'import os\n'), ((13768, 13826), 'os.path.join', 'os.path.join', (['flags.base_logging_dir', 'flags.run_identifier'], {}), '(flags.base_logging_dir, flags.run_identifier)\n', (13780, 13826), False, 'import os\n'), ((13884, 13917), 'os.path.exists', 'os.path.exists', (['flags.results_dir'], {}), '(flags.results_dir)\n', (13898, 13917), False, 'import os\n'), ((13923, 13953), 'os.makedirs', 'os.makedirs', (['flags.results_dir'], {}), '(flags.results_dir)\n', (13934, 13953), False, 'import os\n'), ((7009, 7037), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (7032, 7037), False, 'import collections\n'), ((7071, 7099), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (7094, 7099), False, 'import collections\n'), ((7131, 7160), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (7154, 7160), False, 'import collections\n'), ((9777, 9802), 'json.dumps', 'json.dumps', (['response_dict'], {}), '(response_dict)\n', (9787, 9802), False, 'import json\n'), ((10118, 10183), 'os.path.join', 'os.path.join', (['flags.results_dir', 'f"""{flags.run_identifier}.log.gz"""'], {}), "(flags.results_dir, f'{flags.run_identifier}.log.gz')\n", (10130, 10183), False, 'import os\n'), ((10511, 10571), 'shutil.copy', 'shutil.copy', (['flags.model_description_file', 'flags.results_dir'], {}), '(flags.model_description_file, flags.results_dir)\n', (10522, 10571), False, 'import shutil\n'), ((10578, 10629), 'shutil.copy', 'shutil.copy', (['flags.workload_file', 'flags.results_dir'], {}), '(flags.workload_file, flags.results_dir)\n', (10589, 10629), False, 'import shutil\n'), ((11983, 12041), 'logging.debug', 'logging.debug', (['f"""NextEvent -> ({curr_time} : {next_event}"""'], {}), "(f'NextEvent -> ({curr_time} : {next_event}')\n", (11996, 12041), False, 'import logging\n'), ((14529, 14591), 'events.RequestArrival', 'events.RequestArrival', (['simulation', 'first_request', 'workload_fid'], {}), '(simulation, first_request, workload_fid)\n', (14550, 14591), False, 'import events\n'), ((999, 1042), 'logging.error', 'logging.error', (['"""No available workers found"""'], {}), "('No available workers found')\n", (1012, 1042), False, 'import logging\n'), ((5303, 5358), 'events.ModelAdditionEvent', 'events.ModelAdditionEvent', (['self.simulation', 'self', 'model'], {}), '(self.simulation, self, model)\n', (5328, 5358), False, 'import events\n'), ((10380, 10395), 'random.random', 'random.random', ([], {}), '()\n', (10393, 10395), False, 'import random\n'), ((12519, 12551), 'common.getParser', 'common.getParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (12535, 12551), False, 'import common\n'), ((12571, 12640), 'placement_controller.getParser', 'placement_controller.getParser', ([], {'add_help': '(False)', 'include_parents': '(False)'}), '(add_help=False, include_parents=False)\n', (12601, 12640), False, 'import placement_controller\n'), ((13594, 13605), 'time.time', 'time.time', ([], {}), '()\n', (13603, 13605), False, 'import time\n'), ((3726, 3781), 'events.RequestCompletionEvent', 'events.RequestCompletionEvent', (['self.simulation', 'request'], {}), '(self.simulation, request)\n', (3755, 3781), False, 'import events\n'), ((10455, 10501), 'os.path.basename', 'os.path.basename', (['flags.model_description_file'], {}), '(flags.model_description_file)\n', (10471, 10501), False, 'import os\n'), ((13306, 13331), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13321, 13331), False, 'import os\n'), ((1109, 1164), 'events.RequestCompletionEvent', 'events.RequestCompletionEvent', (['self.simulation', 'request'], {}), '(self.simulation, request)\n', (1138, 1164), False, 'import events\n'), ((7734, 7775), 'numpy.average', 'np.average', (['self.per_model_latency[model]'], {}), '(self.per_model_latency[model])\n', (7744, 7775), True, 'import numpy as np\n'), ((7785, 7826), 'numpy.average', 'np.average', (['self.per_model_latency[model]'], {}), '(self.per_model_latency[model])\n', (7795, 7826), True, 'import numpy as np\n')]
|
# the simplex projection algorithm implemented as a layer, while using the saliency maps to obtain object size estimates
import sys
sys.path.insert(0,'/home/briq/libs/caffe/python')
import caffe
import random
import numpy as np
import scipy.misc
import imageio
import cv2
import scipy.ndimage as nd
import os.path
import scipy.io as sio
class SimplexProjectionLayer(caffe.Layer):
saliency_path = '/media/VOC/saliency/thresholded_saliency_images/'
input_list_path = '/home/briq/libs/CSPN/training/input_list.txt'
def simplexProjectionLinear(self, data_ind, class_ind, V_im, nu):
if(nu<1):
return V_im
heatmap_size = V_im.shape[0]*V_im.shape[1]
theta = np.sum(V_im)
        if(theta == nu): # the size constraint is already satisfied
return V_im
if(theta < nu):
pi = V_im+(nu-theta)/heatmap_size
return pi
V = V_im.flatten()
s = 0.0
p = 0.0
U=V
while(len(U) > 0):
k = random.randint(0, len(U)-1)
uk = U[k]
UG = U[U>=uk]
delta_p = len(UG)
delta_s = np.sum(UG)
if ((s+delta_s)-(p+delta_p)*uk<nu):
s = s+delta_s
p = p+delta_p
U = U[U<uk]
else:
U = UG
U = np.delete(U, np.where(U==uk))
if(p<0.000001):
raise ValueError('rho is too small, apparently something went wrong in the CNN') # happens when nu<1 or V_im=infinity for example
theta = (s-nu)/p
pi = V_im-theta
return pi
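    # Worked example (illustrative): for V_im = [[0.9, 0.7], [0.5, 0.1]] and nu = 1,
    # the loop keeps the p = 3 largest entries with partial sum s = 2.1, giving
    # theta = (2.1 - 1) / 3 ~= 0.367 and pi = V_im - 0.367; note that this variant
    # does not clip negative entries to zero.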
def setup(self, bottom, top):
self.num_labels = bottom[0].shape[1]
with open(self.input_list_path) as fp:
self.images = fp.readlines()
random.seed()
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
for i in range(bottom[0].num):
im_id = int(bottom[2].data[i])
im_name = self.images[im_id].split(' ')[0].split('.')[0]
top[0].data[i] = bottom[0].data[i]
saliency_name = self.saliency_path+im_name+'.mat'
if (not os.path.isfile(saliency_name)):
continue
saliency_im = sio.loadmat(saliency_name, squeeze_me=True)['data']
for c in range(self.num_labels):
if(c==0):
continue
if(bottom[1].data[i,0,0,c]>0.5): # the label is there
instance = bottom[0].data[i][c]
nu = np.sum(saliency_im==c)
if(nu>1):
instance = bottom[0].data[i][c]
top[0].data[i][c]= self.simplexProjectionLinear(i, c, instance, nu)
def backward(self, top, propagate_down, bottom):
pass
|
[
"sys.path.insert",
"numpy.where",
"scipy.io.loadmat",
"random.seed",
"numpy.sum"
] |
[((132, 182), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/briq/libs/caffe/python"""'], {}), "(0, '/home/briq/libs/caffe/python')\n", (147, 182), False, 'import sys\n'), ((711, 723), 'numpy.sum', 'np.sum', (['V_im'], {}), '(V_im)\n', (717, 723), True, 'import numpy as np\n'), ((1802, 1815), 'random.seed', 'random.seed', ([], {}), '()\n', (1813, 1815), False, 'import random\n'), ((1151, 1161), 'numpy.sum', 'np.sum', (['UG'], {}), '(UG)\n', (1157, 1161), True, 'import numpy as np\n'), ((2336, 2379), 'scipy.io.loadmat', 'sio.loadmat', (['saliency_name'], {'squeeze_me': '(True)'}), '(saliency_name, squeeze_me=True)\n', (2347, 2379), True, 'import scipy.io as sio\n'), ((1372, 1389), 'numpy.where', 'np.where', (['(U == uk)'], {}), '(U == uk)\n', (1380, 1389), True, 'import numpy as np\n'), ((2635, 2659), 'numpy.sum', 'np.sum', (['(saliency_im == c)'], {}), '(saliency_im == c)\n', (2641, 2659), True, 'import numpy as np\n')]
|
'''
This file implements JPP-Net for human parsing and pose detection.
'''
import tensorflow as tf
import os
from tensorflow.python.framework import graph_util
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from tensorflow.python.platform import gfile
import time
class JPP(object):
    # Per-channel means subtracted for input normalization (the standard Caffe-style values);
    # see the original JPP-Net repo for details.
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
def __init__(self, pb_path):
        options = tf.GPUOptions(allow_growth=True)
        # Use a single session configured with the GPU options; the original code
        # created an extra, unconfigured session and ran on that one instead.
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=options))
with gfile.FastGFile(pb_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
self.sess.graph.as_default()
tf.import_graph_def(graph_def, name='') # import compute graph
self.sess.run(tf.global_variables_initializer())
        self.img_tensor = self.sess.graph.get_tensor_by_name('img_1:0')
        self.pose_tensor = self.sess.graph.get_tensor_by_name('pose:0')
        self.parse_tensor = self.sess.graph.get_tensor_by_name('parse:0')
def predict(self, img):
'''
img is a human image array with shape (any,any,3)
return a list, [pose, parse]
'''
ret = self.sess.run([self.pose_tensor,self.parse_tensor], feed_dict={self.img_tensor: img-JPP.IMG_MEAN})
return ret
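# A minimal usage sketch (the .pb path and image file are assumptions):
#   model = JPP('jpp_net.pb')
#   img = np.array(Image.open('person.jpg'), dtype=np.float32)
#   pose, parse = model.predict(img)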
|
[
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.global_variables_initializer",
"numpy.array",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.import_graph_def",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions"
] |
[((423, 493), 'numpy.array', 'np.array', (['(104.00698793, 116.66876762, 122.67891434)'], {'dtype': 'np.float32'}), '((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)\n', (431, 493), True, 'import numpy as np\n'), ((548, 580), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (561, 580), True, 'import tensorflow as tf\n'), ((672, 684), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (682, 684), True, 'import tensorflow as tf\n'), ((698, 728), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['pb_path', '"""rb"""'], {}), "(pb_path, 'rb')\n", (713, 728), False, 'from tensorflow.python.platform import gfile\n'), ((759, 772), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (770, 772), True, 'import tensorflow as tf\n'), ((874, 913), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (893, 913), True, 'import tensorflow as tf\n'), ((959, 992), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (990, 992), True, 'import tensorflow as tf\n'), ((614, 649), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'options'}), '(gpu_options=options)\n', (628, 649), True, 'import tensorflow as tf\n')]
|
#
# GeomProc: geometry processing library in python + numpy
#
# Copyright (c) 2008-2021 <NAME> <<EMAIL>>
# under the MIT License.
#
# See file LICENSE.txt for details on the copyright license.
#
"""This module contains the implicit function class of the GeomProc
geometry processing library used for defining implicit functions and
performing surface reconstruction.
"""
import numpy as np
import math
import random
# Implicit surface class
class impsurf:
"""A class that defines an implicit function
Attributes
----------
evaluate = pointer to a function(array_like x) : float
Function used for evaluating the implicit function at a 3D point
x, returning the signed distance of the surface to point x.
Notes
-----
An implicit function can be setup by calling one of the setup_<name>
methods. After that, the implicit function can be evaluated by
simply calling the impsurf.evaluate(x) method.
"""
def __init__(self):
self.evaluate = None
def compute_displaced_samples(self, pc, epsilon):
"""Create a set of samples displaced along point normals
Parameters
----------
pc : geomproc.pcloud
Input point cloud stored as a point cloud object. Note that
the point cloud needs to have normal vectors associated to
the points
epsilon : float
Amount of displacement to perform along normals
Returns
-------
None
Notes
-----
Given an input point cloud, this method creates a set of samples
that can be used for RBF surface reconstruction. Given an input
point cloud with n points, the method creates a sample set with
n*2 points, where n points are the original points from the
input point cloud, and another n points are created by
displacing each original sample along its normal by a value of
epsilon. The samples are stored in the temporary attribute of
the class called "sample", which is of shape (n*2, 3). Moreover,
the method also creates a vector of displacements called
"displacement", which is of shape (n*2, 1). The vector stores
the displacement of each sample, which is zero for the original
samples and epsilon for the new samples.
See Also
--------
geomproc.impsurf.impsurf.setup_rbf
"""
# Check if points have normals
if pc.normal.shape[0] == 0:
raise RuntimeError('point cloud does not have normals')
# Get number of points in cloud
n = pc.point.shape[0]
# Initialize samples and their displacements
self.sample = np.zeros((n*2, 3))
self.displacement = np.zeros((n*2, 1))
# The first portion of the samples are simply the points in the
# point cloud with displacement 0
self.sample[0:n, :] = pc.point
# Add additional samples displaced from the surface by epsilon. The
# samples are displaced along the normal direction
for i in range(n):
self.sample[n+i, :] = pc.point[i, :] + pc.normal[i, :]*epsilon
self.displacement[n+i] = epsilon
def compute_rbf(self, kernel, vectorized=False):
"""Reconstruct an implicit function from a set of point samples
Parameters
----------
kernel : function
Kernel function of the form kernel(x, y) : float that
computes the dissimilarity between two 3D points x and y,
e.g., kernel = lambda x, y: math.pow(np.linalg.norm(x - y), 3)
vectorized : boolean
If vectorized is True, the method assumes that the kernel
supplied function applies the kernel function to two sets of
points, resulting in a matrix of shape (m, n) for sets of
samples with m and n points. The default value of vectorized
is False
Returns
-------
None
Notes
-----
The method reconstructs an implicit function from a set of point
samples using the RBF method. The method assumes that a set of
samples and displacements have been stored in the temporary
attributes "sample" and "displacement", as described in the help
        of method geomproc.impsurf.impsurf.compute_displaced_samples. The method
then stores a temporary attribute "w" that represents the
weights of radial basis functions (RBFs). The weights define the
implicit function in the form phi(x) = \sum_{i=1}^n
w(i)*kernel(x, sample(i)). The method also stores the given
kernel in the temporary attribute "kernel".
See Also
--------
geomproc.impsurf.impsurf.compute_displaced_samples
geomproc.impsurf.impsurf.setup_rbf
"""
# Check the type of kernel we are using
if vectorized:
# Apply vectorized kernel
self.K = kernel(self.sample, self.sample)
if self.K.shape != (self.sample.shape[0], self.sample.shape[0]):
raise RuntimeError('vectorized kernel returns output of invalid size '+str(self.K.shape))
else:
# Get number of samples
n = self.sample.shape[0]
# Initialize matrix
self.K = np.zeros((n, n))
# Fill matrix entries
for i in range(n):
for j in range(n):
self.K[i, j] = kernel(self.sample[i, :], self.sample[j, :])
# Solve linear system
self.w = np.linalg.solve(self.K, self.displacement)
# Save kernel
self.kernel = kernel
# Remember kernel type
self.vectorized = vectorized
def evaluate_rbf(self, x):
"""Evaluate an implicit function encoded as an RBF
Parameters
----------
x : array_like
3D point where the RBF should be evaluated
Returns
-------
y : float
Scalar value of the implicit function at point x
Notes
-----
The method returns the value of the implicit function at a given
point x. The value is typically the signed distance of the point
to the surface. The method assumes that temporary attributes
"sample", "kernel", and "w" have been stored in the class, as
described in the help of methods
        geomproc.impsurf.impsurf.compute_displaced_samples and geomproc.impsurf.impsurf.compute_rbf.
See Also
--------
geomproc.impsurf.impsurf.compute_displaced_samples
geomproc.impsurf.impsurf.compute_rbf
geomproc.impsurf.impsurf.setup_rbf
"""
        if self.vectorized:
            # Make sure the input point is a 2D row vector of shape (1, 3)
            inx = np.array(x)
            if inx.ndim == 1:
                inx = inx[np.newaxis, :]
            # Evaluate the kernel against all samples at once
            diff = self.kernel(inx, self.sample)
            # RBF: weighted sum of kernel values
            y = np.sum(self.w*diff.T)
else:
y = 0.0
for i in range(self.sample.shape[0]):
y += self.w[i]*self.kernel(x, self.sample[i, :])
return y
def setup_rbf(self, pc, epsilon, kernel, vectorized=False):
"""Setup an implicit function based on a set of point samples
Parameters
----------
pc : geomproc.pcloud
Input point cloud stored as a point cloud object. Note that
the point cloud needs to have normal vectors associated to
the points
epsilon : float
Amount of displacement to perform along normals
kernel : function
Kernel function of the form kernel(x, y) : float that
computes the dissimilarity between two 3D points x and y,
e.g., kernel = lambda x, y: math.pow(np.linalg.norm(x - y), 3)
vectorized : boolean
If vectorized is True, the method assumes that the kernel
supplied function applies the kernel function to two sets of
points, resulting in a matrix of shape (m, n) for sets of
samples with m and n points. The default value of vectorized
is False
Returns
-------
None
Notes
-----
Setup an implicit function by reconstructing the function from a
set of point samples using the RBF method. The method first
displaces the original point samples by a certain amount
epsilon, to create additional samples that help avoid a trivial
solution to the surface reconstruction problem. Then, the method
reconstructs a surface with the RBF method based on the given
kernel and solving a linear system. Once the implicit function
is setup, it can be evaluated with the "evaluate" method of the
        class, which is a pointer to geomproc.impsurf.impsurf.evaluate_rbf.
See Also
--------
geomproc.impsurf.impsurf.compute_displaced_samples
geomproc.impsurf.impsurf.compute_rbf
geomproc.impsurf.impsurf.evaluate_rbf
"""
self.compute_displaced_samples(pc, epsilon)
self.compute_rbf(kernel, vectorized)
self.evaluate = self.evaluate_rbf
def evaluate_sphere(self, p):
"""Evaluate the implicit function of a sphere
Parameters
----------
p : array_like
3D point where the sphere should be evaluated
Returns
-------
y : float
Scalar value of the implicit function at point p
Notes
-----
The method evaluates the implicit function of a sphere at a
given point. The method assumes that the center and radius of
the sphere have been stored in the temporary attributes "center"
and "sphere" by the method surfrec.impsurf.setup_sphere.
See Also
--------
geomproc.impsurf.impsurf.setup_sphere
Examples
--------
>>> import geomproc
>>> surf = geomproc.impsurf()
>>> surf.setup_sphere(0.5)
>>> val = surf.evaluate([0, 0, 0])
"""
return ((p[0] - self.center[0])*(p[0] - self.center[0]) +
(p[1] - self.center[1])*(p[1] - self.center[1]) +
(p[2] - self.center[2])*(p[2] - self.center[2]) -
self.radius*self.radius)
def setup_sphere(self, radius=1.0, center=[0.0, 0.0, 0.0]):
"""Setup the implicit function of a sphere
Parameters
----------
radius : float
Scalar representing the radius of the sphere (the default
value is 1)
center : array_like
3D point representing the center of the sphere (the default
value is the origin)
Returns
-------
None
Notes
-----
The method sets up the implicit function for a sphere with a
given center and radius. Once the implicit function is setup, it
can be evaluated with the "evaluate" method of the class, which
        is a pointer to geomproc.impsurf.impsurf.evaluate_sphere.
See Also
--------
geomproc.impsurf.impsurf.evaluate_sphere
Examples
--------
>>> import geomproc
>>> surf = geomproc.impsurf()
>>> surf.setup_sphere(0.5)
>>> val = surf.evaluate([0, 0, 0])
"""
self.center = center
self.radius = radius
self.evaluate = self.evaluate_sphere
def evaluate_torus(self, p):
"""Evaluate the implicit function of a torus
Parameters
----------
p : array_like
            3D point where the torus should be evaluated
Returns
-------
y : float
Scalar value of the implicit function at point p
Notes
-----
The method evaluates the implicit function of a torus at a given
point. The method assumes that the two scalars "radius1" and
"radius2" that describe the torus have been saved into temporary
attributes of the class by the method
        geomproc.impsurf.impsurf.setup_torus.
See Also
--------
geomproc.impsurf.impsurf.setup_torus
Examples
--------
>>> import geomproc
>>> surf = geomproc.impsurf()
>>> surf.setup_torus(0.6, 0.3)
>>> val = surf.evaluate([0, 0, 0])
"""
return math.pow(math.sqrt(p[0]*p[0] + p[1]*p[1]) - self.radius1, 2) + p[2]*p[2] - self.radius2*self.radius2
def setup_torus(self, radius1, radius2):
"""Setup the implicit function of a torus
Parameters
----------
radius1 : float
The distance from the center of the tube to the center of the torus
radius2: float
Radius of the tube
Returns
-------
None
Notes
-----
The method sets up the implicit function for a torus which is
radially symmetric about the z-axis. Once the implicit function
is setup, it can be evaluated with the "evaluate" method of the
        class, which is a pointer to geomproc.impsurf.impsurf.evaluate_torus.
See Also
--------
geomproc.impsurf.impsurf.evaluate_torus
Examples
--------
>>> import geomproc
>>> surf = geomproc.impsurf()
>>> surf.setup_torus(0.6, 0.3)
>>> val = surf.evaluate([0, 0, 0])
"""
self.radius1 = radius1
self.radius2 = radius2
self.evaluate = self.evaluate_torus
|
[
"numpy.linalg.solve",
"math.sqrt",
"numpy.array",
"numpy.zeros",
"numpy.sum"
] |
[((2726, 2746), 'numpy.zeros', 'np.zeros', (['(n * 2, 3)'], {}), '((n * 2, 3))\n', (2734, 2746), True, 'import numpy as np\n'), ((2773, 2793), 'numpy.zeros', 'np.zeros', (['(n * 2, 1)'], {}), '((n * 2, 1))\n', (2781, 2793), True, 'import numpy as np\n'), ((5602, 5644), 'numpy.linalg.solve', 'np.linalg.solve', (['self.K', 'self.displacement'], {}), '(self.K, self.displacement)\n', (5617, 5644), True, 'import numpy as np\n'), ((5355, 5371), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (5363, 5371), True, 'import numpy as np\n'), ((6825, 6836), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (6833, 6836), True, 'import numpy as np\n'), ((7035, 7058), 'numpy.sum', 'np.sum', (['(self.w * diff.T)'], {}), '(self.w * diff.T)\n', (7041, 7058), True, 'import numpy as np\n'), ((12509, 12545), 'math.sqrt', 'math.sqrt', (['(p[0] * p[0] + p[1] * p[1])'], {}), '(p[0] * p[0] + p[1] * p[1])\n', (12518, 12545), False, 'import math\n')]
|
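A hedged usage sketch for the impsurf class above, mirroring the docstring examples; the cubic kernel is the one suggested in setup_rbf's help, and pc stands for a geomproc.pcloud with normals (not constructed here).

# Hedged usage sketch; assumes the geomproc package is importable.
import math
import numpy as np
import geomproc

surf = geomproc.impsurf()
surf.setup_sphere(0.5)
print(surf.evaluate([0.0, 0.0, 0.0]))  # -0.25: the origin lies inside the sphere

# RBF reconstruction from an oriented point cloud pc would then be:
kernel = lambda x, y: math.pow(np.linalg.norm(np.asarray(x) - np.asarray(y)), 3)
# surf.setup_rbf(pc, epsilon=0.01, kernel=kernel)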
import pickle
import numpy as np
import matplotlib.pyplot as plt
with open('./quadratic/eval_record.pickle', 'rb') as loss:
    data = pickle.load(loss)
print('Mat_record', len(data['Mat_record']))
#print('bias', data['inter_gradient_record'])
#print('constant', data['intra_record'])
with open('./quadratic/evaluate_record.pickle', 'rb') as loss1:
    data1 = pickle.load(loss1)
x = np.array(data1['x_record'])
print('x_record', x.shape)
#print('bias', data1['inter_gradient_record'])
#print('constant', data1['intra_record'])
#x = range(10000)
#ax = plt.axes(yscale='log')
#ax.plot(x,data,'b')
#plt.show('loss')
|
[
"numpy.array",
"pickle.load"
] |
[((382, 409), 'numpy.array', 'np.array', (["data1['x_record']"], {}), "(data1['x_record'])\n", (390, 409), True, 'import numpy as np\n'), ((135, 152), 'pickle.load', 'pickle.load', (['loss'], {}), '(loss)\n', (146, 152), False, 'import pickle\n'), ((359, 377), 'pickle.load', 'pickle.load', (['loss1'], {}), '(loss1)\n', (370, 377), False, 'import pickle\n')]
|
#--SHAPES and TEXTS--#
import cv2
import numpy as np
#We are going to use the numpy library to create our image matrix
#In an 8-bit image, 0 stands for black and 255 stands for white
img = np.zeros((512,512,3),np.uint8) # (height, width, channels); uint8 gives a value range of 0-255
#print(img)
#img[200:300,100:300] = 255,0,0 #whole image is img[:]
#the origin of the image is top left corner in OpenCV
cv2.line(img,(0,0),(img.shape[1],img.shape[0]),(0,255,0),3) #img.shape[1] is width and img.shape[0] is height. Now we got a diagonal line
cv2.line(img,(0,0),(300,300),(200,255,200),3) #image,start,end,color,thickness
cv2.rectangle(img,(0,0),(250,350),(0,0,255),2) #start, end, color, thickness etc. Pass cv2.FILLED instead of a thickness if you want to fill the shape
cv2.circle(img,(450,50),30,(255,255,0),5) #center,radius,color,thickness
cv2.putText(img," OPENCV ", (300,100),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,150,0),3) #text, position, font, scale, color, thickness etc. Scale controls the font size
cv2.imshow("Matrix", img)
cv2.waitKey(0)
|
[
"cv2.rectangle",
"cv2.line",
"cv2.putText",
"cv2.imshow",
"cv2.circle",
"numpy.zeros",
"cv2.waitKey"
] |
[((173, 206), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (181, 206), True, 'import numpy as np\n'), ((395, 462), 'cv2.line', 'cv2.line', (['img', '(0, 0)', '(img.shape[1], img.shape[0])', '(0, 255, 0)', '(3)'], {}), '(img, (0, 0), (img.shape[1], img.shape[0]), (0, 255, 0), 3)\n', (403, 462), False, 'import cv2\n'), ((534, 587), 'cv2.line', 'cv2.line', (['img', '(0, 0)', '(300, 300)', '(200, 255, 200)', '(3)'], {}), '(img, (0, 0), (300, 300), (200, 255, 200), 3)\n', (542, 587), False, 'import cv2\n'), ((614, 668), 'cv2.rectangle', 'cv2.rectangle', (['img', '(0, 0)', '(250, 350)', '(0, 0, 255)', '(2)'], {}), '(img, (0, 0), (250, 350), (0, 0, 255), 2)\n', (627, 668), False, 'import cv2\n'), ((761, 809), 'cv2.circle', 'cv2.circle', (['img', '(450, 50)', '(30)', '(255, 255, 0)', '(5)'], {}), '(img, (450, 50), 30, (255, 255, 0), 5)\n', (771, 809), False, 'import cv2\n'), ((835, 926), 'cv2.putText', 'cv2.putText', (['img', '""" OPENCV """', '(300, 100)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(0, 150, 0)', '(3)'], {}), "(img, ' OPENCV ', (300, 100), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,\n 150, 0), 3)\n", (846, 926), False, 'import cv2\n'), ((979, 1004), 'cv2.imshow', 'cv2.imshow', (['"""Matrix"""', 'img'], {}), "('Matrix', img)\n", (989, 1004), False, 'import cv2\n'), ((1008, 1022), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1019, 1022), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 13 15:10:58 2021
@author: nguy0936
"""
from pyenvnoise.utils import ptiread
data = ptiread(r'R:\CMPH-Windfarm Field Study\Hornsdale\set2\Recording-1.1.pti')
import numpy as np
file_name = r'R:\CMPH-Windfarm Field Study\Hornsdale\set2\Recording-1.1.pti'
# Open in binary mode: the header lines are text, but the payload is raw
# binary; header lines are decoded explicitly below instead of using text mode.
fid = open(file_name, "rb")
headerlinecnt = 1
numref = 1
## Get all information
# get hearder information setup
# first 15 lines are setup info
tline = fid.readline().decode('utf-8', errors='ignore')
# determine start header line
while tline.strip() != '[SETUP START]':
    numref += 1
    headerlinecnt += 1
    end_setup = numref + 13
    tline = fid.readline().decode('utf-8', errors='ignore')
while headerlinecnt < end_setup:
    tline = fid.readline().decode('utf-8', errors='ignore')
    headerlinecnt = headerlinecnt + 1
if headerlinecnt == (numref+2):
RECInfoSectionSize = int(tline.partition('=')[2])
if headerlinecnt == (numref+3):
RECInfoSectionPos = int(tline.partition('=')[2])
    if headerlinecnt == (numref + 4):
        SampleFrequency = int(float(tline.partition('=')[2]))
    if headerlinecnt == (numref + 5):
        numchannels = int(tline.partition('=')[2])
    if headerlinecnt == (numref + 11):
        Sample = int(tline.partition('=')[2])
    if headerlinecnt == (numref + 12):
        Date = tline.partition('=')[2]
    if headerlinecnt == (numref + 13):
        Time = tline.partition('=')[2]
## Get channel info
# the most important infor is correction factor
CorrectionFactor = []
for nchann in range(numchannels):
for i in range(10):
        tline = fid.readline().decode('utf-8', errors='ignore')
if tline.partition('=')[0] == 'CorrectionFactor':
CorrectionFactor.append(float(tline.partition('=')[2]))
if tline.partition('=')[0] == 'SampleFrequency':
SampleFrequency = int(tline.partition('=')[2])
## Read binary data
# poiter to main data
# 20 bytes may a subheader which may not important
fid.seek( RECInfoSectionPos + RECInfoSectionSize + 20, 0)
# the size of each segment, it around 250 ms
# fro Fs = 8192 Hz, it is 2048*4 bytes data + 4*4 bytes info (channel id)
dsize = np.fromfile(fid, dtype=np.int16, count=1)
cols = int(Sample/(dsize[0]-4)*numchannels)  # number of segments (kept for reference; unused below)
# back to start data
fid.seek( RECInfoSectionPos + RECInfoSectionSize + 20, 0)
#print(fid.tell())
# read all data into rawdata and ignore 4 first bytes with info
rawdata = np.fromfile(fid, np.int32).reshape((-1, dsize[0])).T
rawdata = np.delete(rawdata, np.s_[0:4], 0)
## Save data into channels
# calculate factors for actual Pa, full range is 16 bit system
CorrectionFactor = np.array(CorrectionFactor)
factor = CorrectionFactor / 2**16
# initilise array data
Data = np.empty([int(rawdata.shape[0]*rawdata.shape[1]/numchannels), numchannels])
for i in range(numchannels):
    # De-interleave: every numchannels-th column belongs to channel i;
    # flatten it to a 1-D signal and scale to physical units (Pa).
    Data[:, i] = np.transpose(rawdata[:, i:rawdata.shape[1]:numchannels]).ravel()*factor[i]
|
[
"numpy.fromfile",
"numpy.delete",
"pyenvnoise.utils.ptiread",
"numpy.array",
"numpy.transpose"
] |
[((133, 209), 'pyenvnoise.utils.ptiread', 'ptiread', (['"""R:\\\\CMPH-Windfarm Field Study\\\\Hornsdale\\\\set2\\\\Recording-1.1.pti"""'], {}), "('R:\\\\CMPH-Windfarm Field Study\\\\Hornsdale\\\\set2\\\\Recording-1.1.pti')\n", (140, 209), False, 'from pyenvnoise.utils import ptiread\n'), ((2162, 2203), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.int16', 'count': '(1)'}), '(fid, dtype=np.int16, count=1)\n', (2173, 2203), True, 'import numpy as np\n'), ((2482, 2515), 'numpy.delete', 'np.delete', (['rawdata', 'np.s_[0:4]', '(0)'], {}), '(rawdata, np.s_[0:4], 0)\n', (2491, 2515), True, 'import numpy as np\n'), ((2626, 2652), 'numpy.array', 'np.array', (['CorrectionFactor'], {}), '(CorrectionFactor)\n', (2634, 2652), True, 'import numpy as np\n'), ((2419, 2445), 'numpy.fromfile', 'np.fromfile', (['fid', 'np.int32'], {}), '(fid, np.int32)\n', (2430, 2445), True, 'import numpy as np\n'), ((2840, 2896), 'numpy.transpose', 'np.transpose', (['rawdata[:, i:rawdata.shape[1]:numchannels]'], {}), '(rawdata[:, i:rawdata.shape[1]:numchannels])\n', (2852, 2896), True, 'import numpy as np\n')]
|
import numpy as np
from OpenGL.arrays import vbo
from .Mesh_utils import MeshFuncs, MeshSignals, BBox
import openmesh
import copy
from .Shader import *
# Monkey-patch openmesh's property-array setters so they accept an optional
# element shape / default value and report clearer errors on size mismatches.
orig_set_vertex_property_array = openmesh.PolyMesh.set_vertex_property_array
def svpa(self, prop_name, array=None, element_shape=None, element_value=None):
if array is not None:
try:
orig_set_vertex_property_array(self, prop_name, array)
except Exception as e:
print('error when set attribute', prop_name, type(array), array.shape, self.n_vertices())
raise e
return
if element_shape is None:
if element_value is None:
element_shape = ()
else:
element_shape = np.shape(element_value)
if element_value is None:
orig_set_vertex_property_array(self, prop_name, np.empty(element_shape))
else:
orig_set_vertex_property_array(self, prop_name, np.array(
np.broadcast_to(element_value, element_shape)))
openmesh.PolyMesh.set_vertex_property_array = svpa
orig_set_face_property_array = openmesh.PolyMesh.set_face_property_array
def sfpa(self, prop_name, array=None, element_shape=None, element_value=None):
if array is not None:
try:
orig_set_face_property_array(self, prop_name, array)
except Exception as e:
print('error when set attribute', prop_name, type(array), array.shape, self.n_faces())
raise e
return
if element_shape is None:
if element_value is None:
element_shape = ()
else:
element_shape = np.shape(element_value)
if element_value is None:
orig_set_face_property_array(self, prop_name, np.empty(element_shape))
else:
orig_set_face_property_array(self, prop_name, np.array(
np.broadcast_to(element_value, element_shape)))
openmesh.PolyMesh.set_face_property_array = sfpa
orig_set_edge_property_array = openmesh.PolyMesh.set_edge_property_array
def sepa(self, prop_name, array=None, element_shape=None, element_value=None):
if array is not None:
try:
orig_set_edge_property_array(self, prop_name, array)
except Exception as e:
            print('error when set attribute', prop_name, type(array), array.shape, self.n_edges())
raise e
return
if element_shape is None:
if element_value is None:
element_shape = ()
else:
element_shape = np.shape(element_value)
if element_value is None:
orig_set_edge_property_array(self, prop_name, np.empty(element_shape))
else:
orig_set_edge_property_array(self, prop_name, np.array(
np.broadcast_to(element_value, element_shape)))
openmesh.PolyMesh.set_edge_property_array = sepa
DATA_TYPE_MAP = {
float: 'float',
int: 'int',
bool: 'bool',
str: 'str',
list: 'list',
tuple: 'tuple',
}
DEFAULT_VALUE_MAP = {
"float": 0.0,
"int": 0,
"vector2": [0, 0],
"vector3": [0, 0, 0],
"vector4": [0, 0, 0, 0],
"matrix3": np.identity(3, dtype=np.float64),
"matrix4": np.identity(4, dtype=np.float64),
"bool": False,
"list": [],
"tuple": {},
"custom": None,
"str": '',
}
DATA_IS_ARRAY_MAP = {
"float": True,
"int": True,
"vector2": True,
"vector3": True,
"vector4": True,
"matrix3": True,
"matrix4": True,
"bool": False,
"list": False,
"tuple": False,
"custom": False,
"str": False,
}
DATA_SHAPE_MAP = {
"float": None,
"int": None,
"vector2": [0, 2],
"vector3": [0, 3],
"vector4": [0, 4],
"matrix3": [0, 3, 3],
"matrix4": [0, 4, 4],
"bool": None,
"list": None,
"tuple": None,
"custom": None,
"str": None,
}
def get_shape(element_num, base_shape):
if base_shape is None:
shape = (element_num,)
else:
base_shape[0] = element_num
shape = tuple(base_shape)
return shape
class Mesh(object):
def __init__(self, mesh=None):
self.opts = {
'color': (1., 1., 1.),
'edgeColor': (0.5, 0.5, 0.5),
'pointColor': (1.0, 1.0, 0.0),
'shader': standShader,
'smooth': True,
'computeNormals': False,
}
self._attributeMap = {
"vertex": {
"pos": {'default_value': [0, 0, 0], 'type': 'vector3', 'is_array': True}
},
"face": {},
"edge": {},
"detail": {}
}
self.signals = MeshSignals()
self._selected = False
self.edge_colors = {
True: (1.0, 1.0, 0.0, 1.0),
False: (0.15, 0.15, 0.15, 1.0)
}
if mesh is None:
self._mesh = openmesh.PolyMesh()
else:
self._mesh = mesh
self._mesh.release_vertex_texcoords1D()
self._mesh.release_vertex_texcoords2D()
self._mesh.release_vertex_texcoords3D()
self._mesh.release_vertex_colors()
self._mesh.release_halfedge_texcoords1D()
self._mesh.release_halfedge_texcoords2D()
self._mesh.release_halfedge_texcoords3D()
self._mesh.release_halfedge_normals()
self._mesh.release_halfedge_colors()
self._mesh.release_face_colors()
self._mesh.release_face_texture_index()
self._mesh.release_edge_colors()
self.bbox = BBox()
self._GLFaces = None
self._flatColor = 0
self.__view = None
@property
def meshFuncs(self):
"""
        Get an object that contains some utility mesh functions.
Returns:
MeshFuncs object.
"""
return MeshFuncs(self)
@property
def mesh(self):
"""
Get the real mesh object.
Returns:
openmesh.PolyMesh.
"""
return self._mesh
@property
def bbox_min(self):
"""
Get bounding box min value.
Returns:
list.
"""
vts = self.getVertexes()
if vts is None:
return [0, 0, 0]
try:
_bbox_min = list(np.min(vts, axis=0))
        except Exception:
return [0, 0, 0]
return _bbox_min
@property
def bbox_max(self):
"""
Get bounding box max value.
Returns:
list.
"""
vts = self.getVertexes()
if vts is None:
return [0, 0, 0]
try:
_bbox_max = list(np.max(vts, axis=0))
        except Exception:
return [0, 0, 0]
return _bbox_max
@property
def bbox_center(self):
"""
Get bounding box center value.
Returns:
list.
"""
_, __, _bbox_center = self.get_bbox_info()
return _bbox_center
def get_bbox_info(self):
"""
Get bounding box min, max, center.
Returns:
min->list, max->list, center->list.
"""
_min = self.bbox_min
_max = self.bbox_max
_center = [(_min[0] + _max[0]) / 2.0,
(_min[1] + _max[1]) / 2.0,
(_min[2] + _max[2]) / 2.0]
return _min, _max, _center
def visible(self):
return True
def _setView(self, v):
self.__view = v
def view(self):
return self.__view
def update(self):
v = self.view()
if v is None:
return
v.update()
def update_GLFace(self):
"""
Prepare the mesh data for OpenGL.
"""
b = self.getTriangulateMesh()
self._GLFaces = b.face_vertex_indices()
def getTriangulateMesh(self):
"""
Triangulate all faces and return a new mesh.
Returns:
openmesh.PolyMesh.
"""
b = copy.deepcopy(self._mesh)
b.triangulate()
return b
def setFlatColor(self, mode):
"""
Set if use flat color for render.
Args:
mode(bool): True means use flat color.
"""
if mode is True:
self._flatColor = 1
else:
self._flatColor = 0
def setSelected(self, sel):
self._selected = sel
# self.opts['edgeColor'] = self.edge_colors[False]
self.update()
def getSelected(self):
return self._selected
def drawBBox(self):
_min, _max, _center = self.get_bbox_info()
size = [abs(_min[0] - _max[0]),
abs(_min[1] - _max[1]),
abs(_min[2] - _max[2])]
tr = self.bbox.set(_center, size)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
try:
a = np.array(tr.copyDataTo()).reshape((4, 4))
glMultMatrixf(a.transpose())
self.bbox.paint()
finally:
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
def setShader(self, shader):
self.opts['shader'] = shader
self.update()
def shader(self):
return self.opts['shader']
def setColor(self, c):
self.opts['color'] = c
self.update()
def paint(self):
# self.setupGLState()
if self._GLFaces is None:
self.update_GLFace()
verts = self.getVertexes()
if verts is None:
return
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointerf(verts)
color = self.getColors()
hasColor = color is not None
if not hasColor:
glColor3f(*self.opts['color'])
else:
glEnableClientState(GL_COLOR_ARRAY)
glColorPointerf(color)
if self.view().opts['drawFaces'] and self.getNumFaces() > 0:
with self.shader():
self.shader().setUniform1i("flatColor", self._flatColor)
norms = self.getNormals()
faces = self._GLFaces
uvs = self.getUVs()
try:
if norms is not None:
glEnableClientState(GL_NORMAL_ARRAY)
glNormalPointerf(norms)
if uvs is not None:
glEnableClientState(GL_TEXTURE_COORD_ARRAY)
glTexCoordPointerf(uvs)
if faces is None:
                        glDrawArrays(GL_TRIANGLES, 0, np.prod(verts.shape[:-1]))
else:
faces = faces.astype(np.uint).flatten()
glDrawElements(GL_TRIANGLES, faces.shape[0], GL_UNSIGNED_INT, faces)
finally:
glDisableClientState(GL_NORMAL_ARRAY)
glDisableClientState(GL_TEXTURE_COORD_ARRAY)
if self.view().opts['drawPoints']:
if self._mesh.has_vertex_property("pscale"):
pscale = self.getVertexAttribData("pscale")
else:
pscale = None
if not hasColor:
glColor3f(*self.opts['pointColor'])
with PointShader:
camPos = self.view().cameraPosition()
if camPos is not None:
PointShader.setUniform3f("camPos", camPos)
PointShader.setUniform1i("pixmode", 0)
else:
PointShader.setUniform1i("pixmode", 1)
if pscale is None:
PointShader.setUniform1f("unifrom_scale", 0.5)
glDrawArrays(GL_POINTS, 0, self.getNumVertexes())
else:
PointShader.setUniform1f("unifrom_scale", -1)
pscaleAttrib = PointShader.getAttribLocation("pscale")
vertexScales = vbo.VBO(pscale.flatten())
vertexScales.bind()
glEnableVertexAttribArray(pscaleAttrib)
glVertexAttribPointer(pscaleAttrib, 1, GL_FLOAT, False, 0, None)
glDrawArrays(GL_POINTS, 0, self.getNumVertexes())
vertexScales.unbind()
glDisableClientState(GL_COLOR_ARRAY)
if self.view().opts['drawEdges'] and self.getNumEdges() > 0:
edges = self.getEdges()
# color = self.getEdgesColors()
try:
# if color is None:
glColor3f(*self.opts['edgeColor'])
# else:
# glEnableClientState(GL_COLOR_ARRAY)
# glColorPointerf(color)
edges = edges.flatten()
glDrawElements(GL_LINES, edges.shape[0], GL_UNSIGNED_INT, edges)
finally:
pass
# glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
if self._selected:
self.drawBBox()
def getVertexes(self):
"""
Get mesh vertex positions.
Returns:
np.ndarray, shape = (nv,3).
"""
p = self._mesh.points()
if p.shape[0] == 0:
return None
return p
def getFaces(self):
"""
Get mesh faces-vertex indices.
Returns:
list of np.ndarray or None.
"""
f = self._mesh.face_vertex_indices()
if f.shape[0] == 0:
return None
return [face[face >= 0] for face in f]
def getColors(self):
"""
Get mesh vertex colors.
Returns:
np.ndarray, shape = (nv,3) or None.
"""
if self.hasAttribute('vertex', 'color'):
return self.getVertexAttribData("color")
else:
return None
def getUVs(self):
"""
Get mesh vertex texcoords.
Returns:
np.ndarray, shape = (nv,3) or None.
"""
if self.hasAttribute('vertex', 'uv'):
uv = self.getVertexAttribData("uv")
return uv
return None
def getNormals(self):
"""
Get mesh vertex normals.
Returns:
np.ndarray, shape = (nv,3) or None.
"""
if not self.hasAttribute('vertex', 'normal'):
if self.getNumFaces() == 0:
return None
self.createAttribute('vertex', 'normal', attribType='vector3', defaultValue=[0, 0, 0], applyValue=False)
self._mesh.update_vertex_normals()
return self._mesh.vertex_normals()
def getFaceNormals(self):
"""
Get mesh face normals.
Returns:
np.ndarray, shape = (nf,3) or None.
"""
if not self.hasAttribute('face', 'normal'):
if self.getNumFaces() == 0:
return None
self.createAttribute('face', 'normal', attribType='vector3', defaultValue=[0, 0, 0], applyValue=False)
self._mesh.update_face_normals()
return self._mesh.face_normals()
def getVertexFaces(self):
"""
Get mesh vertex-face indices.
Returns:
list of np.ndarray.
"""
vf = self._mesh.vertex_face_indices()
return [face[face >= 0] for face in vf]
def getEdges(self):
"""
Get mesh edge-vertex indices.
Returns:
np.ndarray or None.
"""
e = self._mesh.edge_vertex_indices()
if e.shape[0] == 0:
return None
return e
def getNumVertexes(self):
"""
Get mesh vertices count.
Returns:
int.
"""
return self._mesh.n_vertices()
def getNumFaces(self):
"""
Get mesh faces count.
Returns:
int.
"""
return self._mesh.n_faces()
def getNumEdges(self):
"""
Get mesh edges count.
Returns:
int.
"""
return self._mesh.n_edges()
@property
def attributeMap(self):
"""
Get mesh all attribute info.
Returns:
dict{'vertex':...,'edge':...,'face':...,'detail':...}.
"""
return self._attributeMap
def getAttribNames(self, allInOne=False, with_group=False):
"""
Get attribute names of the mesh.
Args:
allInOne(bool): return all names in one list.
with_group(bool): return names with group names.
Returns:
dict or list.
"""
if with_group:
v = list(self._attributeMap["vertex"].keys())
f = list(self._attributeMap["face"].keys())
e = list(self._attributeMap["edge"].keys())
else:
v = [i for i in self._attributeMap["vertex"].keys() if ":" not in i]
f = [i for i in self._attributeMap["face"].keys() if ":" not in i]
e = [i for i in self._attributeMap["edge"].keys() if ":" not in i]
d = list(self._attributeMap["detail"].keys())
if allInOne:
result = []
result.extend(v)
result.extend(f)
result.extend(e)
result.extend(d)
else:
result = {'vertex': v,
'face': f,
'edge': e,
'detail': d}
return result
def _getAttribType(self, attribClass, name):
"""
Get attribute value type.
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
Returns:
str.
"""
if attribClass == "vertex":
value = self.getVertexAttrib(name, 0)
elif attribClass == "edge":
value = self.getEdgeAttrib(name, 0)
elif attribClass == "face":
value = self.getFaceAttrib(name, 0)
elif attribClass == "detail":
value = self.getDetailAttrib(name)
else:
return 'none'
checkType = type(value)
if checkType is np.ndarray or checkType is list:
if checkType is np.ndarray:
size = value.size
else:
size = len(value)
if size == 2:
return 'vector2'
elif size == 3:
return 'vector3'
elif size == 4:
return 'vector4'
elif size == 9:
return 'matrix3'
elif size == 16:
return 'matrix4'
return DATA_TYPE_MAP.get(checkType, 'none')
def getAttribType(self, attribClass, name):
"""
Get attribute type.
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
Returns:
attribute type(str).
"""
if not self.hasAttribute(attribClass, name):
raise AttributeError("the attribute does't exist!")
return self._attributeMap[attribClass][name]['type']
def getAttribDefaultValue(self, attribClass, name):
"""
Get attribute default value
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
Returns:
default attribute value.
"""
if not self.hasAttribute(attribClass, name):
raise AttributeError("the attribute does't exist!")
return self._attributeMap[attribClass][name]['default_value']
def getAttribIsArray(self, attribClass, name):
"""
Get whether attribute is array.
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
Returns:
bool.
"""
if not self.hasAttribute(attribClass, name):
raise AttributeError("the attribute does't exist!")
return self._attributeMap[attribClass][name]['is_array']
def getAttribInfo(self, attribClass, name):
"""
Get attribute info.
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
Returns:
dict {'default_value': defaultValue, 'type': attribType, is_array': array_mode}.
"""
return self._attributeMap[attribClass][name]
def setAttribInfo(self, attribClass, name, info):
"""
Set attribute info.
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
info(dict): {'default_value': defaultValue, 'type': attribType, is_array': array_mode}.
"""
self._attributeMap[attribClass][name] = info
def createAttribute(self, attribClass, name, attribType=None, defaultValue=None, applyValue=True):
"""
Create a new attribute.
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
attribType(str): type of the attribute value->[float, int, vector2, vector3, vector4, matrix3, matrix4 , bool, list, tuple, custom].
defaultValue(any): default value of the attribute.
applyValue(bool): apply the default value.
"""
if attribType is None:
attribType = self._getAttribType(attribClass, name)
if defaultValue is None:
defaultValue = DEFAULT_VALUE_MAP.get(attribType, None)
array_mode = DATA_IS_ARRAY_MAP.get(attribType, False)
shape = DATA_SHAPE_MAP.get(attribType, None)
if attribType == 'list':
shape = [0, len(defaultValue)]
if attribClass == "vertex":
if name == 'pos':
return
if array_mode:
self._mesh.vertex_property_array(name)
else:
self._mesh.vertex_property(name)
if applyValue:
data = np.broadcast_to(defaultValue, get_shape(self.getNumVertexes(), shape))
if array_mode:
self._mesh.set_vertex_property_array(name, data)
else:
self._mesh.set_vertex_property(name, list(data))
elif attribClass == "face":
if array_mode:
self._mesh.face_property_array(name)
else:
self._mesh.face_property(name)
if applyValue:
data = np.broadcast_to(defaultValue, get_shape(self.getNumFaces(), shape))
if array_mode:
self._mesh.set_face_property_array(name, data)
else:
self._mesh.set_face_property(name, list(data))
elif attribClass == "edge":
if array_mode:
self._mesh.edge_property_array(name)
else:
self._mesh.edge_property(name)
if applyValue:
data = np.broadcast_to(defaultValue, get_shape(self.getNumEdges(), shape))
if array_mode:
self._mesh.set_edge_property_array(name, data)
else:
self._mesh.set_edge_property(name, list(data))
elif attribClass == "detail":
array_mode = False
else:
raise AttributeError("please input attribute class in ['vertex', 'edge', 'face', 'detail']")
self._attributeMap[attribClass][name] = {'default_value': defaultValue, 'type': attribType,
'is_array': array_mode}
def removeAttribute(self, attribClass, name):
"""
Remove a attribute.
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
"""
if attribClass == "vertex":
if name == 'pos':
return
if self._mesh.has_vertex_property(name):
self._mesh.remove_vertex_property(name)
self._attributeMap["vertex"].pop(name)
elif attribClass == "face":
if self._mesh.has_face_property(name):
self._mesh.remove_face_property(name)
self._attributeMap["face"].pop(name)
elif attribClass == "edge":
if self._mesh.has_edge_property(name):
self._mesh.remove_edge_property(name)
self._attributeMap["edge"].pop(name)
elif attribClass == "detail":
if name in self._attributeMap["detail"].keys():
self._attributeMap["detail"].pop(name)
def renameAttribute(self, attribClass, name, new_name):
"""
        Rename an attribute.
        Args:
            attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
            name(str): specific attribute name.
            new_name(str): new attribute name.
"""
self.copyAttribute(attribClass, name, new_name, True)
def copyAttribute(self, attribClass, from_name, to_name, remove=False):
"""
Copy attribute data to a new attribute.
Args:
attribClass: one of ['vertex', 'edge', 'face', 'detail'].
from_name: specific attribute name.
to_name: new attribute name.
remove: remove the from attribute.
"""
if not self.hasAttribute(attribClass, from_name):
raise AttributeError("attribute {} of {} not exist".format(from_name, attribClass))
if from_name == to_name:
return
attrib_type = self.getAttribType(attribClass, from_name)
default_value = self.getAttribDefaultValue(attribClass, from_name)
if attribClass == "vertex":
a = self.getVertexAttribData(from_name)
self.setVertexAttribData(to_name, a, attrib_type, default_value)
elif attribClass == "face":
a = self.getFaceAttribData(from_name)
self.setFaceAttribData(to_name, a, attrib_type, default_value)
elif attribClass == "edge":
a = self.getEdgeAttribData(from_name)
self.setEdgeAttribData(to_name, a, attrib_type, default_value)
elif attribClass == "detail":
self.createAttribute("detail", to_name, attrib_type, default_value)
if remove:
self.removeAttribute(attribClass, from_name)
def hasAttribute(self, attribClass, name):
"""
Returns whether the mesh contains a specific attribute
Args:
attribClass(str): one of ['vertex', 'edge', 'face', 'detail'].
name(str): specific attribute name.
Returns:
bool.
"""
if attribClass in self._attributeMap.keys():
if name in self._attributeMap[attribClass].keys():
return True
return False
def addVertex(self, pos):
"""
Add a vertex to the mesh.
Args:
pos(list/tuple/np.ndarray): position of the new vertex, type can be: [list,ndarray,tuple].
Returns:
openmesh.VertexHandle.
"""
if type(pos) is list:
return self._mesh.add_vertex(np.array(pos))
elif type(pos) is np.ndarray:
return self._mesh.add_vertex(pos)
elif type(pos) is tuple:
return self._mesh.add_vertex(np.array([pos[0], pos[1], pos[2]]))
def addFace(self, vts):
"""
Add a face to the mesh.
Args:
vts(list): vertices of the new face, type can be: list of [openmesh.VertexHandle, int]
Returns:
openmesh.FaceHandle
"""
self._GLFaces = None
if type(vts[0]) is openmesh.VertexHandle:
return self._mesh.add_face(vts)
else:
return self._mesh.add_face([self._mesh.vertex_handle(i) for i in vts])
def addVertices(self, vts):
"""
Add vertices to the mesh.
Args:
vts: new vertices , np.ndarray or list, shape = (n,3).
"""
self._GLFaces = None
self._mesh.add_vertices(vts)
def addFaces(self, fcs):
"""
Add faces to the mesh.
Args:
fcs: new faces , np.ndarray or list of ndarray.
"""
self._GLFaces = None
self._mesh.add_faces(fcs)
def removeVertex(self, vt, isolate=False, clean=True):
"""
Remove a vertex from mesh.
Args:
vt(int/openmesh.VertexHandle): vertex index or vertex handle.
isolate(bool): if True, delete the connected elements.
clean(bool): if True, garbage collection after delete.
"""
if type(vt) is not openmesh.VertexHandle:
vt = self._mesh.vertex_handle(vt)
if vt.idx() < self.getNumVertexes():
self._mesh.delete_vertex(vt, isolate)
if clean:
self._mesh.garbage_collection()
self._GLFaces = None
def removeFace(self, fc, isolate=False, clean=True):
"""
Remove a face from mesh.
Args:
fc(int/openmesh.FaceHandle): face index or face handle.
isolate(bool): if True, delete the connected elements.
clean(bool): if True, garbage collection after delete.
"""
if type(fc) is not openmesh.FaceHandle:
fc = self._mesh.face_handle(fc)
if fc.idx() < self.getNumFaces():
self._mesh.delete_face(fc, isolate)
if clean:
self._mesh.garbage_collection()
self._GLFaces = None
def removeEdge(self, eg, isolate=False, clean=True):
"""
Remove an edge from mesh.
Args:
eg(int/openmesh.EdgeHandle): edge index or edge handle.
isolate(bool): if True, delete the connected elements.
clean(bool): if True, garbage collection after delete.
"""
if type(eg) is not openmesh.EdgeHandle:
eg = self._mesh.edge_handle(eg)
if eg.idx() < self.getNumEdges():
self._mesh.delete_edge(eg, isolate)
if clean:
self._mesh.garbage_collection()
self._GLFaces = None
def removeVertices(self, vts, isolate=False):
"""
Remove vertices from mesh.
        Args:
            vts(list): list of vertex index or list of vertex handle.
            isolate(bool): if True, delete the connected elements.
"""
for vt in vts:
self.removeVertex(vt, isolate, False)
self._mesh.garbage_collection()
def removeFaces(self, fcs, isolate=False):
"""
Remove faces from mesh.
Args:
fcs(list): list of face index or list of face handle.
isolate(bool): if True, delete the connected elements.
"""
for fc in fcs:
self.removeFace(fc, isolate, False)
self._mesh.garbage_collection()
def removeEdges(self, egs, isolate=False):
"""
Remove edges from mesh.
Args:
egs(list): list of edge index or list of edge handle.
isolate(bool): if True, delete the connected elements.
"""
for eg in egs:
self.removeEdge(eg, isolate, False)
self._mesh.garbage_collection()
def clear(self):
"""
Clear all mesh data.
"""
self._mesh.clear()
self._attributeMap = {}
self.signals.emit_attribChanged()
self.update()
def getVertexAttribData(self, name):
"""
Get vertex attribute data.
Args:
name(str): specific attribute name.
Returns:
vertex attribute data.
"""
if name == 'pos':
return self._mesh.points()
elif name == 'normal':
return self.getNormals()
else:
if not self.hasAttribute('vertex', name):
raise AttributeError("Attribute {} does't exist!".format(name))
if self.getAttribIsArray('vertex', name):
return self._mesh.vertex_property_array(name)
else:
return self._mesh.vertex_property(name)
def getFaceAttribData(self, name):
"""
Get face attribute data.
Args:
name(str): specific attribute name.
Returns:
face attribute data.
"""
if name == 'normal':
return self.getFaceNormals()
else:
if not self._mesh.has_face_property(name):
raise AttributeError("Attribute {} does't exist!".format(name))
if self.getAttribIsArray('face', name):
return self._mesh.face_property_array(name)
else:
return self._mesh.face_property(name)
def getEdgeAttribData(self, name):
"""
Get edge attribute data.
Args:
name(str): specific attribute name.
Returns:
edge attribute data.
"""
if not self._mesh.has_edge_property(name):
raise AttributeError("Attribute {} does't exist!".format(name))
if self.getAttribIsArray('edge', name):
return self._mesh.edge_property_array(name)
else:
return self._mesh.edge_property(name)
def setVertexAttribData(self, name, data, attribType=None, defaultValue=None):
"""
Set vertex attribute data , if the attribute not exist, create and set it.
Args:
name(str): specific attribute name.
data(lsit/np.ndarray): attribute data.
attribType(str): if the attribute is not exist, we need attribType to create the attribute.
defaultValue(any): if the attribute is not exist, we need defaultValue to create the attribute.
"""
if name == 'pos':
self._mesh.points()[..., [0, 1, 2]] = data
elif name == 'normal':
self.getNormals()[..., [0, 1, 2]] = data
else:
if not self._mesh.has_vertex_property(name):
if defaultValue is None:
defaultValue = data[0]
self.createAttribute('vertex', name, attribType, defaultValue=defaultValue, applyValue=False)
is_array = self.getAttribIsArray('vertex', name)
if is_array:
self._mesh.set_vertex_property_array(name, data)
else:
self._mesh.set_vertex_property(name, data)
self.signals.emit_attribChanged()
def setFaceAttribData(self, name, data, attribType=None, defaultValue=None):
"""
Set face attribute data , if the attribute not exist, create and set it.
Args:
name(str): specific attribute name.
data(lsit/np.ndarray): attribute data.
attribType(str): if the attribute is not exist, we need attribType to create the attribute.
defaultValue(any): if the attribute is not exist, we need defaultValue to create the attribute.
"""
if name == 'normal':
self.getFaceNormals()[..., [0, 1, 2]] = data
else:
if not self._mesh.has_face_property(name):
if defaultValue is None:
defaultValue = data[0]
self.createAttribute('face', name, attribType, defaultValue=defaultValue, applyValue=False)
is_array = self.getAttribIsArray('face', name)
if is_array:
self._mesh.set_face_property_array(name, data)
else:
self._mesh.set_face_property(name, data)
self.signals.emit_attribChanged()
def setEdgeAttribData(self, name, data, attribType=None, defaultValue=None):
"""
Set edge attribute data , if the attribute not exist, create and set it.
Args:
name(str): specific attribute name.
data(lsit/np.ndarray): attribute data.
attribType(str): if the attribute is not exist, we need attribType to create the attribute.
defaultValue(any): if the attribute is not exist, we need defaultValue to create the attribute.
"""
if not self._mesh.has_edge_property(name):
if defaultValue is None:
defaultValue = data[0]
self.createAttribute('edge', name, attribType, defaultValue=defaultValue, applyValue=False)
is_array = self.getAttribIsArray('edge', name)
if is_array:
self._mesh.set_edge_property_array(name, data)
else:
self._mesh.set_edge_property(name, data)
self.signals.emit_attribChanged()
def getVertexAttrib(self, name, index):
"""
Get a vertex attribute value.
Args:
name(str): specific attribute name.
index(int): vertex index.
Returns:
vertex attribute value.
"""
vh = self._mesh.vertex_handle(index)
if self._mesh.has_vertex_property(name):
return self._mesh.vertex_property(name, vh)
if name == 'pos':
return self._mesh.point(vh)
elif name == 'normal':
return self._mesh.normal(vh)
def getFaceAttrib(self, name, index):
"""
Get a face attribute value.
Args:
name(str): specific attribute name.
index(int): face index.
Returns:
face attribute value.
"""
fh = self._mesh.face_handle(index)
if self._mesh.has_face_property(name):
return self._mesh.face_property(name, fh)
if name == 'normal':
return self._mesh.normal(fh)
def getEdgeAttrib(self, name, index):
"""
        Get an edge attribute value.
Args:
name(str): specific attribute name.
index(int): edge index.
Returns:
edge attribute value.
"""
eh = self._mesh.edge_handle(index)
if self._mesh.has_edge_property(name):
return self._mesh.edge_property(name, eh)
return None
def setVertexAttrib(self, name, index, value):
"""
Set a vertex attribute value.
Args:
name(str): specific attribute name.
index(int): vertex index.
value(any): attribute value.
"""
vh = self._mesh.vertex_handle(index)
if self._mesh.has_vertex_property(name):
self._mesh.set_vertex_property(name, vh, value)
self.signals.emit_attribChanged()
return True
if name == 'pos':
self._mesh.set_point(vh, value)
return True
elif name == 'normal':
self._mesh.set_normal(vh, value)
return True
return False
def setFaceAttrib(self, name, index, value):
"""
Set a face attribute value.
Args:
name(str): specific attribute name.
index(int): face index.
value(any): attribute value.
"""
fh = self._mesh.face_handle(index)
if self._mesh.has_face_property(name):
self._mesh.set_face_property(name, fh, value)
self.signals.emit_attribChanged()
return True
if name == 'normal':
self._mesh.set_normal(fh, value)
return True
return False
def setEdgeAttrib(self, name, index, value):
"""
        Set an edge attribute value.
Args:
name(str): specific attribute name.
index(int): edge index.
value(any): attribute value.
"""
eh = self._mesh.edge_handle(index)
if self._mesh.has_edge_property(name):
self._mesh.set_edge_property(name, eh, value)
self.signals.emit_attribChanged()
return True
return False
def getDetailAttrib(self, name):
"""
Get a detail attribute value.
Args:
name(str): specific attribute name.
Returns:
detail attribute value.
"""
if name in self._attributeMap['detail'].keys():
return self._attributeMap['detail'][name]['default_value']
return None
def setDetailAttrib(self, name, value, attribType=None):
"""
Set a detail attribute value.
Args:
name(str): specific attribute name.
value(any): attribute value.
attribType(str): if the attribute is not exist, we need attribType to create the attribute.
"""
if name in self._attributeMap['detail'].keys():
self._attributeMap['detail'][name]['default_value'] = value
else:
if attribType is None:
raise AttributeError("detail attribute {} not exist, please create it or input attribType".format(name))
self.createAttribute('detail', name, attribType, value)
self.signals.emit_attribChanged()
def getAllVertexAttributes(self):
"""
Get all vertex attribute data.
Returns:
dict {attribute name: attribute data}.
"""
data = {}
for attrib_name in self._attributeMap["vertex"].keys():
data[attrib_name] = self.getVertexAttribData(attrib_name)
return data
def createGroup(self, groupClass, name, default=False):
"""
Create a group.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
default(bool): if True, all elements will in the group.
"""
if groupClass == 'vertex':
name = "v:" + name
elif groupClass == 'face':
name = "f:" + name
elif groupClass == 'edge':
name = "e:" + name
self.createAttribute(groupClass, name, 'bool', default)
def getGroupData(self, groupClass, name):
"""
Get group data.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
Returns:
list of bool.
"""
if groupClass == 'vertex':
name = "v:" + name
if self._mesh.has_vertex_property(name):
                return self._mesh.vertex_property_array(name).astype(bool)
elif groupClass == 'face':
name = "f:" + name
if self._mesh.has_face_property(name):
                return self._mesh.face_property_array(name).astype(bool)
elif groupClass == 'edge':
name = "e:" + name
if self._mesh.has_edge_property(name):
                return self._mesh.edge_property_array(name).astype(bool)
else:
raise AttributeError("class {} does not support group".format(groupClass))
raise AttributeError("Group {} does not exist".format(name))
def setGroupData(self, groupClass, name, data):
"""
Set group data.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
data(list): list of bool.
"""
if groupClass == 'vertex':
name = "v:" + name
self.setVertexAttribData(name, data, 'bool', False)
elif groupClass == 'face':
name = "f:" + name
self.setFaceAttribData(name, data, 'bool', False)
elif groupClass == 'edge':
name = "e:" + name
self.setEdgeAttribData(name, data, 'bool', False)
def getGroup(self, groupClass, name, index):
"""
Get whether a specific element is in the group.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
index(int): element index.
Returns:
group value(bool).
"""
if groupClass == 'vertex':
name = "v:" + name
if self._mesh.has_vertex_property(name):
vh = self._mesh.vertex_handle(index)
return bool(self._mesh.vertex_property(name, vh))
elif groupClass == 'face':
name = "f:" + name
if self._mesh.has_face_property(name):
fh = self._mesh.face_handle(index)
return bool(self._mesh.face_property(name, fh))
elif groupClass == 'edge':
name = "e:" + name
if self._mesh.has_edge_property(name):
eh = self._mesh.edge_handle(index)
return bool(self._mesh.edge_property(name, eh))
def setGroup(self, groupClass, name, index, value):
"""
Set whether a specific element is in the group.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
index(int): element index.
value(bool).
"""
assert type(value) is bool
if groupClass == 'vertex':
self.setVertexAttrib("v:" + name, index, value)
elif groupClass == 'face':
self.setFaceAttrib("f:" + name, index, value)
elif groupClass == 'edge':
self.setEdgeAttrib("e:" + name, index, value)
def getGroupNames(self, allInOne=False):
"""
Get all group names of the mesh.
        Args:
allInOne(bool): put all names in one list.
Returns:
dict or list.
"""
v = [i[2:] for i in self._attributeMap["vertex"].keys() if ":" in i]
f = [i[2:] for i in self._attributeMap["face"].keys() if ":" in i]
e = [i[2:] for i in self._attributeMap["edge"].keys() if ":" in i]
if allInOne:
result = []
result.extend(v)
result.extend(f)
result.extend(e)
else:
result = {'vertex': v,
'face': f,
'edge': e}
return result
def removeGroup(self, groupClass, name):
"""
Remove a group from mesh.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
"""
if groupClass == 'vertex':
name = "v:" + name
elif groupClass == 'face':
name = "f:" + name
elif groupClass == 'edge':
name = "e:" + name
if ":" in name:
self.removeAttribute(groupClass, name)
def hasGroup(self, groupClass, name):
"""
Get whether the mesh contain a specific group.
Args:
groupClass(str): one of ['vertex', 'edge', 'face'].
name(str): specific group name.
Returns:
bool
"""
if groupClass == 'vertex':
name = "v:" + name
elif groupClass == 'face':
name = "f:" + name
elif groupClass == 'edge':
name = "e:" + name
if ":" in name:
return self.hasAttribute(groupClass, name)
else:
return False
|
[
"numpy.identity",
"numpy.product",
"openmesh.PolyMesh",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.empty",
"copy.deepcopy",
"numpy.shape",
"numpy.broadcast_to"
] |
[((3084, 3116), 'numpy.identity', 'np.identity', (['(3)'], {'dtype': 'np.float64'}), '(3, dtype=np.float64)\n', (3095, 3116), True, 'import numpy as np\n'), ((3133, 3165), 'numpy.identity', 'np.identity', (['(4)'], {'dtype': 'np.float64'}), '(4, dtype=np.float64)\n', (3144, 3165), True, 'import numpy as np\n'), ((7821, 7846), 'copy.deepcopy', 'copy.deepcopy', (['self._mesh'], {}), '(self._mesh)\n', (7834, 7846), False, 'import copy\n'), ((722, 745), 'numpy.shape', 'np.shape', (['element_value'], {}), '(element_value)\n', (730, 745), True, 'import numpy as np\n'), ((833, 856), 'numpy.empty', 'np.empty', (['element_shape'], {}), '(element_shape)\n', (841, 856), True, 'import numpy as np\n'), ((1608, 1631), 'numpy.shape', 'np.shape', (['element_value'], {}), '(element_value)\n', (1616, 1631), True, 'import numpy as np\n'), ((1717, 1740), 'numpy.empty', 'np.empty', (['element_shape'], {}), '(element_shape)\n', (1725, 1740), True, 'import numpy as np\n'), ((2488, 2511), 'numpy.shape', 'np.shape', (['element_value'], {}), '(element_value)\n', (2496, 2511), True, 'import numpy as np\n'), ((2597, 2620), 'numpy.empty', 'np.empty', (['element_shape'], {}), '(element_shape)\n', (2605, 2620), True, 'import numpy as np\n'), ((4783, 4802), 'openmesh.PolyMesh', 'openmesh.PolyMesh', ([], {}), '()\n', (4800, 4802), False, 'import openmesh\n'), ((946, 991), 'numpy.broadcast_to', 'np.broadcast_to', (['element_value', 'element_shape'], {}), '(element_value, element_shape)\n', (961, 991), True, 'import numpy as np\n'), ((1828, 1873), 'numpy.broadcast_to', 'np.broadcast_to', (['element_value', 'element_shape'], {}), '(element_value, element_shape)\n', (1843, 1873), True, 'import numpy as np\n'), ((2708, 2753), 'numpy.broadcast_to', 'np.broadcast_to', (['element_value', 'element_shape'], {}), '(element_value, element_shape)\n', (2723, 2753), True, 'import numpy as np\n'), ((6156, 6175), 'numpy.min', 'np.min', (['vts'], {'axis': '(0)'}), '(vts, axis=0)\n', (6162, 6175), True, 'import numpy as np\n'), ((6510, 6529), 'numpy.max', 'np.max', (['vts'], {'axis': '(0)'}), '(vts, axis=0)\n', (6516, 6529), True, 'import numpy as np\n'), ((27023, 27036), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (27031, 27036), True, 'import numpy as np\n'), ((27196, 27230), 'numpy.array', 'np.array', (['[pos[0], pos[1], pos[2]]'], {}), '([pos[0], pos[1], pos[2]])\n', (27204, 27230), True, 'import numpy as np\n'), ((10338, 10366), 'numpy.product', 'np.product', (['verts.shape[:-1]'], {}), '(verts.shape[:-1])\n', (10348, 10366), True, 'import numpy as np\n')]
|
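A hedged usage sketch for the Mesh wrapper above; it assumes the module's relative imports (Mesh_utils, Shader) and openmesh resolve, and it only exercises methods defined in this file.

# Hedged usage sketch; no GL context is needed for the calls below.
import numpy as np

m = Mesh()
m.addVertices(np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]))
m.addFace([0, 1, 2])
m.createAttribute('vertex', 'pscale', attribType='float', defaultValue=0.5)
m.setVertexAttrib('pscale', 0, 1.0)
m.createGroup('vertex', 'corner', default=False)
m.setGroup('vertex', 'corner', 0, True)
print(m.getNumVertexes(), m.getNumFaces(), m.getVertexAttrib('pscale', 0))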
import numpy as np
import pyautogui
import imutils
from mss import mss
from PIL import Image
import cv2
import copy
import argparse
from hand_poses import HandPoses
from hand_detect import HandDetect
from delay import Delay
from spotify_controls import SpotifyControls
parser = argparse.ArgumentParser()
parser.add_argument("--detect_threshold", help="minimum percentage of a hand prediction",
type=float, default=0.90)
parser.add_argument("--pose_threshold", help="SVC threshold in classification confidence",
type=float, default=0.90)
parser.add_argument("--path_classifier", help="path to classifier",
type=str, default='models/spotify_gesture_cmd_model.pkl')
parser.add_argument("--moving_average", help="minimum percentage of pose prediction of last frames",
type=float, default=0.85)
parser.add_argument("--frames_in", help="number of frames to consider to predict a pose when in action",
type=int, default=20)
parser.add_argument("--frames_out", help="number of frames to consider to predict a pose",
type=int, default=40)
parser.add_argument("--show_lm", help="show hand landmarks",
type=bool, default=True)
args = parser.parse_args()
hand_detect = HandDetect(detect_threshold=args.detect_threshold)
hand_pose = HandPoses(pose_threshold=args.pose_threshold,
name_classifier=args.path_classifier)
# This will log into Spotify using your personal account with a separate popup window
spotify_controller = SpotifyControls()
delay = Delay(hand_pose.classifier.classes_, moving_average=args.moving_average, frames_in_action=args.frames_in, frames_out=args.frames_out)
webcam = True
if webcam:
cap = cv2.VideoCapture(0)
else:
sct = mss()
with hand_detect.mp_hands.Hands(
max_num_hands=1,
min_detection_confidence=0.6,
min_tracking_confidence=0.5) as hands:
while True:
if webcam:
ret, image = cap.read()
else: # screenshot
ret = True
# image = pyautogui.screenshot()
# image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
# Higher fps with mss for screen grab:
mon = sct.monitors[0]
image = np.array(sct.grab(mon))
image = np.flip(image[:, :, :3], 2)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if not ret: # Image was not successfully read!
print('\rNo image! Is a webcam available?', '', end='')
continue
raw_frame = copy.deepcopy(image)
image = cv2.flip(image, 1)
image_height, image_width, _ = image.shape
#spotify_controller.draw_mouse_rectangle(image)
for (pose, confidence), (lm, mp_lm) in hand_detect.detect_hand(hands=hands,
image=raw_frame,
hand_pose=hand_pose,
delay=delay):
if args.show_lm:
hand_detect.mp_drawing.draw_landmarks(
image, mp_lm, hand_detect.mp_hands.HAND_CONNECTIONS)
if pose is not None:
cv2.putText(image, f"{pose}: ({confidence:.2f})",
(30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 100), 2)
print(f"\r{pose}: ({confidence:.2f}) ", "", end="")
spotify_controller.execute_cmd(pose=pose, lm=lm, delay=delay, frame=image)
else:
cv2.putText(image, f"Idle", (30, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 100), 2)
if delay.ignore_frames:
cv2.putText(image, f"Position locked", (30, 60),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 100), 2)
key = (cv2.waitKey(10) & 0xFF)
image = cv2.resize(image, (int(image_width * .6),
int(image_height * .6)), interpolation=cv2.INTER_AREA)
# if webcam:
cv2.imshow('frame', image)
if key == ord('q'):
break
if webcam:
cap.release()
cv2.destroyAllWindows()
|
[
"numpy.flip",
"delay.Delay",
"argparse.ArgumentParser",
"mss.mss",
"hand_poses.HandPoses",
"hand_detect.HandDetect",
"cv2.flip",
"cv2.imshow",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"copy.deepcopy",
"spotify_controls.SpotifyControls",
"cv2.waitKey"
] |
[((282, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (305, 307), False, 'import argparse\n'), ((1307, 1357), 'hand_detect.HandDetect', 'HandDetect', ([], {'detect_threshold': 'args.detect_threshold'}), '(detect_threshold=args.detect_threshold)\n', (1317, 1357), False, 'from hand_detect import HandDetect\n'), ((1370, 1458), 'hand_poses.HandPoses', 'HandPoses', ([], {'pose_threshold': 'args.pose_threshold', 'name_classifier': 'args.path_classifier'}), '(pose_threshold=args.pose_threshold, name_classifier=args.\n path_classifier)\n', (1379, 1458), False, 'from hand_poses import HandPoses\n'), ((1583, 1600), 'spotify_controls.SpotifyControls', 'SpotifyControls', ([], {}), '()\n', (1598, 1600), False, 'from spotify_controls import SpotifyControls\n'), ((1609, 1746), 'delay.Delay', 'Delay', (['hand_pose.classifier.classes_'], {'moving_average': 'args.moving_average', 'frames_in_action': 'args.frames_in', 'frames_out': 'args.frames_out'}), '(hand_pose.classifier.classes_, moving_average=args.moving_average,\n frames_in_action=args.frames_in, frames_out=args.frames_out)\n', (1614, 1746), False, 'from delay import Delay\n'), ((4278, 4301), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4299, 4301), False, 'import cv2\n'), ((1779, 1798), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1795, 1798), False, 'import cv2\n'), ((1815, 1820), 'mss.mss', 'mss', ([], {}), '()\n', (1818, 1820), False, 'from mss import mss\n'), ((2608, 2628), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (2621, 2628), False, 'import copy\n'), ((2646, 2664), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (2654, 2664), False, 'import cv2\n'), ((4174, 4200), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'image'], {}), "('frame', image)\n", (4184, 4200), False, 'import cv2\n'), ((2352, 2379), 'numpy.flip', 'np.flip', (['image[:, :, :3]', '(2)'], {}), '(image[:, :, :3], 2)\n', (2359, 2379), True, 'import numpy as np\n'), ((2400, 2438), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2412, 2438), False, 'import cv2\n'), ((3972, 3987), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (3983, 3987), False, 'import cv2\n'), ((3302, 3415), 'cv2.putText', 'cv2.putText', (['image', 'f"""{pose}: ({confidence:.2f})"""', '(30, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 255, 100)', '(2)'], {}), "(image, f'{pose}: ({confidence:.2f})', (30, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 100), 2)\n", (3313, 3415), False, 'import cv2\n'), ((3652, 3743), 'cv2.putText', 'cv2.putText', (['image', 'f"""Idle"""', '(30, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(0, 255, 100)', '(2)'], {}), "(image, f'Idle', (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, \n 255, 100), 2)\n", (3663, 3743), False, 'import cv2\n'), ((3827, 3929), 'cv2.putText', 'cv2.putText', (['image', 'f"""Position locked"""', '(30, 60)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.7)', '(255, 0, 100)', '(2)'], {}), "(image, f'Position locked', (30, 60), cv2.FONT_HERSHEY_SIMPLEX, \n 0.7, (255, 0, 100), 2)\n", (3838, 3929), False, 'import cv2\n')]
|
import numpy as np
from scipy.fft import fft
def CAR(X, labels):
N = X.shape
N_classes = len(np.unique(labels))
data10 = np.zeros((N[0], N[1], 1))
data11 = np.zeros((N[0], N[1], 1))
data12 = np.zeros((N[0], N[1], 1))
data13 = np.zeros((N[0], N[1], 1))
    for trial in range(N[2]): ## For each trial, average across all channels
data = X[:,:,trial]
X_med = np.mean(data, axis = 1).reshape(data.shape[0])
data_car = data - X_med.reshape((X.shape[0],1))
if (labels[trial] == 0):
            data10 = np.append(data10, data_car.reshape((data_car.shape[0], data_car.shape[1], 1)), axis = 2)# append along the third dimension (trials)
elif (labels[trial] == 1):
data11 = np.append(data11, data_car.reshape((data_car.shape[0], data_car.shape[1], 1)), axis = 2)
elif (labels[trial] == 2):
data12 = np.append(data12, data_car.reshape((data_car.shape[0], data_car.shape[1], 1)), axis = 2)
elif (labels[trial] == 3):
data13 = np.append(data13, data_car.reshape((data_car.shape[0], data_car.shape[1], 1)), axis = 2)
data10 = np.delete(data10, 0, axis=2)
data11 = np.delete(data11, 0, axis=2)
data12 = np.delete(data12, 0, axis=2)
data13 = np.delete(data13, 0, axis=2)
return data10,data11,data12,data13
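# A minimal sanity check of the CAR step above (toy data, not part of the
# original pipeline): after subtracting the per-sample mean across channels,
# every sample's channel mean is numerically zero.
def _car_sanity_check(n_samples=1536, n_channels=16):
    data = np.random.rand(n_samples, n_channels)
    car = data - np.mean(data, axis=1).reshape((n_samples, 1))
    assert np.allclose(np.mean(car, axis=1), 0.0)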
def Ext_fft(N, fs, data10, data11, data12, data13, out_chans):
"""
args:
N -> x.shape
fs -> sampling frequency
dataX -> matrix (1536,16, 12)
        out_chans -> channels to be excluded
"""
N_class = 4; N_trials = 12; n_harmonicas = 2
N_pos = ((N[0]/fs)*np.array([np.array([10,11,12,13])*i for i in range(1,n_harmonicas+1)])).ravel().astype(int)
val_chans = np.array(range(1,17))
    val_chans = np.delete(val_chans, [np.where(val_chans == c) for c in out_chans]) # build array(1:16), then drop the channels listed in out_chans
N_chans = val_chans.shape[0]
n_features = N_pos.shape[0]
    F_dez=np.zeros((N_trials,N_chans*N_class*n_harmonicas)) # matrix of trials x (channels*classes*harmonics)
F_onze=np.zeros((N_trials,N_chans*N_class*n_harmonicas))
F_doze=np.zeros((N_trials,N_chans*N_class*n_harmonicas))
F_treze=np.zeros((N_trials,N_chans*N_class*n_harmonicas))
for trial in range(0,N_trials):
Chans_XY=0
for chans in val_chans-1:
            a = abs(fft(data10[:,chans,trial])) # indexed below at the N_pos positions for 10,11,12,13 Hz
b = abs(fft(data11[:,chans,trial]))
c = abs(fft(data12[:,chans,trial]))
d = abs(fft(data13[:,chans,trial]))
            F_dez[trial,Chans_XY+np.array(range(0,n_features))] = a[N_pos[range(0,n_features)]] # take the N_pos positions for 10,11,12,13 Hz
            F_onze[trial,Chans_XY+np.array(range(0,n_features))] = b[N_pos[range(0,n_features)]] # take the N_pos positions for 10,11,12,13 Hz
            F_doze[trial,Chans_XY+np.array(range(0,n_features))] = c[N_pos[range(0,n_features)]] # take the N_pos positions for 10,11,12,13 Hz
            F_treze[trial,Chans_XY+np.array(range(0,n_features))] = d[N_pos[range(0,n_features)]] # take the N_pos positions for 10,11,12,13 Hz
Chans_XY += n_features
return F_dez, F_onze, F_doze, F_treze
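# Worked example for N_pos (assuming, say, fs=256 Hz with N[0]=1536 samples,
# i.e. a 6 s window): the FFT bin of a frequency f is (N[0]/fs)*f, so the
# fundamentals 10,11,12,13 Hz land at bins 60,66,72,78 and their second
# harmonics 20,22,24,26 Hz at bins 120,132,144,156.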
def CAR_FFT(X,labels, fs):
    # CAR FILTER
    d10, d11, d12, d13 = CAR(X,labels)
    # FFT EXTRACTION
out_chans = []
#out_chans = [1, 2, 3, 4, 10, 14, 15,16]
    F_dez, F_onze, F_doze, F_treze = Ext_fft(X.shape, fs, d10, d11, d12, d13, out_chans=out_chans)
F_all = np.vstack([F_dez, F_onze, F_doze, F_treze])
return F_all
|
[
"numpy.mean",
"numpy.unique",
"numpy.where",
"numpy.delete",
"numpy.array",
"numpy.zeros",
"numpy.vstack",
"scipy.fft.fft"
] |
[((140, 165), 'numpy.zeros', 'np.zeros', (['(N[0], N[1], 1)'], {}), '((N[0], N[1], 1))\n', (148, 165), True, 'import numpy as np\n'), ((179, 204), 'numpy.zeros', 'np.zeros', (['(N[0], N[1], 1)'], {}), '((N[0], N[1], 1))\n', (187, 204), True, 'import numpy as np\n'), ((218, 243), 'numpy.zeros', 'np.zeros', (['(N[0], N[1], 1)'], {}), '((N[0], N[1], 1))\n', (226, 243), True, 'import numpy as np\n'), ((257, 282), 'numpy.zeros', 'np.zeros', (['(N[0], N[1], 1)'], {}), '((N[0], N[1], 1))\n', (265, 282), True, 'import numpy as np\n'), ((1161, 1189), 'numpy.delete', 'np.delete', (['data10', '(0)'], {'axis': '(2)'}), '(data10, 0, axis=2)\n', (1170, 1189), True, 'import numpy as np\n'), ((1203, 1231), 'numpy.delete', 'np.delete', (['data11', '(0)'], {'axis': '(2)'}), '(data11, 0, axis=2)\n', (1212, 1231), True, 'import numpy as np\n'), ((1245, 1273), 'numpy.delete', 'np.delete', (['data12', '(0)'], {'axis': '(2)'}), '(data12, 0, axis=2)\n', (1254, 1273), True, 'import numpy as np\n'), ((1287, 1315), 'numpy.delete', 'np.delete', (['data13', '(0)'], {'axis': '(2)'}), '(data13, 0, axis=2)\n', (1296, 1315), True, 'import numpy as np\n'), ((2027, 2081), 'numpy.zeros', 'np.zeros', (['(N_trials, N_chans * N_class * n_harmonicas)'], {}), '((N_trials, N_chans * N_class * n_harmonicas))\n', (2035, 2081), True, 'import numpy as np\n'), ((2124, 2178), 'numpy.zeros', 'np.zeros', (['(N_trials, N_chans * N_class * n_harmonicas)'], {}), '((N_trials, N_chans * N_class * n_harmonicas))\n', (2132, 2178), True, 'import numpy as np\n'), ((2185, 2239), 'numpy.zeros', 'np.zeros', (['(N_trials, N_chans * N_class * n_harmonicas)'], {}), '((N_trials, N_chans * N_class * n_harmonicas))\n', (2193, 2239), True, 'import numpy as np\n'), ((2247, 2301), 'numpy.zeros', 'np.zeros', (['(N_trials, N_chans * N_class * n_harmonicas)'], {}), '((N_trials, N_chans * N_class * n_harmonicas))\n', (2255, 2301), True, 'import numpy as np\n'), ((3590, 3633), 'numpy.vstack', 'np.vstack', (['[F_dez, F_onze, F_doze, F_treze]'], {}), '([F_dez, F_onze, F_doze, F_treze])\n', (3599, 3633), True, 'import numpy as np\n'), ((103, 120), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (112, 120), True, 'import numpy as np\n'), ((1840, 1864), 'numpy.where', 'np.where', (['(val_chans == c)'], {}), '(val_chans == c)\n', (1848, 1864), True, 'import numpy as np\n'), ((408, 429), 'numpy.mean', 'np.mean', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (415, 429), True, 'import numpy as np\n'), ((2411, 2439), 'scipy.fft.fft', 'fft', (['data10[:, chans, trial]'], {}), '(data10[:, chans, trial])\n', (2414, 2439), False, 'from scipy.fft import fft\n'), ((2500, 2528), 'scipy.fft.fft', 'fft', (['data11[:, chans, trial]'], {}), '(data11[:, chans, trial])\n', (2503, 2528), False, 'from scipy.fft import fft\n'), ((2548, 2576), 'scipy.fft.fft', 'fft', (['data12[:, chans, trial]'], {}), '(data12[:, chans, trial])\n', (2551, 2576), False, 'from scipy.fft import fft\n'), ((2596, 2624), 'scipy.fft.fft', 'fft', (['data13[:, chans, trial]'], {}), '(data13[:, chans, trial])\n', (2599, 2624), False, 'from scipy.fft import fft\n'), ((1681, 1707), 'numpy.array', 'np.array', (['[10, 11, 12, 13]'], {}), '([10, 11, 12, 13])\n', (1689, 1707), True, 'import numpy as np\n')]
|
from typing import Optional, Callable, List
import torch as tc
import numpy as np
from drl.agents.architectures.stateless.abstract import StatelessArchitecture
class Identity(StatelessArchitecture):
"""
Identity architecture. Useful for unit testing.
"""
def __init__(
self,
input_shape: List[int],
w_init: Optional[Callable[[tc.Tensor], None]],
b_init: Optional[Callable[[tc.Tensor], None]]):
"""
Args:
            input_shape (List[int]): Input shape.
w_init (Optional[Callable[[tc.Tensor], None]]): Weight initializer.
b_init (Optional[Callable[[tc.Tensor], None]]): Bias initializer.
"""
super().__init__(w_init, b_init)
self._input_shape = input_shape
@property
def input_shape(self) -> List[int]:
return self._input_shape
@property
def output_dim(self) -> int:
        return int(np.prod(self._input_shape))  # cast so the declared int return type holds
def forward(self, x, **kwargs):
assert list(x.shape[1:]) == self.input_shape
features = x.reshape(-1, self.output_dim)
return features
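# A minimal usage sketch (assumes StatelessArchitecture is a torch.nn.Module
# subclass whose constructor accepts None initializers):
def _identity_demo():
    arch = Identity(input_shape=[3, 4], w_init=None, b_init=None)
    features = arch(tc.zeros(2, 3, 4))  # a batch of two 3x4 inputs
    assert list(features.shape) == [2, 12]  # flattened to output_dim = 3*4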
|
[
"numpy.prod"
] |
[((938, 964), 'numpy.prod', 'np.prod', (['self._input_shape'], {}), '(self._input_shape)\n', (945, 964), True, 'import numpy as np\n')]
|
'''
Hash and Acoustic Fingerprint Functions
<NAME>
'''
import numpy as np
def findAdjPts(index,A,delay_time,delta_time,delta_freq):
"Find the three closest adjacent points to the anchor point"
adjPts = []
low_x = A[index][0]+delay_time
high_x = low_x+delta_time
low_y = A[index][1]-delta_freq/2
high_y = A[index][1]+delta_freq/2
for i in A:
if ((i[0]>low_x and i[0]<high_x) and (i[1]>low_y and i[1]<high_y)):
adjPts.append(i)
return adjPts
def hashPeaks(A,songID,delay_time,delta_time,delta_freq):
"Create a matrix of peaks hashed as: [[freq_anchor, freq_other, delta_time], time_anchor, songID]"
hashMatrix = np.zeros((len(A)*100,5)) #Assume size limitation
index = 0
numPeaks = len(A)
for i in range(0,numPeaks):
adjPts = findAdjPts(i,A,delay_time,delta_time,delta_freq)
adjNum=len(adjPts)
for j in range(0,adjNum):
hashMatrix[index][0] = A[i][1]
hashMatrix[index][1] = adjPts[j][1]
hashMatrix[index][2] = adjPts[j][0]-A[i][0]
hashMatrix[index][3] = A[i][0]
hashMatrix[index][4] = songID
index=index+1
hashMatrix = hashMatrix[~np.all(hashMatrix==0,axis=1)]
hashMatrix = np.sort(hashMatrix,axis=0)
return hashMatrix
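# Illustrative row layout (hypothetical numbers): an anchor peak at
# t=1.0 s, f=3000 Hz with one adjacent peak at t=1.2 s, f=3100 Hz yields the
# row [3000.0, 3100.0, 0.2, 1.0, songID] before the zero rows are stripped.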
def hashSamplePeaks(A,delay_time,delta_time,delta_freq):
"Create a matrix of peaks hashed as: [[freq_anchor, freq_other, delta_time],time_anchor]"
hashMatrix = np.zeros((len(A)*100,4))
index = 0
numPeaks = len(A)
for i in range(0,numPeaks):
adjPts = findAdjPts(i,A,delay_time,delta_time,delta_freq)
adjNum = len(adjPts)
for j in range(0,adjNum):
hashMatrix[index][0] = A[i][1]
hashMatrix[index][1] = adjPts[j][1]
hashMatrix[index][2] = adjPts[j][0]-A[i][0]
hashMatrix[index][3] = A[i][0]
index=index+1
hashMatrix = hashMatrix[~np.all(hashMatrix==0,axis=1)]
hashMatrix = np.sort(hashMatrix,axis=0)
return hashMatrix
def findTimePairs(hash_database,sample_hash,deltaTime,deltaFreq):
    "Find the matching pairs between sample audio file and the songs in the database"
    timePairs = []
    for i in sample_hash:
        for j in hash_database:
            # Anchor frequency, paired frequency and time delta must all match
            # within the given tolerances
            if (abs(i[0] - j[0]) < deltaFreq and
                    abs(i[1] - j[1]) < deltaFreq and
                    abs(i[2] - j[2]) < deltaTime):
                timePairs.append((j[3], i[3], j[4]))
    return timePairs
|
[
"numpy.all",
"numpy.sort"
] |
[((1283, 1310), 'numpy.sort', 'np.sort', (['hashMatrix'], {'axis': '(0)'}), '(hashMatrix, axis=0)\n', (1290, 1310), True, 'import numpy as np\n'), ((2025, 2052), 'numpy.sort', 'np.sort', (['hashMatrix'], {'axis': '(0)'}), '(hashMatrix, axis=0)\n', (2032, 2052), True, 'import numpy as np\n'), ((1236, 1267), 'numpy.all', 'np.all', (['(hashMatrix == 0)'], {'axis': '(1)'}), '(hashMatrix == 0, axis=1)\n', (1242, 1267), True, 'import numpy as np\n'), ((1978, 2009), 'numpy.all', 'np.all', (['(hashMatrix == 0)'], {'axis': '(1)'}), '(hashMatrix == 0, axis=1)\n', (1984, 2009), True, 'import numpy as np\n')]
|
from utils.stats_trajectories import trajectory_arclength
import statistics as stats
import numpy as np
import logging
# Returns a matrix of trajectories:
# the entry (i,j) has the paths that go from the goal i to the goal j
def separate_trajectories_between_goals(trajectories, goals_areas):
goals_n = len(goals_areas)
goals = goals_areas[:,1:]
mat = np.empty((goals_n,goals_n),dtype=object)
# Initialize the matrix elements to empty lists
for i in range(goals_n):
for j in range(goals_n):
mat[i][j] = []
not_associated = []
# For all trajectories
for idx,tr in enumerate(trajectories):
x, y = tr[0], tr[1]
traj_len = len(x)
associated_to_goals = False
if traj_len > 2:
# Start and finish points
start_x, start_y = x[0], y[0]
end_x, end_y = x[-1],y[-1]
start_goal, end_goal = None, None
# Find starting and finishing goal
for j in range(goals_n):
if(is_in_area([start_x,start_y], goals[j])):
start_goal = j
for k in range(goals_n):
if(is_in_area([end_x,end_y], goals[k])):
end_goal = k
if start_goal is not None and end_goal is not None:
mat[start_goal][end_goal].append(tr)
associated_to_goals = True
if (not associated_to_goals):
not_associated.append(tr)
return mat,not_associated
# Removes atypical trajectories
def filter_trajectories(trajectories):
n_trajs = len(trajectories)
if n_trajs == 0:
return []
arclen = []
for tr in trajectories:
vec_arclen = trajectory_arclength(tr)
tr_arclen = vec_arclen[-1]
arclen.append(tr_arclen)
# compute the median and SD of the trajectory set
M = stats.median(arclen)
if len(arclen) < 2:
SD = 0.0
else:
SD = stats.stdev(arclen)
# remove trajectories that differ more than 3SD
filtered_set = []
for i in range(n_trajs):
if arclen[i] > 0 and abs(arclen[i] - M) <= 3.0*SD:
filtered_set.append(trajectories[i])
return filtered_set
# Removes atypical trajectories from a multidimensional array
def filter_traj_matrix(raw_path_set_matrix):
all_trajectories = []
# Initialize a nRowsxnCols matrix with empty lists
filtered_matrix = np.empty(raw_path_set_matrix.shape,dtype=object)
for i in range(raw_path_set_matrix.shape[0]):
for j in range(raw_path_set_matrix.shape[1]):
filtered_matrix[i][j] = []
for i in range(raw_path_set_matrix.shape[0]):
for j in range(raw_path_set_matrix.shape[1]):
# If the list of trajectories is non-empty, filter it
if(len(raw_path_set_matrix[i][j]) > 0):
filtered = filter_trajectories(raw_path_set_matrix[i][j])
filtered_matrix[i][j].extend(filtered)
all_trajectories.extend(filtered)
return filtered_matrix, all_trajectories
def start_time(traj):
return traj[2][0]
def get_trajectories_given_time_interval(trajectories, start_time, finish_time):
# Note: the list of trajectories is sorted by initial time
n = len(trajectories)
if n == 0:
logging.error("Empty set")
return []
traj_set = []
i = 0
t = start_time
    while i < n and t <= finish_time:  # bound i so the scan cannot run past the list
tr = trajectories[i]
t = tr[2][0]
if(start_time <= t and t <= finish_time):
traj_set.append(tr)
i += 1
return traj_set
# Split a trajectory into sub-trajectories between pairs of goals
def break_multigoal_traj(tr, goals):
x, y, t = tr[0], tr[1], tr[2]
traj_set = []
new_x, new_y, new_t = [], [], [] # New trajectory
last_goal = -1 # Last goal
started = False # Flag to indicate that we have started with one goal
for i in range(len(x)):
xy = [x[i], y[i]] # Current position
# Am I in a goal
current_goal = -1
for j in range(len(goals)):
# If the position lies in the goal zone
if is_in_area(xy, goals[j,1:]):
current_goal=j
if current_goal==-1 and last_goal!=-1 and started:
# Split the trajectory just before
traj_set.append([np.array(new_x),np.array(new_y),np.array(new_t)] )
if current_goal==-1 and last_goal!=-1:
# At that point we start the trajectory
# with a point that should be in last_goal
started = True
new_x, new_y, new_t = [x[i-1]], [y[i-1]], [t[i-1]]
last_goal=current_goal
new_x.append(x[i])
new_y.append(y[i])
new_t.append(t[i])
# Coming at the end
if current_goal>0 and i==len(x)-1 and started:
traj_set.append([np.array(new_x),np.array(new_y),np.array(new_t)] )
return traj_set
# Returns 3 lists with the x, y and arc-len values of a trajectory set, respectively
def get_data_from_set(trajectories):
list_x, list_y, list_arclen = [], [], []
for tr in trajectories:
list_x.append(tr[0])
list_y.append(tr[1])
list_arclen.append(trajectory_arclength(tr) )
return list_x, list_y, list_arclen
# Linear regression: f(l) = a + b*l
# Returns the slope of the line and the intercept
def line_parameters(traj, flag):
traj_arclen = trajectory_arclength(traj)
arclen = traj_arclen[-1]
if arclen == 0:
return 0.,0.
x, y = traj[0], traj[1]
if flag == 'x':
b = x[0]
a = (x[-1]-b)/arclen
if flag == 'y':
b = y[0]
a = (y[-1]-b)/arclen
return a, b
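# Worked example: a trajectory running from x=0 to x=10 with total arc length
# 10 gives b = 0 and a = (10 - 0)/10 = 1, i.e. x(l) = 0 + 1*l along the path.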
# Takes as an input a set of trajectories (between goals)
# and a flag that says whether the orientation is in x or y
def get_linear_prior_mean(trajectories, flag):
n = len(trajectories)
if n == 0:
        return [0., 0.], [0., 0.]  # match the (mean, var) shape of the normal return
lineParameters = np.array([ line_parameters(trajectories[i], flag) for i in range(n)])
mean = [np.median(lineParameters[:,0]), np.median(lineParameters[:,1]) ]
var = [np.var(lineParameters[:,0]), np.var(lineParameters[:,1]) ]
cov = np.cov(lineParameters[:,0],lineParameters[:,1])
return mean, var
def arclen_to_time(init_time, arclen, speed):
n = len(arclen)
time = np.zeros(n, dtype=int)
time[0] = init_time
for i in range(1,len(arclen)):
time[i] = int(time[i-1] + (arclen[i]-arclen[i-1])/speed)
return time
# Function to get the ground truth data: n data
def observed_data(traj, n):
if (len(traj)==4):
x, y, l, t = traj
obsX, obsY, obsL, obsT = np.reshape(x[0:n],(-1,1)), np.reshape(y[0:n],(-1,1)), np.reshape(l[0:n],(-1,1)),np.reshape(t[0:n],(-1,1))
obsS = np.reshape(np.divide(np.sqrt(np.square(x[1:n+1]-x[:n])+np.square(y[1:n+1]-y[:n])),t[1:n+1]-t[:n]),(-1,1))
gtX, gtY, gtT = np.reshape(x[n:],(-1,1)), np.reshape(y[n:],(-1,1)),np.reshape(t[n:],(-1,1))
gtS = np.reshape(np.concatenate([np.divide(np.sqrt(np.square(x[n+1:]-x[n:-1])+np.square(y[n+1:]-y[n:-1])),t[n+1:]-t[n:-1]),[0.0]]),(-1,1))
if gtS.shape[0]<2:
return None, None
gtS[-1,0] = gtS[-2,0]
return np.concatenate([obsX, obsY, obsL, obsT, obsS],axis=1),np.concatenate([gtX, gtY, gtT,gtS],axis=1)
    elif len(traj) == 3:
        x, y, t = traj
        obsX, obsY, obsT = np.reshape(x[0:n],(-1,1)), np.reshape(y[0:n],(-1,1)), np.reshape(t[0:n],(-1,1))
        return np.concatenate([obsX, obsY, obsT],axis=1)
def observed_data_given_time(traj, time):
_, _, t = traj
i = 0
while(t[i] <= time and i < len(t)-1):
i += 1
return observed_data(traj, i)
def reshape_trajectory(traj):
x, y, t = traj[:,0], traj[:,1], traj[:,2]
x.reshape((-1,1))
y.reshape((-1,1))
t.reshape((-1,1))
return [x,y,t]
# Checks if a point (x,y) belongs to an area R
def is_in_area(p, area):
x, y = p[0], p[1]
    return (min(area[0::2]) <= x <= max(area[0::2]) and
            min(area[1::2]) <= y <= max(area[1::2]))
def get_goal_of_point(p, goals):
for i in range(len(goals)):
if is_in_area(p,goals[i]):
return i
return None
# Returns the center of a rectangular area
def goal_center(area):
dx, dy = area[-2] - area[0], area[-1] - area[1]
centroid = [area[0] + dx/2., area[1] + dy/2.]
return centroid
def goal_center_and_size(area):
center = np.array([0.25*float(np.sum(area[::2])),0.25*float(np.sum(area[1::2]))])
size = np.array([float(np.max(area[::2]))-float(np.min(area[::2])),float(np.max(area[1::2]))-float(np.min(area[1::2]))])
return center, size
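# Worked example of the area layout assumed above: a rectangle given by its
# four corners, area = [0, 0, 10, 0, 0, 5, 10, 5], has center
# (0.25*(0+10+0+10), 0.25*(0+0+5+5)) = (5.0, 2.5) and size (10.0, 5.0).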
|
[
"statistics.stdev",
"numpy.median",
"numpy.reshape",
"utils.stats_trajectories.trajectory_arclength",
"numpy.max",
"statistics.median",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"numpy.square",
"numpy.concatenate",
"numpy.min",
"numpy.cov",
"logging.error",
"numpy.var"
] |
[((370, 412), 'numpy.empty', 'np.empty', (['(goals_n, goals_n)'], {'dtype': 'object'}), '((goals_n, goals_n), dtype=object)\n', (378, 412), True, 'import numpy as np\n'), ((1884, 1904), 'statistics.median', 'stats.median', (['arclen'], {}), '(arclen)\n', (1896, 1904), True, 'import statistics as stats\n'), ((2437, 2486), 'numpy.empty', 'np.empty', (['raw_path_set_matrix.shape'], {'dtype': 'object'}), '(raw_path_set_matrix.shape, dtype=object)\n', (2445, 2486), True, 'import numpy as np\n'), ((5469, 5495), 'utils.stats_trajectories.trajectory_arclength', 'trajectory_arclength', (['traj'], {}), '(traj)\n', (5489, 5495), False, 'from utils.stats_trajectories import trajectory_arclength\n'), ((6224, 6274), 'numpy.cov', 'np.cov', (['lineParameters[:, 0]', 'lineParameters[:, 1]'], {}), '(lineParameters[:, 0], lineParameters[:, 1])\n', (6230, 6274), True, 'import numpy as np\n'), ((6371, 6393), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'int'}), '(n, dtype=int)\n', (6379, 6393), True, 'import numpy as np\n'), ((1726, 1750), 'utils.stats_trajectories.trajectory_arclength', 'trajectory_arclength', (['tr'], {}), '(tr)\n', (1746, 1750), False, 'from utils.stats_trajectories import trajectory_arclength\n'), ((1969, 1988), 'statistics.stdev', 'stats.stdev', (['arclen'], {}), '(arclen)\n', (1980, 1988), True, 'import statistics as stats\n'), ((3316, 3342), 'logging.error', 'logging.error', (['"""Empty set"""'], {}), "('Empty set')\n", (3329, 3342), False, 'import logging\n'), ((6079, 6110), 'numpy.median', 'np.median', (['lineParameters[:, 0]'], {}), '(lineParameters[:, 0])\n', (6088, 6110), True, 'import numpy as np\n'), ((6111, 6142), 'numpy.median', 'np.median', (['lineParameters[:, 1]'], {}), '(lineParameters[:, 1])\n', (6120, 6142), True, 'import numpy as np\n'), ((6155, 6183), 'numpy.var', 'np.var', (['lineParameters[:, 0]'], {}), '(lineParameters[:, 0])\n', (6161, 6183), True, 'import numpy as np\n'), ((6184, 6212), 'numpy.var', 'np.var', (['lineParameters[:, 1]'], {}), '(lineParameters[:, 1])\n', (6190, 6212), True, 'import numpy as np\n'), ((7558, 7600), 'numpy.concatenate', 'np.concatenate', (['[obsX, obsY, obsT]'], {'axis': '(1)'}), '([obsX, obsY, obsT], axis=1)\n', (7572, 7600), True, 'import numpy as np\n'), ((5264, 5288), 'utils.stats_trajectories.trajectory_arclength', 'trajectory_arclength', (['tr'], {}), '(tr)\n', (5284, 5288), False, 'from utils.stats_trajectories import trajectory_arclength\n'), ((6694, 6721), 'numpy.reshape', 'np.reshape', (['x[0:n]', '(-1, 1)'], {}), '(x[0:n], (-1, 1))\n', (6704, 6721), True, 'import numpy as np\n'), ((6721, 6748), 'numpy.reshape', 'np.reshape', (['y[0:n]', '(-1, 1)'], {}), '(y[0:n], (-1, 1))\n', (6731, 6748), True, 'import numpy as np\n'), ((6748, 6775), 'numpy.reshape', 'np.reshape', (['l[0:n]', '(-1, 1)'], {}), '(l[0:n], (-1, 1))\n', (6758, 6775), True, 'import numpy as np\n'), ((6774, 6801), 'numpy.reshape', 'np.reshape', (['t[0:n]', '(-1, 1)'], {}), '(t[0:n], (-1, 1))\n', (6784, 6801), True, 'import numpy as np\n'), ((6945, 6971), 'numpy.reshape', 'np.reshape', (['x[n:]', '(-1, 1)'], {}), '(x[n:], (-1, 1))\n', (6955, 6971), True, 'import numpy as np\n'), ((6971, 6997), 'numpy.reshape', 'np.reshape', (['y[n:]', '(-1, 1)'], {}), '(y[n:], (-1, 1))\n', (6981, 6997), True, 'import numpy as np\n'), ((6996, 7022), 'numpy.reshape', 'np.reshape', (['t[n:]', '(-1, 1)'], {}), '(t[n:], (-1, 1))\n', (7006, 7022), True, 'import numpy as np\n'), ((7271, 7325), 'numpy.concatenate', 'np.concatenate', (['[obsX, obsY, obsL, obsT, obsS]'], {'axis': '(1)'}), '([obsX, obsY, obsL, obsT, obsS], axis=1)\n', (7285, 7325), True, 'import numpy as np\n'), ((7325, 7369), 'numpy.concatenate', 'np.concatenate', (['[gtX, gtY, gtT, gtS]'], {'axis': '(1)'}), '([gtX, gtY, gtT, gtS], axis=1)\n', (7339, 7369), True, 'import numpy as np\n'), ((7463, 7490), 'numpy.reshape', 'np.reshape', (['x[0:n]', '(-1, 1)'], {}), '(x[0:n], (-1, 1))\n', (7473, 7490), True, 'import numpy as np\n'), ((7490, 7517), 'numpy.reshape', 'np.reshape', (['y[0:n]', '(-1, 1)'], {}), '(y[0:n], (-1, 1))\n', (7500, 7517), True, 'import numpy as np\n'), ((7517, 7544), 'numpy.reshape', 'np.reshape', (['t[0:n]', '(-1, 1)'], {}), '(t[0:n], (-1, 1))\n', (7527, 7544), True, 'import numpy as np\n'), ((4393, 4408), 'numpy.array', 'np.array', (['new_x'], {}), '(new_x)\n', (4401, 4408), True, 'import numpy as np\n'), ((4409, 4424), 'numpy.array', 'np.array', (['new_y'], {}), '(new_y)\n', (4417, 4424), True, 'import numpy as np\n'), ((4425, 4440), 'numpy.array', 'np.array', (['new_t'], {}), '(new_t)\n', (4433, 4440), True, 'import numpy as np\n'), ((4912, 4927), 'numpy.array', 'np.array', (['new_x'], {}), '(new_x)\n', (4920, 4927), True, 'import numpy as np\n'), ((4928, 4943), 'numpy.array', 'np.array', (['new_y'], {}), '(new_y)\n', (4936, 4943), True, 'import numpy as np\n'), ((4944, 4959), 'numpy.array', 'np.array', (['new_t'], {}), '(new_t)\n', (4952, 4959), True, 'import numpy as np\n'), ((8623, 8640), 'numpy.sum', 'np.sum', (['area[::2]'], {}), '(area[::2])\n', (8629, 8640), True, 'import numpy as np\n'), ((8653, 8671), 'numpy.sum', 'np.sum', (['area[1::2]'], {}), '(area[1::2])\n', (8659, 8671), True, 'import numpy as np\n'), ((8702, 8719), 'numpy.max', 'np.max', (['area[::2]'], {}), '(area[::2])\n', (8708, 8719), True, 'import numpy as np\n'), ((8727, 8744), 'numpy.min', 'np.min', (['area[::2]'], {}), '(area[::2])\n', (8733, 8744), True, 'import numpy as np\n'), ((8752, 8770), 'numpy.max', 'np.max', (['area[1::2]'], {}), '(area[1::2])\n', (8758, 8770), True, 'import numpy as np\n'), ((8778, 8796), 'numpy.min', 'np.min', (['area[1::2]'], {}), '(area[1::2])\n', (8784, 8796), True, 'import numpy as np\n'), ((6844, 6873), 'numpy.square', 'np.square', (['(x[1:n + 1] - x[:n])'], {}), '(x[1:n + 1] - x[:n])\n', (6853, 6873), True, 'import numpy as np\n'), ((6870, 6899), 'numpy.square', 'np.square', (['(y[1:n + 1] - y[:n])'], {}), '(y[1:n + 1] - y[:n])\n', (6879, 6899), True, 'import numpy as np\n'), ((7081, 7111), 'numpy.square', 'np.square', (['(x[n + 1:] - x[n:-1])'], {}), '(x[n + 1:] - x[n:-1])\n', (7090, 7111), True, 'import numpy as np\n'), ((7108, 7138), 'numpy.square', 'np.square', (['(y[n + 1:] - y[n:-1])'], {}), '(y[n + 1:] - y[n:-1])\n', (7117, 7138), True, 'import numpy as np\n')]
|
import numpy as np
from utils.metrics import variation_ratio, entropy, bald
from utils.progress_bar import Progbar
def get_monte_carlo_metric(metric):
if metric == 'variation_ratio':
return VariationRationMC
elif metric == 'entropy':
return EntropyMC
elif metric == 'bald':
return BaldMC
elif metric == 'random':
return Random
elif metric == 'softmax':
return Softmax
    elif metric == 'ceal':
        return CEAL
    else:
        raise ValueError('unknown metric: {}'.format(metric))
class MonteCarloEvaluation:
def __init__(self, sess, model, data_batch, sizes_batch, labels_batch,
num_classes, num_samples, max_len, verbose):
self.sess = sess
self.model = model
self.data_batch = data_batch
self.sizes_batch = sizes_batch
self.labels_batch = labels_batch
self.num_classes = num_classes
self.num_samples = num_samples
self.max_len = max_len
self.verbose = verbose
def preprocess_batch(self, data_batch, sizes_batch):
preprocessed_batch = []
preprocessed_sizes_batch = []
for data, size in zip(data_batch, sizes_batch):
if len(data) < self.max_len:
data += [0] * (self.max_len - len(data))
elif len(data) > self.max_len:
data = data[:self.max_len]
size = self.max_len
preprocessed_batch.append(data)
preprocessed_sizes_batch.append(size)
return np.array(preprocessed_batch), np.array(preprocessed_sizes_batch)
def initialize_predictions(self):
raise NotImplementedError
def update_predictions(self, predictions, index):
raise NotImplementedError
def evaluate(self):
raise NotImplementedError
def create_feed_dict(self, data_batch, sizes_batch):
self.feed_dict = self.model.create_feed_dict(
data_placeholder=data_batch,
sizes_placeholder=sizes_batch)
def prediction_samples(self, preprocess_batch=True):
predictions = self.initialize_predictions()
if preprocess_batch:
data_batch, sizes_batch = self.preprocess_batch(
self.data_batch, self.sizes_batch)
self.create_feed_dict(data_batch, sizes_batch)
        if self.verbose:
            progbar = Progbar(target=self.num_samples)  # create the bar once, not per sample
        for i in range(self.num_samples):
            self.update_predictions(predictions, i)
            if self.verbose:
                progbar.update(i + 1, [])
return predictions
class VariationRationMC(MonteCarloEvaluation):
def initialize_predictions(self):
return np.zeros(shape=(self.data_batch.shape[0], self.num_samples))
def update_predictions(self, predictions, index):
prediction = self.sess.run(
self.model.predictions,
feed_dict=self.feed_dict)
predictions[:, index] = prediction
def evaluate(self):
all_preds = self.prediction_samples()
mc_counts = self.monte_carlo_samples_count(all_preds)
variation_ratios = np.array(variation_ratio(mc_counts))
return variation_ratios
def monte_carlo_samples_count(self, all_preds):
mc_counts = []
all_preds = all_preds.astype(dtype=np.int64)
for row in all_preds:
bincount = np.bincount(row)
mc_counts.append((bincount, bincount.argmax()))
return mc_counts
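    # Worked example: if one sample's five MC forward passes predicted the
    # classes [1, 1, 2, 1, 0], np.bincount gives [1, 3, 1] and argmax picks
    # class 1, so the variation ratio (one minus the modal fraction) for that
    # sample is 1 - 3/5 = 0.4.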
def monte_carlo_dropout_evaluate(self, num_data):
        all_preds = self.prediction_samples()
mc_counts = self.monte_carlo_samples_count(all_preds)
predictions = np.zeros(shape=(num_data))
for index, (bincount, value) in enumerate(mc_counts):
predictions[index] = value
correct_pred = np.equal(predictions, self.labels_batch)
return np.mean(correct_pred)
class EntropyMC(MonteCarloEvaluation):
def initialize_predictions(self):
return np.zeros(shape=(self.data_batch.shape[0], self.num_classes))
def update_predictions(self, predictions, index):
prediction = self.sess.run(
self.model.predictions_distribution,
feed_dict=self.feed_dict)
predictions += prediction
def evaluate(self):
all_preds = self.prediction_samples()
entropy_values = entropy(all_preds, self.num_samples)
return entropy_values
class BaldMC(MonteCarloEvaluation):
def __init__(self, sess, model, data_batch, sizes_batch, labels_batch,
num_classes, num_samples, max_len, verbose):
super().__init__(sess, model, data_batch, sizes_batch, labels_batch, num_classes,
num_samples, max_len, verbose)
self.dropout_entropy = np.zeros(shape=(self.data_batch.shape[0]))
def initialize_predictions(self):
return np.zeros(shape=(self.data_batch.shape[0], self.num_classes))
def update_predictions(self, predictions, index):
prediction = self.sess.run(
self.model.predictions_distribution,
feed_dict=self.feed_dict)
self.dropout_entropy += entropy(prediction, 1)
predictions += prediction
def evaluate(self):
all_preds = self.prediction_samples()
bald_values = bald(all_preds, self.dropout_entropy, self.num_samples)
return bald_values
class Random(MonteCarloEvaluation):
def __init__(self, sess, model, data_batch, sizes_batch, labels_batch,
num_classes, num_samples, max_len, verbose):
super().__init__(sess, model, data_batch, sizes_batch, labels_batch, num_classes,
0, max_len, verbose)
def create_feed_dict(self, data_batch, sizes_batch):
recurrent_output_dropout = 1
recurrent_state_dropout = 1
embedding_dropout = 1
self.feed_dict = self.model.create_feed_dict(
recurrent_output_dropout=recurrent_output_dropout,
recurrent_state_dropout=recurrent_state_dropout,
embedding_dropout=embedding_dropout,
data_placeholder=data_batch,
sizes_placeholder=sizes_batch)
def initialize_predictions(self):
return None
def evaluate(self):
return np.random.uniform(size=(self.data_batch.shape[0]))
class Softmax(MonteCarloEvaluation):
def __init__(self, sess, model, data_batch, sizes_batch, labels_batch,
num_classes, num_samples, max_len, verbose):
super().__init__(sess, model, data_batch, sizes_batch, labels_batch, num_classes,
1, max_len, verbose)
def create_feed_dict(self, data_batch, sizes_batch):
recurrent_output_dropout = 1
recurrent_state_dropout = 1
embedding_dropout = 1
self.feed_dict = self.model.create_feed_dict(
recurrent_output_dropout=recurrent_output_dropout,
recurrent_state_dropout=recurrent_state_dropout,
embedding_dropout=embedding_dropout,
data_placeholder=data_batch,
sizes_placeholder=sizes_batch)
def initialize_predictions(self):
return np.zeros(shape=(self.data_batch.shape[0], self.num_classes))
def update_predictions(self, predictions, index):
prediction = self.sess.run(
self.model.predictions_distribution,
feed_dict=self.feed_dict)
predictions += prediction
def evaluate(self):
all_preds = self.prediction_samples()
return np.amax(all_preds, axis=1)
class CEAL(BaldMC):
def evaluate(self):
all_preds = self.prediction_samples()
bald_values = bald(all_preds, self.dropout_entropy, self.num_samples)
return bald_values, all_preds
|
[
"numpy.mean",
"utils.metrics.bald",
"utils.metrics.variation_ratio",
"numpy.equal",
"numpy.array",
"numpy.zeros",
"numpy.bincount",
"numpy.random.uniform",
"utils.progress_bar.Progbar",
"numpy.amax",
"utils.metrics.entropy"
] |
[((2651, 2711), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data_batch.shape[0], self.num_samples)'}), '(shape=(self.data_batch.shape[0], self.num_samples))\n', (2659, 2711), True, 'import numpy as np\n'), ((3625, 3649), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_data'}), '(shape=num_data)\n', (3633, 3649), True, 'import numpy as np\n'), ((3777, 3817), 'numpy.equal', 'np.equal', (['predictions', 'self.labels_batch'], {}), '(predictions, self.labels_batch)\n', (3785, 3817), True, 'import numpy as np\n'), ((3834, 3855), 'numpy.mean', 'np.mean', (['correct_pred'], {}), '(correct_pred)\n', (3841, 3855), True, 'import numpy as np\n'), ((3951, 4011), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data_batch.shape[0], self.num_classes)'}), '(shape=(self.data_batch.shape[0], self.num_classes))\n', (3959, 4011), True, 'import numpy as np\n'), ((4321, 4357), 'utils.metrics.entropy', 'entropy', (['all_preds', 'self.num_samples'], {}), '(all_preds, self.num_samples)\n', (4328, 4357), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((4743, 4783), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.data_batch.shape[0]'}), '(shape=self.data_batch.shape[0])\n', (4751, 4783), True, 'import numpy as np\n'), ((4840, 4900), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data_batch.shape[0], self.num_classes)'}), '(shape=(self.data_batch.shape[0], self.num_classes))\n', (4848, 4900), True, 'import numpy as np\n'), ((5112, 5134), 'utils.metrics.entropy', 'entropy', (['prediction', '(1)'], {}), '(prediction, 1)\n', (5119, 5134), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((5262, 5317), 'utils.metrics.bald', 'bald', (['all_preds', 'self.dropout_entropy', 'self.num_samples'], {}), '(all_preds, self.dropout_entropy, self.num_samples)\n', (5266, 5317), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((6230, 6278), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.data_batch.shape[0]'}), '(size=self.data_batch.shape[0])\n', (6247, 6278), True, 'import numpy as np\n'), ((7121, 7181), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.data_batch.shape[0], self.num_classes)'}), '(shape=(self.data_batch.shape[0], self.num_classes))\n', (7129, 7181), True, 'import numpy as np\n'), ((7482, 7508), 'numpy.amax', 'np.amax', (['all_preds'], {'axis': '(1)'}), '(all_preds, axis=1)\n', (7489, 7508), True, 'import numpy as np\n'), ((7624, 7679), 'utils.metrics.bald', 'bald', (['all_preds', 'self.dropout_entropy', 'self.num_samples'], {}), '(all_preds, self.dropout_entropy, self.num_samples)\n', (7628, 7679), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((1474, 1502), 'numpy.array', 'np.array', (['preprocessed_batch'], {}), '(preprocessed_batch)\n', (1482, 1502), True, 'import numpy as np\n'), ((1504, 1538), 'numpy.array', 'np.array', (['preprocessed_sizes_batch'], {}), '(preprocessed_sizes_batch)\n', (1512, 1538), True, 'import numpy as np\n'), ((3090, 3116), 'utils.metrics.variation_ratio', 'variation_ratio', (['mc_counts'], {}), '(mc_counts)\n', (3105, 3116), False, 'from utils.metrics import variation_ratio, entropy, bald\n'), ((3335, 3351), 'numpy.bincount', 'np.bincount', (['row'], {}), '(row)\n', (3346, 3351), True, 'import numpy as np\n'), ((2362, 2394), 'utils.progress_bar.Progbar', 'Progbar', ([], {'target': 'self.num_samples'}), '(target=self.num_samples)\n', (2369, 2394), False, 'from utils.progress_bar import Progbar\n')]
|
from __future__ import print_function
from numpy import pi, arange, sin
import numpy as np
import time
from bokeh.objects import (Plot, DataRange1d, LinearAxis, DatetimeAxis,
ColumnDataSource, Glyph, PanTool, WheelZoomTool)
from bokeh.glyphs import Circle
from bokeh import session
x = arange(-2 * pi, 2 * pi, 0.1)
y = sin(x)
# Create an array of times, starting at the current time, and extending
# for len(x) number of hours.
times = np.arange(len(x)) * 3600000 + time.time() * 1000  # Bokeh datetimes are ms since epoch
source = ColumnDataSource(
data=dict(x=x, y=y, times=times)
)
xdr = DataRange1d(sources=[source.columns("times")])
ydr = DataRange1d(sources=[source.columns("y")])
circle = Circle(x="times", y="y", fill_color="red", size=5, line_color="black")
glyph_renderer = Glyph(
data_source=source,
xdata_range=xdr,
ydata_range=ydr,
glyph=circle,
)
plot = Plot(x_range=xdr, y_range=ydr, data_sources=[source],
border=80)
xaxis = DatetimeAxis(plot=plot, dimension=0, location="min")
yaxis = LinearAxis(plot=plot, dimension=1, location="min")
pantool = PanTool(dataranges=[xdr, ydr], dimensions=["width", "height"])
wheelzoomtool = WheelZoomTool(dataranges=[xdr, ydr], dimensions=("width", "height"))
plot.renderers.append(glyph_renderer)
plot.tools = [pantool, wheelzoomtool]
sess = session.HTMLFileSession("dateaxis.html")
sess.add(plot, recursive=True)
sess.plotcontext.children.append(plot)
sess.save(js="absolute", css="absolute")
sess.dumpjson(file="dateaxis.json")
print("Wrote %s" % sess.filename)
if __name__ == "__main__":
sess.view()
|
[
"bokeh.session.HTMLFileSession",
"bokeh.objects.Glyph",
"bokeh.objects.WheelZoomTool",
"bokeh.objects.LinearAxis",
"bokeh.objects.PanTool",
"bokeh.objects.DatetimeAxis",
"bokeh.glyphs.Circle",
"numpy.sin",
"bokeh.objects.Plot",
"time.time",
"numpy.arange"
] |
[((317, 345), 'numpy.arange', 'arange', (['(-2 * pi)', '(2 * pi)', '(0.1)'], {}), '(-2 * pi, 2 * pi, 0.1)\n', (323, 345), False, 'from numpy import pi, arange, sin, cos\n'), ((350, 356), 'numpy.sin', 'sin', (['x'], {}), '(x)\n', (353, 356), False, 'from numpy import pi, arange, sin, cos\n'), ((690, 760), 'bokeh.glyphs.Circle', 'Circle', ([], {'x': '"""times"""', 'y': '"""y"""', 'fill_color': '"""red"""', 'size': '(5)', 'line_color': '"""black"""'}), "(x='times', y='y', fill_color='red', size=5, line_color='black')\n", (696, 760), False, 'from bokeh.glyphs import Circle\n'), ((779, 852), 'bokeh.objects.Glyph', 'Glyph', ([], {'data_source': 'source', 'xdata_range': 'xdr', 'ydata_range': 'ydr', 'glyph': 'circle'}), '(data_source=source, xdata_range=xdr, ydata_range=ydr, glyph=circle)\n', (784, 852), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((880, 944), 'bokeh.objects.Plot', 'Plot', ([], {'x_range': 'xdr', 'y_range': 'ydr', 'data_sources': '[source]', 'border': '(80)'}), '(x_range=xdr, y_range=ydr, data_sources=[source], border=80)\n', (884, 944), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((965, 1017), 'bokeh.objects.DatetimeAxis', 'DatetimeAxis', ([], {'plot': 'plot', 'dimension': '(0)', 'location': '"""min"""'}), "(plot=plot, dimension=0, location='min')\n", (977, 1017), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((1026, 1076), 'bokeh.objects.LinearAxis', 'LinearAxis', ([], {'plot': 'plot', 'dimension': '(1)', 'location': '"""min"""'}), "(plot=plot, dimension=1, location='min')\n", (1036, 1076), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((1088, 1150), 'bokeh.objects.PanTool', 'PanTool', ([], {'dataranges': '[xdr, ydr]', 'dimensions': "['width', 'height']"}), "(dataranges=[xdr, ydr], dimensions=['width', 'height'])\n", (1095, 1150), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((1167, 1235), 'bokeh.objects.WheelZoomTool', 'WheelZoomTool', ([], {'dataranges': '[xdr, ydr]', 'dimensions': "('width', 'height')"}), "(dataranges=[xdr, ydr], dimensions=('width', 'height'))\n", (1180, 1235), False, 'from bokeh.objects import Plot, DataRange1d, LinearAxis, DatetimeAxis, ColumnDataSource, Glyph, PanTool, WheelZoomTool\n'), ((1321, 1361), 'bokeh.session.HTMLFileSession', 'session.HTMLFileSession', (['"""dateaxis.html"""'], {}), "('dateaxis.html')\n", (1344, 1361), False, 'from bokeh import session\n'), ((498, 509), 'time.time', 'time.time', ([], {}), '()\n', (507, 509), False, 'import time\n')]
|
import numpy as np
from skimage.measure import label
from lib.utils_lung_segmentation import get_max_rect_in_mask
def getLargestCC(segmentation):
'''find largest connected component
return: binary mask of the largest connected component'''
labels = label(segmentation)
assert(labels.max() != 0 ) # assume at least 1 CC
largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1
return largestCC
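# Worked example of the bincount trick above: for labels.flat == [0, 0, 1, 1, 1, 2],
# np.bincount gives [2, 3, 1]; dropping the background count and re-adding the
# offset, argmax([3, 1]) + 1 == 1, so the mask keeps component 1.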
def test_max_rect_in_mask(random_blobs):
    '''make sure that we can find the largest rectangle inside a binary mask
    check that the coordinates of the rectangle are the correct ones'''
blobs_largest = getLargestCC(random_blobs)
coords_largest = get_max_rect_in_mask(blobs_largest)
assert coords_largest == (83, 125, 143, 155)
|
[
"numpy.bincount",
"skimage.measure.label",
"lib.utils_lung_segmentation.get_max_rect_in_mask"
] |
[((266, 285), 'skimage.measure.label', 'label', (['segmentation'], {}), '(segmentation)\n', (271, 285), False, 'from skimage.measure import label\n'), ((690, 725), 'lib.utils_lung_segmentation.get_max_rect_in_mask', 'get_max_rect_in_mask', (['blobs_largest'], {}), '(blobs_largest)\n', (710, 725), False, 'from lib.utils_lung_segmentation import get_max_rect_in_mask\n'), ((376, 400), 'numpy.bincount', 'np.bincount', (['labels.flat'], {}), '(labels.flat)\n', (387, 400), True, 'import numpy as np\n')]
|
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
"""
A :std:doc:`dimod composite <dimod:reference/samplers>` that tiles small problems
multiple times to a Chimera-structured sampler.
The :class:`.TilingComposite` takes a problem that can fit on a small
:std:doc:`Chimera <system:reference/intro>` graph and replicates it across a larger
Chimera graph to obtain samples from multiple areas of the solver in one call.
For example, a 2x2 Chimera lattice could be tiled 64 times (8x8) on a fully-yielded
D-Wave 2000Q system (16x16).
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_ for explanations
of technical terms in descriptions of Ocean tools.
"""
from __future__ import division
from math import sqrt, ceil
import dimod
import dwave_networkx as dnx
import numpy as np
__all__ = ['TilingComposite', 'draw_tiling']
class TilingComposite(dimod.Sampler, dimod.Composite, dimod.Structured):
"""Composite to tile a small problem across a Chimera-structured sampler.
Inherits from :class:`dimod.Sampler`, :class:`dimod.Composite`, and :class:`dimod.Structured`.
Enables parallel sampling for small problems (problems that are minor-embeddable in
a small part of a D-Wave solver's :std:doc:`Chimera <system:reference/intro>` graph).
The notation *CN* refers to a Chimera graph consisting of an NxN grid of unit cells.
Each Chimera unit cell is itself a bipartite graph with shores of size t. The D-Wave 2000Q QPU
supports a C16 Chimera graph: its 2048 qubits are logically mapped into a 16x16 matrix of
unit cell of 8 qubits (t=4).
A problem that can be minor-embedded in a single unit cell, for example, can therefore
be tiled across the unit cells of a D-Wave 2000Q as 16x16 duplicates. This enables
sampling 256 solutions in a single call.
Args:
sampler (:class:`dimod.Sampler`): Structured dimod sampler to be wrapped.
sub_m (int): Number of rows of Chimera unit cells for minor-embedding the problem once.
sub_n (int): Number of columns of Chimera unit cells for minor-embedding the problem once.
t (int, optional, default=4): Size of the shore within each Chimera unit cell.
Examples:
This example instantiates a composed sampler using composite :class:`.TilingComposite`
to tile a QUBO problem on a D-Wave solver, embedding it with composite
:class:`.EmbeddingComposite` and selecting the D-Wave solver with the user's
default :std:doc:`D-Wave Cloud Client configuration file <cloud-client:reference/intro>`.
The two-variable QUBO represents a
logical NOT gate (two nodes with biases of -1 that are coupled with strength 2) and is
easily minor-embedded in a single Chimera cell (it needs only any two coupled qubits) and
so can be tiled multiple times across a D-Wave solver for parallel solution (the two
nodes should typically have opposite values).
>>> from dwave.system.samplers import DWaveSampler
>>> from dwave.system.composites import EmbeddingComposite
>>> from dwave.system.composites import TilingComposite
>>> sampler = EmbeddingComposite(TilingComposite(DWaveSampler(), 1, 1, 4))
>>> Q = {(1, 1): -1, (1, 2): 2, (2, 1): 0, (2, 2): -1}
>>> response = sampler.sample_qubo(Q)
>>> for sample in response.samples(): # doctest: +SKIP
... print(sample)
...
{1: 0, 2: 1}
{1: 1, 2: 0}
{1: 1, 2: 0}
{1: 1, 2: 0}
{1: 0, 2: 1}
{1: 0, 2: 1}
{1: 1, 2: 0}
{1: 0, 2: 1}
{1: 1, 2: 0}
>>> # Snipped above response for brevity
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
nodelist = None
"""list: List of active qubits for the structured solver.
Examples:
This example creates a :class:`.TilingComposite` for a problem that requires
a 2x1 Chimera lattice to solve with a :class:`DWaveSampler` as the sampler.
It prints the active qubits retrieved from a D-Wave solver selected by
the user's default
:std:doc:`D-Wave Cloud Client configuration file <cloud-client:reference/intro>`.
>>> from dwave.system.samplers import DWaveSampler
>>> from dwave.system.composites import TilingComposite
>>> sampler_tile = TilingComposite(DWaveSampler(), 2, 1, 4)
>>> sampler_tile.nodelist # doctest: +SKIP
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
edgelist = None
"""list: List of active couplers for the D-Wave solver.
Examples:
This example creates a :class:`.TilingComposite` for a problem that requires
a 1x2 Chimera lattice to solve with a :class:`DWaveSampler` as the sampler.
It prints the active couplers retrieved from a D-Wave solver selected by
the user's default
:std:doc:`D-Wave Cloud Client configuration file <cloud-client:reference/intro>`.
>>> from dwave.system.samplers import DWaveSampler
>>> from dwave.system.composites import TilingComposite
>>> sampler_tile = TilingComposite(DWaveSampler(), 1, 2, 4)
>>> sampler_tile.edgelist # doctest: +SKIP
[[0, 4],
[0, 5],
[0, 6],
[0, 7],
[1, 4],
[1, 5],
[1, 6],
[1, 7],
[2, 4],
[2, 5],
[2, 6],
[2, 7],
[3, 4],
[3, 5],
[3, 6],
[3, 7],
[4, 12],
[5, 13],
[6, 14],
[7, 15],
[8, 12],
[8, 13],
[8, 14],
[8, 15],
[9, 12],
[9, 13],
[9, 14],
[9, 15],
[10, 12],
[10, 13],
[10, 14],
[10, 15],
[11, 12],
[11, 13],
[11, 14],
[11, 15]]
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
parameters = None
"""dict[str, list]: Parameters in the form of a dict.
For an instantiated composed sampler, keys are the keyword parameters accepted by the
child sampler.
Examples:
This example instantiates a :class:`.TilingComposite` sampler using a D-Wave solver
selected by the user's default
:std:doc:`D-Wave Cloud Client configuration file <cloud-client:reference/intro>`
and views the solver's parameters.
>>> from dwave.system.samplers import DWaveSampler
>>> from dwave.system.composites import TilingComposite
>>> sampler_tile = TilingComposite(DWaveSampler(), 1, 1, 4)
>>> sampler_tile.parameters # doctest: +SKIP
{u'anneal_offsets': ['parameters'],
u'anneal_schedule': ['parameters'],
u'annealing_time': ['parameters'],
u'answer_mode': ['parameters'],
u'auto_scale': ['parameters'],
>>> # Snipped above response for brevity
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
properties = None
"""dict: Properties in the form of a dict.
For an instantiated composed sampler, contains one key :code:`'child_properties'` that
has a copy of the child sampler's properties.
Examples:
This example instantiates a :class:`.TilingComposite` sampler using a D-Wave solver
selected by the user's default
:std:doc:`D-Wave Cloud Client configuration file <cloud-client:reference/intro>`
and views the solver's properties.
>>> from dwave.system.samplers import DWaveSampler
>>> from dwave.system.composites import TilingComposite
>>> sampler_tile = TilingComposite(DWaveSampler(), 1, 1, 4)
>>> sampler_tile.properties # doctest: +SKIP
{'child_properties': {u'anneal_offset_ranges': [[-0.2197463755538704,
0.03821687759418928],
[-0.2242514597680286, 0.01718456460967399],
[-0.20860153999435985, 0.05511969218508182],
[-0.2108920134230625, 0.056392603743884134],
[-0.21788292874621265, 0.03360435584845211],
[-0.21700680373359477, 0.005297355417068621],
>>> # Snipped above response for brevity
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
children = None
"""list: The single wrapped structured sampler."""
def __init__(self, sampler, sub_m, sub_n, t=4):
self.parameters = sampler.parameters.copy()
self.properties = properties = {'child_properties': sampler.properties}
tile = dnx.chimera_graph(sub_m, sub_n, t)
self.nodelist = sorted(tile.nodes)
self.edgelist = sorted(sorted(edge) for edge in tile.edges)
# dimod.Structured abstract base class automatically populates adjacency and structure as
# mixins based on nodelist and edgelist
if not isinstance(sampler, dimod.Structured):
# we could also just tile onto the unstructured sampler but in that case we would need
# to know how many tiles to use
raise ValueError("given child sampler should be structured")
self.children = [sampler]
nodes_per_cell = t * 2
edges_per_cell = t * t
m = n = int(ceil(sqrt(ceil(len(sampler.structure.nodelist) / nodes_per_cell)))) # assume square lattice shape
system = dnx.chimera_graph(m, n, t, node_list=sampler.structure.nodelist, edge_list=sampler.structure.edgelist)
c2i = {chimera_index: linear_index for (linear_index, chimera_index) in system.nodes(data='chimera_index')}
sub_c2i = {chimera_index: linear_index for (linear_index, chimera_index) in tile.nodes(data='chimera_index')}
# Count the connections between these qubits
def _between(qubits1, qubits2):
edges = [edge for edge in system.edges if edge[0] in qubits1 and edge[1] in qubits2]
return len(edges)
# Get the list of qubits in a cell
def _cell_qubits(i, j):
return [c2i[(i, j, u, k)] for u in range(2) for k in range(t) if (i, j, u, k) in c2i]
# get a mask of complete cells
cells = [[False for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
qubits = _cell_qubits(i, j)
cells[i][j] = len(qubits) == nodes_per_cell and _between(qubits, qubits) == edges_per_cell
# List of 'embeddings'
self.embeddings = properties['embeddings'] = embeddings = []
# For each possible chimera cell check if the next few cells are complete
for i in range(m + 1 - sub_m):
for j in range(n + 1 - sub_n):
# Check if the sub cells are matched
match = all(cells[i + sub_i][j + sub_j] for sub_i in range(sub_m) for sub_j in range(sub_n))
# Check if there are connections between the cells.
for sub_i in range(sub_m):
for sub_j in range(sub_n):
if sub_m > 1 and sub_i < sub_m - 1:
match &= _between(_cell_qubits(i + sub_i, j + sub_j),
_cell_qubits(i + sub_i + 1, j + sub_j)) == t
if sub_n > 1 and sub_j < sub_n - 1:
match &= _between(_cell_qubits(i + sub_i, j + sub_j),
_cell_qubits(i + sub_i, j + sub_j + 1)) == t
if match:
# Pull those cells out into an embedding.
embedding = {}
for sub_i in range(sub_m):
for sub_j in range(sub_n):
cells[i + sub_i][j + sub_j] = False # Mark cell as matched
for u in range(2):
for k in range(t):
embedding[sub_c2i[sub_i, sub_j, u, k]] = {c2i[(i + sub_i, j + sub_j, u, k)]}
embeddings.append(embedding)
if len(embeddings) == 0:
raise ValueError("no tile embeddings found; is the sampler Chimera structured?")
@dimod.bqm_structured
def sample(self, bqm, **kwargs):
"""Sample from the provided binary quadratic model
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
**kwargs:
Optional keyword arguments for the sampling method, specified per solver.
Returns:
:class:`dimod.Response`
Examples:
This example uses :class:`.TilingComposite` to instantiate a composed sampler
that submits a simple Ising problem of just two variables that map to qubits 0 and 1
on the D-Wave solver selected by the user's default
:std:doc:`D-Wave Cloud Client configuration file <cloud-client:reference/intro>`.
(The simplicity of this example obviates the need for an embedding
composite.) Because the problem fits in a single
:std:doc:`Chimera <system:reference/intro>` unit cell, it is tiled
across the solver's entire Chimera graph, resulting in multiple samples.
>>> from dwave.system.samplers import DWaveSampler
>>> from dwave.system.composites import EmbeddingComposite, TilingComposite
>>> sampler = TilingComposite(DWaveSampler(), 1, 1, 4)
>>> response = sampler.sample_ising({0: -1, 1: 1}, {})
>>> for sample in response.samples(): # doctest: +SKIP
... print(sample)
...
{0: 1, 1: -1}
{0: 1, 1: -1}
{0: 1, 1: -1}
{0: 1, 1: -1}
{0: 1, 1: -1}
{0: 1, 1: -1}
{0: 1, 1: -1}
{0: 1, 1: -1}
>>> # Snipped above response for brevity
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
# apply the embeddings to the given problem to tile it across the child sampler
embedded_bqm = dimod.BinaryQuadraticModel.empty(bqm.vartype)
__, __, target_adjacency = self.child.structure
for embedding in self.embeddings:
embedded_bqm.update(dimod.embed_bqm(bqm, embedding, target_adjacency))
# solve the problem on the child system
tiled_response = self.child.sample(embedded_bqm, **kwargs)
responses = []
for embedding in self.embeddings:
embedding = {v: chain for v, chain in embedding.items() if v in bqm.linear}
responses.append(dimod.unembed_response(tiled_response, embedding, bqm))
# stack the records
record = np.rec.array(np.hstack((resp.record for resp in responses)))
vartypes = set(resp.vartype for resp in responses)
if len(vartypes) > 1:
raise RuntimeError("inconsistent vartypes returned")
vartype = vartypes.pop()
info = {}
for resp in responses:
info.update(resp.info)
labels = responses[0].variable_labels
return dimod.Response(record, labels, info, vartype)
@property
def num_tiles(self):
return len(self.embeddings)
def draw_tiling(sampler, t=4):
"""Draw Chimera graph of sampler with colored tiles.
Args:
sampler (:class:`dwave_micro_client_dimod.TilingComposite`): A tiled dimod
sampler to be drawn.
t (int): The size of the shore within each
:std:doc:`Chimera <system:reference/intro>` cell.
Uses :std:doc:`dwave_networkx.draw_chimera <networkx:index>`.
Linear biases are overloaded to color the graph according to which tile each Chimera cell belongs to.
See `Ocean Glossary <https://docs.ocean.dwavesys.com/en/latest/glossary.html>`_
for explanations of technical terms in descriptions of Ocean tools.
"""
child = sampler.child
nodes_per_cell = t * 2
m = n = int(ceil(sqrt(ceil(len(child.structure.nodelist) / nodes_per_cell)))) # assume square lattice shape
system = dnx.chimera_graph(m, n, t, node_list=child.structure.nodelist, edge_list=child.structure.edgelist)
labels = {node: -len(sampler.embeddings) for node in system.nodes} # unused cells are blue
labels.update({node: i for i, embedding in enumerate(sampler.embeddings) for s in embedding.values() for node in s})
dnx.draw_chimera(system, linear_biases=labels)
|
[
"dimod.unembed_response",
"numpy.hstack",
"dimod.Response",
"dimod.BinaryQuadraticModel.empty",
"dwave_networkx.chimera_graph",
"dimod.embed_bqm",
"dwave_networkx.draw_chimera"
] |
[((17215, 17318), 'dwave_networkx.chimera_graph', 'dnx.chimera_graph', (['m', 'n', 't'], {'node_list': 'child.structure.nodelist', 'edge_list': 'child.structure.edgelist'}), '(m, n, t, node_list=child.structure.nodelist, edge_list=\n child.structure.edgelist)\n', (17232, 17318), True, 'import dwave_networkx as dnx\n'), ((17536, 17582), 'dwave_networkx.draw_chimera', 'dnx.draw_chimera', (['system'], {'linear_biases': 'labels'}), '(system, linear_biases=labels)\n', (17552, 17582), True, 'import dwave_networkx as dnx\n'), ((9583, 9617), 'dwave_networkx.chimera_graph', 'dnx.chimera_graph', (['sub_m', 'sub_n', 't'], {}), '(sub_m, sub_n, t)\n', (9600, 9617), True, 'import dwave_networkx as dnx\n'), ((10379, 10486), 'dwave_networkx.chimera_graph', 'dnx.chimera_graph', (['m', 'n', 't'], {'node_list': 'sampler.structure.nodelist', 'edge_list': 'sampler.structure.edgelist'}), '(m, n, t, node_list=sampler.structure.nodelist, edge_list=\n sampler.structure.edgelist)\n', (10396, 10486), True, 'import dwave_networkx as dnx\n'), ((15216, 15261), 'dimod.BinaryQuadraticModel.empty', 'dimod.BinaryQuadraticModel.empty', (['bqm.vartype'], {}), '(bqm.vartype)\n', (15248, 15261), False, 'import dimod\n'), ((16243, 16288), 'dimod.Response', 'dimod.Response', (['record', 'labels', 'info', 'vartype'], {}), '(record, labels, info, vartype)\n', (16257, 16288), False, 'import dimod\n'), ((15859, 15903), 'numpy.hstack', 'np.hstack', (['(resp.record for resp in responses)'], {}), '(resp.record for resp in responses)\n', (15868, 15903), True, 'import numpy as np\n'), ((15392, 15441), 'dimod.embed_bqm', 'dimod.embed_bqm', (['bqm', 'embedding', 'target_adjacency'], {}), '(bqm, embedding, target_adjacency)\n', (15407, 15441), False, 'import dimod\n'), ((15744, 15798), 'dimod.unembed_response', 'dimod.unembed_response', (['tiled_response', 'embedding', 'bqm'], {}), '(tiled_response, embedding, bqm)\n', (15766, 15798), False, 'import dimod\n')]
|
# Copyright (c) 2021, Parallel Systems Architecture Laboratory (PARSA), EPFL &
# Machine Learning and Optimization Laboratory (MLO), EPFL. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the PARSA, EPFL & MLO, EPFL
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
#
# Modified file from Salesforce's LSTM and QRNN Language Model Toolkit
# (https://github.com/salesforce/awd-lstm-lm). See LICENSE for more details.
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def embedded_dropout(embed, words, dropout=0.1, scale=None):
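  # Drops entire rows of the embedding matrix (i.e. whole words) with
  # probability `dropout`, rescaling the kept rows by 1/(1 - dropout) so the
  # expected embedding value is unchanged.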
if dropout:
mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
mask = Variable(mask)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
if scale:
masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
  X = F.embedding(
words, masked_embed_weight,
padding_idx,
embed.max_norm, embed.norm_type,
embed.scale_grad_by_freq, embed.sparse
)
return X
if __name__ == '__main__':
V = 50
h = 4
bptt = 10
batch_size = 2
embed = torch.nn.Embedding(V, h)
words = np.random.random_integers(low=0, high=V-1, size=(batch_size, bptt))
words = torch.LongTensor(words)
words = Variable(words)
origX = embed(words)
X = embedded_dropout(embed, words)
print(origX)
print(X)
|
[
"numpy.random.random_integers",
"torch.LongTensor",
"torch.nn.functional.embedding",
"torch.autograd.Variable",
"torch.nn.Embedding"
] |
[((2519, 2649), 'torch.nn.functional.embedding', 'F.embedding', (['words', 'masked_embed_weight', 'padding_idx', 'embed.max_norm', 'embed.norm_type', 'embed.scale_grad_by_freq', 'embed.sparse'], {}), '(words, masked_embed_weight, padding_idx, embed.max_norm, embed.\n norm_type, embed.scale_grad_by_freq, embed.sparse)\n', (2530, 2649), True, 'import torch.nn.functional as F\n'), ((2779, 2803), 'torch.nn.Embedding', 'torch.nn.Embedding', (['V', 'h'], {}), '(V, h)\n', (2797, 2803), False, 'import torch\n'), ((2815, 2884), 'numpy.random.random_integers', 'np.random.random_integers', ([], {'low': '(0)', 'high': '(V - 1)', 'size': '(batch_size, bptt)'}), '(low=0, high=V - 1, size=(batch_size, bptt))\n', (2840, 2884), True, 'import numpy as np\n'), ((2893, 2916), 'torch.LongTensor', 'torch.LongTensor', (['words'], {}), '(words)\n', (2909, 2916), False, 'import torch\n'), ((2927, 2942), 'torch.autograd.Variable', 'Variable', (['words'], {}), '(words)\n', (2935, 2942), False, 'from torch.autograd import Variable\n'), ((2219, 2233), 'torch.autograd.Variable', 'Variable', (['mask'], {}), '(mask)\n', (2227, 2233), False, 'from torch.autograd import Variable\n')]
|
'''
Investigating the offset of CIV emission in the Cloudy models
as a function of ionization, nebular metallicity, stellar metallicity,
stellar population type, age, etc.
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.optimize import curve_fit
from cloudy_func import * # written by TAH
import warnings
from scipy.optimize import OptimizeWarning
warnings.simplefilter("error", OptimizeWarning) # for when no CIV emission seen
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# for the emission line profiles
def gaussian(xaxis, mean, A, sig, offset):
'''
    Simple Gaussian function
'''
return A * np.exp(-np.power(xaxis-mean, 2.) / (2*np.power(sig, 2.))) + offset
# velocity offset plot using wavelengths instead of redshift
# the spectrum is already at systemic (so that z_sys=0)
def velocity_offset(lam_obs,lam_rest):
return 2.998e5 * ((lam_obs/lam_rest) - 1) # km/s
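# sanity check: velocity_offset(1548.50, 1548.19) ~ +60 km/s (redward of rest)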
# model parameters
u = np.arange(-3.5,-1.4,0.2) # full range, 11 ionization points
zneb = [0.1,0.2,0.3,0.5] # full range, some have 0.4 as well
mass = 300 # 300 or 100
stars = 'binary' # binary or single
age = 7 # 7, 7.477, or 8
neb = 0 # for the nebular metallicity
civ = 1548.19 # angstroms
for ion in u[:5]:
# pulling model spectrum
spec = get_cloudy_spec(f'{stars}_cont_{mass}',mass,age,zneb[neb],ioni=ion)
spec['wavelength'] *= 1e4
spec['spectrum'] /= (2.998e18/spec.wavelength.values) # nu*Fnu --> Fnu
# zooming in around CIV
spec = reorder_spectrum(spec) # Cloudy orders it backwards
spec = spec.query('1490 < wavelength < 1610').copy()
spec['spectrum'] /= np.median(spec.spectrum.values) # normalizing it
# plotting CIV area of spectrum
plt.figure(figsize=(9,6))
plt.plot(spec.wavelength,spec.spectrum)
text_kwargs = {'transform':plt.gca().transAxes,'fontsize':15}
plt.text(0.025,0.94,f'logU: {round(ion,1)}',**text_kwargs)
# fitting the CIV emission
# gaussian(xaxis, mean, A, sig, offset)
try:
popt,pcov = curve_fit(gaussian,spec.wavelength,spec.spectrum,p0=[1548,1,2,1])
plt.plot(spec.wavelength,gaussian(spec.wavelength,*popt))
plt.axvline(popt[0])
# calculating offset
offset = velocity_offset(popt[0],civ)
plt.text(0.025,0.05,f'offset: {round(offset,2)} km/s',**text_kwargs)
except OptimizeWarning:
print('\nNo emission detected, finding max value.',end='\n\n')
zoomin = spec.query('1540 < wavelength < 1565').copy()
# wavelength of peak value
peak = zoomin.loc[zoomin['spectrum'].idxmax(),'wavelength']
plt.axvline(peak,ls=':')
# calculating offset using the max value (will likely stay the same)
offset = velocity_offset(peak,civ)
plt.text(0.025,0.05,f'offset: {round(offset,2)} km/s',**text_kwargs)
plt.yscale('log')
plt.ylim(0.25,6)
plt.gca().set_yticks([0.3,1.,3,])
plt.gca().set_yticklabels(['0.3','1.0','3.0',])
plt.xlabel('rest wavelength [$\AA$]')
plt.ylabel('normalized flux')
plt.tight_layout()
plt.show()
plt.close()
print()
# ---------------------------------------------------------- #
# -- running through all models to build table of offsets -- #
# ---------------------------------------------------------- #
# zstellar = [0.1,0.2,0.3,0.5]
# zneb = [0.1,0.3,0.5]
# offsets = pd.DataFrame({'z':[],'zneb':[],'u':[],'offset':[],'age':[],'mass':[],'stars':[]})
# for stars in ['binary','single']:
# print('For stars:',stars)
# for mass in [300,100]:
# print('For mass:',mass)
# for met in zstellar: # stellar metallicity
# print('For Z_stellar:',met)
# for neb in zneb: # nebular metallicity
# # checking for when stellar == nebular when it's 0.3 or 0.5
# if neb == 0.1 and met == 0.3: pass # no need to run this model twice
# elif neb == 0.1 and met == 0.5: pass # no need to run this model twice
# else:
# # need to check if matches stellar
# if neb == 0.1: neb = met # fix nebular to stellar metallicity
# print('For Z_neb:',neb)
# for ion in u:
# print('For logU:',round(ion,1),end=',\t')
# # pulling model spectrum
# spec = get_cloudy_spec(f'{stars}_cont_{mass}',mass,age,met,zneb=neb,ioni=ion)
# spec['wavelength'] *= 1e4
# spec['spectrum'] /= (2.998e18/spec.wavelength.values) # nu*Fnu --> Fnu
# # zooming in around CIV
# spec = spec.query('1490 < wavelength < 1610').copy()
# spec['spectrum'] /= np.median(spec.spectrum.values) # normalizing it
# spec = reorder_spectrum(spec) # Cloudy orders it backwards
# # fitting the CIV emission
# # gaussian(xaxis, mean, A, sig, offset)
# try:
# popt,pcov = curve_fit(gaussian,spec.wavelength,spec.spectrum,p0=[1548,1,2,1])
# # calculating offset
# offset = velocity_offset(popt[0],civ)
# print(f'offset: {round(offset,2)} km/s')
# except OptimizeWarning:
# print('Bad fit/no emission detected.')
# offset = np.nan
# filldf = pd.DataFrame({'z':[met],'zneb':[neb],'u':[ion],'offset':[round(offset,3)],\
# 'age':[int(age)],'mass':[int(mass)],'stars':[stars]})
# offsets = offsets.append(filldf,ignore_index=True)
# print()
# print(end='\n\n')
# print(end='\n\n')
# print(end='\n\n\n')
# # ---------------------------------------- #
# # ---------------------------------------- #
# print('Saving table to file...',end='\n\n')
# df_dtypes = {'zneb':float,'u':float,'offset':float,'age':int,'mass':int,'stars':str}
# offsets = offsets.astype(df_dtypes) # to make sure column dtypes don't change
# # offsets.to_csv('plots-data/offsets_civ.txt',sep='\t',index=False)
# print(offsets.head())
|
[
"scipy.optimize.curve_fit",
"numpy.median",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"warnings.simplefilter",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.axvline",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((427, 474), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'OptimizeWarning'], {}), "('error', OptimizeWarning)\n", (448, 474), False, 'import warnings\n'), ((983, 1009), 'numpy.arange', 'np.arange', (['(-3.5)', '(-1.4)', '(0.2)'], {}), '(-3.5, -1.4, 0.2)\n', (992, 1009), True, 'import numpy as np\n'), ((1728, 1759), 'numpy.median', 'np.median', (['spec.spectrum.values'], {}), '(spec.spectrum.values)\n', (1737, 1759), True, 'import numpy as np\n'), ((1819, 1845), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (1829, 1845), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1889), 'matplotlib.pyplot.plot', 'plt.plot', (['spec.wavelength', 'spec.spectrum'], {}), '(spec.wavelength, spec.spectrum)\n', (1857, 1889), True, 'import matplotlib.pyplot as plt\n'), ((2967, 2984), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (2977, 2984), True, 'import matplotlib.pyplot as plt\n'), ((2989, 3006), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.25)', '(6)'], {}), '(0.25, 6)\n', (2997, 3006), True, 'import matplotlib.pyplot as plt\n'), ((3100, 3138), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rest wavelength [$\\\\AA$]"""'], {}), "('rest wavelength [$\\\\AA$]')\n", (3110, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3142, 3171), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""normalized flux"""'], {}), "('normalized flux')\n", (3152, 3171), True, 'import matplotlib.pyplot as plt\n'), ((3177, 3195), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3193, 3195), True, 'import matplotlib.pyplot as plt\n'), ((3200, 3210), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3208, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3215, 3226), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3224, 3226), True, 'import matplotlib.pyplot as plt\n'), ((2123, 2194), 'scipy.optimize.curve_fit', 'curve_fit', (['gaussian', 'spec.wavelength', 'spec.spectrum'], {'p0': '[1548, 1, 2, 1]'}), '(gaussian, spec.wavelength, spec.spectrum, p0=[1548, 1, 2, 1])\n', (2132, 2194), False, 'from scipy.optimize import curve_fit\n'), ((2263, 2283), 'matplotlib.pyplot.axvline', 'plt.axvline', (['popt[0]'], {}), '(popt[0])\n', (2274, 2283), True, 'import matplotlib.pyplot as plt\n'), ((1920, 1929), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1927, 1929), True, 'import matplotlib.pyplot as plt\n'), ((2727, 2752), 'matplotlib.pyplot.axvline', 'plt.axvline', (['peak'], {'ls': '""":"""'}), "(peak, ls=':')\n", (2738, 2752), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3019), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3017, 3019), True, 'import matplotlib.pyplot as plt\n'), ((3048, 3057), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3055, 3057), True, 'import matplotlib.pyplot as plt\n'), ((689, 716), 'numpy.power', 'np.power', (['(xaxis - mean)', '(2.0)'], {}), '(xaxis - mean, 2.0)\n', (697, 716), True, 'import numpy as np\n'), ((719, 737), 'numpy.power', 'np.power', (['sig', '(2.0)'], {}), '(sig, 2.0)\n', (727, 737), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import psoap
from psoap.data import lkca14, redshift, Chunk
from psoap import matrix_functions
from psoap import covariance
from psoap import orbit
# from matplotlib.ticker import FormatStrFormatter as FSF
# from matplotlib.ticker import MaxNLocator
# from matplotlib.ticker import MultipleLocator
# Specify orbital parameters and make a sanity plot
q = 0.2
K = 5.0 # km/s
e = 0.2 #
omega = 10.0 # deg
P = 10.0 # days
T0 = 0.0 # epoch
gamma = 5.0 # km/s
n_epochs = 10
obs_dates = np.array([2.1, 4.9, 8.0, 9.9, 12.2, 16.0, 16.9, 19.1, 22.3, 26.1])
# obs_dates = np.linspace(5, 150, num=n_epochs)
orb = orbit.SB2(q, K, e, omega, P, T0, gamma, obs_dates)
vAs, vBs = orb.get_component_velocities()
dates_fine = np.linspace(0, 30, num=200)
vA_fine, vB_fine = orb.get_component_velocities(dates_fine)
vAs_relative = vAs - vAs[0]
np.save("SB2/vAs_relative.npy", vAs_relative)
vBs_relative = vBs - vBs[0]
np.save("SB2/vBs_relative.npy", vBs_relative)
fig, ax = plt.subplots(nrows=3, figsize=(6,6))
ax[0].plot(dates_fine, vA_fine, "b")
ax[0].plot(orb.obs_dates, vAs, "bo")
ax[0].plot(dates_fine, vB_fine, "g")
ax[0].plot(orb.obs_dates, vBs, "go")
ax[0].axhline(gamma, ls="-.", color="0.5")
ax[-1].set_xlabel(r"$t$ [days]")
ax[0].set_ylabel(r"$v_A$ [km $\mathrm{s}^{-1}$]")
# For subsequent axes, plot velocities of stars relative to first observation.
ax[1].plot(orb.obs_dates, vAs_relative, "bo")
ax[1].set_ylabel(r"$v_A$ relative")
ax[2].plot(orb.obs_dates, vBs_relative, "go")
ax[2].set_ylabel(r"$v_B$ relative")
fig.subplots_adjust(left=0.14, right=0.86, bottom=0.24)
fig.savefig("SB2/orbit.png")
# Load the fake primary spectra we prepared
wl_f, fl_f = np.load("primary_wl_fl.npy")
# Load the fake secondary spectra we prepared
wl_g, fl_g = np.load("secondary_wl_fl.npy")
n_f = len(wl_f)
n_g = len(wl_g)
print("n_f:", n_f, "n_g:", n_g)
# Shorten these to be the same.
if n_f < n_g:
n_pix = n_f
print("Shortening g to f")
else:
    n_pix = n_g
print("Shortening f to g")
wl = wl_f[0:n_pix]
fl_f = fl_f[0:n_pix]
fl_g = fl_g[0:n_pix]
# Just assume that wl_f will be wl_g as well.
# Create fake wavelengths with Doppler shifts by apply these to the master wl
wls_f = np.empty((n_epochs, n_pix))
wls_g = np.empty((n_epochs, n_pix))
for i in range(n_epochs):
wls_f[i] = redshift(wl, vAs[i])
wls_g[i] = redshift(wl, vBs[i])
# Falling plot of all eight epochs of each spectrum, overlaid with the velocities for each
# Show spectra on each plot along with chosen amplitude scaling
fig, ax = plt.subplots(nrows=n_epochs, sharex=True)
for i in range(n_epochs):
ax[i].plot(wls_f[i], fl_f, "b")
ax[i].plot(wls_g[i], fl_g, "g")
ax[i].set_ylabel("epoch {:}".format(i))
ax[-1].set_xlabel(r"$\lambda [\AA]$")
fig.savefig("SB2/dataset_noiseless_full.png", dpi=300)
# Here is where we set up the number of chunks, and choose what region of overlaps we want.
# New chunks [start, stop]
# chunk_wls = [[5240, 5250], [5255, 5265], [5270, 5280]]
chunk_wls = [[5265, 5275]]
# Measure this as S/N per resolution element. That means that there is a sqrt(2.5) effect.
# let alpha be the percentage of the primary as the total flux.
ratio = 0.2
alpha = (1 / (ratio + 1))
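# e.g. ratio = 0.2 gives alpha = 1/1.2 ~ 0.833: the primary carries ~83% of the total flux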
print("Ratio: {}, alpha: {}".format(ratio, alpha))
# alpha = 0.90
# Assume a S/N = 40, so N = 1.0 / 40
S_N = 60 # per resolution element
noise_amp = 1.0 / (S_N/np.sqrt(2.5)) # per pixel
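# with ~2.5 pixels per resolution element, the per-pixel S/N is S_N/sqrt(2.5),
# so the per-pixel noise amplitude works out to sqrt(2.5)/S_N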
# Truncate down to a smaller region to ensure overlap between all orders.
for (wl0, wl1) in chunk_wls:
print("Creating chunk {:.0f} to {:.0f}".format(wl0, wl1))
# Keep everything the same size. These are how many pixels we plan to keep in common between
# epochs
ind = (wls_f[0] > wl0) & (wls_f[0] < wl1)
n_pix_common = np.sum(ind)
print("n_pix_common = {}".format(n_pix_common))
# Now choose a narrower, common wl grid, which will just be f.
# Now we should have a giant array of wavelengths that all share the same flux values, but shifted
wls_comb = np.zeros((n_epochs, n_pix_common))
fls_f = np.empty((n_epochs, n_pix_common))
fls_g = np.empty((n_epochs, n_pix_common))
fls_comb = np.empty((n_epochs, n_pix_common))
fls_noise = np.zeros((n_epochs, n_pix_common))
sigma_comb = noise_amp * np.ones((n_epochs, n_pix_common))
for i in range(n_epochs):
# Select a subset of wl_f that has the appropriate number of pixels
ind_0 = np.searchsorted(wls_f[i], wl0)
print("Inserting at index {}, wavelength {:.2f}".format(ind_0, wls_f[i, ind_0]))
wl_common = wls_f[i, ind_0:(ind_0 + n_pix_common)]
# Interpolate the master spectrum onto this grid
interp = interp1d(wls_f[i], fl_f)
fl_f_common = interp(wl_common)
interp = interp1d(wls_g[i], fl_g)
fl_g_common = interp(wl_common)
fl_common = alpha * fl_f_common + (1 - alpha) * fl_g_common
# Add noise to it
fl_common_noise = fl_common + np.random.normal(scale=noise_amp, size=n_pix_common)
# Store into array
wls_comb[i] = wl_common
fls_f[i] = fl_f_common
fls_g[i] = fl_g_common
fls_comb[i] = fl_common
fls_noise[i] = fl_common_noise
fig, ax = plt.subplots(nrows=4, sharex=True)
ax[0].plot(wl_common, alpha * fl_f_common, "b")
ax[0].set_ylabel(r"$f$")
ax[1].plot(wl_common, (1 - alpha) * fl_g_common, "g")
ax[1].set_ylabel(r"$g$")
ax[2].plot(wl_common, fl_common, "k")
ax[2].set_ylabel(r"$f + g$")
ax[3].plot(wl_common, fl_common_noise, "k")
ax[3].set_ylabel(r"$f + g +$ noise")
ax[-1].set_xlabel(r"$\lambda\;[\AA]$")
fig.savefig("SB2/epoch_{}.png".format(i), dpi=300)
# Save the created spectra into a chunk
date_comb = obs_dates[:,np.newaxis] * np.ones_like(wls_comb)
chunkSpec = Chunk(wls_comb, fls_noise, sigma_comb, date_comb)
wl0 = np.min(wls_comb)
wl1 = np.max(wls_comb)
chunkSpec.save(0, wl0, wl1, prefix="SB2/")
# 2D arrays before we have summed them or added noise.
print("STDEV primary", np.std(alpha * fls_f))
print("STDEV secondary", np.std((1 - alpha) * fls_g))
np.save("SB2/fls_f.npy", alpha * fls_f)
np.save("SB2/fls_g.npy", (1 - alpha) * fls_g)
np.save("SB2/fls_comb.npy", fls_comb)
|
[
"numpy.sqrt",
"scipy.interpolate.interp1d",
"numpy.array",
"psoap.orbit.SB2",
"numpy.save",
"numpy.searchsorted",
"numpy.max",
"numpy.linspace",
"numpy.empty",
"numpy.min",
"numpy.random.normal",
"numpy.ones",
"psoap.data.Chunk",
"numpy.std",
"numpy.ones_like",
"psoap.data.redshift",
"numpy.sum",
"numpy.zeros",
"numpy.load",
"matplotlib.pyplot.subplots"
] |
[((577, 643), 'numpy.array', 'np.array', (['[2.1, 4.9, 8.0, 9.9, 12.2, 16.0, 16.9, 19.1, 22.3, 26.1]'], {}), '([2.1, 4.9, 8.0, 9.9, 12.2, 16.0, 16.9, 19.1, 22.3, 26.1])\n', (585, 643), True, 'import numpy as np\n'), ((700, 750), 'psoap.orbit.SB2', 'orbit.SB2', (['q', 'K', 'e', 'omega', 'P', 'T0', 'gamma', 'obs_dates'], {}), '(q, K, e, omega, P, T0, gamma, obs_dates)\n', (709, 750), False, 'from psoap import orbit\n'), ((808, 835), 'numpy.linspace', 'np.linspace', (['(0)', '(30)'], {'num': '(200)'}), '(0, 30, num=200)\n', (819, 835), True, 'import numpy as np\n'), ((925, 970), 'numpy.save', 'np.save', (['"""SB2/vAs_relative.npy"""', 'vAs_relative'], {}), "('SB2/vAs_relative.npy', vAs_relative)\n", (932, 970), True, 'import numpy as np\n'), ((1000, 1045), 'numpy.save', 'np.save', (['"""SB2/vBs_relative.npy"""', 'vBs_relative'], {}), "('SB2/vBs_relative.npy', vBs_relative)\n", (1007, 1045), True, 'import numpy as np\n'), ((1057, 1094), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'figsize': '(6, 6)'}), '(nrows=3, figsize=(6, 6))\n', (1069, 1094), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1788), 'numpy.load', 'np.load', (['"""primary_wl_fl.npy"""'], {}), "('primary_wl_fl.npy')\n", (1767, 1788), True, 'import numpy as np\n'), ((1849, 1879), 'numpy.load', 'np.load', (['"""secondary_wl_fl.npy"""'], {}), "('secondary_wl_fl.npy')\n", (1856, 1879), True, 'import numpy as np\n'), ((2290, 2317), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix)'], {}), '((n_epochs, n_pix))\n', (2298, 2317), True, 'import numpy as np\n'), ((2326, 2353), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix)'], {}), '((n_epochs, n_pix))\n', (2334, 2353), True, 'import numpy as np\n'), ((2619, 2660), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'n_epochs', 'sharex': '(True)'}), '(nrows=n_epochs, sharex=True)\n', (2631, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2396, 2416), 'psoap.data.redshift', 'redshift', (['wl', 'vAs[i]'], {}), '(wl, vAs[i])\n', (2404, 2416), False, 'from psoap.data import lkca14, redshift, Chunk\n'), ((2432, 2452), 'psoap.data.redshift', 'redshift', (['wl', 'vBs[i]'], {}), '(wl, vBs[i])\n', (2440, 2452), False, 'from psoap.data import lkca14, redshift, Chunk\n'), ((3828, 3839), 'numpy.sum', 'np.sum', (['ind'], {}), '(ind)\n', (3834, 3839), True, 'import numpy as np\n'), ((4078, 4112), 'numpy.zeros', 'np.zeros', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4086, 4112), True, 'import numpy as np\n'), ((4125, 4159), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4133, 4159), True, 'import numpy as np\n'), ((4172, 4206), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4180, 4206), True, 'import numpy as np\n'), ((4222, 4256), 'numpy.empty', 'np.empty', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4230, 4256), True, 'import numpy as np\n'), ((4273, 4307), 'numpy.zeros', 'np.zeros', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4281, 4307), True, 'import numpy as np\n'), ((5930, 5979), 'psoap.data.Chunk', 'Chunk', (['wls_comb', 'fls_noise', 'sigma_comb', 'date_comb'], {}), '(wls_comb, fls_noise, sigma_comb, date_comb)\n', (5935, 5979), False, 'from psoap.data import lkca14, redshift, Chunk\n'), ((5990, 6006), 'numpy.min', 'np.min', (['wls_comb'], {}), '(wls_comb)\n', (5996, 6006), True, 'import numpy as np\n'), ((6017, 6033), 'numpy.max', 'np.max', (['wls_comb'], {}), '(wls_comb)\n', (6023, 6033), True, 'import numpy as np\n'), ((6255, 6294), 'numpy.save', 'np.save', (['"""SB2/fls_f.npy"""', '(alpha * fls_f)'], {}), "('SB2/fls_f.npy', alpha * fls_f)\n", (6262, 6294), True, 'import numpy as np\n'), ((6299, 6344), 'numpy.save', 'np.save', (['"""SB2/fls_g.npy"""', '((1 - alpha) * fls_g)'], {}), "('SB2/fls_g.npy', (1 - alpha) * fls_g)\n", (6306, 6344), True, 'import numpy as np\n'), ((6349, 6386), 'numpy.save', 'np.save', (['"""SB2/fls_comb.npy"""', 'fls_comb'], {}), "('SB2/fls_comb.npy', fls_comb)\n", (6356, 6386), True, 'import numpy as np\n'), ((3460, 3472), 'numpy.sqrt', 'np.sqrt', (['(2.5)'], {}), '(2.5)\n', (3467, 3472), True, 'import numpy as np\n'), ((4338, 4371), 'numpy.ones', 'np.ones', (['(n_epochs, n_pix_common)'], {}), '((n_epochs, n_pix_common))\n', (4345, 4371), True, 'import numpy as np\n'), ((4496, 4526), 'numpy.searchsorted', 'np.searchsorted', (['wls_f[i]', 'wl0'], {}), '(wls_f[i], wl0)\n', (4511, 4526), True, 'import numpy as np\n'), ((4751, 4775), 'scipy.interpolate.interp1d', 'interp1d', (['wls_f[i]', 'fl_f'], {}), '(wls_f[i], fl_f)\n', (4759, 4775), False, 'from scipy.interpolate import interp1d\n'), ((4834, 4858), 'scipy.interpolate.interp1d', 'interp1d', (['wls_g[i]', 'fl_g'], {}), '(wls_g[i], fl_g)\n', (4842, 4858), False, 'from scipy.interpolate import interp1d\n'), ((5299, 5333), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(4)', 'sharex': '(True)'}), '(nrows=4, sharex=True)\n', (5311, 5333), True, 'import matplotlib.pyplot as plt\n'), ((5891, 5913), 'numpy.ones_like', 'np.ones_like', (['wls_comb'], {}), '(wls_comb)\n', (5903, 5913), True, 'import numpy as np\n'), ((6169, 6190), 'numpy.std', 'np.std', (['(alpha * fls_f)'], {}), '(alpha * fls_f)\n', (6175, 6190), True, 'import numpy as np\n'), ((6221, 6248), 'numpy.std', 'np.std', (['((1 - alpha) * fls_g)'], {}), '((1 - alpha) * fls_g)\n', (6227, 6248), True, 'import numpy as np\n'), ((5033, 5085), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'noise_amp', 'size': 'n_pix_common'}), '(scale=noise_amp, size=n_pix_common)\n', (5049, 5085), True, 'import numpy as np\n')]
|
import cv2
import sys
import json
from image_encoder.image_encoder import decode
import numpy
import requests
# Get user supplied values
def get_image(fpath):
with open(fpath) as f:
record = [json.loads(line) for line in f]
img = decode(record[0]["image"])
return img
def n_faces(fpath):
cascPath = "haarcascade_frontalface_default.xml"
# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the image
img = get_image(fpath)
image = cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
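    # scaleFactor=1.1 shrinks the image by ~10% at each pyramid level;
    # minNeighbors=5 keeps only detections confirmed by at least five
    # overlapping candidate windows.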
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags = cv2.CASCADE_SCALE_IMAGE
)
output = {}
i = 1
for (x, y, w, h) in faces:
k = "face"+str(i)
output[k] = [int(x),int(y), int(x+w), int(y+h)]
i+=1
print(output)
to_send = {"fpath":fpath, "result":{"faces":output}}
requests.post('http://imagedb:5000/append',json = to_send)
return to_send
if __name__ == "__main__":
from pika_listener import QueueListener
Q = QueueListener(n_faces, 'imageq_n_faces')
Q.run()
# powered by bee
|
[
"image_encoder.image_encoder.decode",
"json.loads",
"requests.post",
"numpy.array",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"pika_listener.QueueListener"
] |
[((247, 273), 'image_encoder.image_encoder.decode', 'decode', (["record[0]['image']"], {}), "(record[0]['image'])\n", (253, 273), False, 'from image_encoder.image_encoder import decode\n'), ((413, 444), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['cascPath'], {}), '(cascPath)\n', (434, 444), False, 'import cv2\n'), ((568, 607), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (580, 607), False, 'import cv2\n'), ((1052, 1109), 'requests.post', 'requests.post', (['"""http://imagedb:5000/append"""'], {'json': 'to_send'}), "('http://imagedb:5000/append', json=to_send)\n", (1065, 1109), False, 'import requests\n'), ((1213, 1253), 'pika_listener.QueueListener', 'QueueListener', (['n_faces', '"""imageq_n_faces"""'], {}), "(n_faces, 'imageq_n_faces')\n", (1226, 1253), False, 'from pika_listener import QueueListener\n'), ((520, 536), 'numpy.array', 'numpy.array', (['img'], {}), '(img)\n', (531, 536), False, 'import numpy\n'), ((205, 221), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (215, 221), False, 'import json\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import optparse
import os
import re
import sys
import vtk
from multiprocessing import Process
import parse_imx
RADIUS = 3 # For Open and Gauss
SCALE = 50.0 # For Rasterization
class RepairMeshParser(optparse.OptionParser):
def __init__(self):
optparse.OptionParser.__init__(self)
self.add_option("-a", "--areas", dest="areas_file",
help="The output areas file in csv", metavar="FILE")
self.add_option("-i", "--input-vrml", dest="input_vrml",
help="The mesh to de repaired in vrml file format", metavar="FILE")
self.add_option("-d", "--auto-dir", dest="vrmls_dir",
help="A directory with a bunch of vrmls", metavar="FILE")
self.add_option("-o", "--output-dir", dest="output_dir",
help="The output dir ussed when provides a dir as input", metavar="FILE")
self.add_option("-s", "--scale-factor", dest="scale", default=50.0,
help="Tehe scale factor used in the rasterization")
self.add_option("-c", "--combine", action="store_true", dest="combine",
help="Combine all polydatas in one object")
def write_image(image, filename):
"""Write vtk image data to file."""
aWriter = vtk.vtkMetaImageWriter()
aWriter.SetInputData(image)
aWriter.SetFileName(filename)
aWriter.SetFileDimensionality(3)
aWriter.SetCompression(False)
aWriter.Write()
def voxelizer(polydata, scale=SCALE, radius=RADIUS):
""" volume voxelization not anti-aliased """
# Get selection boundaries.
(minX, maxX, minY, maxY, minZ, maxZ) = [int(x * scale) for x in
polydata.GetBounds()] # convert tuple of floats to ints
# print(" Selection bounds are %s" % str((minX, maxX, minY, maxY, minZ, maxZ))) # dimensions of the resulting image
# print(" Dimensions: %s" % str((maxX - minX, maxY - minY, maxZ - minZ)))
padd = radius + 6
(minX, maxX, minY, maxY, minZ, maxZ) = (
minX - padd, maxX + padd, minY - padd, maxY + padd, minZ - padd, maxZ + padd)
ps1 = 1.0 / float(scale) # pixel size for the stencil, make sure it's a float division!
ps2 = 1.0 # pixel size for the image
## Convert a surface mesh into an image stencil that can be used to mask an image with vtkImageStencil.
polyToStencilFilter = vtk.vtkPolyDataToImageStencil()
polyToStencilFilter.SetInputData(polydata)
polyToStencilFilter.SetOutputWholeExtent(minX, maxX, minY, maxY, minZ, maxZ)
polyToStencilFilter.SetOutputSpacing(ps1, ps1, ps1)
polyToStencilFilter.SetOutputOrigin(0.0, 0.0, 0.0)
polyToStencilFilter.Update()
# Create an empty (3D) image of appropriate size.
image = vtk.vtkImageData()
image.SetSpacing(ps2, ps2, ps2)
image.SetOrigin(0.0, 0.0, 0.0)
image.SetExtent(minX, maxX, minY, maxY, minZ, maxZ)
image.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
# Mask the empty image with the image stencil.
# First All the background to 0
# Needed otherwise introduces noise
stencil = vtk.vtkImageStencil()
stencil.SetInputData(image)
stencil.SetStencilData(polyToStencilFilter.GetOutput())
stencil.ReverseStencilOff()
stencil.SetBackgroundValue(0)
stencil.Update()
# Foreground to 255
stencil2 = vtk.vtkImageStencil()
stencil2.SetInputData(stencil.GetOutput())
stencil2.SetStencilData(polyToStencilFilter.GetOutput())
stencil2.ReverseStencilOn()
stencil2.SetBackgroundValue(255)
stencil2.Update()
finishImage = stencil2.GetOutput()
print(finishImage.GetNumberOfCells())
return stencil2.GetOutput()
def axisAligment(actor):
polyData = actor.GetMapper().GetInput()
centerCalculer = vtk.vtkCenterOfMass()
centerCalculer.SetInputData(polyData)
centerCalculer.SetUseScalarsAsWeights(False)
centerCalculer.Update()
center = centerCalculer.GetCenter()
print(center)
centerTransform = vtk.vtkTransform()
centerTransform.Translate(-center[0], -center[1], -center[2])
transformFilter = vtk.vtkTransformFilter()
transformFilter.SetInputData(polyData)
transformFilter.SetTransform(centerTransform)
transformFilter.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(transformFilter.GetOutput())
mapper.Update()
actor.SetMapper(mapper)
polyData = actor.GetMapper().GetInput()
centerCalculer = vtk.vtkCenterOfMass()
centerCalculer.SetInputData(polyData)
centerCalculer.SetUseScalarsAsWeights(False)
centerCalculer.Update()
centerAux = centerCalculer.GetCenter()
print(centerAux)
pointsMatrixAux = []
for i in range(0, polyData.GetNumberOfPoints()):
point = polyData.GetPoint(i)
pointsMatrixAux.append(point)
pointMatrix = np.matrix(pointsMatrixAux)
pointMatrixT = pointMatrix.transpose()
covarianzeMatrix = pointMatrixT * pointMatrix
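    # 3x3 scatter (second-moment) matrix of the centered points; the left
    # singular vectors from the SVD below are the principal axes (PCA) used
    # to rotate the mesh into axis alignment.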
u, s, vh = np.linalg.svd(covarianzeMatrix, full_matrices=True)
rotationMatrix = vtk.vtkMatrix4x4()
for i in range(3):
for j in range(3):
rotationMatrix.SetElement(i, j, u[i, j])
rotationMatrix.SetElement(i, 3, 0)
for i in range(3):
rotationMatrix.SetElement(3, i, 0)
rotationMatrix.SetElement(3, 3, 1)
rotationTransform = vtk.vtkTransform()
rotationTransform.SetMatrix(rotationMatrix)
transformFilter = vtk.vtkTransformFilter()
transformFilter.SetInputData(actor.GetMapper().GetInput())
transformFilter.SetTransform(rotationTransform)
transformFilter.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(transformFilter.GetOutput())
actor.SetMapper(mapper)
return center, rotationTransform
def open_image(image, radius):
openFilter = vtk.vtkImageDilateErode3D()
openFilter.SetDilateValue(255)
openFilter.SetErodeValue(0)
openFilter.SetKernelSize(radius, radius, radius)
openFilter.SetInputData(image)
openFilter.Update()
return openFilter.GetOutput()
def dump_voxels(actor, filename):
poly = actor.GetMapper().GetInput()
pre_image = voxelizer(poly, 50)
image = open_image(pre_image, RADIUS)
write_image(image, filename)
def open_actor(actor, actor_index=0, scale=SCALE, radius=RADIUS):
poly = actor.GetMapper().GetInput()
pre_image = voxelizer(poly, scale)
opened_image = open_image(pre_image, radius)
gauss = vtk.vtkImageGaussianSmooth()
gauss.SetDimensionality(3)
gauss.SetStandardDeviation(radius, radius, radius)
gauss.SetInputData(opened_image)
gauss.Update()
image_to_contour = gauss.GetOutput()
contour = vtk.vtkMarchingCubes()
contour.SetInputData(image_to_contour)
contour.SetValue(0, 127.5)
contour.ComputeScalarsOff()
contour.Update()
repared_poly = contour.GetOutput()
if repared_poly.GetNumberOfCells() == 0:
print("ERROR: number_of_cells = 0", end=' ')
# write_image(image_to_contour, "/tmp/%d.mhd"%actor_index)
raise ValueError("ERROR: number_of_cells = 0")
# (minX, maxX, minY, maxY, minZ, maxZ) = [int(x) for x in repared_poly.GetBounds()] #convert tuple of floats to ints
# print " Repared bounds are %s"%str((minX, maxX, minY, maxY, minZ, maxZ)) #dimensions of the resulting image
# print " Dimensions: %s"%str((maxX - minX, maxY - minY, maxZ - minZ))
actor.GetMapper().SetInputData(repared_poly)
def compute_area(actor):
polydata = actor.GetMapper().GetInput()
number_of_cells = polydata.GetNumberOfCells()
area = 0
for i in range(number_of_cells):
area += vtk.vtkMeshQuality.TriangleArea(polydata.GetCell(i))
return area
def combine_actors(actors_list):
appender = vtk.vtkAppendPolyData()
for actor in actors_list:
poly = actor.GetMapper().GetInput()
        appender.AddInputData(poly)
appender.Update()
combined_poly = appender.GetOutput()
combined_actor = vtk.vtkActor()
combined_actor.SetMapper(vtk.vtkPolyDataMapper())
combined_actor.GetMapper().SetInputData(combined_poly)
return combined_actor
def show_actor(actor, ren, rw):
ren.RemoveAllViewProps()
ren.AddActor(actor)
ren.ResetCamera()
rw.Render()
def compute_all_areas(actors_list, scale=SCALE):
areas = []
for i, actor in enumerate(actors_list):
# scale = SCALE
sys.stdout.write("%d " % i)
area_pre = compute_area(actor)
try:
open_actor(actor, i, scale)
except ValueError as e:
# [KNOWN BUG] The sizes are corrected, but not the position
scale = scale * 2
open_actor(actor, i, scale)
area_post = compute_area(actor) / scale ** 2
areas.append((i, area_pre, area_post, area_post / area_pre))
sys.stdout.flush()
print("\n")
return areas
def compute_centroids(actors_list):
return [a.GetCenter() for a in actors_list]
def csv_areas(actors_list, filename, scale=SCALE, names=None):
centroids = compute_centroids(actors_list) # Centroids of original actors
print(
"-------- Repairing original mesh and Calculating areas (This process might take a long time, please wait) -----------")
areas = compute_all_areas(actors_list, scale)
print("-------- Saving CSV file -----------")
if names is not None:
csv = "Object,Pre_Area,Post_Area,Post/Pre,X,Y,Z,Name\n"
for i in range(len(areas)):
data = []
data.extend(areas[i])
data.extend(centroids[i])
data.append(names[i])
csv += "%d,%f,%f,%f,%f,%f,%f,%s\n" % tuple(data)
else:
csv = "Object,Pre_Area,Post_Area,Post/Pre,X,Y,Z\n"
for i in range(len(areas)):
data = []
data.extend(areas[i])
data.extend(centroids[i])
csv += "%d,%f,%f,%f,%f,%f,%f\n" % tuple(data)
with open(filename, 'w') as f:
f.write(csv)
def underScale(actor, scale):
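    # Shrink the mesh by 1/scale to undo the scale factor applied during rasterization.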
transform = vtk.vtkTransform()
relation = float(1) / float(scale)
transform.Scale(relation, relation, relation)
transformFilter = vtk.vtkTransformFilter()
transformFilter.SetInputData(actor.GetMapper().GetInput())
transformFilter.SetTransform(transform)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(transformFilter.GetOutputPort())
mapper.Update()
actor.SetMapper(mapper)
return actor
def reduceMesh(actor, reduction):
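    # `reduction` is given as a percentage; vtkDecimatePro expects a fraction
    # in [0, 1], hence the division by 100.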
decimate = vtk.vtkDecimatePro()
decimate.SetInputData(actor.GetMapper().GetInput())
decimate.SetTargetReduction(reduction / 100)
decimate.Update()
decimateMapper = vtk.vtkPolyDataMapper()
decimateMapper.SetInputConnection(decimate.GetOutputPort())
decimateMapper.Update()
actor.SetMapper(decimateMapper)
return actor
# Only for future versions of VTK, at the moment is a beta feature
def save_obj(rw, dir, name):
exporter = vtk.vtkOBJExporter()
if not os.path.isdir(dir):
os.makedirs(dir)
path = "%s/%s" % (dir, name)
exporter.SetFilePrefix(path)
exporter.SetRenderWindow(rw)
exporter.Write()
def save_stl(polydata, dir, name):
exporter = vtk.vtkSTLWriter()
if not os.path.isdir(dir):
os.makedirs(dir)
path = '%s/%s.stl' % (dir, name)
exporter.SetFileName(path)
exporter.SetInputData(polydata)
exporter.Write()
def save_vrml(name, dir, rw):
if not os.path.isdir(dir):
os.makedirs(dir)
path = '%s/%s.vrml' % (dir, name)
rw.Render()
exporter = vtk.vtkVRMLExporter()
exporter.SetFileName(path)
exporter.SetRenderWindow(rw)
rw.Render()
exporter.Write()
def initActorForExport(actor, rw, scale, reduction):
ren = rw.GetRenderers().GetFirstRenderer()
actor = underScale(actor, scale)
actor = reduceMesh(actor, reduction)
ren.AddActor(actor)
def toOriginalPos(actor, center, rotationTransform):
rotMat = vtk.vtkMatrix4x4()
rotationTransform.GetTranspose(rotMat)
rotTrans = vtk.vtkTransform()
rotTrans.SetMatrix(rotMat)
transformFilter = vtk.vtkTransformFilter()
transformFilter.SetInputData(actor.GetMapper().GetInput())
transformFilter.SetTransform(rotTrans)
transformFilter.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(transformFilter.GetOutputPort())
mapper.Update()
actor.SetMapper(mapper)
centerTransform = vtk.vtkTransform()
centerTransform.Translate(center[0], center[1], center[2])
transformFilter = vtk.vtkTransformFilter()
transformFilter.SetInputData(actor.GetMapper().GetInput())
transformFilter.SetTransform(centerTransform)
transformFilter.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(transformFilter.GetOutputPort())
mapper.Update()
actor.SetMapper(mapper)
centerCalculer = vtk.vtkCenterOfMass()
centerCalculer.SetInputData(actor.GetMapper().GetInput())
centerCalculer.SetUseScalarsAsWeights(False)
centerCalculer.Update()
center = centerCalculer.GetCenter()
print(center)
def main(input_filename, areas_filename, scale, is_imx, exportPath=None, exportType=False, reduction=70, radius=RADIUS,
combine=False):
# TODO: The following doesn't hide the RenderWindow :/
# factGraphics = vtk.vtkGraphicsFactory()
# factGraphics.SetUseMesaClasses(1)
# factImage = vtk.vtkImagingFactory()
# factImage.SetUseMesaClasses(1)
if exportPath is None:
pos = areas_filename.rfind("/")
filename = os.path.splitext(input_filename)[0]
posFilename = filename.rfind("/")
exportPath = areas_filename[:pos] + "/Meshes" + filename[posFilename:]
else:
filename = os.path.splitext(input_filename)[0]
pos = filename.rfind("/")
if pos == -1:
exportPath += "/"
exportPath += filename[pos:]
print(exportPath)
if is_imx:
vrml_filename = os.path.splitext(input_filename)[0] + ".vrml"
names_filename = os.path.splitext(input_filename)[0] + ".names"
args = ["{}".format(input_filename), "{}".format(vrml_filename), "{}".format(names_filename)]
p = Process(target=parse_imx.main, args=[args])
p.start()
p.join()
names_list = []
with open(names_filename) as f:
for line in f:
line = re.sub(r'\n', '', line)
names_list.append(line)
else:
vrml_filename = input_filename
names_list = None
rw = vtk.vtkRenderWindow()
rwi = vtk.vtkRenderWindowInteractor()
rwi.SetRenderWindow(rw)
# rw.OffScreenRenderingOn()
importer = vtk.vtkVRMLImporter()
importer.SetFileName(vrml_filename)
# importer = vtk.vtk3DSImporter()
# importer.SetFileName("cube.3ds")
importer.Read()
importer.SetRenderWindow(rw)
importer.Update()
rw.Render()
ren = importer.GetRenderer()
actors = ren.GetActors()
actors.InitTraversal()
rwExport = vtk.vtkRenderWindow()
# rwExport.OffScreenRenderingOn()
renExport = vtk.vtkRenderer()
rwExport.AddRenderer(renExport)
rwExport.Render()
if is_imx:
csv = "Object,Pre_Area,Post_Area,Post/Pre,X,Y,Z,Name\n"
else:
csv = "Object,Pre_Area,Post_Area,Post/Pre,X,Y,Z\n"
print(
"-------- Repairing original mesh and Calculating areas (This process might take a long time, please wait) -----------")
for i in range(ren.GetNumberOfPropsRendered()):
sys.stdout.write("%d _" % i)
actor = actors.GetNextActor()
polydata = actor.GetMapper().GetInput()
polydataCopy = vtk.vtkPolyData()
polydataCopy.DeepCopy(polydata)
area_pre = compute_area(actor)
centroid = actor.GetCenter()
rescaled = False
try:
rw.Render()
(center, rotation) = axisAligment(actor)
rw.Render()
open_actor(actor, i, scale, radius)
except ValueError as e:
# [KNOWN BUG] The sizes are corrected, but not the position
scale = scale * 2
open_actor(actor, i, scale, radius)
rescaled = True
area_post = compute_area(actor) / scale ** 2
if is_imx:
data = []
data.extend([i, area_pre, area_post, area_post / area_pre])
data.extend(centroid)
data.append(names_list[i])
csv += "%d,%f,%f,%f,%f,%f,%f,%s\n" % tuple(data)
else:
data = []
data.extend([i, area_pre, area_post, area_post / area_pre])
data.extend(centroid)
csv += "%d,%f,%f,%f,%f,%f,%f\n" % tuple(data)
if exportType != "None":
initActorForExport(actor, rwExport, scale, reduction)
toOriginalPos(actor, center, rotation)
if names_list is not None:
name = names_list[i]
else:
name = i
if exportType == "Stl":
save_stl(actor.GetMapper().GetInput(), exportPath, str(name) + "_R")
save_stl(polydataCopy, exportPath, str(name) + "_O")
renExport.RemoveActor(actor)
elif exportType == "Vrml":
save_vrml(str(name) + "_R", exportPath, rwExport)
renExport.RemoveActor(actor)
actorOld = vtk.vtkActor()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydataCopy)
actorOld.SetMapper(mapper)
renExport.AddActor(actorOld)
save_vrml(str(name) + "_O", exportPath, rwExport)
renExport.RemoveActor(actorOld)
elif exportType == "Obj":
save_obj(rwExport, exportPath, str(name) + "_R")
renExport.RemoveActor(actor)
actorOld = vtk.vtkActor()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydataCopy)
actorOld.SetMapper(mapper)
renExport.AddActor(actorOld)
save_obj(rwExport, exportPath, str(name) + "_O")
renExport.RemoveActor(actorOld)
ren.RemoveActor(actor)
if rescaled:
scale /= 2
with open(areas_filename, 'w') as f:
f.write(csv)
if is_imx:
os.remove(vrml_filename)
os.remove(names_filename)
rw.Finalize()
print("")
|
[
"vtk.vtkPolyDataToImageStencil",
"multiprocessing.Process",
"vtk.vtkOBJExporter",
"vtk.vtkImageStencil",
"vtk.vtkDecimatePro",
"vtk.vtkVRMLExporter",
"os.remove",
"vtk.vtkVRMLImporter",
"vtk.vtkImageDilateErode3D",
"os.path.isdir",
"vtk.vtkRenderer",
"vtk.vtkMetaImageWriter",
"sys.stdout.flush",
"numpy.matrix",
"optparse.OptionParser.__init__",
"vtk.vtkAppendPolyData",
"os.path.splitext",
"vtk.vtkRenderWindowInteractor",
"vtk.vtkRenderWindow",
"vtk.vtkPolyDataMapper",
"vtk.vtkTransform",
"vtk.vtkActor",
"vtk.vtkImageData",
"numpy.linalg.svd",
"re.sub",
"vtk.vtkImageGaussianSmooth",
"vtk.vtkMarchingCubes",
"vtk.vtkMatrix4x4",
"os.makedirs",
"vtk.vtkPolyData",
"vtk.vtkSTLWriter",
"vtk.vtkCenterOfMass",
"vtk.vtkTransformFilter",
"sys.stdout.write"
] |
[((1355, 1379), 'vtk.vtkMetaImageWriter', 'vtk.vtkMetaImageWriter', ([], {}), '()\n', (1377, 1379), False, 'import vtk\n'), ((2470, 2501), 'vtk.vtkPolyDataToImageStencil', 'vtk.vtkPolyDataToImageStencil', ([], {}), '()\n', (2499, 2501), False, 'import vtk\n'), ((2841, 2859), 'vtk.vtkImageData', 'vtk.vtkImageData', ([], {}), '()\n', (2857, 2859), False, 'import vtk\n'), ((3181, 3202), 'vtk.vtkImageStencil', 'vtk.vtkImageStencil', ([], {}), '()\n', (3200, 3202), False, 'import vtk\n'), ((3422, 3443), 'vtk.vtkImageStencil', 'vtk.vtkImageStencil', ([], {}), '()\n', (3441, 3443), False, 'import vtk\n'), ((3849, 3870), 'vtk.vtkCenterOfMass', 'vtk.vtkCenterOfMass', ([], {}), '()\n', (3868, 3870), False, 'import vtk\n'), ((4071, 4089), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (4087, 4089), False, 'import vtk\n'), ((4179, 4203), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (4201, 4203), False, 'import vtk\n'), ((4340, 4363), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (4361, 4363), False, 'import vtk\n'), ((4532, 4553), 'vtk.vtkCenterOfMass', 'vtk.vtkCenterOfMass', ([], {}), '()\n', (4551, 4553), False, 'import vtk\n'), ((4911, 4937), 'numpy.matrix', 'np.matrix', (['pointsMatrixAux'], {}), '(pointsMatrixAux)\n', (4920, 4937), True, 'import numpy as np\n'), ((5046, 5097), 'numpy.linalg.svd', 'np.linalg.svd', (['covarianzeMatrix'], {'full_matrices': '(True)'}), '(covarianzeMatrix, full_matrices=True)\n', (5059, 5097), True, 'import numpy as np\n'), ((5120, 5138), 'vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (5136, 5138), False, 'import vtk\n'), ((5418, 5436), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (5434, 5436), False, 'import vtk\n'), ((5508, 5532), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (5530, 5532), False, 'import vtk\n'), ((5691, 5714), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (5712, 5714), False, 'import vtk\n'), ((5884, 5911), 'vtk.vtkImageDilateErode3D', 'vtk.vtkImageDilateErode3D', ([], {}), '()\n', (5909, 5911), False, 'import vtk\n'), ((6521, 6549), 'vtk.vtkImageGaussianSmooth', 'vtk.vtkImageGaussianSmooth', ([], {}), '()\n', (6547, 6549), False, 'import vtk\n'), ((6749, 6771), 'vtk.vtkMarchingCubes', 'vtk.vtkMarchingCubes', ([], {}), '()\n', (6769, 6771), False, 'import vtk\n'), ((7831, 7854), 'vtk.vtkAppendPolyData', 'vtk.vtkAppendPolyData', ([], {}), '()\n', (7852, 7854), False, 'import vtk\n'), ((8045, 8059), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (8057, 8059), False, 'import vtk\n'), ((10090, 10108), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (10106, 10108), False, 'import vtk\n'), ((10221, 10245), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (10243, 10245), False, 'import vtk\n'), ((10367, 10390), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (10388, 10390), False, 'import vtk\n'), ((10570, 10590), 'vtk.vtkDecimatePro', 'vtk.vtkDecimatePro', ([], {}), '()\n', (10588, 10590), False, 'import vtk\n'), ((10740, 10763), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (10761, 10763), False, 'import vtk\n'), ((11022, 11042), 'vtk.vtkOBJExporter', 'vtk.vtkOBJExporter', ([], {}), '()\n', (11040, 11042), False, 'import vtk\n'), ((11272, 11290), 'vtk.vtkSTLWriter', 'vtk.vtkSTLWriter', ([], {}), '()\n', (11288, 11290), False, 'import vtk\n'), ((11631, 11652), 'vtk.vtkVRMLExporter', 'vtk.vtkVRMLExporter', ([], {}), '()\n', (11650, 11652), False, 'import vtk\n'), ((12026, 12044), 'vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (12042, 12044), False, 'import vtk\n'), ((12103, 12121), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (12119, 12121), False, 'import vtk\n'), ((12176, 12200), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (12198, 12200), False, 'import vtk\n'), ((12350, 12373), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (12371, 12373), False, 'import vtk\n'), ((12508, 12526), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (12524, 12526), False, 'import vtk\n'), ((12613, 12637), 'vtk.vtkTransformFilter', 'vtk.vtkTransformFilter', ([], {}), '()\n', (12635, 12637), False, 'import vtk\n'), ((12794, 12817), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (12815, 12817), False, 'import vtk\n'), ((12951, 12972), 'vtk.vtkCenterOfMass', 'vtk.vtkCenterOfMass', ([], {}), '()\n', (12970, 12972), False, 'import vtk\n'), ((14611, 14632), 'vtk.vtkRenderWindow', 'vtk.vtkRenderWindow', ([], {}), '()\n', (14630, 14632), False, 'import vtk\n'), ((14643, 14674), 'vtk.vtkRenderWindowInteractor', 'vtk.vtkRenderWindowInteractor', ([], {}), '()\n', (14672, 14674), False, 'import vtk\n'), ((14751, 14772), 'vtk.vtkVRMLImporter', 'vtk.vtkVRMLImporter', ([], {}), '()\n', (14770, 14772), False, 'import vtk\n'), ((15088, 15109), 'vtk.vtkRenderWindow', 'vtk.vtkRenderWindow', ([], {}), '()\n', (15107, 15109), False, 'import vtk\n'), ((15164, 15181), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (15179, 15181), False, 'import vtk\n'), ((326, 362), 'optparse.OptionParser.__init__', 'optparse.OptionParser.__init__', (['self'], {}), '(self)\n', (356, 362), False, 'import optparse\n'), ((8089, 8112), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (8110, 8112), False, 'import vtk\n'), ((8466, 8493), 'sys.stdout.write', 'sys.stdout.write', (["('%d ' % i)"], {}), "('%d ' % i)\n", (8482, 8493), False, 'import sys\n'), ((8891, 8909), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8907, 8909), False, 'import sys\n'), ((11054, 11072), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (11067, 11072), False, 'import os\n'), ((11082, 11098), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (11093, 11098), False, 'import os\n'), ((11302, 11320), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (11315, 11320), False, 'import os\n'), ((11330, 11346), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (11341, 11346), False, 'import os\n'), ((11516, 11534), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (11529, 11534), False, 'import os\n'), ((11544, 11560), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (11555, 11560), False, 'import os\n'), ((14268, 14311), 'multiprocessing.Process', 'Process', ([], {'target': 'parse_imx.main', 'args': '[args]'}), '(target=parse_imx.main, args=[args])\n', (14275, 14311), False, 'from multiprocessing import Process\n'), ((15591, 15619), 'sys.stdout.write', 'sys.stdout.write', (["('%d _' % i)"], {}), "('%d _' % i)\n", (15607, 15619), False, 'import sys\n'), ((15729, 15746), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (15744, 15746), False, 'import vtk\n'), ((18414, 18438), 'os.remove', 'os.remove', (['vrml_filename'], {}), '(vrml_filename)\n', (18423, 18438), False, 'import os\n'), ((18447, 18472), 'os.remove', 'os.remove', (['names_filename'], {}), '(names_filename)\n', (18456, 18472), False, 'import os\n'), ((13628, 13660), 'os.path.splitext', 'os.path.splitext', (['input_filename'], {}), '(input_filename)\n', (13644, 13660), False, 'import os\n'), ((13814, 13846), 'os.path.splitext', 'os.path.splitext', (['input_filename'], {}), '(input_filename)\n', (13830, 13846), False, 'import os\n'), ((14036, 14068), 'os.path.splitext', 'os.path.splitext', (['input_filename'], {}), '(input_filename)\n', (14052, 14068), False, 'import os\n'), ((14107, 14139), 'os.path.splitext', 'os.path.splitext', (['input_filename'], {}), '(input_filename)\n', (14123, 14139), False, 'import os\n'), ((14462, 14485), 're.sub', 're.sub', (['"""\\\\n"""', '""""""', 'line'], {}), "('\\\\n', '', line)\n", (14468, 14485), False, 'import re\n'), ((17446, 17460), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (17458, 17460), False, 'import vtk\n'), ((17486, 17509), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (17507, 17509), False, 'import vtk\n'), ((17937, 17951), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (17949, 17951), False, 'import vtk\n'), ((17977, 18000), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (17998, 18000), False, 'import vtk\n')]
|
#!/usr/bin/env python
from holtztools import plots,html
from astropy.io import fits,ascii
import numpy as np
import math
import pdb
import argparse
import os
import matplotlib.pyplot as plt
def throughplot(instrument='apogee-s',outfile=None,inter=False) :
'''
    Routine to make zeropoint/throughput plots from apogeeSci summary files
    with information including FWHM, GDRMS, and CART
'''
# instrument specific
if instrument == 'apogee-s' :
gain=3.
carts=[20,25]
fiber_rad=0.65
telescope='lco25m'
else :
gain=1.9
carts=[0,10]
fiber_rad=1.
telescope='apo25m'
# read summary data made by mkmonitor
a=fits.open(instrument+'Sci.fits')[1].data
gd = np.where(a['NREADS'] >= 47)[0]
a=a[gd]
# use weather information if we can
clouds=np.zeros(len(a)).astype(int)
nmiss=0
nhave=0
try :
c=ascii.read(os.environ['APOGEEREDUCEPLAN_DIR']+'/data/'+telescope+'/clouds.txt')
try:
for i,p in enumerate(a['PLATE']) :
j=np.where((c['plate'] == p) & (c['MJD'] == a['MJD'][i]) )[0]
if len(j)>0 :
if len(j)>1 :
print('double cloud match',p,a['MJD'][i])
pdb.set_trace()
clouds[i] = c['clouds_level'][j[0]]
nhave+=1
else :
nmiss+=1
print('no clouds match found for',a['MJD'][i],p)
except :
print('error!',i,p,j)
pdb.set_trace()
gd=np.where(clouds <= 1)[0]
a=a[gd]
except :
print('cant open clouds file')
# seeing correction factor
    #sigma = a['FWHM']/2.354   # superseded: the SEEING-based width is used instead
    sigma = a['SEEING']/2.354
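    # enclosed-energy correction: ee is the fraction of a circular Gaussian PSF of
    # width sigma that lands inside the fiber radius, 1 - exp(-r^2/(2*sigma^2)), so
    # corr below is the zeropoint with the fiber aperture loss taken back out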
ee = 1. - np.exp(-(fiber_rad**2)/(2*sigma**2))
corr = a['ZERONORM']-2.5*np.log10(ee)
gd = np.where(np.isfinite(corr))[0]
a=a[gd]
ee=ee[gd]
corr=corr[gd]
# run number for LCO
run = ((a['MJD']-57850)/29.+0.5).astype(int)
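    # each "run" is a ~29-day block (roughly one lunation) counted from MJD 57850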
# rough throughput calculation
h=6.63e-27
c=3.e10
lam=1.6e-4
dlam=0.3
dt=10.6
area=math.pi*(125.**2-50.**2)
fvega=11.38e-11
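    # constants are (apparently) CGS: h [erg s], c [cm/s], lam = 1.6e-4 cm (H band),
    # dlam the bandwidth, dt the exposure time [s], area that of a 2.5 m primary
    # with a 1 m central obstruction [cm^2], and fvega a Vega reference flux;
    # "through" is then detected energy flux over the Vega flux, i.e. a fractional
    # system throughput, with ee undoing the fiber aperture loss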
through=10**(0.4*a['ZERONORM'])*h*c/lam/dlam/dt*gain/area/fvega/ee
# straight DHA
dha=a['HA']-a['DESIGN_HA'][:,0]
#dha=np.abs(dha)
# "normalized" DHA
j=np.where(a['HA']<a['DESIGN_HA'][:,0])[0]
dha[j]/=(a['DESIGN_HA'][j,0]-a['DESIGN_HA'][j,1])
j=np.where(a['HA']>=a['DESIGN_HA'][:,0])[0]
dha[j]/=(a['DESIGN_HA'][j,2]-a['DESIGN_HA'][j,0])
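    # dha is now expressed in units of the allowed HA window on each side of the
    # design HA (DESIGN_HA columns presumably [nominal, min, max]), so -1..+1
    # spans the plannable range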
#plots with MJD
files=[]
out='monitor/'+instrument+'/'+instrument
# point size by FWHM
psize=a['FWHM']/1.*40
j=np.where(psize == 0.)[0]
psize[j] = 10
# histograms by run
fig,ax=plots.multi(2,3,figsize=(8,12))
file=out+'zero_hist.png'
    runs=sorted(set(run))   # sorted so the histogram/legend order is deterministic
runs.append(999)
for r in runs :
gd = np.where(run == r)[0]
if r == 999 :
gd = np.where(run < 999)[0]
if r >= 8 : lw=2
else : lw=1
print(r,len(gd))
try:
            n,b,p=plt.hist(a['GDRMS'][gd],histtype='step',bins=np.arange(0,1,0.05),label='{:3d}'.format(r),linewidth=lw,density=False)
if r == 999 : n/=2
ax[0,0].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:2d}'.format(r))
ax[0,0].set_xlabel('GDRMS')
except : pass
try:
            n,b,p=plt.hist(a['ZERONORM'][gd],histtype='step',bins=np.arange(12,15.5,0.1),linewidth=lw,density=False,label='{:2d}'.format(r))
if r == 999 : n/=2
ax[0,1].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:2d}'.format(r))
ax[0,1].set_xlabel('ZERONORM')
            n,b,p=plt.hist(corr[gd],histtype='step',bins=np.arange(12,16,0.1),linewidth=lw,density=False,label='{:3d}'.format(r))
if r == 999 : n/=2
ax[1,0].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:3d}'.format(r))
ax[1,0].set_xlabel('ZERONORM (adjusted)')
            n,b,p=plt.hist(a['ZERORMS'][gd],histtype='step',bins=np.arange(0,1,0.05),linewidth=lw,density=False,label='{:3d}'.format(r))
if r == 999 : n/=2
ax[1,1].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:3d}'.format(r))
ax[1,1].set_xlabel('ZERORMS')
            n,b,p=plt.hist(through[gd],histtype='step',bins=np.arange(0,0.34,0.02),linewidth=lw,density=False,label='{:3d}'.format(r))
if r == 999 : n/=2
ax[2,0].plot(b[0:-1]+(b[1]-b[0])/2.,n,linewidth=lw,label='{:3d}'.format(r))
ax[2,0].set_xlabel('THROUGHPUT (adjusted)')
except : pass
if instrument == 'apogee-s' :
ax[0,0].legend(fontsize=6,loc=1,title='Run')
ax[0,1].legend(fontsize=6,loc=2,title='Run')
ax[1,0].legend(fontsize=6,loc=2,title='Run')
ax[1,1].legend(fontsize=6,loc=1,title='Run')
ax[2,1].remove()
fig.tight_layout()
fig.savefig(file)
files.append([os.path.basename(file)])
ctype = [a['FWHM'],a['SEEING'],a['GDRMS'],dha,a['CART']]
name = ['zero_fwhm','zero_seeing','zero_gdrms','zero_dha','zero_cart']
zr=[[0.5,2.],[0.5,2.],[0,0.8],[-2,2],carts]
zt=['FWHM','SEEING','GDRMS','DHA','CART']
for j,c in enumerate(ctype) :
fig,ax=plots.multi(1,4,hspace=0.001,sharex=True,figsize=(24,6))
file=out+name[j]+'.png'
plots.plotc(ax[0],a['MJD'],a['ZERONORM'],c,yr=[12,15.5],zr=zr[j],size=psize,colorbar=True,xt='MJD',yt='ZERONORM',zt=zt[j])
plots.plotc(ax[1],a['MJD'],corr,c,yr=[12,15.5],zr=zr[j],size=psize,colorbar=True,xt='MJD',yt='ZERONORM (adjusted)',zt=zt[j])
plots.plotc(ax[2],a['MJD'],a['ZERORMS'],c,yr=[0,1],zr=zr[j],size=psize,colorbar=True,xt='MJD',yt='ZERORMS',zt=zt[j])
plots.plotc(ax[3],a['MJD'],through,c,yr=[0,0.3],zr=zr[j],size=psize,colorbar=True,xt='MJD',yt='throughput',zt=zt[j])
fig.savefig(file)
files.append([os.path.basename(file)])
fig,ax=plots.multi(1,1)
plots.plotc(ax,a['SEEING'],a['ZERONORM'],a['GDRMS'],xr=[0.,3.0],yr=[13,15.5],zr=[0.2,1.2],xt='Seeing',yt='ZERONORM',zt='GDRMS',colorbar=True,size=1)
#plots.plotc(ax[1],a['SEEING'],corr,a['GDRMS'],xr=[0.,3.0],yr=[13,15.5],zr=[0.2,1.2],xt='Seeing',yt='seeing-corrected ZERONORM',zt='GDRMS',colorbar=True,size=1)
file=out+'_seeing.png'
fig.savefig(file)
files.append([os.path.basename(file)])
out='monitor/'+instrument+'/'+instrument
html.htmltab(files,file=out+'zero.html')
if inter :
pdb.set_trace()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Make throughput plots",
usage="through --instrument apogee-s")
parser.add_argument("-i", "--instrument", type=str,
required=True,
help="instrument to plot",
choices=['apogee-s', 'apogee-n'])
args = parser.parse_args()
throughplot(instrument=args.instrument)
|
[
"numpy.log10",
"argparse.ArgumentParser",
"numpy.where",
"holtztools.html.htmltab",
"numpy.exp",
"holtztools.plots.multi",
"holtztools.plots.plotc",
"numpy.isfinite",
"os.path.basename",
"pdb.set_trace",
"astropy.io.fits.open",
"astropy.io.ascii.read",
"numpy.arange"
] |
[((2788, 2822), 'holtztools.plots.multi', 'plots.multi', (['(2)', '(3)'], {'figsize': '(8, 12)'}), '(2, 3, figsize=(8, 12))\n', (2799, 2822), False, 'from holtztools import plots, html\n'), ((5967, 5984), 'holtztools.plots.multi', 'plots.multi', (['(1)', '(1)'], {}), '(1, 1)\n', (5978, 5984), False, 'from holtztools import plots, html\n'), ((5988, 6160), 'holtztools.plots.plotc', 'plots.plotc', (['ax', "a['SEEING']", "a['ZERONORM']", "a['GDRMS']"], {'xr': '[0.0, 3.0]', 'yr': '[13, 15.5]', 'zr': '[0.2, 1.2]', 'xt': '"""Seeing"""', 'yt': '"""ZERONORM"""', 'zt': '"""GDRMS"""', 'colorbar': '(True)', 'size': '(1)'}), "(ax, a['SEEING'], a['ZERONORM'], a['GDRMS'], xr=[0.0, 3.0], yr=[\n 13, 15.5], zr=[0.2, 1.2], xt='Seeing', yt='ZERONORM', zt='GDRMS',\n colorbar=True, size=1)\n", (5999, 6160), False, 'from holtztools import plots, html\n'), ((6444, 6487), 'holtztools.html.htmltab', 'html.htmltab', (['files'], {'file': "(out + 'zero.html')"}), "(files, file=out + 'zero.html')\n", (6456, 6487), False, 'from holtztools import plots, html\n'), ((6565, 6669), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Make throughput plots"""', 'usage': '"""through --instrument apogee-s"""'}), "(description='Make throughput plots', usage=\n 'through --instrument apogee-s')\n", (6588, 6669), False, 'import argparse\n'), ((744, 771), 'numpy.where', 'np.where', (["(a['NREADS'] >= 47)"], {}), "(a['NREADS'] >= 47)\n", (752, 771), True, 'import numpy as np\n'), ((912, 1001), 'astropy.io.ascii.read', 'ascii.read', (["(os.environ['APOGEEREDUCEPLAN_DIR'] + '/data/' + telescope + '/clouds.txt')"], {}), "(os.environ['APOGEEREDUCEPLAN_DIR'] + '/data/' + telescope +\n '/clouds.txt')\n", (922, 1001), False, 'from astropy.io import fits, ascii\n'), ((1798, 1840), 'numpy.exp', 'np.exp', (['(-fiber_rad ** 2 / (2 * sigma ** 2))'], {}), '(-fiber_rad ** 2 / (2 * sigma ** 2))\n', (1804, 1840), True, 'import numpy as np\n'), ((2374, 2414), 'numpy.where', 'np.where', (["(a['HA'] < a['DESIGN_HA'][:, 0])"], {}), "(a['HA'] < a['DESIGN_HA'][:, 0])\n", (2382, 2414), True, 'import numpy as np\n'), ((2475, 2516), 'numpy.where', 'np.where', (["(a['HA'] >= a['DESIGN_HA'][:, 0])"], {}), "(a['HA'] >= a['DESIGN_HA'][:, 0])\n", (2483, 2516), True, 'import numpy as np\n'), ((2709, 2731), 'numpy.where', 'np.where', (['(psize == 0.0)'], {}), '(psize == 0.0)\n', (2717, 2731), True, 'import numpy as np\n'), ((5293, 5354), 'holtztools.plots.multi', 'plots.multi', (['(1)', '(4)'], {'hspace': '(0.001)', 'sharex': '(True)', 'figsize': '(24, 6)'}), '(1, 4, hspace=0.001, sharex=True, figsize=(24, 6))\n', (5304, 5354), False, 'from holtztools import plots, html\n'), ((5386, 5523), 'holtztools.plots.plotc', 'plots.plotc', (['ax[0]', "a['MJD']", "a['ZERONORM']", 'c'], {'yr': '[12, 15.5]', 'zr': 'zr[j]', 'size': 'psize', 'colorbar': '(True)', 'xt': '"""MJD"""', 'yt': '"""ZERONORM"""', 'zt': 'zt[j]'}), "(ax[0], a['MJD'], a['ZERONORM'], c, yr=[12, 15.5], zr=zr[j],\n size=psize, colorbar=True, xt='MJD', yt='ZERONORM', zt=zt[j])\n", (5397, 5523), False, 'from holtztools import plots, html\n'), ((5515, 5654), 'holtztools.plots.plotc', 'plots.plotc', (['ax[1]', "a['MJD']", 'corr', 'c'], {'yr': '[12, 15.5]', 'zr': 'zr[j]', 'size': 'psize', 'colorbar': '(True)', 'xt': '"""MJD"""', 'yt': '"""ZERONORM (adjusted)"""', 'zt': 'zt[j]'}), "(ax[1], a['MJD'], corr, c, yr=[12, 15.5], zr=zr[j], size=psize,\n colorbar=True, xt='MJD', yt='ZERONORM (adjusted)', zt=zt[j])\n", (5526, 5654), False, 'from holtztools import plots, html\n'), ((5646, 5778), 
'holtztools.plots.plotc', 'plots.plotc', (['ax[2]', "a['MJD']", "a['ZERORMS']", 'c'], {'yr': '[0, 1]', 'zr': 'zr[j]', 'size': 'psize', 'colorbar': '(True)', 'xt': '"""MJD"""', 'yt': '"""ZERORMS"""', 'zt': 'zt[j]'}), "(ax[2], a['MJD'], a['ZERORMS'], c, yr=[0, 1], zr=zr[j], size=\n psize, colorbar=True, xt='MJD', yt='ZERORMS', zt=zt[j])\n", (5657, 5778), False, 'from holtztools import plots, html\n'), ((5769, 5900), 'holtztools.plots.plotc', 'plots.plotc', (['ax[3]', "a['MJD']", 'through', 'c'], {'yr': '[0, 0.3]', 'zr': 'zr[j]', 'size': 'psize', 'colorbar': '(True)', 'xt': '"""MJD"""', 'yt': '"""throughput"""', 'zt': 'zt[j]'}), "(ax[3], a['MJD'], through, c, yr=[0, 0.3], zr=zr[j], size=psize,\n colorbar=True, xt='MJD', yt='throughput', zt=zt[j])\n", (5780, 5900), False, 'from holtztools import plots, html\n'), ((6508, 6523), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (6521, 6523), False, 'import pdb\n'), ((694, 728), 'astropy.io.fits.open', 'fits.open', (["(instrument + 'Sci.fits')"], {}), "(instrument + 'Sci.fits')\n", (703, 728), False, 'from astropy.io import fits, ascii\n'), ((1600, 1621), 'numpy.where', 'np.where', (['(clouds <= 1)'], {}), '(clouds <= 1)\n', (1608, 1621), True, 'import numpy as np\n'), ((1864, 1876), 'numpy.log10', 'np.log10', (['ee'], {}), '(ee)\n', (1872, 1876), True, 'import numpy as np\n'), ((1896, 1913), 'numpy.isfinite', 'np.isfinite', (['corr'], {}), '(corr)\n', (1907, 1913), True, 'import numpy as np\n'), ((2927, 2945), 'numpy.where', 'np.where', (['(run == r)'], {}), '(run == r)\n', (2935, 2945), True, 'import numpy as np\n'), ((4990, 5012), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (5006, 5012), False, 'import os\n'), ((6369, 6391), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (6385, 6391), False, 'import os\n'), ((1572, 1587), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1585, 1587), False, 'import pdb\n'), ((2989, 3008), 'numpy.where', 'np.where', (['(run < 999)'], {}), '(run < 999)\n', (2997, 3008), True, 'import numpy as np\n'), ((5930, 5952), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (5946, 5952), False, 'import os\n'), ((1070, 1125), 'numpy.where', 'np.where', (["((c['plate'] == p) & (c['MJD'] == a['MJD'][i]))"], {}), "((c['plate'] == p) & (c['MJD'] == a['MJD'][i]))\n", (1078, 1125), True, 'import numpy as np\n'), ((3158, 3179), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.05)'], {}), '(0, 1, 0.05)\n', (3167, 3179), True, 'import numpy as np\n'), ((3489, 3513), 'numpy.arange', 'np.arange', (['(12)', '(15.5)', '(0.1)'], {}), '(12, 15.5, 0.1)\n', (3498, 3513), True, 'import numpy as np\n'), ((3782, 3804), 'numpy.arange', 'np.arange', (['(12)', '(16)', '(0.1)'], {}), '(12, 16, 0.1)\n', (3791, 3804), True, 'import numpy as np\n'), ((4092, 4113), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.05)'], {}), '(0, 1, 0.05)\n', (4101, 4113), True, 'import numpy as np\n'), ((4384, 4408), 'numpy.arange', 'np.arange', (['(0)', '(0.34)', '(0.02)'], {}), '(0, 0.34, 0.02)\n', (4393, 4408), True, 'import numpy as np\n'), ((1286, 1301), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1299, 1301), False, 'import pdb\n')]
|
from datetime import date
from models import gtfs, config, util, nextbus, routeconfig
import argparse
import shapely
import partridge as ptg
import numpy as np
from pathlib import Path
import requests
import json
import boto3
import gzip
import hashlib
import math
import zipfile
# Downloads and parses the GTFS specification
# and saves the configuration for all routes to S3.
# The S3 object contains data merged from GTFS and the Nextbus API (for agencies using Nextbus).
# The frontend can then request this S3 URL directly without hitting the Python backend.
# For each direction, the JSON object contains a coords array defining the shape of the route,
# where the values are objects containing lat/lon properties:
#
# "coords":[
# {"lat":37.80707,"lon":-122.41727}
# {"lat":37.80727,"lon":-122.41562},
# {"lat":37.80748,"lon":-122.41398},
# {"lat":37.80768,"lon":-122.41234},
# ...
# ]
#
# For each direction, the JSON object also contains a stop_geometry object where the keys are stop IDs
# and the values are objects with a distance property (cumulative distance in meters to that stop along the GTFS # shape),
# and an after_index property (index into the coords array of the last coordinate before that stop).
#
# "stop_geometry":{
# "5184":{"distance":8,"after_index":0},
# "3092":{"distance":279,"after_index":1},
# "3095":{"distance":573,"after_index":3},
# "4502":{"distance":1045,"after_index":8},
# ...
#}
#
# In order to match a Nextbus direction with a GTFS shape_id, this finds the GTFS shape_id for that route where
# distance(first coordinate of shape, first stop location) + distance(last coordinate of shape, last stop location)
# is a minimum.
#
# Currently the script just overwrites the one S3 path, but this process could be extended in the future to
# store different paths for different dates, to allow fetching historical data for route configurations.
#
def match_nextbus_direction(nextbus_route_config, geometry):
shape_start = geometry.coords[0]
shape_end = geometry.coords[-1]
nextbus_dir_infos = nextbus_route_config.get_direction_infos()
terminal_dists = []
for nextbus_dir_info in nextbus_dir_infos:
nextbus_dir_stop_ids = nextbus_dir_info.get_stop_ids()
first_stop_info = nextbus_route_config.get_stop_info(nextbus_dir_stop_ids[0])
last_stop_info = nextbus_route_config.get_stop_info(nextbus_dir_stop_ids[-1])
# Determine distance between first nextbus stop and start of GTFS shape,
# plus distance between last stop and end of GTFS shape,
# for all Nextbus directions for this route.
start_dist = util.haver_distance(first_stop_info.lat, first_stop_info.lon, shape_start[1], shape_start[0])
end_dist = util.haver_distance(last_stop_info.lat, last_stop_info.lon, shape_end[1], shape_end[0])
terminal_dist = start_dist + end_dist
terminal_dists.append(terminal_dist)
terminal_dist_order = np.argsort(terminal_dists)
best_nextbus_dir_index = terminal_dist_order[0] # index of the "best" shape for this direction, with the minimum terminal_dist
best_nextbus_dir_info = nextbus_dir_infos[best_nextbus_dir_index]
best_terminal_dist = terminal_dists[best_nextbus_dir_index]
return best_nextbus_dir_info, best_terminal_dist
def get_stop_geometry(stop_xy, shape_lines_xy, shape_cumulative_dist, start_index):
# Finds the first position of a particular stop along a shape (after the start_index'th line segment in shape_lines_xy),
# using XY coordinates in meters.
# The returned dict is used by the frontend to draw line segments along a route between two stops.
num_shape_lines = len(shape_lines_xy)
best_offset = 99999999
best_index = 0
shape_index = start_index
while shape_index < num_shape_lines:
shape_line_offset = shape_lines_xy[shape_index].distance(stop_xy)
if shape_line_offset < best_offset:
best_offset = shape_line_offset
best_index = shape_index
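        # once within 50 m of the stop, stop scanning as soon as the offset grows
        # again, keeping the first close pass (a shape may approach a stop twice,
        # e.g. on a loop route)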
if best_offset < 50 and shape_line_offset > best_offset:
break
shape_index += 1
shape_point = shapely.geometry.Point(shape_lines_xy[best_index].coords[0])
distance_after_shape_point = stop_xy.distance(shape_point)
distance_to_shape_point = shape_cumulative_dist[best_index]
stop_dist = distance_to_shape_point + distance_after_shape_point
if best_offset > 30:
print(f' stop_dist = {int(stop_dist)} = ({int(distance_to_shape_point)} + {int(distance_after_shape_point)}), offset = {int(best_offset)}, after_index = {best_index} ')
return {
'distance': int(stop_dist), # total distance in meters along the route shape to this stop
'after_index': best_index, # the index of the coordinate of the shape just before this stop
'offset': int(best_offset) # distance in meters between this stop and the closest line segment of shape
}
def get_unique_shapes(direction_trips_df, stop_times_df, stops_map, normalize_gtfs_stop_id):
# Finds the unique shapes associated with a GTFS route/direction, merging shapes that contain common subsequences of stops.
# These unique shapes may represent multiple branches of a route.
# Returns a list of dicts with properties 'shape_id', 'count', and 'stop_ids', sorted by count in descending order.
stop_times_trip_id_values = stop_times_df['trip_id'].values
direction_shape_id_values = direction_trips_df['shape_id'].values
unique_shapes_map = {}
direction_shape_ids, direction_shape_id_counts = np.unique(direction_shape_id_values, return_counts=True)
direction_shape_id_order = np.argsort(-1 * direction_shape_id_counts)
direction_shape_ids = direction_shape_ids[direction_shape_id_order]
direction_shape_id_counts = direction_shape_id_counts[direction_shape_id_order]
for shape_id, shape_id_count in zip(direction_shape_ids, direction_shape_id_counts):
shape_trip = direction_trips_df[direction_shape_id_values == shape_id].iloc[0]
shape_trip_id = shape_trip.trip_id
shape_trip_stop_times = stop_times_df[stop_times_trip_id_values == shape_trip_id].sort_values('stop_sequence')
shape_trip_stop_ids = [
normalize_gtfs_stop_id(gtfs_stop_id)
for gtfs_stop_id in shape_trip_stop_times['stop_id'].values
]
unique_shape_key = hashlib.sha256(json.dumps(shape_trip_stop_ids).encode('utf-8')).hexdigest()[0:12]
#print(f' shape {shape_id} ({shape_id_count})')
if unique_shape_key not in unique_shapes_map:
for other_shape_key, other_shape_info in unique_shapes_map.items():
#print(f" checking match with {shape_id} and {other_shape_info['shape_id']}")
if is_subsequence(shape_trip_stop_ids, other_shape_info['stop_ids']):
print(f" shape {shape_id} is subsequence of shape {other_shape_info['shape_id']}")
unique_shape_key = other_shape_key
break
elif is_subsequence(other_shape_info['stop_ids'], shape_trip_stop_ids):
print(f" shape {other_shape_info['shape_id']} is subsequence of shape {shape_id}")
shape_id_count += other_shape_info['count']
del unique_shapes_map[other_shape_key]
break
if unique_shape_key not in unique_shapes_map:
unique_shapes_map[unique_shape_key] = {
'count': 0,
'shape_id': shape_id,
'stop_ids': shape_trip_stop_ids
}
unique_shapes_map[unique_shape_key]['count'] += shape_id_count
sorted_shapes = sorted(unique_shapes_map.values(), key=lambda shape: -1 * shape['count'])
for shape_info in sorted_shapes:
count = shape_info['count']
shape_id = shape_info['shape_id']
stop_ids = shape_info['stop_ids']
first_stop_id = stop_ids[0]
last_stop_id = stop_ids[-1]
first_stop = stops_map[first_stop_id]
last_stop = stops_map[last_stop_id]
print(f' shape_id: {shape_id} ({count}x) stops:{len(stop_ids)} from {first_stop_id} {first_stop.stop_name} to {last_stop_id} {last_stop.stop_name} {",".join(stop_ids)}')
return sorted_shapes
def download_gtfs_data(agency: config.Agency, gtfs_cache_dir):
gtfs_url = agency.gtfs_url
if gtfs_url is None:
raise Exception(f'agency {agency.id} does not have gtfs_url in config')
cache_dir = Path(gtfs_cache_dir)
if not cache_dir.exists():
print(f'downloading gtfs data from {gtfs_url}')
r = requests.get(gtfs_url)
if r.status_code != 200:
raise Exception(f"Error fetching {gtfs_url}: HTTP {r.status_code}: {r.text}")
zip_path = f'{util.get_data_dir()}/gtfs-{agency.id}.zip'
with open(zip_path, 'wb') as f:
f.write(r.content)
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(gtfs_cache_dir)
def is_subsequence(smaller, bigger):
smaller_len = len(smaller)
bigger_len = len(bigger)
if smaller_len > bigger_len:
return False
try:
start_pos = bigger.index(smaller[0])
except ValueError:
return False
end_pos = start_pos+smaller_len
if end_pos > bigger_len:
return False
return smaller == bigger[start_pos:end_pos]
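# Note: is_subsequence only tests the window anchored at the *first* occurrence of
# smaller[0] in bigger, so e.g. is_subsequence([1, 2], [1, 3, 1, 2]) returns False
# even though [1, 2] appears later; for the stop-ID sequences compared here that
# appears to be acceptable.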
def save_routes_for_agency(agency: config.Agency, save_to_s3=True):
agency_id = agency.id
gtfs_cache_dir = f'{util.get_data_dir()}/gtfs-{agency_id}'
download_gtfs_data(agency, gtfs_cache_dir)
feed = ptg.load_geo_feed(gtfs_cache_dir, {})
print(f"Loading {agency_id} routes...")
routes_df = feed.routes
if agency.gtfs_agency_id is not None:
routes_df = routes_df[routes_df.agency_id == agency.gtfs_agency_id]
routes_data = []
print(f"Loading {agency_id} trips...")
trips_df = feed.trips
trips_df['direction_id'] = trips_df['direction_id'].astype(str)
print(f"Loading {agency_id} stop times...")
stop_times_df = feed.stop_times
print(f"Loading {agency_id} shapes...")
shapes_df = feed.shapes
print(f"Loading {agency_id} stops...")
stops_df = feed.stops
# gtfs_stop_ids_map allows looking up row from stops.txt via GTFS stop_id
gtfs_stop_ids_map = {stop.stop_id: stop for stop in stops_df.itertuples()}
stop_id_gtfs_field = agency.stop_id_gtfs_field
# get OpenTransit stop ID for GTFS stop_id (may be the same)
def normalize_gtfs_stop_id(gtfs_stop_id):
if stop_id_gtfs_field != 'stop_id':
return getattr(gtfs_stop_ids_map[gtfs_stop_id], stop_id_gtfs_field)
else:
return gtfs_stop_id
# stops_map allows looking up row from stops.txt via OpenTransit stop ID
if stop_id_gtfs_field != 'stop_id':
stops_map = {getattr(stop, stop_id_gtfs_field): stop for stop in stops_df.itertuples()}
else:
stops_map = gtfs_stop_ids_map
if agency.provider == 'nextbus':
nextbus_route_order = [route.id for route in nextbus.get_route_list(agency.nextbus_id)]
for route in routes_df.itertuples():
gtfs_route_id = route.route_id
short_name = route.route_short_name
long_name = route.route_long_name
if isinstance(short_name, str) and isinstance(long_name, str):
title = f'{short_name} - {long_name}'
elif isinstance(short_name, str):
title = short_name
else:
title = long_name
type = int(route.route_type) if hasattr(route, 'route_type') else None
url = route.route_url if hasattr(route, 'route_url') and isinstance(route.route_url, str) else None
#color = route.route_color
#text_color = route.route_text_color
route_id = getattr(route, agency.route_id_gtfs_field)
if agency.provider == 'nextbus':
route_id = route_id.replace('-', '_') # hack to handle muni route IDs where e.g. GTFS has "T-OWL" but nextbus has "T_OWL"
try:
nextbus_route_config = nextbus.get_route_config(agency.nextbus_id, route_id)
title = nextbus_route_config.title
except Exception as ex:
print(ex)
continue
try:
sort_order = nextbus_route_order.index(route_id)
except ValueError as ex:
print(ex)
sort_order = None
else:
sort_order = int(route.route_sort_order) if hasattr(route, 'route_sort_order') else None
print(f'route {route_id} {title}')
route_data = {
'id': route_id,
'title': title,
'url': url,
'type': type,
#'color': color,
#'text_color': text_color,
'gtfs_route_id': gtfs_route_id,
'sort_order': sort_order,
'stops': {},
'directions': [],
}
directions = []
route_directions_df = feed.get('route_directions.txt') # unofficial trimet gtfs extension
if not route_directions_df.empty:
route_directions_df = route_directions_df[route_directions_df['route_id'] == gtfs_route_id]
else:
route_directions_df = None
routes_data.append(route_data)
route_trips_df = trips_df[trips_df['route_id'] == gtfs_route_id]
route_direction_id_values = route_trips_df['direction_id'].values
def add_custom_direction(custom_direction_info):
direction_id = custom_direction_info['id']
print(f' custom direction = {direction_id}')
gtfs_direction_id = custom_direction_info['gtfs_direction_id']
direction_trips_df = route_trips_df[route_direction_id_values == gtfs_direction_id]
included_stop_ids = custom_direction_info.get('included_stop_ids', [])
excluded_stop_ids = custom_direction_info.get('excluded_stop_ids', [])
shapes = get_unique_shapes(
direction_trips_df=direction_trips_df,
stop_times_df=stop_times_df,
stops_map=stops_map,
normalize_gtfs_stop_id=normalize_gtfs_stop_id
)
def contains_included_stops(shape_stop_ids):
min_index = 0
for stop_id in included_stop_ids:
try:
index = shape_stop_ids.index(stop_id, min_index)
except ValueError:
return False
min_index = index + 1 # stops must appear in same order as in included_stop_ids
return True
def contains_excluded_stop(shape_stop_ids):
for stop_id in excluded_stop_ids:
try:
index = shape_stop_ids.index(stop_id)
return True
except ValueError:
pass
return False
matching_shapes = []
for shape in shapes:
shape_stop_ids = shape['stop_ids']
if contains_included_stops(shape_stop_ids) and not contains_excluded_stop(shape_stop_ids):
matching_shapes.append(shape)
if len(matching_shapes) != 1:
matching_shape_ids = [shape['shape_id'] for shape in matching_shapes]
error_message = f'{len(matching_shapes)} shapes found for route {route_id} with GTFS direction ID {gtfs_direction_id}'
if len(included_stop_ids) > 0:
error_message += f" including {','.join(included_stop_ids)}"
if len(excluded_stop_ids) > 0:
error_message += f" excluding {','.join(excluded_stop_ids)}"
if len(matching_shape_ids) > 0:
error_message += f": {','.join(matching_shape_ids)}"
raise Exception(error_message)
matching_shape = matching_shapes[0]
matching_shape_id = matching_shape['shape_id']
matching_shape_count = matching_shape['count']
print(f' matching shape = {matching_shape_id} ({matching_shape_count} times)')
add_direction(
id=direction_id,
gtfs_shape_id=matching_shape_id,
gtfs_direction_id=gtfs_direction_id,
stop_ids=matching_shape['stop_ids'],
title=custom_direction_info.get('title', None)
)
def add_default_direction(direction_id):
print(f' default direction = {direction_id}')
direction_trips_df = route_trips_df[route_direction_id_values == direction_id]
shapes = get_unique_shapes(
direction_trips_df=direction_trips_df,
stop_times_df=stop_times_df,
stops_map=stops_map,
normalize_gtfs_stop_id=normalize_gtfs_stop_id)
best_shape = shapes[0]
best_shape_id = best_shape['shape_id']
best_shape_count = best_shape['count']
print(f' most common shape = {best_shape_id} ({best_shape_count} times)')
add_direction(
id=direction_id,
gtfs_shape_id=best_shape_id,
gtfs_direction_id=direction_id,
stop_ids=best_shape['stop_ids']
)
def add_direction(id, gtfs_shape_id, gtfs_direction_id, stop_ids, title = None):
if title is None:
default_direction_info = agency.default_directions.get(gtfs_direction_id, {})
title_prefix = default_direction_info.get('title_prefix', None)
last_stop_id = stop_ids[-1]
last_stop = stops_map[last_stop_id]
if title_prefix is not None:
title = f"{title_prefix} to {last_stop.stop_name}"
else:
title = f"To {last_stop.stop_name}"
print(f' title = {title}')
dir_data = {
'id': id,
'title': title,
'gtfs_shape_id': gtfs_shape_id,
'gtfs_direction_id': gtfs_direction_id,
'stops': stop_ids,
'stop_geometry': {},
}
route_data['directions'].append(dir_data)
for stop_id in stop_ids:
stop = stops_map[stop_id]
stop_data = {
'id': stop_id,
'lat': round(stop.geometry.y, 5), # stop_lat in gtfs
'lon': round(stop.geometry.x, 5), # stop_lon in gtfs
'title': stop.stop_name,
'url': stop.stop_url if hasattr(stop, 'stop_url') and isinstance(stop.stop_url, str) else None,
}
route_data['stops'][stop_id] = stop_data
geometry = shapes_df[shapes_df['shape_id'] == gtfs_shape_id]['geometry'].values[0]
# partridge returns GTFS geometries for each shape_id as a shapely LineString
# (https://shapely.readthedocs.io/en/stable/manual.html#linestrings).
# Each coordinate is an array in [lon,lat] format (note: longitude first, latitude second)
dir_data['coords'] = [
{
'lat': round(coord[1], 5),
'lon': round(coord[0], 5)
} for coord in geometry.coords
]
if agency.provider == 'nextbus':
# match nextbus direction IDs with GTFS direction IDs
best_nextbus_dir_info, best_terminal_dist = match_nextbus_direction(nextbus_route_config, geometry)
print(f' {direction_id} = {best_nextbus_dir_info.id} (terminal_dist={int(best_terminal_dist)}) {" (questionable match)" if best_terminal_dist > 300 else ""}')
# dir_data['title'] = best_nextbus_dir_info.title
dir_data['nextbus_direction_id'] = best_nextbus_dir_info.id
start_lat = geometry.coords[0][1]
start_lon = geometry.coords[0][0]
#print(f" start_lat = {start_lat} start_lon = {start_lon}")
deg_lat_dist = util.haver_distance(start_lat, start_lon, start_lat-0.1, start_lon)*10
deg_lon_dist = util.haver_distance(start_lat, start_lon, start_lat, start_lon-0.1)*10
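            # haver_distance over a 0.1-degree baseline, scaled by 10, approximates
            # meters per degree of latitude/longitude near the route's start point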
# projection function from lon/lat coordinates in degrees (z ignored) to x/y coordinates in meters.
# satisfying the interface of shapely.ops.transform (https://shapely.readthedocs.io/en/stable/manual.html#shapely.ops.transform).
# This makes it possible to use shapely methods to calculate the distance in meters between geometries
def project_xy(lon, lat, z=None):
return (round((lon - start_lon) * deg_lon_dist, 1), round((lat - start_lat) * deg_lat_dist, 1))
xy_geometry = shapely.ops.transform(project_xy, geometry)
shape_lon_lat = np.array(geometry).T
shape_lon = shape_lon_lat[0]
shape_lat = shape_lon_lat[1]
shape_prev_lon = np.r_[shape_lon[0], shape_lon[:-1]]
shape_prev_lat = np.r_[shape_lat[0], shape_lat[:-1]]
# shape_cumulative_dist[i] is the cumulative distance in meters along the shape geometry from 0th to ith coordinate
shape_cumulative_dist = np.cumsum(util.haver_distance(shape_lon, shape_lat, shape_prev_lon, shape_prev_lat))
shape_lines_xy = [shapely.geometry.LineString(xy_geometry.coords[i:i+2]) for i in range(0, len(xy_geometry.coords) - 1)]
# this is the total distance of the GTFS shape, which may not be exactly the same as the
# distance along the route between the first and last Nextbus stop
dir_data['distance'] = int(shape_cumulative_dist[-1])
print(f" distance = {dir_data['distance']}")
# Find each stop along the route shape, so that the frontend can draw line segments between stops along the shape
start_index = 0
for stop_id in stop_ids:
stop_info = route_data['stops'][stop_id]
# Need to project lon/lat coords to x/y in order for shapely to determine the distance between
# a point and a line (shapely doesn't support distance for lon/lat coords)
stop_xy = shapely.geometry.Point(project_xy(stop_info['lon'], stop_info['lat']))
stop_geometry = get_stop_geometry(stop_xy, shape_lines_xy, shape_cumulative_dist, start_index)
if stop_geometry['offset'] > 100:
print(f" !! bad geometry for stop {stop_id}: {stop_geometry['offset']} m from route line segment")
continue
dir_data['stop_geometry'][stop_id] = stop_geometry
start_index = stop_geometry['after_index']
if route_id in agency.custom_directions:
for custom_direction_info in agency.custom_directions[route_id]:
add_custom_direction(custom_direction_info)
else:
for direction_id in np.unique(route_direction_id_values):
add_default_direction(direction_id)
if routes_data[0]['sort_order'] is not None:
sort_key = lambda route_data: route_data['sort_order']
else:
sort_key = lambda route_data: route_data['id']
routes_data = sorted(routes_data, key=sort_key)
data_str = json.dumps({
'version': routeconfig.DefaultVersion,
'routes': routes_data
}, separators=(',', ':'))
cache_path = routeconfig.get_cache_path(agency_id)
with open(cache_path, "w") as f:
f.write(data_str)
if save_to_s3:
s3 = boto3.resource('s3')
s3_path = routeconfig.get_s3_path(agency_id)
s3_bucket = config.s3_bucket
print(f'saving to s3://{s3_bucket}/{s3_path}')
object = s3.Object(s3_bucket, s3_path)
object.put(
Body=gzip.compress(bytes(data_str, 'utf-8')),
CacheControl='max-age=86400',
ContentType='application/json',
ContentEncoding='gzip',
ACL='public-read'
)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Save route configuration from GTFS and possibly Nextbus API')
parser.add_argument('--agency', required=False, help='Agency ID')
parser.add_argument('--s3', dest='s3', action='store_true', help='store in s3')
parser.set_defaults(s3=False)
args = parser.parse_args()
agencies = [config.get_agency(args.agency)] if args.agency is not None else config.agencies
for agency in agencies:
save_routes_for_agency(agency, args.s3)
|
[
"zipfile.ZipFile",
"models.nextbus.get_route_list",
"shapely.geometry.Point",
"numpy.argsort",
"numpy.array",
"partridge.load_geo_feed",
"argparse.ArgumentParser",
"pathlib.Path",
"json.dumps",
"boto3.resource",
"models.nextbus.get_route_config",
"models.config.get_agency",
"shapely.ops.transform",
"requests.get",
"shapely.geometry.LineString",
"models.util.get_data_dir",
"numpy.unique",
"models.routeconfig.get_s3_path",
"models.util.haver_distance",
"models.routeconfig.get_cache_path"
] |
[((2964, 2990), 'numpy.argsort', 'np.argsort', (['terminal_dists'], {}), '(terminal_dists)\n', (2974, 2990), True, 'import numpy as np\n'), ((4156, 4216), 'shapely.geometry.Point', 'shapely.geometry.Point', (['shape_lines_xy[best_index].coords[0]'], {}), '(shape_lines_xy[best_index].coords[0])\n', (4178, 4216), False, 'import shapely\n'), ((5580, 5636), 'numpy.unique', 'np.unique', (['direction_shape_id_values'], {'return_counts': '(True)'}), '(direction_shape_id_values, return_counts=True)\n', (5589, 5636), True, 'import numpy as np\n'), ((5668, 5710), 'numpy.argsort', 'np.argsort', (['(-1 * direction_shape_id_counts)'], {}), '(-1 * direction_shape_id_counts)\n', (5678, 5710), True, 'import numpy as np\n'), ((8532, 8552), 'pathlib.Path', 'Path', (['gtfs_cache_dir'], {}), '(gtfs_cache_dir)\n', (8536, 8552), False, 'from pathlib import Path\n'), ((9646, 9683), 'partridge.load_geo_feed', 'ptg.load_geo_feed', (['gtfs_cache_dir', '{}'], {}), '(gtfs_cache_dir, {})\n', (9663, 9683), True, 'import partridge as ptg\n'), ((23533, 23634), 'json.dumps', 'json.dumps', (["{'version': routeconfig.DefaultVersion, 'routes': routes_data}"], {'separators': "(',', ':')"}), "({'version': routeconfig.DefaultVersion, 'routes': routes_data},\n separators=(',', ':'))\n", (23543, 23634), False, 'import json\n'), ((23671, 23708), 'models.routeconfig.get_cache_path', 'routeconfig.get_cache_path', (['agency_id'], {}), '(agency_id)\n', (23697, 23708), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((24300, 24403), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Save route configuration from GTFS and possibly Nextbus API"""'}), "(description=\n 'Save route configuration from GTFS and possibly Nextbus API')\n", (24323, 24403), False, 'import argparse\n'), ((2644, 2742), 'models.util.haver_distance', 'util.haver_distance', (['first_stop_info.lat', 'first_stop_info.lon', 'shape_start[1]', 'shape_start[0]'], {}), '(first_stop_info.lat, first_stop_info.lon, shape_start[1\n ], shape_start[0])\n', (2663, 2742), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((2757, 2848), 'models.util.haver_distance', 'util.haver_distance', (['last_stop_info.lat', 'last_stop_info.lon', 'shape_end[1]', 'shape_end[0]'], {}), '(last_stop_info.lat, last_stop_info.lon, shape_end[1],\n shape_end[0])\n', (2776, 2848), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((8652, 8674), 'requests.get', 'requests.get', (['gtfs_url'], {}), '(gtfs_url)\n', (8664, 8674), False, 'import requests\n'), ((23806, 23826), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (23820, 23826), False, 'import boto3\n'), ((23845, 23879), 'models.routeconfig.get_s3_path', 'routeconfig.get_s3_path', (['agency_id'], {}), '(agency_id)\n', (23868, 23879), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((8951, 8981), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path', '"""r"""'], {}), "(zip_path, 'r')\n", (8966, 8981), False, 'import zipfile\n'), ((9547, 9566), 'models.util.get_data_dir', 'util.get_data_dir', ([], {}), '()\n', (9564, 9566), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((20973, 21016), 'shapely.ops.transform', 'shapely.ops.transform', (['project_xy', 'geometry'], {}), '(project_xy, geometry)\n', (20994, 21016), False, 'import shapely\n'), ((23196, 23232), 'numpy.unique', 'np.unique', (['route_direction_id_values'], {}), '(route_direction_id_values)\n', (23205, 23232), True, 'import numpy 
as np\n'), ((24636, 24666), 'models.config.get_agency', 'config.get_agency', (['args.agency'], {}), '(args.agency)\n', (24653, 24666), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((8822, 8841), 'models.util.get_data_dir', 'util.get_data_dir', ([], {}), '()\n', (8839, 8841), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((11107, 11148), 'models.nextbus.get_route_list', 'nextbus.get_route_list', (['agency.nextbus_id'], {}), '(agency.nextbus_id)\n', (11129, 11148), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((12121, 12174), 'models.nextbus.get_route_config', 'nextbus.get_route_config', (['agency.nextbus_id', 'route_id'], {}), '(agency.nextbus_id, route_id)\n', (12145, 12174), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((20249, 20318), 'models.util.haver_distance', 'util.haver_distance', (['start_lat', 'start_lon', '(start_lat - 0.1)', 'start_lon'], {}), '(start_lat, start_lon, start_lat - 0.1, start_lon)\n', (20268, 20318), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((20347, 20416), 'models.util.haver_distance', 'util.haver_distance', (['start_lat', 'start_lon', 'start_lat', '(start_lon - 0.1)'], {}), '(start_lat, start_lon, start_lat, start_lon - 0.1)\n', (20366, 20416), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((21046, 21064), 'numpy.array', 'np.array', (['geometry'], {}), '(geometry)\n', (21054, 21064), True, 'import numpy as np\n'), ((21455, 21528), 'models.util.haver_distance', 'util.haver_distance', (['shape_lon', 'shape_lat', 'shape_prev_lon', 'shape_prev_lat'], {}), '(shape_lon, shape_lat, shape_prev_lon, shape_prev_lat)\n', (21474, 21528), False, 'from models import gtfs, config, util, nextbus, routeconfig\n'), ((21561, 21617), 'shapely.geometry.LineString', 'shapely.geometry.LineString', (['xy_geometry.coords[i:i + 2]'], {}), '(xy_geometry.coords[i:i + 2])\n', (21588, 21617), False, 'import shapely\n'), ((6414, 6445), 'json.dumps', 'json.dumps', (['shape_trip_stop_ids'], {}), '(shape_trip_stop_ids)\n', (6424, 6445), False, 'import json\n')]
|
"""AVLetters lip dataset.
The original dataset is available from
http://www.ee.surrey.ac.uk/Projects/LILiR/datasets/avletters1/index.html
This dataset consists of three repetitions by each of 10 talkers,
five male (two with moustaches) and five female,
of the isolated letters A-Z, a total of 780 utterances
References
----------
<NAME>, T.Cootes, <NAME>, <NAME>, and <NAME>.
Extraction of visual features for lipreading.
IEEE Trans. on Pattern Analysis and Machine Vision,
vol. 24, no. 2, pp. 198-213, 2002.
"""
# License: BSD 3 clause
import numpy as np
from string import ascii_uppercase
import random
from os import listdir
from os.path import dirname, exists, isfile, join
from scipy.io import loadmat
folderpath = join(dirname(__file__), './avletters/Lips/')
def fetch_avletters_averaged():
"""Load the AVLetters dataset with averaged frames
    ================ =======================
    Classes          26
    Samples total    780
    Dimensionality   (12, 60, 80)
    Features         real, between 0 and 255
    ================ =======================
Returns
-------
(lip_videos, label) : tuple
lip_videos : ndarray of shape (780, 12, 60, 80)
The lip videos with averaged frames.
Each video consists of 12 60x80 image frames.
persons : ndarray of shape (780,)
The persons corresponding to the lip videos.
label : ndarray of shape (780,)
Labels corresponding to the lip videos.
            The labels range from 0-25 and
            correspond to the letters (A-Z) spoken in the lip video.
"""
if not (exists(folderpath)):
raise IOError("Data not found")
lip_paths = []
for f in listdir(folderpath):
if isfile(join(folderpath, f)) and f.endswith('.mat'):
lip_paths.append(f)
n_samples = 780
n_frames = 12
n_rows = 60
n_columns = 80
people = ['Anya', 'Bill', 'Faye', 'John', 'Kate', 'Nicola', 'Stephen',
'Steve', 'Verity', 'Yi']
lip_videos = np.empty(shape=(n_samples, n_frames, n_rows, n_columns), dtype=float)
persons = np.zeros(shape=(n_samples,), dtype='<U8')
label = np.empty(shape=(n_samples,), dtype=int)
# Save all lip videos in the preferred form
for i, lip_path in enumerate(lip_paths):
# Load the lip video
lip_mat = loadmat(folderpath + lip_path)
n_frames_curr = int(lip_mat['siz'][0,2])
lip_video = lip_mat['vid'].reshape(n_columns, n_rows, n_frames_curr)
lip_video = lip_video.transpose(2, 1, 0)
# Average the video frames over a window of size
# `n_frames_curr - n_frames + 1` so that the new video
# has `n_frames` frames.
window_size = n_frames_curr - n_frames + 1
for j in range(n_frames):
lip_videos[i, j] = lip_video[j:j+window_size].mean(axis=0)
for p in people:
if p in lip_path:
persons[i] = p
label[i] = ord(lip_path[0]) - ord('A')
return (lip_videos, persons, label)
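# Usage sketch (requires the AVLetters .mat files under ./avletters/Lips/):
#   videos, persons, labels = fetch_avletters_averaged()
#   videos.shape == (780, 12, 60, 80); labels run from 0 (A) to 25 (Z)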
|
[
"os.path.exists",
"os.listdir",
"scipy.io.loadmat",
"os.path.join",
"os.path.dirname",
"numpy.zeros",
"numpy.empty"
] |
[((736, 753), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (743, 753), False, 'from os.path import dirname, exists, isfile, join\n'), ((1790, 1809), 'os.listdir', 'listdir', (['folderpath'], {}), '(folderpath)\n', (1797, 1809), False, 'from os import listdir\n'), ((2113, 2182), 'numpy.empty', 'np.empty', ([], {'shape': '(n_samples, n_frames, n_rows, n_columns)', 'dtype': 'float'}), '(shape=(n_samples, n_frames, n_rows, n_columns), dtype=float)\n', (2121, 2182), True, 'import numpy as np\n'), ((2197, 2238), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples,)', 'dtype': '"""<U8"""'}), "(shape=(n_samples,), dtype='<U8')\n", (2205, 2238), True, 'import numpy as np\n'), ((2251, 2290), 'numpy.empty', 'np.empty', ([], {'shape': '(n_samples,)', 'dtype': 'int'}), '(shape=(n_samples,), dtype=int)\n', (2259, 2290), True, 'import numpy as np\n'), ((1696, 1714), 'os.path.exists', 'exists', (['folderpath'], {}), '(folderpath)\n', (1702, 1714), False, 'from os.path import dirname, exists, isfile, join\n'), ((2432, 2462), 'scipy.io.loadmat', 'loadmat', (['(folderpath + lip_path)'], {}), '(folderpath + lip_path)\n', (2439, 2462), False, 'from scipy.io import loadmat\n'), ((1829, 1848), 'os.path.join', 'join', (['folderpath', 'f'], {}), '(folderpath, f)\n', (1833, 1848), False, 'from os.path import dirname, exists, isfile, join\n')]
|
import cv2
import numpy as np
img = cv2.imread('../Resources/Photos/park.jpg')
b,g,r = cv2.split(img)
# cv2.imshow('Blue',b)
# cv2.imshow('Green',g)
# cv2.imshow('Red',r)
blank = np.zeros(img.shape[:2],dtype='uint8')
blue = cv2.merge([b,blank,blank])
green = cv2.merge([blank,g,blank])
red = cv2.merge([blank,blank,r])
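# Merging one channel with two all-zero channels yields a 3-channel image that is
# black except in that channel, so blue/green/red above display each channel in
# its own color rather than as a grayscale intensity map.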
# cv2.imshow('Blue',blue)
# cv2.imshow('Green',green)
# cv2.imshow('Red',red)
# print(f'img -> {img.shape}, b->{b.shape}, g->{g.shape}, r-> {r.shape}')
merged = cv2.merge([b,g,r])
cv2.imshow('Merged',merged)
cv2.waitKey(0)
|
[
"cv2.merge",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.split",
"cv2.imread"
] |
[((37, 79), 'cv2.imread', 'cv2.imread', (['"""../Resources/Photos/park.jpg"""'], {}), "('../Resources/Photos/park.jpg')\n", (47, 79), False, 'import cv2\n'), ((89, 103), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (98, 103), False, 'import cv2\n'), ((182, 220), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': '"""uint8"""'}), "(img.shape[:2], dtype='uint8')\n", (190, 220), True, 'import numpy as np\n'), ((227, 255), 'cv2.merge', 'cv2.merge', (['[b, blank, blank]'], {}), '([b, blank, blank])\n', (236, 255), False, 'import cv2\n'), ((262, 290), 'cv2.merge', 'cv2.merge', (['[blank, g, blank]'], {}), '([blank, g, blank])\n', (271, 290), False, 'import cv2\n'), ((295, 323), 'cv2.merge', 'cv2.merge', (['[blank, blank, r]'], {}), '([blank, blank, r])\n', (304, 323), False, 'import cv2\n'), ((484, 504), 'cv2.merge', 'cv2.merge', (['[b, g, r]'], {}), '([b, g, r])\n', (493, 504), False, 'import cv2\n'), ((503, 531), 'cv2.imshow', 'cv2.imshow', (['"""Merged"""', 'merged'], {}), "('Merged', merged)\n", (513, 531), False, 'import cv2\n'), ((531, 545), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (542, 545), False, 'import cv2\n')]
|
# encoding: utf-8
"""
@author: ccj
@contact:
"""
import numpy as np
from typing import List, Dict, Tuple, Any
import torch
import torch.nn.functional as F
def crop_white(image: np.ndarray, value: int = 255) -> np.ndarray:
"""
Crop white border from image
:param image: Type: np.ndarray, image to be processed
:param value: Type: int, default value is 255
:return:
Cropped image after removing the white border
"""
assert (image.shape[2] == 3), "image shape should be [W, H, 3]"
assert (image.dtype == np.uint8), "image type should be np.uint8"
ys, = (image.min((1, 2)) < value).nonzero()
xs, = (image.min(0).min(1) < value).nonzero()
if len(xs) == 0 or len(ys) == 0:
return image
return image[ys.min(): ys.max()+1, xs.min(): xs.max()+1]
def get_tiles(image: np.ndarray, tile_size: int = 256, n_tiles: int = 36,
              mode: int = 0) -> Tuple[List[Dict[str, Any]], bool]:
"""
Crop big image to multiple pieces of small patches
:param image: Type np.ndarray, image to be cropped
:param tile_size: Type int, size of small patches
:param n_tiles: Type int, number of small patches
:param mode: Type int, pad type for cropping
:return:
dict includes small pacthes and its responding index, bool flag indicates if the
image can get enough small patches
"""
result = []
h, w, c = image.shape
pad_h = (tile_size - h % tile_size) % tile_size + ((tile_size * mode) // 2)
pad_w = (tile_size - w % tile_size) % tile_size + ((tile_size * mode) // 2)
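    # pad so both dimensions become multiples of tile_size; mode shifts the tiling
    # grid by mode*tile_size//2 pixels in total. Note that odd mode values leave the
    # padded size indivisible by tile_size, so the reshape below would fail; the
    # callers in this file use mode=0.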
img2 = np.pad(image, [[pad_h // 2, pad_h - pad_h // 2], [pad_w // 2, pad_w - pad_w//2], [0, 0]], constant_values=255)
img3 = img2.reshape(
img2.shape[0] // tile_size,
tile_size,
img2.shape[1] // tile_size,
tile_size,
3
)
img3 = img3.transpose(0, 2, 1, 3, 4).reshape(-1, tile_size, tile_size,3)
n_tiles_with_info = (img3.reshape(img3.shape[0], -1).sum(1) < tile_size ** 2 * 3 * 255).sum()
if len(img3) < n_tiles:
img3 = np.pad(img3, [[0, n_tiles-len(img3)], [0, 0], [0, 0], [0, 0]], constant_values=255)
idxs = np.argsort(img3.reshape(img3.shape[0], -1).sum(-1))[:n_tiles]
img3 = img3[idxs]
for i in range(len(img3)):
result.append({'img': img3[i], 'idx': i})
return result, n_tiles_with_info >= n_tiles
def glue_to_one_picture(image: np.ndarray, tile_size: int = 256, n_tiles: int = 36,
random_idx: bool = True) -> np.ndarray:
"""
reorganize the distribution of images
:param image: Type: np.ndarray, image to be processed
:param tile_size: Type int, size of small patches
:param n_tiles: Type int, number of small patches
:param random_idx: Type bool, determine if the small patches are randomly organized
:return:
a image that is generated by reorganizing cropped patches from original image
"""
tiles, _ = get_tiles(image, tile_size=tile_size, n_tiles=n_tiles, mode=0)
if random_idx:
patch_idxes = np.random.choice(list(range(n_tiles)), n_tiles, replace=False)
else:
patch_idxes = list(range(n_tiles))
n_row_tiles = int(np.sqrt(n_tiles))
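    # assumes n_tiles is a perfect square (e.g. 36 -> a 6x6 grid)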
images = np.zeros((tile_size * n_row_tiles, tile_size * n_row_tiles, 3)).astype(np.uint8)
index = 0
for h in range(n_row_tiles):
for w in range(n_row_tiles):
if len(tiles) > patch_idxes[index]:
this_img = tiles[patch_idxes[index]]["img"]
index = index + 1
else:
this_img = np.zeros((tile_size, tile_size, 3)).astype(np.uint8)
images[h * tile_size:(h + 1) * tile_size, w * tile_size:(w + 1) * tile_size, :] = this_img
return images
def cutmix(batch: Dict[str, Any], hparams: Dict[str, Any]) -> Dict[str, Any]:
"""
Apply cutmix transform for one batch of images
    :param batch: Type: dict, batch of dataset (default: {"image": image, "target": target})
:param hparams: Type: config, config file
:return:
batch of dataset
"""
image, target = batch["image"], batch["target"]
batch_size = image.shape[0]
img_h, img_w = image.shape[2:]
imgs, labs = [], []
for j in range(batch_size):
p = np.random.uniform(0., 1.)
if p >= hparams["cutmix_prob"]:
idx = int(np.random.uniform(0, batch_size))
            # choose the cut centre (x, y) and mixing ratio b (drawn uniformly here rather than from a Beta distribution)
x = np.random.uniform(0, img_w)
y = np.random.uniform(0, img_h)
b = np.random.uniform(0., 1.)
w = img_w * np.sqrt(1 - b)
h = img_h * np.sqrt(1 - b)
x0 = int(np.round(max(0, x - w/2)))
x1 = int(np.round(min(img_w, x + w/2)))
y0 = int(np.round(max(0, y - h/2)))
y1 = int(np.round(min(img_h, y + h/2)))
one = image[j, :, y0:y1, 0:x0]
two = image[idx, :, y0:y1, x0:x1]
three = image[j, :, y0:y1, x1: img_w]
middle = torch.cat((one, two, three), dim=2)
img = torch.cat((image[j, :, 0:y0, :], middle, image[j, :, y1:img_h, :]), dim=1)
imgs.append(img)
a = w * h / img_w / img_h
if len(target.shape) < 2:
                if hparams["ohe_mode"]:
                    lab1 = F.one_hot(target[j], num_classes=hparams["num_class"])
                    lab2 = F.one_hot(target[idx], num_classes=hparams["num_class"])
else:
lab1 = target[j]
lab2 = target[idx]
else:
lab1 = target[j, :]
lab2 = target[idx, :]
labs.append((1 - a) * lab1 + a * lab2)
else:
imgs.append(image[j, :, :, :])
if len(target.shape) < 2:
                if hparams["ohe_mode"]:
                    labs.append(F.one_hot(target[j], num_classes=hparams["num_class"]).float())
else:
labs.append(target[j])
else:
labs.append(target[j, :])
image2 = torch.stack(imgs)
label2 = torch.stack(labs)
return {
"image": image2,
"target": label2,
}
def mixup(batch: Dict[str, Any], hparams: Dict[str, Any]) -> Dict[str, Any]:
"""
Apply mixup transform for one batch of images
    :param batch: Type: dict, batch of dataset (default: {"image": image, "target": target})
:param hparams: Type: config, config file
:return:
batch of dataset
"""
image, target = batch["image"], batch["target"]
batch_size = image.shape[0]
imgs, labs = [], []
for j in range(batch_size):
p = np.random.uniform(0., 1.)
if p >= hparams["mixup_prob"]:
idx = int(np.random.uniform(0, batch_size))
            # choose mixing ratio b (uniform draw here, in place of a Beta draw)
b = np.random.uniform(0., 1.)
img = (1 - b) * image[j, :, :, :] + b * image[idx, :, :, :]
imgs.append(img)
if len(target.shape) < 2:
                if hparams["ohe_mode"]:
                    lab1 = F.one_hot(target[j], num_classes=hparams["num_class"])
                    lab2 = F.one_hot(target[idx], num_classes=hparams["num_class"])
else:
lab1 = target[j]
lab2 = target[idx]
else:
lab1 = target[j, :]
lab2 = target[idx, :]
labs.append((1 - b) * lab1 + b * lab2)
else:
imgs.append(image[j, :, :, :])
if len(target.shape) < 2:
                if hparams["ohe_mode"]:
                    labs.append(F.one_hot(target[j], num_classes=hparams["num_class"]).float())
else:
labs.append(target[j])
else:
labs.append(target[j, :])
image2 = torch.stack(imgs)
label2 = torch.stack(labs)
return {
"image": image2,
"target": label2,
}
class MixCollator:
def __init__(self, hparams: Dict[str, Any]):
super(MixCollator, self).__init__()
self.hparams = hparams
def __call__(self, batch: List[Dict[str, Any]]) -> Dict[str, Any]:
batch = torch.utils.data.dataloader.default_collate(batch)
if self.hparams["mix_aug"]["cutmix"]:
batch = cutmix(batch, self.hparams)
if self.hparams["mix_aug"]["mixup"]:
batch = mixup(batch, self.hparams)
return batch
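# A minimal, self-contained smoke test for MixCollator, added for illustration.
# The hparams dict below is a guess at the expected schema, inferred from the
# lookups performed above; it is not taken from the original config.
if __name__ == "__main__":
    hparams = {
        "cutmix_prob": 0.5,
        "mixup_prob": 0.5,
        "mix_aug": {"cutmix": True, "mixup": False},
    }
    # 8 fake samples with 3x32x32 "images" and 5-dim soft "targets"
    samples = [{"image": torch.rand(3, 32, 32), "target": torch.rand(5)}
               for _ in range(8)]
    batch = MixCollator(hparams)(samples)
    print(batch["image"].shape, batch["target"].shape)  # (8, 3, 32, 32) and (8, 5)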
|
[
"torch.utils.data.dataloader.default_collate",
"numpy.sqrt",
"torch.stack",
"numpy.zeros",
"torch.nn.functional.one_hot",
"numpy.random.uniform",
"numpy.pad",
"torch.cat"
] |
[((1593, 1709), 'numpy.pad', 'np.pad', (['image', '[[pad_h // 2, pad_h - pad_h // 2], [pad_w // 2, pad_w - pad_w // 2], [0, 0]]'], {'constant_values': '(255)'}), '(image, [[pad_h // 2, pad_h - pad_h // 2], [pad_w // 2, pad_w - pad_w //\n 2], [0, 0]], constant_values=255)\n', (1599, 1709), True, 'import numpy as np\n'), ((6057, 6074), 'torch.stack', 'torch.stack', (['imgs'], {}), '(imgs)\n', (6068, 6074), False, 'import torch\n'), ((6088, 6105), 'torch.stack', 'torch.stack', (['labs'], {}), '(labs)\n', (6099, 6105), False, 'import torch\n'), ((7798, 7815), 'torch.stack', 'torch.stack', (['imgs'], {}), '(imgs)\n', (7809, 7815), False, 'import torch\n'), ((7829, 7846), 'torch.stack', 'torch.stack', (['labs'], {}), '(labs)\n', (7840, 7846), False, 'import torch\n'), ((3206, 3222), 'numpy.sqrt', 'np.sqrt', (['n_tiles'], {}), '(n_tiles)\n', (3213, 3222), True, 'import numpy as np\n'), ((4285, 4312), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (4302, 4312), True, 'import numpy as np\n'), ((6660, 6687), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6677, 6687), True, 'import numpy as np\n'), ((8150, 8200), 'torch.utils.data.dataloader.default_collate', 'torch.utils.data.dataloader.default_collate', (['batch'], {}), '(batch)\n', (8193, 8200), False, 'import torch\n'), ((3237, 3300), 'numpy.zeros', 'np.zeros', (['(tile_size * n_row_tiles, tile_size * n_row_tiles, 3)'], {}), '((tile_size * n_row_tiles, tile_size * n_row_tiles, 3))\n', (3245, 3300), True, 'import numpy as np\n'), ((4463, 4490), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'img_w'], {}), '(0, img_w)\n', (4480, 4490), True, 'import numpy as np\n'), ((4507, 4534), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'img_h'], {}), '(0, img_h)\n', (4524, 4534), True, 'import numpy as np\n'), ((4551, 4578), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (4568, 4578), True, 'import numpy as np\n'), ((5018, 5053), 'torch.cat', 'torch.cat', (['(one, two, three)'], {'dim': '(2)'}), '((one, two, three), dim=2)\n', (5027, 5053), False, 'import torch\n'), ((5072, 5146), 'torch.cat', 'torch.cat', (['(image[j, :, 0:y0, :], middle, image[j, :, y1:img_h, :])'], {'dim': '(1)'}), '((image[j, :, 0:y0, :], middle, image[j, :, y1:img_h, :]), dim=1)\n', (5081, 5146), False, 'import torch\n'), ((6828, 6855), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6845, 6855), True, 'import numpy as np\n'), ((4373, 4405), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (4390, 4405), True, 'import numpy as np\n'), ((4602, 4616), 'numpy.sqrt', 'np.sqrt', (['(1 - b)'], {}), '(1 - b)\n', (4609, 4616), True, 'import numpy as np\n'), ((4641, 4655), 'numpy.sqrt', 'np.sqrt', (['(1 - b)'], {}), '(1 - b)\n', (4648, 4655), True, 'import numpy as np\n'), ((6747, 6779), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (6764, 6779), True, 'import numpy as np\n'), ((5318, 5369), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[j]'], {'num_classes': 'hparams.num_class'}), '(target[j], num_classes=hparams.num_class)\n', (5327, 5369), True, 'import torch.nn.functional as F\n'), ((5397, 5450), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[idx]'], {'num_classes': 'hparams.num_class'}), '(target[idx], num_classes=hparams.num_class)\n', (5406, 5450), True, 'import torch.nn.functional as F\n'), ((7059, 7110), 
'torch.nn.functional.one_hot', 'F.one_hot', (['target[j]'], {'num_classes': 'hparams.num_class'}), '(target[j], num_classes=hparams.num_class)\n', (7068, 7110), True, 'import torch.nn.functional as F\n'), ((7138, 7191), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[idx]'], {'num_classes': 'hparams.num_class'}), '(target[idx], num_classes=hparams.num_class)\n', (7147, 7191), True, 'import torch.nn.functional as F\n'), ((3589, 3624), 'numpy.zeros', 'np.zeros', (['(tile_size, tile_size, 3)'], {}), '((tile_size, tile_size, 3))\n', (3597, 3624), True, 'import numpy as np\n'), ((5857, 5908), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[j]'], {'num_classes': 'hparams.num_class'}), '(target[j], num_classes=hparams.num_class)\n', (5866, 5908), True, 'import torch.nn.functional as F\n'), ((7598, 7649), 'torch.nn.functional.one_hot', 'F.one_hot', (['target[j]'], {'num_classes': 'hparams.num_class'}), '(target[j], num_classes=hparams.num_class)\n', (7607, 7649), True, 'import torch.nn.functional as F\n')]
|
import pickle
import numpy as np
import scipy.linalg as sci
from scipy import signal
# Rotations
def wrap2Pi(x):
xm = np.mod(x+np.pi,(2.0*np.pi))
return xm-np.pi
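# e.g. wrap2Pi(3*np.pi/2) == -np.pi/2: angles are mapped into [-pi, pi)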
def Rot(x):
return np.array([[np.cos(x),-np.sin(x)],[np.sin(x),np.cos(x)]])
def RotVec(x_vec, rot_vec):
rvec = np.array([np.dot(x_vec[i,:-1],Rot(rot_vec[i])) for i in range(x_vec.shape[0])])
return np.hstack([rvec, x_vec[:,2].reshape(x_vec.shape[0],1)])
# Rattling
def diffusion_vel(x, dt):
t_vec = np.expand_dims(np.sqrt(np.arange(1,x.shape[0]+1)*dt),axis=1)
vec = np.divide(x-x[0],t_vec)
return vec
def rattling(x, dt, noRat = False, diffVel=True):
if diffVel:
vec = diffusion_vel(x, dt)
else:
vec = np.copy(x)
C = np.cov(vec.T)
if noRat:
R = None
else:
if len(np.shape(C)) == 0:
R = 0.5*np.log(C)
else:
R = 0.5*np.log(np.linalg.det(C))
return R, C
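# Added doctest-style sketch (not part of the original module): the 2-D
# random walk below is a made-up input, shown only to illustrate the call
# signature and the R = 0.5*log(det(C)) output of rattling():
#   >>> dt = 0.01
#   >>> x = np.cumsum(np.sqrt(dt) * np.random.randn(1000, 2), axis=0)
#   >>> R, C = rattling(x, dt)   # C is the 2x2 covariance of the diffusion velocities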
def rattling_windows(mat, dt, window_sz, overlap,noRat=False,diffVel=True):
cov_list = []
rat_list = []
ind_list = window_inds(mat,window_sz,overlap)
for inds in ind_list:
R, C = rattling(mat[inds[0]:inds[1],:],dt,noRat, diffVel)
cov_list.append(C)
rat_list.append(R)
return rat_list, cov_list, ind_list
# Rectangular windowing
def window_inds(dataset, window_sz, overlap, offset=0):
"""
Helper function that applies a rectangular window to the dataset
given some overlap percentage, s.t. ov \in [0,1)
"""
data_len = dataset.shape[0]
assert window_sz < data_len
ind1 = offset
ind2 = offset+window_sz-1
ind_list = []
ov_ind_diff = int(np.ceil(np.abs(overlap*window_sz)))
if ov_ind_diff == window_sz:
ov_ind_diff += -1
while ind2 < data_len+offset:
ind_list.append((ind1,ind2))
ind1 += window_sz-ov_ind_diff
ind2 += window_sz-ov_ind_diff
return ind_list
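# Worked example (added for clarity; the values follow from the logic above):
#   >>> window_inds(np.zeros((10, 1)), window_sz=4, overlap=0.5)
#   [(0, 3), (2, 5), (4, 7), (6, 9)]
# i.e. 4-sample windows whose index ranges overlap by 50%.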
# Filtering
def moving_average(x,N):
return np.convolve(x,np.ones((N,))/float(N),mode='valid')
def butter_highpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)
return b, a
def butter_highpass_filter(data, cutoff, fs, order=5):
b, a = butter_highpass(cutoff, fs, order=order)
y = signal.filtfilt(b, a, data)
return y
def butter_bandstop(cutoffs, fs, order=5):
nyq = 0.5 * fs
normal_cutoffs = cutoffs / nyq
b, a = signal.butter(order, normal_cutoffs, btype='bandstop', analog=False)
return b, a
def butter_bandstop_filter(data, cutoffs, fs, order=5):
b, a = butter_bandstop(cutoffs, fs, order=order)
y = signal.filtfilt(b, a, data)
return y
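# Added usage sketch for the two filters above; fs and the cutoffs are
# arbitrary illustrative values, and x is any 1-D signal array:
#   >>> fs = 100.0                                            # sample rate in Hz
#   >>> y_hp = butter_highpass_filter(x, cutoff=1.0, fs=fs)
#   >>> y_bs = butter_bandstop_filter(x, cutoffs=np.array([4.0, 6.0]), fs=fs)
# Note that butter_bandstop expects a (low, high) pair as an array so that
# normal_cutoffs = cutoffs / nyq works element-wise.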
# Save/Load
def store_data(fname, rlist, clist, elist):
db = {}
db['rlist'] = rlist
db['clist'] = clist
db['elist'] = elist
dbfile = open(fname, 'ab')
pickle.dump(db, dbfile)
dbfile.close()
def load_data(fname):
    # binary mode is also required when reading
dbfile = open(fname, 'rb')
db = pickle.load(dbfile)
rlist = db['rlist']
clist = db['clist']
elist = db['elist']
dbfile.close()
return rlist, clist, elist
# Observable preprocessing
def preprocess(data):
"""
Here we have take in a dataset of smarticle coordinates of the following
shape: (SampleNum, SmarticleNum*3), where each of the 3 coordinates are
coords = [x,y,theta,L_arm_theta,R_arm_theta]. We output a 7 dimensional
vector of the following format: [<mx_1>,<mx_2>,<mx_3>,<my_1>,<my_2>,<my_3>,e_theta]
"""
# Take in (x,y,theta) of each smarticle
S1_coords = data[:,0:3]
S2_coords = data[:,3:6]
S3_coords = data[:,6:9]
#########################
# Rotational invariance #
#########################
# Get CoM from the frame of each smarticle
CoM = np.mean([S1_coords,S2_coords,S3_coords],axis=0)
CoM_S1 = CoM-S1_coords
CoM_S2 = CoM-S2_coords
CoM_S3 = CoM-S3_coords
# Wrap angles
CoM_S1[:,2] = wrap2Pi(CoM_S1[:,2])
CoM_S2[:,2] = wrap2Pi(CoM_S2[:,2])
CoM_S3[:,2] = wrap2Pi(CoM_S3[:,2])
# Rotate coordinates so they're relative to the previous timestep
relCoM_S1 = RotVec(CoM_S1, S1_coords[:,2])
relCoM_S2 = RotVec(CoM_S2, S2_coords[:,2])
relCoM_S3 = RotVec(CoM_S3, S3_coords[:,2])
# Result Matrix
resMat = np.vstack([relCoM_S1[:,0],relCoM_S2[:,0],relCoM_S3[:,0],
relCoM_S1[:,1],relCoM_S2[:,1],relCoM_S3[:,1]]).T
# For theta:
pTheta = np.abs(np.mean(np.exp(1j*np.vstack([S1_coords[:,2],S2_coords[:,2],S3_coords[:,2]]).T),axis=1)).reshape(data.shape[0],1)
return np.hstack([resMat,pTheta])
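# Minimal end-to-end sketch (added illustration; the synthetic input below is
# an assumption -- real data would come from smarticle tracking logs):
if __name__ == "__main__":
    dt = 0.02
    raw = np.random.randn(500, 9)        # 3 smarticles x (x, y, theta)
    obs = preprocess(raw)                 # -> (500, 7) observable vectors
    rats, covs, wins = rattling_windows(obs, dt, window_sz=100, overlap=0.5)
    print(len(wins), "windows; first rattling value:", rats[0])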
|
[
"numpy.hstack",
"scipy.signal.filtfilt",
"numpy.log",
"numpy.sin",
"numpy.cov",
"numpy.mod",
"numpy.arange",
"numpy.divide",
"numpy.mean",
"numpy.vstack",
"numpy.abs",
"numpy.ones",
"pickle.load",
"numpy.cos",
"numpy.shape",
"numpy.copy",
"pickle.dump",
"scipy.signal.butter",
"numpy.linalg.det"
] |
[((120, 150), 'numpy.mod', 'np.mod', (['(x + np.pi)', '(2.0 * np.pi)'], {}), '(x + np.pi, 2.0 * np.pi)\n', (126, 150), True, 'import numpy as np\n'), ((545, 571), 'numpy.divide', 'np.divide', (['(x - x[0])', 't_vec'], {}), '(x - x[0], t_vec)\n', (554, 571), True, 'import numpy as np\n'), ((730, 743), 'numpy.cov', 'np.cov', (['vec.T'], {}), '(vec.T)\n', (736, 743), True, 'import numpy as np\n'), ((2128, 2191), 'scipy.signal.butter', 'signal.butter', (['order', 'normal_cutoff'], {'btype': '"""high"""', 'analog': '(False)'}), "(order, normal_cutoff, btype='high', analog=False)\n", (2141, 2191), False, 'from scipy import signal\n'), ((2324, 2351), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'data'], {}), '(b, a, data)\n', (2339, 2351), False, 'from scipy import signal\n'), ((2474, 2542), 'scipy.signal.butter', 'signal.butter', (['order', 'normal_cutoffs'], {'btype': '"""bandstop"""', 'analog': '(False)'}), "(order, normal_cutoffs, btype='bandstop', analog=False)\n", (2487, 2542), False, 'from scipy import signal\n'), ((2677, 2704), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'data'], {}), '(b, a, data)\n', (2692, 2704), False, 'from scipy import signal\n'), ((2898, 2921), 'pickle.dump', 'pickle.dump', (['db', 'dbfile'], {}), '(db, dbfile)\n', (2909, 2921), False, 'import pickle\n'), ((3087, 3106), 'pickle.load', 'pickle.load', (['dbfile'], {}), '(dbfile)\n', (3098, 3106), False, 'import pickle\n'), ((3850, 3900), 'numpy.mean', 'np.mean', (['[S1_coords, S2_coords, S3_coords]'], {'axis': '(0)'}), '([S1_coords, S2_coords, S3_coords], axis=0)\n', (3857, 3900), True, 'import numpy as np\n'), ((4590, 4617), 'numpy.hstack', 'np.hstack', (['[resMat, pTheta]'], {}), '([resMat, pTheta])\n', (4599, 4617), True, 'import numpy as np\n'), ((711, 721), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (718, 721), True, 'import numpy as np\n'), ((4325, 4443), 'numpy.vstack', 'np.vstack', (['[relCoM_S1[:, 0], relCoM_S2[:, 0], relCoM_S3[:, 0], relCoM_S1[:, 1],\n relCoM_S2[:, 1], relCoM_S3[:, 1]]'], {}), '([relCoM_S1[:, 0], relCoM_S2[:, 0], relCoM_S3[:, 0], relCoM_S1[:, \n 1], relCoM_S2[:, 1], relCoM_S3[:, 1]])\n', (4334, 4443), True, 'import numpy as np\n'), ((1667, 1694), 'numpy.abs', 'np.abs', (['(overlap * window_sz)'], {}), '(overlap * window_sz)\n', (1673, 1694), True, 'import numpy as np\n'), ((1985, 1998), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (1992, 1998), True, 'import numpy as np\n'), ((197, 206), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (203, 206), True, 'import numpy as np\n'), ((220, 229), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (226, 229), True, 'import numpy as np\n'), ((230, 239), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (236, 239), True, 'import numpy as np\n'), ((497, 525), 'numpy.arange', 'np.arange', (['(1)', '(x.shape[0] + 1)'], {}), '(1, x.shape[0] + 1)\n', (506, 525), True, 'import numpy as np\n'), ((800, 811), 'numpy.shape', 'np.shape', (['C'], {}), '(C)\n', (808, 811), True, 'import numpy as np\n'), ((839, 848), 'numpy.log', 'np.log', (['C'], {}), '(C)\n', (845, 848), True, 'import numpy as np\n'), ((208, 217), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (214, 217), True, 'import numpy as np\n'), ((890, 906), 'numpy.linalg.det', 'np.linalg.det', (['C'], {}), '(C)\n', (903, 906), True, 'import numpy as np\n'), ((4487, 4549), 'numpy.vstack', 'np.vstack', (['[S1_coords[:, 2], S2_coords[:, 2], S3_coords[:, 2]]'], {}), '([S1_coords[:, 2], S2_coords[:, 2], S3_coords[:, 2]])\n', (4496, 4549), True, 'import numpy as np\n')]
|
"""
Make a learning curve for the full neural net trained on all 30 output
measures. The point of this graph is to investigate how much training data
is needed to achieve various MSE values.
"""
import matplotlib.pyplot as plt
import numpy as np
import cPickle as pickle
import lasagne
from lasagne import layers
from lasagne import nonlinearities
from lasagne.nonlinearities import ScaledTanH
from nolearn.lasagne import NeuralNet, TrainSplit
from sklearn.learning_curve import learning_curve
from lignet_utils import gen_train_test
x_train, x_test, y_train, y_test, x_scaler, y_scaler = gen_train_test()
# set up the Scaled tanh parameters. See nonlinearities.py for usage notes.
# I am following the guidance of LeCun et al. for these values
scaled_tanh = ScaledTanH(scale_in=2./3, scale_out=1.7159)
# Make a learning curve to find out how much training data to use
train_size = int(1 * x_train.shape[0])
xt = x_train[:train_size, :]
yt = y_train[:train_size, :]
train_sizes, train_scores, valid_scores = learning_curve(
NeuralNet(
layers=[
('input', layers.InputLayer),
('hidden0', layers.DenseLayer),
('hidden1', layers.DenseLayer),
('output', layers.DenseLayer)
],
input_shape=(None, x_train.shape[1]),
hidden0_num_units=18,
hidden0_nonlinearity=scaled_tanh,
hidden1_num_units=20,
hidden1_nonlinearity=scaled_tanh,
output_num_units=y_train.shape[1],
output_nonlinearity=nonlinearities.linear,
regression=True,
verbose=1,
max_epochs=4000,
update=lasagne.updates.adagrad,
train_split=TrainSplit(eval_size=0.3),
),
xt, yt,
train_sizes=[500, 1500, 5000, 15000, 35000, 75000, 133333],
scoring='mean_squared_error')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
valid_scores_mean = np.mean(valid_scores, axis=1)
valid_scores_std = np.std(valid_scores, axis=1)
with open('learning_curve.pkl', 'wb') as pkl:
pickle.dump([train_scores_mean, train_scores_std,
valid_scores_mean, valid_scores_std,
train_sizes], pkl)
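# Added sketch: one way the pickled curves could be visualised afterwards.
# The function form keeps it import-safe; sklearn's 'mean_squared_error'
# scorer stores negated MSE values, hence the sign flip below.
def plot_learning_curve(pkl_path='learning_curve.pkl'):
    with open(pkl_path, 'rb') as pkl:
        train_mean, train_std, valid_mean, valid_std, sizes = pickle.load(pkl)
    plt.errorbar(sizes, -train_mean, yerr=train_std, label='train MSE')
    plt.errorbar(sizes, -valid_mean, yerr=valid_std, label='validation MSE')
    plt.xlabel('training set size')
    plt.ylabel('MSE')
    plt.legend(loc='best')
    plt.show()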
|
[
"numpy.mean",
"cPickle.dump",
"lignet_utils.gen_train_test",
"lasagne.nonlinearities.ScaledTanH",
"numpy.std",
"nolearn.lasagne.TrainSplit"
] |
[((593, 609), 'lignet_utils.gen_train_test', 'gen_train_test', ([], {}), '()\n', (607, 609), False, 'from lignet_utils import gen_train_test\n'), ((764, 810), 'lasagne.nonlinearities.ScaledTanH', 'ScaledTanH', ([], {'scale_in': '(2.0 / 3)', 'scale_out': '(1.7159)'}), '(scale_in=2.0 / 3, scale_out=1.7159)\n', (774, 810), False, 'from lasagne.nonlinearities import ScaledTanH\n'), ((1832, 1861), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1839, 1861), True, 'import numpy as np\n'), ((1881, 1909), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (1887, 1909), True, 'import numpy as np\n'), ((1930, 1959), 'numpy.mean', 'np.mean', (['valid_scores'], {'axis': '(1)'}), '(valid_scores, axis=1)\n', (1937, 1959), True, 'import numpy as np\n'), ((1979, 2007), 'numpy.std', 'np.std', (['valid_scores'], {'axis': '(1)'}), '(valid_scores, axis=1)\n', (1985, 2007), True, 'import numpy as np\n'), ((2059, 2168), 'cPickle.dump', 'pickle.dump', (['[train_scores_mean, train_scores_std, valid_scores_mean, valid_scores_std,\n train_sizes]', 'pkl'], {}), '([train_scores_mean, train_scores_std, valid_scores_mean,\n valid_scores_std, train_sizes], pkl)\n', (2070, 2168), True, 'import cPickle as pickle\n'), ((1663, 1688), 'nolearn.lasagne.TrainSplit', 'TrainSplit', ([], {'eval_size': '(0.3)'}), '(eval_size=0.3)\n', (1673, 1688), False, 'from nolearn.lasagne import NeuralNet, TrainSplit\n')]
|
import numpy, copy
from numpy import nan
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QMessageBox
from orangewidget import gui
from orangewidget import widget
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from oasys.util.oasys_util import TriggerIn, TriggerOut
import oasys.util.oasys_util as OU
from srxraylib.util.data_structures import ScaledMatrix
from scipy.interpolate import RectBivariateSpline
from wofrysrw.propagator.wavefront2D.srw_wavefront import SRWWavefront, PolarizationComponent, Polarization
from wofrysrw.beamline.optical_elements.other.srw_crl import SRWCRL
from wofry.propagator.propagator import PropagationManager
from wofrysrw.propagator.propagators2D.srw_fresnel_native import SRW_APPLICATION
from wofrysrw.propagator.propagators2D.srw_propagation_mode import SRWPropagationMode
from orangecontrib.srw.util.srw_objects import SRWData
from orangecontrib.srw.util.srw_util import SRWPlot
from orangecontrib.srw.widgets.gui.ow_srw_wavefront_viewer import SRWWavefrontViewer
class OWThicknessErrorPhaseShift(SRWWavefrontViewer):
name = "Thickness Error Phase Shift"
description = "Thickness Error Phase Shift"
icon = "icons/thickness_phase_shifter.png"
maintainer = "<NAME>"
maintainer_email = "<EMAIL>"
priority = 5
category = "Display Data Tools"
keywords = ["data", "file", "load", "read"]
outputs = [{"name":"SRWData",
"type":SRWData,
"doc":"SRW Optical Element Data",
"id":"data"},
{"name":"Trigger",
"type": TriggerIn,
"doc":"Feedback signal to start a new beam simulation",
"id":"Trigger"}]
inputs = [("SRWData", SRWData, "set_input"),
("Error Profiles", list, "setErrorProfiles"),
("Trigger", TriggerOut, "propagate_new_wavefront")]
crl_error_profiles = Setting([])
crl_scaling_factor = Setting(1.0)
TABS_AREA_HEIGHT = 555
CONTROL_AREA_WIDTH = 405
def __init__(self):
super().__init__()
self.runaction = widget.OWAction("Propagate Wavefront", self)
self.runaction.triggered.connect(self.propagate_wavefront)
self.addAction(self.runaction)
button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")
button = gui.button(button_box, self, "Propagate Wavefront", callback=self.propagate_wavefront)
font = QFont(button.font())
font.setBold(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button = gui.button(button_box, self, "Reset Fields", callback=self.callResetSettings)
font = QFont(button.font())
font.setItalic(True)
button.setFont(font)
palette = QPalette(button.palette()) # make a copy of the palette
palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
button.setPalette(palette) # assign new palette
button.setFixedHeight(45)
button.setFixedWidth(150)
gui.separator(self.controlArea)
self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)
self.tabs_setting = oasysgui.tabWidget(self.controlArea)
self.tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)
tab_thick = oasysgui.createTabPage(self.tabs_setting, "Thickness Error")
input_box = oasysgui.widgetBox(tab_thick, "Thickness Error Files", addSpace=True, orientation="vertical", height=390, width=self.CONTROL_AREA_WIDTH-20)
self.files_area = oasysgui.textArea(height=315)
input_box.layout().addWidget(self.files_area)
self.refresh_files_text_area()
oasysgui.lineEdit(input_box, self, "crl_scaling_factor", "Thickness Error Scaling Factor", labelWidth=260, valueType=float, orientation="horizontal")
def refresh_files_text_area(self):
text = ""
for file in self.crl_error_profiles:
text += file + "\n"
self.files_area.setText(text)
def setErrorProfiles(self, error_profiles):
try:
if not error_profiles is None:
self.crl_error_profiles = error_profiles
self.refresh_files_text_area()
except Exception as exception:
QMessageBox.critical(self, "Error",
exception.args[0],
QMessageBox.Ok)
if self.IS_DEVELOP: raise exception
def set_input(self, srw_data):
if not srw_data is None:
self.input_srw_data = srw_data
if self.is_automatic_run:
self.propagate_wavefront()
def set_srw_live_propagation_mode(self):
if PropagationManager.Instance().get_propagation_mode(SRW_APPLICATION)==SRWPropagationMode.WHOLE_BEAMLINE:
raise ValueError("Propagation Mode not supported, switch to Element by Element")
else:
super(OWThicknessErrorPhaseShift, self).set_srw_live_propagation_mode()
def propagate_wavefront(self):
try:
self.progressBarInit()
if self.input_srw_data is None: raise Exception("No Input Data")
self.check_data()
input_wavefront = self.input_srw_data.get_srw_wavefront().duplicate()
srw_beamline = self.input_srw_data.get_srw_beamline().duplicate()
optical_element = srw_beamline.get_beamline_element_at(-1).get_optical_element()
coordinates = srw_beamline.get_beamline_element_at(-1).get_coordinates()
if not isinstance(optical_element, SRWCRL):
raise ValueError("Thickness Error Phase Shift should be connected to a CRL optical element")
if coordinates.q() != 0.0:
raise ValueError("Thickness Error Phase Shift should be applied on unpropagated wavefronts: put 'q' value to 0.0 in the previous optical element")
crl_delta = optical_element.delta
crl_w_mirr_2D_values = [OWThicknessErrorPhaseShift.h5_readsurface(thickness_error_file) for thickness_error_file in self.crl_error_profiles]
# TO WOFRY
generic_wavefront = input_wavefront.toGenericWavefront()
for thickness_error_profile in crl_w_mirr_2D_values:
phase_shift = OWThicknessErrorPhaseShift.get_crl_phase_shift(thickness_error_profile, crl_delta, generic_wavefront, self.crl_scaling_factor)
generic_wavefront.add_phase_shift(phase_shift, Polarization.SIGMA)
generic_wavefront.add_phase_shift(phase_shift, Polarization.PI)
# TO SRW
output_wavefront = SRWWavefront.fromGenericWavefront(generic_wavefront)
output_wavefront.Rx = input_wavefront.Rx
output_wavefront.Ry = input_wavefront.Ry
output_wavefront.dRx = input_wavefront.dRx
output_wavefront.dRy = input_wavefront.dRy
output_wavefront.xc = input_wavefront.xc
output_wavefront.yc = input_wavefront.yc
output_wavefront.avgPhotEn = input_wavefront.avgPhotEn
output_wavefront.presCA = input_wavefront.presCA
output_wavefront.presFT = input_wavefront.presFT
output_wavefront.unitElFld = input_wavefront.unitElFld
output_wavefront.arElecPropMatr = copy.deepcopy(input_wavefront.arElecPropMatr)
output_wavefront.arMomX = copy.deepcopy(input_wavefront.arMomX)
output_wavefront.arMomY = copy.deepcopy(input_wavefront.arMomY)
output_wavefront.arWfrAuxData = copy.deepcopy(input_wavefront.arWfrAuxData)
output_wavefront.partBeam = copy.deepcopy(input_wavefront.partBeam)
output_wavefront.setScanningData(input_wavefront.scanned_variable_data)
output_srw_data = SRWData(srw_beamline=srw_beamline, srw_wavefront=output_wavefront)
self.progressBarSet(50)
self.initializeTabs()
tickets = []
self.run_calculation_for_plots(output_wavefront=output_wavefront, tickets=tickets, progress_bar_value=50)
self.plot_results(tickets, 80)
self.progressBarFinished()
self.setStatusMessage("")
self.send("SRWData", output_srw_data)
self.send("Trigger", TriggerIn(new_object=True))
except Exception as e:
QMessageBox.critical(self, "Error", str(e.args[0]), QMessageBox.Ok)
self.setStatusMessage("")
self.progressBarFinished()
if self.IS_DEVELOP: raise e
def run_calculation_for_plots(self, output_wavefront, tickets, progress_bar_value):
if self.view_type==2:
e, h, v, i = output_wavefront.get_intensity(multi_electron=False, polarization_component_to_be_extracted=PolarizationComponent.LINEAR_HORIZONTAL)
tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, i[int(e.size/2)]))
self.progressBarSet(progress_bar_value)
e, h, v, i = output_wavefront.get_intensity(multi_electron=False, polarization_component_to_be_extracted=PolarizationComponent.LINEAR_VERTICAL)
tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, i[int(e.size/2)]))
e, h, v, p = output_wavefront.get_phase(polarization_component_to_be_extracted=PolarizationComponent.LINEAR_HORIZONTAL)
tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, p[int(e.size/2)]))
self.progressBarSet(progress_bar_value + 10)
e, h, v, p = output_wavefront.get_phase(polarization_component_to_be_extracted=PolarizationComponent.LINEAR_VERTICAL)
tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, p[int(e.size/2)]))
elif self.view_type==1:
e, h, v, i = output_wavefront.get_intensity(multi_electron=False)
tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, i[int(e.size/2)]))
self.progressBarSet(progress_bar_value)
e, h, v, p = output_wavefront.get_phase()
tickets.append(SRWPlot.get_ticket_2D(h*1000, v*1000, p[int(e.size/2)]))
self.progressBarSet(progress_bar_value + 10)
def propagate_new_wavefront(self, trigger):
try:
if trigger and trigger.new_object == True:
if trigger.has_additional_parameter("variable_name"):
if self.input_srw_data is None: raise Exception("No Input Data")
variable_name = trigger.get_additional_parameter("variable_name").strip()
variable_display_name = trigger.get_additional_parameter("variable_display_name").strip()
variable_value = trigger.get_additional_parameter("variable_value")
variable_um = trigger.get_additional_parameter("variable_um")
if "," in variable_name:
variable_names = variable_name.split(",")
for variable_name in variable_names:
setattr(self, variable_name.strip(), variable_value)
else:
setattr(self, variable_name, variable_value)
self.input_srw_data.get_srw_wavefront().setScanningData(SRWWavefront.ScanningData(variable_name, variable_value, variable_display_name, variable_um))
self.propagate_wavefront()
except Exception as exception:
QMessageBox.critical(self, "Error", str(exception), QMessageBox.Ok)
if self.IS_DEVELOP: raise exception
def check_data(self):
if len(self.crl_error_profiles) == 0: raise ValueError("No Thickness error profile specified")
congruence.checkPositiveNumber(self.crl_scaling_factor, "Thickness Error Scaling Factor")
@classmethod
def h5_readsurface(cls, filename):
x_coords, y_coords, z_values = OU.read_surface_file(filename)
return ScaledMatrix(x_coords, y_coords, z_values.T)
@classmethod
def get_crl_phase_shift(cls, thickness_error_profile, crl_delta, wavefront, crl_scaling_factor=1.0):
coord_x = thickness_error_profile.x_coord
coord_y = thickness_error_profile.y_coord
thickness_error = thickness_error_profile.z_values
interpolator = RectBivariateSpline(coord_x, coord_y, thickness_error, bbox=[None, None, None, None], kx=1, ky=1, s=0)
wavelength = wavefront.get_wavelength()
wavefront_coord_x = wavefront.get_coordinate_x()
wavefront_coord_y = wavefront.get_coordinate_y()
thickness_error = interpolator(wavefront_coord_x, wavefront_coord_y)
        # '== numpy.nan' never matches (NaN != NaN); isnan() is needed here
        thickness_error[numpy.where(numpy.isnan(thickness_error))] = 0.0
thickness_error *= crl_scaling_factor
return -2*numpy.pi*crl_delta*thickness_error/wavelength
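    # Added note on the return value above: a thickness error t(x, y) in a
    # refractive material produces the phase error phi = -2*pi*delta*t/lambda.
    # With hypothetical values delta = 1e-06, t = 1 um, lambda = 1 Angstrom,
    # phi = -2*pi*0.01, i.e. roughly -0.063 rad.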
def getVariablesToPlot(self):
if self.view_type == 2:
return [[1, 2], [1, 2], [1, 2], [1, 2]]
else:
return [[1, 2], [1, 2]]
def getWeightedPlots(self):
if self.view_type == 2:
return [False, False, True, True]
else:
return [False, True]
def getWeightTickets(self):
if self.view_type == 2:
return [nan, nan, 0, 1]
else:
return [nan, 0]
def getTitles(self, with_um=False):
if self.view_type == 2:
if with_um: return ["Intensity SE \u03c0 [ph/s/.1%bw/mm\u00b2]",
"Intensity SE \u03c3 [ph/s/.1%bw/mm\u00b2]",
"Phase SE \u03c0 [rad]",
"Phase SE \u03c0 [rad]"]
else: return ["Intensity SE \u03c0",
"Intensity SE \u03c3",
"Phase SE \u03c0",
"Phase SE \u03c3"]
else:
if with_um: return ["Intensity SE [ph/s/.1%bw/mm\u00b2]",
"Phase SE [rad]"]
else: return ["Intensity SE",
"Phase SE"]
def getXTitles(self):
if self.view_type == 2:
return ["X [\u03bcm]", "X [\u03bcm]", "X [\u03bcm]", "X [\u03bcm]"]
else:
return ["X [\u03bcm]", "X [\u03bcm]"]
def getYTitles(self):
if self.view_type == 2:
return ["Y [\u03bcm]", "Y [\u03bcm]", "Y [\u03bcm]", "Y [\u03bcm]"]
else:
return ["Y [\u03bcm]", "Y [\u03bcm]"]
def getXUM(self):
if self.view_type == 2:
return ["X [\u03bcm]", "X [\u03bcm]", "X [\u03bcm]", "X [\u03bcm]"]
else:
return ["X [\u03bcm]", "X [\u03bcm]"]
def getYUM(self):
if self.view_type == 2:
return ["Y [\u03bcm]", "Y [\u03bcm]", "Y [\u03bcm]", "Y [\u03bcm]"]
else:
return ["Y [\u03bcm]", "Y [\u03bcm]"]
def callResetSettings(self):
if ConfirmDialog.confirmed(parent=self, message="Confirm Reset of the Fields?"):
try:
self.resetSettings()
except:
pass
|
[
"oasys.widgets.gui.widgetBox",
"oasys.widgets.gui.createTabPage",
"PyQt5.QtGui.QColor",
"oasys.util.oasys_util.read_surface_file",
"oasys.widgets.gui.tabWidget",
"copy.deepcopy",
"orangewidget.settings.Setting",
"wofrysrw.propagator.wavefront2D.srw_wavefront.SRWWavefront.fromGenericWavefront",
"oasys.widgets.congruence.checkPositiveNumber",
"wofrysrw.propagator.wavefront2D.srw_wavefront.SRWWavefront.ScanningData",
"scipy.interpolate.RectBivariateSpline",
"numpy.where",
"orangecontrib.srw.util.srw_objects.SRWData",
"orangewidget.gui.separator",
"oasys.widgets.gui.lineEdit",
"orangewidget.gui.button",
"wofry.propagator.propagator.PropagationManager.Instance",
"oasys.widgets.gui.ConfirmDialog.confirmed",
"oasys.widgets.gui.textArea",
"oasys.util.oasys_util.TriggerIn",
"PyQt5.QtWidgets.QMessageBox.critical",
"srxraylib.util.data_structures.ScaledMatrix",
"orangewidget.widget.OWAction"
] |
[((2028, 2039), 'orangewidget.settings.Setting', 'Setting', (['[]'], {}), '([])\n', (2035, 2039), False, 'from orangewidget.settings import Setting\n'), ((2065, 2077), 'orangewidget.settings.Setting', 'Setting', (['(1.0)'], {}), '(1.0)\n', (2072, 2077), False, 'from orangewidget.settings import Setting\n'), ((2213, 2257), 'orangewidget.widget.OWAction', 'widget.OWAction', (['"""Propagate Wavefront"""', 'self'], {}), "('Propagate Wavefront', self)\n", (2228, 2257), False, 'from orangewidget import widget\n'), ((2386, 2473), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['self.controlArea', '""""""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""'}), "(self.controlArea, '', addSpace=False, orientation=\n 'horizontal')\n", (2404, 2473), True, 'from oasys.widgets import gui as oasysgui\n'), ((2487, 2578), 'orangewidget.gui.button', 'gui.button', (['button_box', 'self', '"""Propagate Wavefront"""'], {'callback': 'self.propagate_wavefront'}), "(button_box, self, 'Propagate Wavefront', callback=self.\n propagate_wavefront)\n", (2497, 2578), False, 'from orangewidget import gui\n'), ((2915, 2992), 'orangewidget.gui.button', 'gui.button', (['button_box', 'self', '"""Reset Fields"""'], {'callback': 'self.callResetSettings'}), "(button_box, self, 'Reset Fields', callback=self.callResetSettings)\n", (2925, 2992), False, 'from orangewidget import gui\n'), ((3360, 3391), 'orangewidget.gui.separator', 'gui.separator', (['self.controlArea'], {}), '(self.controlArea)\n', (3373, 3391), False, 'from orangewidget import gui\n'), ((3486, 3522), 'oasys.widgets.gui.tabWidget', 'oasysgui.tabWidget', (['self.controlArea'], {}), '(self.controlArea)\n', (3504, 3522), True, 'from oasys.widgets import gui as oasysgui\n'), ((3675, 3735), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['self.tabs_setting', '"""Thickness Error"""'], {}), "(self.tabs_setting, 'Thickness Error')\n", (3697, 3735), True, 'from oasys.widgets import gui as oasysgui\n'), ((3757, 3902), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['tab_thick', '"""Thickness Error Files"""'], {'addSpace': '(True)', 'orientation': '"""vertical"""', 'height': '(390)', 'width': '(self.CONTROL_AREA_WIDTH - 20)'}), "(tab_thick, 'Thickness Error Files', addSpace=True,\n orientation='vertical', height=390, width=self.CONTROL_AREA_WIDTH - 20)\n", (3775, 3902), True, 'from oasys.widgets import gui as oasysgui\n'), ((3924, 3953), 'oasys.widgets.gui.textArea', 'oasysgui.textArea', ([], {'height': '(315)'}), '(height=315)\n', (3941, 3953), True, 'from oasys.widgets import gui as oasysgui\n'), ((4058, 4215), 'oasys.widgets.gui.lineEdit', 'oasysgui.lineEdit', (['input_box', 'self', '"""crl_scaling_factor"""', '"""Thickness Error Scaling Factor"""'], {'labelWidth': '(260)', 'valueType': 'float', 'orientation': '"""horizontal"""'}), "(input_box, self, 'crl_scaling_factor',\n 'Thickness Error Scaling Factor', labelWidth=260, valueType=float,\n orientation='horizontal')\n", (4075, 4215), True, 'from oasys.widgets import gui as oasysgui\n'), ((12076, 12169), 'oasys.widgets.congruence.checkPositiveNumber', 'congruence.checkPositiveNumber', (['self.crl_scaling_factor', '"""Thickness Error Scaling Factor"""'], {}), "(self.crl_scaling_factor,\n 'Thickness Error Scaling Factor')\n", (12106, 12169), False, 'from oasys.widgets import congruence\n'), ((12262, 12292), 'oasys.util.oasys_util.read_surface_file', 'OU.read_surface_file', (['filename'], {}), '(filename)\n', (12282, 12292), True, 'import oasys.util.oasys_util as OU\n'), ((12309, 12353), 
'srxraylib.util.data_structures.ScaledMatrix', 'ScaledMatrix', (['x_coords', 'y_coords', 'z_values.T'], {}), '(x_coords, y_coords, z_values.T)\n', (12321, 12353), False, 'from srxraylib.util.data_structures import ScaledMatrix\n'), ((12660, 12766), 'scipy.interpolate.RectBivariateSpline', 'RectBivariateSpline', (['coord_x', 'coord_y', 'thickness_error'], {'bbox': '[None, None, None, None]', 'kx': '(1)', 'ky': '(1)', 's': '(0)'}), '(coord_x, coord_y, thickness_error, bbox=[None, None,\n None, None], kx=1, ky=1, s=0)\n', (12679, 12766), False, 'from scipy.interpolate import RectBivariateSpline\n'), ((15248, 15324), 'oasys.widgets.gui.ConfirmDialog.confirmed', 'ConfirmDialog.confirmed', ([], {'parent': 'self', 'message': '"""Confirm Reset of the Fields?"""'}), "(parent=self, message='Confirm Reset of the Fields?')\n", (15271, 15324), False, 'from oasys.widgets.gui import ConfirmDialog\n'), ((2786, 2805), 'PyQt5.QtGui.QColor', 'QColor', (['"""Dark Blue"""'], {}), "('Dark Blue')\n", (2792, 2805), False, 'from PyQt5.QtGui import QPalette, QColor, QFont\n'), ((3207, 3225), 'PyQt5.QtGui.QColor', 'QColor', (['"""Dark Red"""'], {}), "('Dark Red')\n", (3213, 3225), False, 'from PyQt5.QtGui import QPalette, QColor, QFont\n'), ((7041, 7093), 'wofrysrw.propagator.wavefront2D.srw_wavefront.SRWWavefront.fromGenericWavefront', 'SRWWavefront.fromGenericWavefront', (['generic_wavefront'], {}), '(generic_wavefront)\n', (7074, 7093), False, 'from wofrysrw.propagator.wavefront2D.srw_wavefront import SRWWavefront, PolarizationComponent, Polarization\n'), ((7728, 7773), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.arElecPropMatr'], {}), '(input_wavefront.arElecPropMatr)\n', (7741, 7773), False, 'import numpy, copy\n'), ((7813, 7850), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.arMomX'], {}), '(input_wavefront.arMomX)\n', (7826, 7850), False, 'import numpy, copy\n'), ((7890, 7927), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.arMomY'], {}), '(input_wavefront.arMomY)\n', (7903, 7927), False, 'import numpy, copy\n'), ((7973, 8016), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.arWfrAuxData'], {}), '(input_wavefront.arWfrAuxData)\n', (7986, 8016), False, 'import numpy, copy\n'), ((8057, 8096), 'copy.deepcopy', 'copy.deepcopy', (['input_wavefront.partBeam'], {}), '(input_wavefront.partBeam)\n', (8070, 8096), False, 'import numpy, copy\n'), ((8213, 8279), 'orangecontrib.srw.util.srw_objects.SRWData', 'SRWData', ([], {'srw_beamline': 'srw_beamline', 'srw_wavefront': 'output_wavefront'}), '(srw_beamline=srw_beamline, srw_wavefront=output_wavefront)\n', (8220, 8279), False, 'from orangecontrib.srw.util.srw_objects import SRWData\n'), ((13028, 13069), 'numpy.where', 'numpy.where', (['(thickness_error == numpy.nan)'], {}), '(thickness_error == numpy.nan)\n', (13039, 13069), False, 'import numpy, copy\n'), ((4644, 4714), 'PyQt5.QtWidgets.QMessageBox.critical', 'QMessageBox.critical', (['self', '"""Error"""', 'exception.args[0]', 'QMessageBox.Ok'], {}), "(self, 'Error', exception.args[0], QMessageBox.Ok)\n", (4664, 4714), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((8703, 8729), 'oasys.util.oasys_util.TriggerIn', 'TriggerIn', ([], {'new_object': '(True)'}), '(new_object=True)\n', (8712, 8729), False, 'from oasys.util.oasys_util import TriggerIn, TriggerOut\n'), ((5081, 5110), 'wofry.propagator.propagator.PropagationManager.Instance', 'PropagationManager.Instance', ([], {}), '()\n', (5108, 5110), False, 'from wofry.propagator.propagator import PropagationManager\n'), ((11628, 11724), 
'wofrysrw.propagator.wavefront2D.srw_wavefront.SRWWavefront.ScanningData', 'SRWWavefront.ScanningData', (['variable_name', 'variable_value', 'variable_display_name', 'variable_um'], {}), '(variable_name, variable_value,\n variable_display_name, variable_um)\n', (11653, 11724), False, 'from wofrysrw.propagator.wavefront2D.srw_wavefront import SRWWavefront, PolarizationComponent, Polarization\n')]
|
import numpy as np
import torch
from matplotlib import pyplot as plt
from scipy.spatial.distance import directed_hausdorff
from numpy import linalg as LA
from sklearn import metrics
def get_roc_auc(target, prediction):
y_true = target.view(-1).numpy()
y_score = prediction.view(-1).cpu().detach().numpy()
roc_auc_score = metrics.roc_auc_score(y_true, y_score)
return roc_auc_score
def get_precision_recall_auc(target, prediction):
    y_true = target.view(-1).numpy()
    y_score = prediction.view(-1).cpu().detach().numpy()
    precision, recall, _ = metrics.precision_recall_curve(y_true, y_score)
    precision_recall_auc = metrics.auc(recall, precision)
    return precision_recall_auc
def dice_coef(target, prediction):
pred_flat = prediction.contiguous().view(-1)
target_flat = target.contiguous().view(-1)
intersection = torch.sum(pred_flat * target_flat)
union = torch.sum(pred_flat + target_flat)
coef = (2 * intersection) / union
return coef
def hausdorff_distance(target_coord, prediction_coord):
if len(prediction_coord) >= 1:
hausdorff_distance = max(directed_hausdorff(target_coord, prediction_coord)[0], directed_hausdorff(prediction_coord, target_coord)[0])
else:
hausdorff_distance = None
return hausdorff_distance
def jaccard_coef(target_fg, prediction_fg):
intersection = torch.sum(prediction_fg * target_fg)
union = torch.sum(prediction_fg + target_fg)
coef_fg = intersection/(union - intersection)
return coef_fg
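# Added doctest-style sanity check for the two overlap scores above:
#   >>> t = torch.tensor([0., 1., 1., 0.]); p = torch.tensor([0., 1., 0., 0.])
#   >>> dice_coef(t, p)      # 2*1 / 3     = tensor(0.6667)
#   >>> jaccard_coef(t, p)   # 1 / (3 - 1) = tensor(0.5000)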
# def gt_hot_encoding(ground_truth):
# return (ground_truth-1)*-1
def mean_surface_distance(target_coord, prediction_coord):
surface_sum_distance = 0
if len(prediction_coord) != 0:
for point in target_coord:
min_distances = min([LA.norm(coord) for coord in np.array(point)-np.array(prediction_coord)])
surface_sum_distance += min_distances
for point in prediction_coord:
min_distances = min([LA.norm(coord) for coord in np.array(point) - np.array(target_coord)])
surface_sum_distance += min_distances
return surface_sum_distance/(len(target_coord) + len(prediction_coord))
else:
return None
def convert_to_coordinates(target, prediction):
target = target.squeeze_(0).numpy()
prediction = prediction.squeeze_(0).numpy()
target_coord = [(x, y, z) for x, y, z in zip(np.where(target==1)[0], np.where(target==1)[1], np.where(target==1)[2])]
prediction_coord = [(x, y, z) for x, y, z in zip(np.where(prediction==1)[0], np.where(prediction==1)[1], np.where(prediction==1)[2])]
return target_coord, prediction_coord
def get_relative_volume(mask):
relative_volume = 100 * torch.sum(mask)/mask.numel()
return relative_volume
def plot_ct_and_mask(query, mask, pred, title, path):
query = query.squeeze_(0).squeeze_(0).detach().cpu()
pred = pred.squeeze_(0)
fig1 = plt.figure()
fig2 = plt.figure()
subplot_1 = 1
subplot_2 = 1
slices = np.random.choice(np.arange(query.shape[2]), 5)
for i in slices:
fig = plt.figure(figsize=(10, 10))
ax_gt = fig.add_subplot(1, 2, 1)
ax_gt.imshow(query[:, :, i] + mask[:, :, i] * 5, cmap=plt.cm.bone, aspect='auto')
ax_pred = fig.add_subplot(1, 2, 2)
ax_pred.imshow(query[:, :, i] + pred[:, :, i] * 5, cmap=plt.cm.bone, aspect='auto')
plt.title(title + ' slice ' + str(i))
plt.savefig(path + '/' + title + ' slice ' + str(i) + '.png')
def save_images(prediction, path, title, in_memory=None):
prediction = prediction.squeeze_(0)
if in_memory is not None:
title = title + in_memory
np.save(path + '/' + title, prediction)
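# Minimal self-check (added sketch; the toy tensors below are made up):
if __name__ == "__main__":
    toy_target = torch.tensor([0., 1., 1., 0.])
    toy_pred = torch.tensor([0.1, 0.9, 0.8, 0.3])
    print("ROC AUC:", get_roc_auc(toy_target, toy_pred))              # perfect ranking -> 1.0
    print("Dice:", dice_coef(toy_target, (toy_pred > 0.5).float()))  # -> tensor(1.)
    print("Relative volume [%]:", get_relative_volume(toy_target))   # -> tensor(50.)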
|
[
"scipy.spatial.distance.directed_hausdorff",
"numpy.arange",
"numpy.where",
"sklearn.metrics.auc",
"sklearn.metrics.precision_recall_curve",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"matplotlib.pyplot.figure",
"torch.sum",
"numpy.linalg.norm",
"numpy.save"
] |
[((336, 374), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (357, 374), False, 'from sklearn import metrics\n'), ((574, 621), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (604, 621), False, 'from sklearn import metrics\n'), ((650, 680), 'sklearn.metrics.auc', 'metrics.auc', (['recall', 'precision'], {}), '(recall, precision)\n', (661, 680), False, 'from sklearn import metrics\n'), ((867, 901), 'torch.sum', 'torch.sum', (['(pred_flat * target_flat)'], {}), '(pred_flat * target_flat)\n', (876, 901), False, 'import torch\n'), ((914, 948), 'torch.sum', 'torch.sum', (['(pred_flat + target_flat)'], {}), '(pred_flat + target_flat)\n', (923, 948), False, 'import torch\n'), ((1378, 1414), 'torch.sum', 'torch.sum', (['(prediction_fg * target_fg)'], {}), '(prediction_fg * target_fg)\n', (1387, 1414), False, 'import torch\n'), ((1427, 1463), 'torch.sum', 'torch.sum', (['(prediction_fg + target_fg)'], {}), '(prediction_fg + target_fg)\n', (1436, 1463), False, 'import torch\n'), ((2933, 2945), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2943, 2945), True, 'from matplotlib import pyplot as plt\n'), ((2957, 2969), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2967, 2969), True, 'from matplotlib import pyplot as plt\n'), ((3681, 3720), 'numpy.save', 'np.save', (["(path + '/' + title)", 'prediction'], {}), "(path + '/' + title, prediction)\n", (3688, 3720), True, 'import numpy as np\n'), ((3037, 3062), 'numpy.arange', 'np.arange', (['query.shape[2]'], {}), '(query.shape[2])\n', (3046, 3062), True, 'import numpy as np\n'), ((3102, 3130), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3112, 3130), True, 'from matplotlib import pyplot as plt\n'), ((2725, 2740), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (2734, 2740), False, 'import torch\n'), ((1129, 1179), 'scipy.spatial.distance.directed_hausdorff', 'directed_hausdorff', (['target_coord', 'prediction_coord'], {}), '(target_coord, prediction_coord)\n', (1147, 1179), False, 'from scipy.spatial.distance import directed_hausdorff\n'), ((1184, 1234), 'scipy.spatial.distance.directed_hausdorff', 'directed_hausdorff', (['prediction_coord', 'target_coord'], {}), '(prediction_coord, target_coord)\n', (1202, 1234), False, 'from scipy.spatial.distance import directed_hausdorff\n'), ((1797, 1811), 'numpy.linalg.norm', 'LA.norm', (['coord'], {}), '(coord)\n', (1804, 1811), True, 'from numpy import linalg as LA\n'), ((1992, 2006), 'numpy.linalg.norm', 'LA.norm', (['coord'], {}), '(coord)\n', (1999, 2006), True, 'from numpy import linalg as LA\n'), ((2411, 2432), 'numpy.where', 'np.where', (['(target == 1)'], {}), '(target == 1)\n', (2419, 2432), True, 'import numpy as np\n'), ((2435, 2456), 'numpy.where', 'np.where', (['(target == 1)'], {}), '(target == 1)\n', (2443, 2456), True, 'import numpy as np\n'), ((2459, 2480), 'numpy.where', 'np.where', (['(target == 1)'], {}), '(target == 1)\n', (2467, 2480), True, 'import numpy as np\n'), ((2537, 2562), 'numpy.where', 'np.where', (['(prediction == 1)'], {}), '(prediction == 1)\n', (2545, 2562), True, 'import numpy as np\n'), ((2565, 2590), 'numpy.where', 'np.where', (['(prediction == 1)'], {}), '(prediction == 1)\n', (2573, 2590), True, 'import numpy as np\n'), ((2593, 2618), 'numpy.where', 'np.where', (['(prediction == 1)'], {}), '(prediction == 1)\n', (2601, 2618), True, 'import numpy as np\n'), ((1825, 1840), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (1833, 1840), True, 'import numpy as np\n'), ((1841, 1867), 'numpy.array', 'np.array', (['prediction_coord'], {}), '(prediction_coord)\n', (1849, 1867), True, 'import numpy as np\n'), ((2020, 2035), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (2028, 2035), True, 'import numpy as np\n'), ((2038, 2060), 'numpy.array', 'np.array', (['target_coord'], {}), '(target_coord)\n', (2046, 2060), True, 'import numpy as np\n')]
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : tensorflow_cookbook
@File : C0707_Doc2Vec.py
@Version : v0.1
@Time : 2019-12-06 17:12
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "TensorFlow Machine Learning Cookbook", Nick McClure, Sec0707, P172
@Desc : Natural language processing: sentiment analysis based on Doc2Vec, implemented with TensorFlow
@Notes : The key point is how the document embedding is combined with the word embeddings.
There are two ways to combine them: (1) add the document embedding to the word embeddings; (2) concatenate the document embedding directly after the word embeddings.
This model uses the second approach, but the dataset used here is not ideal for understanding the Doc2Vec method: the example only shows how to use the model, not what difference it actually makes.
The example also shows that although training a neural network needs little up-front work, featurizing the data beforehand is still very important, and good featurization requires a solid understanding of the model.
"""
# common imports
import os
import pickle
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install "numpy<1.17": versions below 1.17 do not raise the error
import sklearn
import tensorflow as tf
import winsound
from nltk.corpus import stopwords
from tensorflow.python.framework import ops
# Set the print precision for displayed numeric data (see np.set_printoptions below)
from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Fix the random seeds so that random data is stable and every test run reproduces the same results
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# numpy 1.16.4 or 1.16.5 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Reset the default computation graph
ops.reset_default_graph()
# Open graph session
sess = tf.Session()
# ----------------------------------------------------------------------
# Declare model parameters
data_folder_name = 'temp'
batch_size = 500
vocabulary_size = 7500
generations = 100000
model_learning_rate = 0.001
embedding_size = 200 # Word embedding size
doc_embedding_size = 100 # Document embedding size
concatenated_size = embedding_size + doc_embedding_size
num_sampled = int(batch_size / 2) # Number of negative examples to sample.
window_size = 3 # How many words to consider to the left.
# Add checkpoints to training
save_embeddings_every = 5000
print_valid_every = 5000
print_loss_every = 100
# Declare stop words
stops = stopwords.words('english')
# We pick a few test words for validation.
valid_words = ['love', 'hate', 'happy', 'sad', 'man', 'woman']
# Later we will have to transform these into indices
# Load the movie review data
print('Loading Data')
texts, target = load_movie_data()
# Normalize text
print('Normalizing Text Data')
texts = normalize_text(texts, stops)
# Texts must contain at least 3 words
target = [target[ix] for ix, x in enumerate(texts) if len(x.split()) > window_size]
texts = [x for x in texts if len(x.split()) > window_size]
assert (len(target) == len(texts))
# Build our data set and dictionaries
print('Creating Dictionary')
word_dictionary = build_dictionary(texts, vocabulary_size)
word_dictionary_rev = dict(zip(word_dictionary.values(), word_dictionary.keys()))
text_data = text_to_numbers(texts, word_dictionary)
# Look up the dictionary indices of the validation words
valid_examples = [word_dictionary[x] for x in valid_words]
print('Creating Model')
# 6. Define the word embeddings and declare the noise-contrastive estimation (NCE) loss parameters
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
nce_weights = tf.Variable(tf.truncated_normal(
[vocabulary_size, concatenated_size], stddev = 1.0 / np.sqrt(concatenated_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Create data/target placeholders
x_inputs = tf.placeholder(tf.int32, shape = [None, window_size + 1])  # window_size word indices, plus 1 for the document index
y_target = tf.placeholder(tf.int32, shape = [None, 1])
valid_dataset = tf.constant(valid_examples, dtype = tf.int32)
# 8. Create the word-embedding and document-embedding lookups: sum the word embeddings, then concatenate the document embedding onto them
# Word-embedding lookup (CBOW-style sum over the context window)
embed = tf.zeros([batch_size, embedding_size])
for element in range(window_size):
embed += tf.nn.embedding_lookup(embeddings, x_inputs[:, element])
# Document-embedding lookup (each document gets a unique index based on its load order)
doc_indices = tf.slice(x_inputs, [0, window_size], [batch_size, 1])
doc_embeddings = tf.Variable(tf.random_uniform([len(texts), doc_embedding_size], -1.0, 1.0))
doc_embed = tf.nn.embedding_lookup(doc_embeddings, doc_indices)
# Concatenate the word embeddings with the document embedding
final_embed = tf.concat(axis = 1, values = [embed, tf.squeeze(doc_embed)])
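# Shape note (added): 'embed' is [batch_size, embedding_size] (the CBOW sum),
# tf.squeeze(doc_embed) is [batch_size, doc_embedding_size], so 'final_embed'
# is [batch_size, concatenated_size], matching the second dim of nce_weights.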
# 9. Declare the loss function and the optimizer
# Get loss from prediction
loss = tf.reduce_mean(tf.nn.nce_loss(weights = nce_weights,
biases = nce_biases,
labels = y_target,
inputs = final_embed,
num_sampled = num_sampled,
num_classes = vocabulary_size))
# Create optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate = model_learning_rate)
train_step = optimizer.minimize(loss)
# 10. Declare the cosine distance for the validation word set
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims = True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b = True)
# 11. Create the model Saver, used to store the word and document embeddings
saver = tf.train.Saver({"embeddings": embeddings, "doc_embeddings": doc_embeddings})
# Add variable initializer.
init = tf.global_variables_initializer()
sess.run(init)
# Train the Doc2Vec model
print('Starting Training')
loss_vec = []
loss_x_vec = []
for i in range(generations):
batch_inputs, batch_labels = generate_batch_data(text_data, batch_size, window_size, method = 'doc2vec')
feed_dict = {x_inputs: batch_inputs, y_target: batch_labels}
# Run the train step
sess.run(train_step, feed_dict = feed_dict)
# Return the loss
if (i + 1) % print_loss_every == 0:
loss_val = sess.run(loss, feed_dict = feed_dict)
loss_vec.append(loss_val)
loss_x_vec.append(i + 1)
print('Loss at step {} : {}'.format(i + 1, loss_val))
# Validation: Print some random words and top 5 related words
if (i + 1) % print_valid_every == 0:
sim = sess.run(similarity, feed_dict = feed_dict)
for j in range(len(valid_words)):
valid_word = word_dictionary_rev[valid_examples[j]]
top_k = 5 # number of nearest neighbors
nearest = (-sim[j, :]).argsort()[1:top_k + 1]
log_str = "Nearest to {}:".format(valid_word)
for k in range(top_k):
close_word = word_dictionary_rev[nearest[k]]
log_str = '{} {},'.format(log_str, close_word)
print(log_str)
# Save dictionary + embeddings
if (i + 1) % save_embeddings_every == 0:
# Save vocabulary dictionary
with open(os.path.join(data_folder_name, 'movie_vocab.pkl'), 'wb') as f:
pickle.dump(word_dictionary, f)
# Save embeddings
model_checkpoint_path = os.path.join(os.getcwd(), data_folder_name, 'doc2vec_movie_embeddings.ckpt')
save_path = saver.save(sess, model_checkpoint_path)
print('Model saved in file: {}'.format(save_path))
# Start logistic model-------------------------
# Train a logistic regression model on top of these embedding matrices
max_words = 20
logistic_batch_size = 500
# Split dataset into train and test sets
# Need to keep the indices sorted to keep track of document index
train_indices = np.sort(np.random.choice(len(target), round(0.8 * len(target)), replace = False))
test_indices = np.sort(np.array(list(set(range(len(target))) - set(train_indices))))
texts_train = [x for ix, x in enumerate(texts) if ix in train_indices]
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = np.array([x for ix, x in enumerate(target) if ix in train_indices])
target_test = np.array([x for ix, x in enumerate(target) if ix in test_indices])
# Convert texts to lists of indices
text_data_train = np.array(text_to_numbers(texts_train, word_dictionary))
text_data_test = np.array(text_to_numbers(texts_test, word_dictionary))
# Pad/crop movie reviews to specific length
text_data_train = np.array([x[0:max_words] for x in [y + [0] * max_words for y in text_data_train]])
text_data_test = np.array([x[0:max_words] for x in [y + [0] * max_words for y in text_data_test]])
# Define Logistic placeholders
log_x_inputs = tf.placeholder(tf.int32, shape = [None, max_words + 1]) # plus 1 for doc index
log_y_target = tf.placeholder(tf.int32, shape = [None, 1])
# Define logistic embedding lookup (needed if we have two different batch sizes)
# Add together element embeddings in window:
log_embed = tf.zeros([logistic_batch_size, embedding_size])
for element in range(max_words):
log_embed += tf.nn.embedding_lookup(embeddings, log_x_inputs[:, element])
log_doc_indices = tf.slice(log_x_inputs, [0, max_words], [logistic_batch_size, 1])
log_doc_embed = tf.nn.embedding_lookup(doc_embeddings, log_doc_indices)
# concatenate embeddings
log_final_embed = tf.concat(axis = 1, values = [log_embed, tf.squeeze(log_doc_embed)])
# Define model:
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape = [concatenated_size, 1]))
b = tf.Variable(tf.random_normal(shape = [1, 1]))
# Declare logistic model (sigmoid in loss function)
model_output = tf.add(tf.matmul(log_final_embed, A), b)
# Declare loss function (Cross Entropy loss)
logistic_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits = model_output, labels = tf.cast(log_y_target, tf.float32)))
# Actual Prediction
prediction = tf.round(tf.sigmoid(model_output))
predictions_correct = tf.cast(tf.equal(prediction, tf.cast(log_y_target, tf.float32)), tf.float32)
accuracy = tf.reduce_mean(predictions_correct)
# Declare optimizer
logistic_opt = tf.train.GradientDescentOptimizer(learning_rate = 0.01)
logistic_train_step = logistic_opt.minimize(logistic_loss, var_list = [A, b])
# Initialize Variables
init = tf.global_variables_initializer()
sess.run(init)
# Start Logistic Regression
print('Starting Logistic Doc2Vec Model Training')
train_loss, test_loss = [], []
train_acc, test_acc = [], []
i_data = []
for i in range(10000):
rand_index = np.random.choice(text_data_train.shape[0], size = logistic_batch_size)
rand_x = text_data_train[rand_index]
# Append review index at the end of text data
rand_x_doc_indices = train_indices[rand_index]
    # Only here is the input assembled in full (word indices + document index)
rand_x = np.hstack((rand_x, np.transpose([rand_x_doc_indices])))
rand_y = np.transpose([target_train[rand_index]])
feed_dict = {log_x_inputs: rand_x, log_y_target: rand_y}
sess.run(logistic_train_step, feed_dict = feed_dict)
# Only record loss and accuracy every 100 generations
if (i + 1) % 100 == 0:
rand_index_test = np.random.choice(text_data_test.shape[0], size = logistic_batch_size)
rand_x_test = text_data_test[rand_index_test]
rand_x_doc_indices_test = test_indices[rand_index_test]
rand_x_test = np.hstack((rand_x_test, np.transpose([rand_x_doc_indices_test])))
rand_y_test = np.transpose([target_test[rand_index_test]])
test_feed_dict = {log_x_inputs: rand_x_test, log_y_target: rand_y_test}
i_data.append(i + 1)
train_loss_temp = sess.run(logistic_loss, feed_dict = feed_dict)
train_loss.append(train_loss_temp)
test_loss_temp = sess.run(logistic_loss, feed_dict = test_feed_dict)
test_loss.append(test_loss_temp)
train_acc_temp = sess.run(accuracy, feed_dict = feed_dict)
train_acc.append(train_acc_temp)
test_acc_temp = sess.run(accuracy, feed_dict = test_feed_dict)
test_acc.append(test_acc_temp)
if (i + 1) % 500 == 0:
acc_and_loss = [i + 1, train_loss_temp, test_loss_temp, train_acc_temp, test_acc_temp]
acc_and_loss = [np.round(x, 2) for x in acc_and_loss]
print('Generation # {}. Train Loss (Test Loss): {:.2f} ({:.2f}). Train Acc (Test Acc): {:.2f} ({:.2f})'
.format(*acc_and_loss))
# Plot loss over time
plt.figure()
plt.plot(i_data, train_loss, 'k-', label = '训练集')
plt.plot(i_data, test_loss, 'r--', label = '测试集', linewidth = 4)
plt.title('每次迭代的交叉熵损失')
plt.xlabel('迭代次数')
plt.ylabel('交叉熵损失')
plt.legend(loc = 'upper right')
# Plot train and test accuracy
plt.figure()
plt.plot(i_data, train_acc, 'k-', label = '训练集')
plt.plot(i_data, test_acc, 'r--', label = '测试集', linewidth = 4)
plt.title('训练集和测试集的精度')
plt.xlabel('迭代次数')
plt.ylabel('精度')
plt.legend(loc = 'lower right')
# ----------------------------------------------------------------------
# Audible alert that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
|
[
"numpy.sqrt",
"tensorflow.python.framework.ops.reset_default_graph",
"matplotlib.pyplot.ylabel",
"text_tools.generate_batch_data",
"text_tools.text_to_numbers",
"numpy.array",
"tensorflow.reduce_mean",
"text_tools.build_dictionary",
"tensorflow.set_random_seed",
"tensorflow.cast",
"tensorflow.slice",
"tensorflow.nn.embedding_lookup",
"tensorflow.random_normal",
"nltk.corpus.stopwords.words",
"tensorflow.Session",
"tensorflow.placeholder",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.random.seed",
"tensorflow.matmul",
"text_tools.normalize_text",
"tensorflow.square",
"tensorflow.zeros",
"numpy.round",
"numpy.random.choice",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.sigmoid",
"text_tools.load_movie_data",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.pyplot.legend",
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"pickle.dump",
"tensorflow.nn.nce_loss",
"tensorflow.train.Saver",
"matplotlib.pyplot.get_fignums",
"os.path.join",
"os.getcwd",
"tensorflow.global_variables_initializer",
"tensorflow.random_uniform",
"matplotlib.pyplot.figure",
"tensorflow.constant",
"winsound.Beep",
"tensorflow.squeeze"
] |
[((1086, 1171), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(8)', 'suppress': '(True)', 'threshold': 'np.inf', 'linewidth': '(200)'}), '(precision=8, suppress=True, threshold=np.inf, linewidth=200\n )\n', (1105, 1171), True, 'import numpy as np\n'), ((1219, 1239), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1233, 1239), True, 'import numpy as np\n'), ((1240, 1264), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (1258, 1264), True, 'import tensorflow as tf\n'), ((1621, 1646), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (1644, 1646), False, 'from tensorflow.python.framework import ops\n'), ((1675, 1687), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1685, 1687), True, 'import tensorflow as tf\n'), ((2330, 2356), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2345, 2356), False, 'from nltk.corpus import stopwords\n'), ((2585, 2602), 'text_tools.load_movie_data', 'load_movie_data', ([], {}), '()\n', (2600, 2602), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((2660, 2688), 'text_tools.normalize_text', 'normalize_text', (['texts', 'stops'], {}), '(texts, stops)\n', (2674, 2688), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((2992, 3032), 'text_tools.build_dictionary', 'build_dictionary', (['texts', 'vocabulary_size'], {}), '(texts, vocabulary_size)\n', (3008, 3032), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((3127, 3166), 'text_tools.text_to_numbers', 'text_to_numbers', (['texts', 'word_dictionary'], {}), '(texts, word_dictionary)\n', (3142, 3166), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((3621, 3676), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, window_size + 1]'}), '(tf.int32, shape=[None, window_size + 1])\n', (3635, 3676), True, 'import tensorflow as tf\n'), ((3721, 3762), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, 1]'}), '(tf.int32, shape=[None, 1])\n', (3735, 3762), True, 'import tensorflow as tf\n'), ((3781, 3824), 'tensorflow.constant', 'tf.constant', (['valid_examples'], {'dtype': 'tf.int32'}), '(valid_examples, dtype=tf.int32)\n', (3792, 3824), True, 'import tensorflow as tf\n'), ((3899, 3937), 'tensorflow.zeros', 'tf.zeros', (['[batch_size, embedding_size]'], {}), '([batch_size, embedding_size])\n', (3907, 3937), True, 'import tensorflow as tf\n'), ((4089, 4142), 'tensorflow.slice', 'tf.slice', (['x_inputs', '[0, window_size]', '[batch_size, 1]'], {}), '(x_inputs, [0, window_size], [batch_size, 1])\n', (4097, 4142), True, 'import tensorflow as tf\n'), ((4248, 4299), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['doc_embeddings', 'doc_indices'], {}), '(doc_embeddings, doc_indices)\n', (4270, 4299), True, 'import tensorflow as tf\n'), ((4832, 4900), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'model_learning_rate'}), '(learning_rate=model_learning_rate)\n', (4865, 4900), True, 'import tensorflow as tf\n'), ((5096, 5156), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['normalized_embeddings', 'valid_dataset'], {}), '(normalized_embeddings, valid_dataset)\n', (5118, 5156), True, 'import tensorflow as tf\n'), ((5170, 5238), 'tensorflow.matmul', 'tf.matmul', (['valid_embeddings', 'normalized_embeddings'], {'transpose_b': '(True)'}), '(valid_embeddings, normalized_embeddings, transpose_b=True)\n', (5179, 5238), True, 'import tensorflow as tf\n'), ((5285, 5361), 'tensorflow.train.Saver', 'tf.train.Saver', (["{'embeddings': embeddings, 'doc_embeddings': doc_embeddings}"], {}), "({'embeddings': embeddings, 'doc_embeddings': doc_embeddings})\n", (5299, 5361), True, 'import tensorflow as tf\n'), ((5398, 5431), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5429, 5431), True, 'import tensorflow as tf\n'), ((8122, 8210), 'numpy.array', 'np.array', (['[x[0:max_words] for x in [(y + [0] * max_words) for y in text_data_train]]'], {}), '([x[0:max_words] for x in [(y + [0] * max_words) for y in\n text_data_train]])\n', (8130, 8210), True, 'import numpy as np\n'), ((8222, 8309), 'numpy.array', 'np.array', (['[x[0:max_words] for x in [(y + [0] * max_words) for y in text_data_test]]'], {}), '([x[0:max_words] for x in [(y + [0] * max_words) for y in\n text_data_test]])\n', (8230, 8309), True, 'import numpy as np\n'), ((8351, 8404), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, max_words + 1]'}), '(tf.int32, shape=[None, max_words + 1])\n', (8365, 8404), True, 'import tensorflow as tf\n'), ((8446, 8487), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, 1]'}), '(tf.int32, shape=[None, 1])\n', (8460, 8487), True, 'import tensorflow as tf\n'), ((8629, 8676), 'tensorflow.zeros', 'tf.zeros', (['[logistic_batch_size, embedding_size]'], {}), '([logistic_batch_size, embedding_size])\n', (8637, 8676), True, 'import tensorflow as tf\n'), ((8807, 8871), 'tensorflow.slice', 'tf.slice', (['log_x_inputs', '[0, max_words]', '[logistic_batch_size, 1]'], {}), '(log_x_inputs, [0, max_words], [logistic_batch_size, 1])\n', (8815, 8871), True, 'import tensorflow as tf\n'), ((8888, 8943), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['doc_embeddings', 'log_doc_indices'], {}), '(doc_embeddings, log_doc_indices)\n', (8910, 8943), True, 'import tensorflow as tf\n'), ((9715, 9750), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['predictions_correct'], {}), '(predictions_correct)\n', (9729, 9750), True, 'import tensorflow as tf\n'), ((9787, 9840), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (9820, 9840), True, 'import tensorflow as tf\n'), ((9953, 9986), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9984, 9986), True, 'import tensorflow as tf\n'), ((12075, 12087), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12085, 12087), True, 'import matplotlib.pyplot as plt\n'), ((12088, 12135), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'train_loss', '"""k-"""'], {'label': '"""训练集"""'}), "(i_data, train_loss, 'k-', label='训练集')\n", (12096, 12135), True, 'import matplotlib.pyplot as plt\n'), ((12138, 12198), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'test_loss', '"""r--"""'], {'label': '"""测试集"""', 'linewidth': '(4)'}), "(i_data, test_loss, 'r--', label='测试集', linewidth=4)\n", (12146, 12198), True, 'import matplotlib.pyplot as plt\n'), ((12203, 12226), 'matplotlib.pyplot.title', 'plt.title', (['"""每次迭代的交叉熵损失"""'], {}), "('每次迭代的交叉熵损失')\n", (12212, 12226), True, 
'import matplotlib.pyplot as plt\n'), ((12227, 12245), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""迭代次数"""'], {}), "('迭代次数')\n", (12237, 12245), True, 'import matplotlib.pyplot as plt\n'), ((12246, 12265), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""交叉熵损失"""'], {}), "('交叉熵损失')\n", (12256, 12265), True, 'import matplotlib.pyplot as plt\n'), ((12266, 12295), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (12276, 12295), True, 'import matplotlib.pyplot as plt\n'), ((12330, 12342), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12340, 12342), True, 'import matplotlib.pyplot as plt\n'), ((12343, 12389), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'train_acc', '"""k-"""'], {'label': '"""训练集"""'}), "(i_data, train_acc, 'k-', label='训练集')\n", (12351, 12389), True, 'import matplotlib.pyplot as plt\n'), ((12392, 12451), 'matplotlib.pyplot.plot', 'plt.plot', (['i_data', 'test_acc', '"""r--"""'], {'label': '"""测试集"""', 'linewidth': '(4)'}), "(i_data, test_acc, 'r--', label='测试集', linewidth=4)\n", (12400, 12451), True, 'import matplotlib.pyplot as plt\n'), ((12456, 12479), 'matplotlib.pyplot.title', 'plt.title', (['"""训练集和测试集的精度"""'], {}), "('训练集和测试集的精度')\n", (12465, 12479), True, 'import matplotlib.pyplot as plt\n'), ((12480, 12498), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""迭代次数"""'], {}), "('迭代次数')\n", (12490, 12498), True, 'import matplotlib.pyplot as plt\n'), ((12499, 12515), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""精度"""'], {}), "('精度')\n", (12509, 12515), True, 'import matplotlib.pyplot as plt\n'), ((12516, 12545), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (12526, 12545), True, 'import matplotlib.pyplot as plt\n'), ((12631, 12654), 'winsound.Beep', 'winsound.Beep', (['(600)', '(500)'], {}), '(600, 500)\n', (12644, 12654), False, 'import winsound\n'), ((3319, 3382), 'tensorflow.random_uniform', 'tf.random_uniform', (['[vocabulary_size, embedding_size]', '(-1.0)', '(1.0)'], {}), '([vocabulary_size, embedding_size], -1.0, 1.0)\n', (3336, 3382), True, 'import tensorflow as tf\n'), ((3546, 3573), 'tensorflow.zeros', 'tf.zeros', (['[vocabulary_size]'], {}), '([vocabulary_size])\n', (3554, 3573), True, 'import tensorflow as tf\n'), ((3986, 4042), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'x_inputs[:, element]'], {}), '(embeddings, x_inputs[:, element])\n', (4008, 4042), True, 'import tensorflow as tf\n'), ((4456, 4605), 'tensorflow.nn.nce_loss', 'tf.nn.nce_loss', ([], {'weights': 'nce_weights', 'biases': 'nce_biases', 'labels': 'y_target', 'inputs': 'final_embed', 'num_sampled': 'num_sampled', 'num_classes': 'vocabulary_size'}), '(weights=nce_weights, biases=nce_biases, labels=y_target,\n inputs=final_embed, num_sampled=num_sampled, num_classes=vocabulary_size)\n', (4470, 4605), True, 'import tensorflow as tf\n'), ((5583, 5656), 'text_tools.generate_batch_data', 'generate_batch_data', (['text_data', 'batch_size', 'window_size'], {'method': '"""doc2vec"""'}), "(text_data, batch_size, window_size, method='doc2vec')\n", (5602, 5656), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((7940, 7985), 'text_tools.text_to_numbers', 'text_to_numbers', (['texts_train', 'word_dictionary'], {}), '(texts_train, word_dictionary)\n', (7955, 7985), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((8013, 8057), 'text_tools.text_to_numbers', 'text_to_numbers', (['texts_test', 'word_dictionary'], {}), '(texts_test, word_dictionary)\n', (8028, 8057), False, 'from text_tools import build_dictionary, generate_batch_data, load_movie_data, normalize_text, text_to_numbers\n'), ((8727, 8787), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'log_x_inputs[:, element]'], {}), '(embeddings, log_x_inputs[:, element])\n', (8749, 8787), True, 'import tensorflow as tf\n'), ((9133, 9179), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[concatenated_size, 1]'}), '(shape=[concatenated_size, 1])\n', (9149, 9179), True, 'import tensorflow as tf\n'), ((9199, 9229), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[1, 1]'}), '(shape=[1, 1])\n', (9215, 9229), True, 'import tensorflow as tf\n'), ((9308, 9337), 'tensorflow.matmul', 'tf.matmul', (['log_final_embed', 'A'], {}), '(log_final_embed, A)\n', (9317, 9337), True, 'import tensorflow as tf\n'), ((9579, 9603), 'tensorflow.sigmoid', 'tf.sigmoid', (['model_output'], {}), '(model_output)\n', (9589, 9603), True, 'import tensorflow as tf\n'), ((10193, 10261), 'numpy.random.choice', 'np.random.choice', (['text_data_train.shape[0]'], {'size': 'logistic_batch_size'}), '(text_data_train.shape[0], size=logistic_batch_size)\n', (10209, 10261), True, 'import numpy as np\n'), ((10516, 10556), 'numpy.transpose', 'np.transpose', (['[target_train[rand_index]]'], {}), '([target_train[rand_index]])\n', (10528, 10556), True, 'import numpy as np\n'), ((12691, 12701), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12699, 12701), True, 'import matplotlib.pyplot as plt\n'), ((4990, 5011), 'tensorflow.square', 'tf.square', (['embeddings'], {}), '(embeddings)\n', (4999, 5011), True, 'import tensorflow as tf\n'), ((9656, 9689), 'tensorflow.cast', 'tf.cast', (['log_y_target', 'tf.float32'], {}), '(log_y_target, tf.float32)\n', (9663, 9689), True, 'import tensorflow as tf\n'), ((10788, 10855), 'numpy.random.choice', 'np.random.choice', (['text_data_test.shape[0]'], {'size': 'logistic_batch_size'}), '(text_data_test.shape[0], size=logistic_batch_size)\n', (10804, 10855), True, 'import numpy as np\n'), ((11086, 11130), 'numpy.transpose', 'np.transpose', (['[target_test[rand_index_test]]'], {}), '([target_test[rand_index_test]])\n', (11098, 11130), True, 'import numpy as np\n'), ((12662, 12679), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (12677, 12679), True, 'import matplotlib.pyplot as plt\n'), ((4366, 4387), 'tensorflow.squeeze', 'tf.squeeze', (['doc_embed'], {}), '(doc_embed)\n', (4376, 4387), True, 'import tensorflow as tf\n'), ((6885, 6916), 'pickle.dump', 'pickle.dump', (['word_dictionary', 'f'], {}), '(word_dictionary, f)\n', (6896, 6916), False, 'import pickle\n'), ((6989, 7000), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6998, 7000), False, 'import os\n'), ((9029, 9054), 'tensorflow.squeeze', 'tf.squeeze', (['log_doc_embed'], {}), '(log_doc_embed)\n', (9039, 9054), True, 'import tensorflow as tf\n'), ((9500, 9533), 'tensorflow.cast', 'tf.cast', (['log_y_target', 'tf.float32'], {}), '(log_y_target, tf.float32)\n', (9507, 9533), True, 'import tensorflow as tf\n'), ((10466, 10500), 'numpy.transpose', 'np.transpose', (['[rand_x_doc_indices]'], {}), '([rand_x_doc_indices])\n', (10478, 10500), True, 'import numpy as np\n'), ((3492, 3518), 'numpy.sqrt', 'np.sqrt', (['concatenated_size'], {}), '(concatenated_size)\n', (3499, 3518), True, 'import numpy as np\n'),
((6810, 6859), 'os.path.join', 'os.path.join', (['data_folder_name', '"""movie_vocab.pkl"""'], {}), "(data_folder_name, 'movie_vocab.pkl')\n", (6822, 6859), False, 'import os\n'), ((11022, 11061), 'numpy.transpose', 'np.transpose', (['[rand_x_doc_indices_test]'], {}), '([rand_x_doc_indices_test])\n', (11034, 11061), True, 'import numpy as np\n'), ((11856, 11870), 'numpy.round', 'np.round', (['x', '(2)'], {}), '(x, 2)\n', (11864, 11870), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import torch
import numpy as np
from utils import Generator
import matplotlib.pyplot as plt
from IPython.display import HTML
import torchvision.utils as vutils
import matplotlib.animation as animation
from IPython import embed
if __name__ == "__main__":
model_dir = '../checkpoints'
mids = list(range(1, 11))
fixed_noise = torch.randn(64, 100, 1, 1).cuda(0)
generator = Generator(100, 64).cuda(0)
generator = torch.nn.DataParallel(generator, device_ids=[0, 1])
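    # nn.DataParallel replicates the generator on GPUs 0 and 1 and splits each input batch across them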
imgs_list = []
for mid in mids:
checkpoints = torch.load(os.path.join(model_dir, 'epoch_%d.pth.tar' % mid))
epoch = checkpoints['epoch']
generator.load_state_dict(checkpoints['generator'])
print('epoch : %d, mid : %d' % (epoch, mid))
generator.eval()
fake = generator(fixed_noise).detach().cpu()
imgs_list.append(fake)
fig = plt.figure(figsize=(8,8))
plt.axis("off")
embed()
ims = [[plt.imshow(np.transpose(i[0],(1, 2, 0)), animated=True)] for i in imgs_list]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
HTML(ani.to_jshtml())
plt.subplot(1, 2, 2)
plt.axis("off")
plt.title("Fake Images")
    plt.imshow(np.transpose(imgs_list[-1][0], (1, 2, 0)))
plt.show()
|
[
"utils.Generator",
"matplotlib.pyplot.title",
"IPython.embed",
"torch.nn.DataParallel",
"os.path.join",
"matplotlib.animation.ArtistAnimation",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"numpy.transpose",
"matplotlib.pyplot.subplot",
"torch.randn",
"matplotlib.pyplot.show"
] |
[((495, 546), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['generator'], {'device_ids': '[0, 1]'}), '(generator, device_ids=[0, 1])\n', (516, 546), False, 'import torch\n'), ((968, 994), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (978, 994), True, 'import matplotlib.pyplot as plt\n'), ((998, 1013), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1006, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1018, 1025), 'IPython.embed', 'embed', ([], {}), '()\n', (1023, 1025), False, 'from IPython import embed\n'), ((1125, 1210), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'ims'], {'interval': '(1000)', 'repeat_delay': '(1000)', 'blit': '(True)'}), '(fig, ims, interval=1000, repeat_delay=1000, blit=True\n )\n', (1150, 1210), True, 'import matplotlib.animation as animation\n'), ((1237, 1257), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1248, 1257), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1277), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1270, 1277), True, 'import matplotlib.pyplot as plt\n'), ((1282, 1306), 'matplotlib.pyplot.title', 'plt.title', (['"""Fake Images"""'], {}), "('Fake Images')\n", (1291, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1365, 1375), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1373, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1362), 'numpy.transpose', 'np.transpose', (['img_list[-1][0]', '(1, 2, 0)'], {}), '(img_list[-1][0], (1, 2, 0))\n', (1334, 1362), True, 'import numpy as np\n'), ((401, 427), 'torch.randn', 'torch.randn', (['(64)', '(100)', '(1)', '(1)'], {}), '(64, 100, 1, 1)\n', (412, 427), False, 'import torch\n'), ((452, 470), 'utils.Generator', 'Generator', (['(100)', '(64)'], {}), '(100, 64)\n', (461, 470), False, 'from utils import Generator\n'), ((634, 683), 'os.path.join', 'os.path.join', (['model_dir', "('epoch_%d.pth.tar' % mid)"], {}), "(model_dir, 'epoch_%d.pth.tar' % mid)\n", (646, 683), False, 'import os\n'), ((1049, 1078), 'numpy.transpose', 'np.transpose', (['i[0]', '(1, 2, 0)'], {}), '(i[0], (1, 2, 0))\n', (1061, 1078), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
from . import common
def v_loc(x):
return 40*np.log10(x + 1)
def x_loc(x):
return 40*(np.log10(x) + 1)
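# Note (an assumption read off the seaborn heatmaps below): v_loc/x_loc map data
# values onto heatmap cell coordinates, with each decade of v or x spanning 40 cells,
# so that the custom tick positions line up with the log-spaced grid of the pivoted frame.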
def main(debug=False):
name = ['I', 'SCA', 'tfp']
suffix = ['', '', '']
df = []
for n, s in zip(name, suffix):
prec = pd.read_csv(f'results/logk_prec_{n}{s}.csv')
prec = prec.groupby(['v', 'x'])['log_err'].mean()
prec.name = f'prec_{n}'
time = pd.read_csv(f'results/logk_time_{n}{s}.csv')
time = time.groupby(['v', 'x'])['time'].mean()
time = 1000 * time
time.name = f'time_{n}'
df += [prec, time]
df = pd.concat(df, axis=1)
df['diff_prec'] = df['prec_SCA'] - df['prec_I']
df['diff_time'] = df['time_SCA'] - df['time_I']
v, x = zip(*df.index)
df['v'] = v
df['x'] = x
df1 = df[['v', 'x', 'prec_I', 'prec_SCA', 'prec_tfp']].copy()
df1.rename(columns=dict(prec_I='I', prec_SCA='SCA', prec_tfp='tfp'), inplace=True)
df1 = df1.melt(id_vars=['v','x'])
df1.rename(columns=dict(variable='type', value='prec'), inplace=True)
df2 = df[['v', 'x', 'time_I', 'time_SCA', 'time_tfp']].copy()
df2.rename(columns=dict(time_I='I', time_SCA='SCA', time_tfp='tfp'), inplace=True)
df2 = df2.melt(id_vars=['v','x'])
df2.rename(columns=dict(variable='type', value='time'), inplace=True)
type_cmap = ListedColormap(['silver', 'grey', 'black'])
type_cmap.set_under('white')
name = [['diff_prec', 'prec_SCA'], ['diff_time', 'time_SCA']]
#pos = [[[0.1, 0.85], [0.85, 0.1]], [[0.1, 0.1], [0.1, 0.85]]]
vmin = [[-1.0, 0], [-10, 0]]
vmax = [[+1.0, 2.8], [10, 28]]
cmap = [[type_cmap, 'Reds'], [type_cmap, 'Blues']]
fig = common.figure(figsize=(5.5, 4), box=debug)
ax = fig.subplots(
2, 2, sharex='col',
)
vticks = [0, 1, 5, 10, 50]
xticks = [0.1, 0.5, 1, 5, 10, 50]
label = [['a', 'c'], ['b', 'd']]
pos = [[[-0.15, 0.9], [-0.2, 0.9]],
[[-0.15, 0.9], [-0.2, 0.9]]]
for i in range(2):
for j in [0]:
hm = df[name[i][j]].unstack(0)
sns.heatmap(hm, vmin=vmin[i][j], vmax=vmax[i][j], cmap=cmap[i][j], ax=ax[i, j])
ax[i, j].invert_yaxis()
ax[i, j].set_xticks([v_loc(v) for v in vticks])
ax[i, j].set_xticklabels([f"${k}$" for k in vticks], rotation=0)
ax[i, j].xaxis.set_ticks_position('both')
ax[i, j].set_yticks([x_loc(x) for x in xticks])
ax[i, j].set_yticklabels([f"${k}$" for k in xticks])
ax[i, j].yaxis.set_ticks_position('both')
if i == 1:
ax[i, j].set_xlabel('$v$')
else:
ax[i, j].set_xlabel('')
if j == 0:
ax[i, j].set_ylabel('$x$')
else:
ax[i, j].set_ylabel('')
for i in range(2):
for j in range(2):
ax[i, j].text(*pos[i][j], label[i][j], transform=ax[i, j].transAxes)
args = dict(
color='white',
)
sns.boxenplot(x='type', y='prec', data=df1, ax=ax[0, 1], **args)
ax[0, 1].xaxis.label.set_visible(False)
    ax[0, 1].set_ylabel(r'err ($\log (\Delta/\epsilon + 1)$)')
sns.boxenplot(x='type', y='time', data=df2, ax=ax[1, 1], **args)
ax[1, 1].set_ylim(0, 35)
ax[1, 1].set_ylabel('time (msec)')
for i in range(2):
for c in ax[i, 1].collections[1::2]:
plt.setp(c, color='k')
fig.savefig('figs/fig5.pdf')
if __name__ == '__main__':
main(debug=False)
|
[
"matplotlib.pyplot.setp",
"numpy.log10",
"pandas.read_csv",
"seaborn.heatmap",
"matplotlib.colors.ListedColormap",
"seaborn.boxenplot",
"pandas.concat"
] |
[((744, 765), 'pandas.concat', 'pd.concat', (['df'], {'axis': '(1)'}), '(df, axis=1)\n', (753, 765), True, 'import pandas as pd\n'), ((1477, 1520), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['silver', 'grey', 'black']"], {}), "(['silver', 'grey', 'black'])\n", (1491, 1520), False, 'from matplotlib.colors import ListedColormap\n'), ((3127, 3191), 'seaborn.boxenplot', 'sns.boxenplot', ([], {'x': '"""type"""', 'y': '"""prec"""', 'data': 'df1', 'ax': 'ax[0, 1]'}), "(x='type', y='prec', data=df1, ax=ax[0, 1], **args)\n", (3140, 3191), True, 'import seaborn as sns\n'), ((3303, 3367), 'seaborn.boxenplot', 'sns.boxenplot', ([], {'x': '"""type"""', 'y': '"""time"""', 'data': 'df2', 'ax': 'ax[1, 1]'}), "(x='type', y='time', data=df2, ax=ax[1, 1], **args)\n", (3316, 3367), True, 'import seaborn as sns\n'), ((191, 206), 'numpy.log10', 'np.log10', (['(x + 1)'], {}), '(x + 1)\n', (199, 206), True, 'import numpy as np\n'), ((399, 443), 'pandas.read_csv', 'pd.read_csv', (['f"""results/logk_prec_{n}{s}.csv"""'], {}), "(f'results/logk_prec_{n}{s}.csv')\n", (410, 443), True, 'import pandas as pd\n'), ((549, 593), 'pandas.read_csv', 'pd.read_csv', (['f"""results/logk_time_{n}{s}.csv"""'], {}), "(f'results/logk_time_{n}{s}.csv')\n", (560, 593), True, 'import pandas as pd\n'), ((238, 249), 'numpy.log10', 'np.log10', (['x'], {}), '(x)\n', (246, 249), True, 'import numpy as np\n'), ((2209, 2288), 'seaborn.heatmap', 'sns.heatmap', (['hm'], {'vmin': 'vmin[i][j]', 'vmax': 'vmax[i][j]', 'cmap': 'cmap[i][j]', 'ax': 'ax[i, j]'}), '(hm, vmin=vmin[i][j], vmax=vmax[i][j], cmap=cmap[i][j], ax=ax[i, j])\n', (2220, 2288), True, 'import seaborn as sns\n'), ((3517, 3539), 'matplotlib.pyplot.setp', 'plt.setp', (['c'], {'color': '"""k"""'}), "(c, color='k')\n", (3525, 3539), True, 'import matplotlib.pyplot as plt\n')]
|
import symjax
import symjax.tensor as T
import matplotlib.pyplot as plt
import numpy as np
J = 5
Q = 4
scales = T.power(2, T.linspace(0.1, J - 1, J * Q))
scales = scales[:, None]
print(scales.get())
wavelet = symjax.tensor.signal.complex_morlet(5 * scales, np.pi / scales)
waveletw = symjax.tensor.signal.fourier_complex_morlet(
5 * scales, np.pi / scales, wavelet.shape[-1]
)
waveletlp = symjax.tensor.signal.littewood_paley_normalization(
waveletw, down=np.pi / scales[-1, 0]
)
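# The Littlewood-Paley normalization rescales the filter bank so that the summed
# squared magnitude of the filters is approximately constant across frequencies,
# making the resulting transform roughly energy-preserving.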
wavelet = wavelet.get()
waveletw = waveletw.get()
waveletlp = waveletlp.get()
plt.subplot(321)
for i in range(J * Q):
fr = np.real(np.fft.fft(np.fft.ifftshift(wavelet[i])))
fi = np.imag(np.fft.fft(np.fft.ifftshift(wavelet[i])))
plt.plot(i + fr, "--b")
plt.plot(i + fi, "--r")
plt.subplot(322)
for i in range(J * Q):
plt.plot(2 * i + wavelet[i].real, c="b")
plt.plot(2 * i + wavelet[i].imag, c="r")
plt.subplot(324)
for i in range(J * Q):
fr = np.real(np.fft.fftshift(np.fft.ifft(waveletw[i])))
fi = np.imag(np.fft.fftshift(np.fft.ifft(waveletw[i])))
plt.plot(2 * i + fr / fr.max(), "--b")
plt.plot(2 * i + fi / fi.max(), "--r")
plt.subplot(323)
for i in range(J * Q):
plt.plot(i + waveletw[i].real, c="b")
plt.plot(i + waveletw[i].imag, c="r")
plt.subplot(325)
for i in range(J * Q):
plt.plot(i + waveletlp[i].real, c="b")
plt.plot(i + waveletlp[i].imag, c="r")
plt.plot(np.abs(waveletlp).sum(0), c="g")
plt.subplot(326)
for i in range(J * Q):
fr = np.real(np.fft.fftshift(np.fft.ifft(waveletlp[i])))
fi = np.imag(np.fft.fftshift(np.fft.ifft(waveletlp[i])))
plt.plot(2 * i + fr / fr.max(), "--b")
plt.plot(2 * i + fi / fi.max(), "--r")
# plt.show()
plt.savefig("wavelets.png")
|
[
"numpy.abs",
"matplotlib.pyplot.savefig",
"symjax.tensor.linspace",
"symjax.tensor.signal.littewood_paley_normalization",
"symjax.tensor.signal.complex_morlet",
"matplotlib.pyplot.plot",
"numpy.fft.ifft",
"numpy.fft.ifftshift",
"matplotlib.pyplot.subplot",
"symjax.tensor.signal.fourier_complex_morlet"
] |
[((212, 275), 'symjax.tensor.signal.complex_morlet', 'symjax.tensor.signal.complex_morlet', (['(5 * scales)', '(np.pi / scales)'], {}), '(5 * scales, np.pi / scales)\n', (247, 275), False, 'import symjax\n'), ((287, 381), 'symjax.tensor.signal.fourier_complex_morlet', 'symjax.tensor.signal.fourier_complex_morlet', (['(5 * scales)', '(np.pi / scales)', 'wavelet.shape[-1]'], {}), '(5 * scales, np.pi / scales,\n wavelet.shape[-1])\n', (330, 381), False, 'import symjax\n'), ((396, 488), 'symjax.tensor.signal.littewood_paley_normalization', 'symjax.tensor.signal.littewood_paley_normalization', (['waveletw'], {'down': '(np.pi / scales[-1, 0])'}), '(waveletw, down=np.pi /\n scales[-1, 0])\n', (446, 488), False, 'import symjax\n'), ((572, 588), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(321)'], {}), '(321)\n', (583, 588), True, 'import matplotlib.pyplot as plt\n'), ((787, 803), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(322)'], {}), '(322)\n', (798, 803), True, 'import matplotlib.pyplot as plt\n'), ((918, 934), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(324)'], {}), '(324)\n', (929, 934), True, 'import matplotlib.pyplot as plt\n'), ((1165, 1181), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(323)'], {}), '(323)\n', (1176, 1181), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1306), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(325)'], {}), '(325)\n', (1301, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1459, 1475), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(326)'], {}), '(326)\n', (1470, 1475), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1749), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""wavelets.png"""'], {}), "('wavelets.png')\n", (1733, 1749), True, 'import matplotlib.pyplot as plt\n'), ((124, 153), 'symjax.tensor.linspace', 'T.linspace', (['(0.1)', '(J - 1)', '(J * Q)'], {}), '(0.1, J - 1, J * Q)\n', (134, 153), True, 'import symjax.tensor as T\n'), ((734, 757), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + fr)', '"""--b"""'], {}), "(i + fr, '--b')\n", (742, 757), True, 'import matplotlib.pyplot as plt\n'), ((762, 785), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + fi)', '"""--r"""'], {}), "(i + fi, '--r')\n", (770, 785), True, 'import matplotlib.pyplot as plt\n'), ((831, 871), 'matplotlib.pyplot.plot', 'plt.plot', (['(2 * i + wavelet[i].real)'], {'c': '"""b"""'}), "(2 * i + wavelet[i].real, c='b')\n", (839, 871), True, 'import matplotlib.pyplot as plt\n'), ((876, 916), 'matplotlib.pyplot.plot', 'plt.plot', (['(2 * i + wavelet[i].imag)'], {'c': '"""r"""'}), "(2 * i + wavelet[i].imag, c='r')\n", (884, 916), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1246), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + waveletw[i].real)'], {'c': '"""b"""'}), "(i + waveletw[i].real, c='b')\n", (1217, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1288), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + waveletw[i].imag)'], {'c': '"""r"""'}), "(i + waveletw[i].imag, c='r')\n", (1259, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1334, 1372), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + waveletlp[i].real)'], {'c': '"""b"""'}), "(i + waveletlp[i].real, c='b')\n", (1342, 1372), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1415), 'matplotlib.pyplot.plot', 'plt.plot', (['(i + waveletlp[i].imag)'], {'c': '"""r"""'}), "(i + waveletlp[i].imag, c='r')\n", (1385, 1415), True, 'import matplotlib.pyplot as plt\n'), ((640, 668), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['wavelet[i]'], {}), '(wavelet[i])\n', (656, 668), True, 'import numpy as np\n'), ((699, 727), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['wavelet[i]'], {}), '(wavelet[i])\n', (715, 727), True, 'import numpy as np\n'), ((991, 1015), 'numpy.fft.ifft', 'np.fft.ifft', (['waveletw[i]'], {}), '(waveletw[i])\n', (1002, 1015), True, 'import numpy as np\n'), ((1051, 1075), 'numpy.fft.ifft', 'np.fft.ifft', (['waveletw[i]'], {}), '(waveletw[i])\n', (1062, 1075), True, 'import numpy as np\n'), ((1425, 1442), 'numpy.abs', 'np.abs', (['waveletlp'], {}), '(waveletlp)\n', (1431, 1442), True, 'import numpy as np\n'), ((1532, 1557), 'numpy.fft.ifft', 'np.fft.ifft', (['waveletlp[i]'], {}), '(waveletlp[i])\n', (1543, 1557), True, 'import numpy as np\n'), ((1593, 1618), 'numpy.fft.ifft', 'np.fft.ifft', (['waveletlp[i]'], {}), '(waveletlp[i])\n', (1604, 1618), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import plotly.graph_objs as go
# import plotly.plotly as py
dates = pd.date_range('01-Jan-2010', pd.datetime.now().date(), freq='D')
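# note: pd.datetime is deprecated (and removed in pandas >= 2.0);
# datetime.datetime.now() from the standard library is the portable spelling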
df = pd.DataFrame(100 + np.random.randn(dates.size).cumsum(), dates, columns=['AAPL'])
trace = go.Scatter(x=df.index, y=df.AAPL)
data = [trace]
layout = dict(
title='Time series with range slider and selectors',
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=1,
label='1m',
step='month',
stepmode='backward'),
dict(count=6,
label='6m',
step='month',
stepmode='backward'),
dict(count=1,
label='YTD',
step='year',
stepmode='todate'),
dict(count=1,
label='1y',
step='year',
stepmode='backward'),
dict(step='all')
])
),
rangeslider=dict(),
type='date'
)
)
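# note: this `layout` dict is never attached to the figure; fig.update_layout(...)
# below repeats the same settings and is what actually takes effect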
fig = go.Figure()
fig.add_trace(trace)
fig.update_layout(
title='Time series with range slider and selectors',
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=1,
label='1m',
step='month',
stepmode='backward'),
dict(count=6,
label='6m',
step='month',
stepmode='backward'),
dict(count=1,
label='YTD',
step='year',
stepmode='todate'),
dict(count=1,
label='1y',
step='year',
stepmode='backward'),
dict(step='all')
])
),
rangeslider=dict(
visible=True,
thickness=0.3
),
type='date'
))
initial_range = ['2016-01-01', '2017-09-01']
fig['layout']['xaxis'].update(range=initial_range)
fig.show()
|
[
"plotly.graph_objs.Figure",
"numpy.random.randn",
"plotly.graph_objs.Scatter",
"pandas.datetime.now"
] |
[((270, 303), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'df.index', 'y': 'df.AAPL'}), '(x=df.index, y=df.AAPL)\n', (280, 303), True, 'import plotly.graph_objs as go\n'), ((1145, 1156), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (1154, 1156), True, 'import plotly.graph_objs as go\n'), ((138, 155), 'pandas.datetime.now', 'pd.datetime.now', ([], {}), '()\n', (153, 155), True, 'import pandas as pd\n'), ((198, 225), 'numpy.random.randn', 'np.random.randn', (['dates.size'], {}), '(dates.size)\n', (213, 225), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 21:56:08 2020
@author: <NAME>
"""
# STEP1----------------- # Importing the libraries------------
#-------------------------------------------------------------
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import glob
import scipy.signal as ss
import csv
import sklearn
from quilt.data.ResidentMario import missingno_data
import missingno as msno
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler # for preprocessing the data
from sklearn.ensemble import RandomForestClassifier # Random forest classifier
from sklearn.tree import DecisionTreeClassifier # for Decision Tree classifier
from sklearn.svm import SVC # for SVM classification
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split # to split the data
from sklearn.model_selection import KFold # For cross validation
from sklearn.model_selection import GridSearchCV # for tuning hyperparameters; it tries every combination of the given parameters
from sklearn.model_selection import RandomizedSearchCV # same purpose, but samples random combinations of the parameters
from sklearn.metrics import confusion_matrix,recall_score,precision_recall_curve,auc,roc_curve,roc_auc_score,classification_report
# STEP2------------------# Importing the DATASET ------------
#------------------------------------------------------------
# Loading data from the iMotions the path to csv file directory
os.chdir("\\ML4TakeOver\\Data\\RawData")
directory = os.getcwd()
#dataFrame_takeover_feature = pd.read_csv('takeover_cleaned_feature4ML.csv', index_col=[0])
dataFrame_takeover_feature = pd.read_csv('takeover4ML.csv', index_col=[0])
dataset = dataFrame_takeover_feature
chunk_users = ['015_M3', '015_m2', '015_M1', '014_M3', #Select a handful of ppl for saving resource
'014_M2', '014_m1']
chunk_dataset = dataset[dataset['Name'].isin(chunk_users)]
dataset = chunk_dataset
dataset.shape
###### ======================================Encoding notes=======================================
# Alarm Type: TA =2, NoA =1, FA = 0 , Z = 3
# TakeOver : TK =1 , NTK= 0
# Alarm : 339.0 =339.0, 103.0= 4, 332.0=14, 259.0=11, 16.0=2, 178.0=6, 284.0=12,
# 213.0=9, 323.0=13, 185.0=7, 84.0=3, 137.0=5, 5.0=1, 191.0=8, 254.0=10
# Mode : +1 (Auto)= +1, -1(Manual)= 0
##### ===========================================================================================
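# A minimal sketch (assuming the raw string labels described above) of how these
# encodings could be applied explicitly with pandas, should the CSV ever arrive un-encoded:
#   dataset['Takeover'] = dataset['Takeover'].map({'NTK': 0, 'TK': 1})
#   dataset['Mode']     = dataset['Mode'].map({1: 1, -1: 0})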
dt_tmp = dataset
dt_tmp['Takeover'] = dt_tmp.Takeover.astype('category')
# Number of "NOT-TAKEOVER" per alarm type
dataset[dataset.Takeover == 'NTK']['Coming_AlarmType'].value_counts()
# Number of "TAKEOVER" per alarm type
dataset[dataset.Takeover == 'TK']['Coming_AlarmType'].value_counts()
## STEP3========================= Exploring the data, mainly the Label (Takeover) ====================
## ===================================================================================================
# let's check the "Takeover" distributions
sns.countplot("Takeover",data=dataset)
# Let's check the Percentage for "TakeOver"
Count_NoTakeOver = len(dataset[dataset["Takeover"]== 'NTK' ]) # Non-TakeOver is represented by 'NTK'
Count_TakeOver = len(dataset[dataset["Takeover"]== 'TK' ])   # TakeOver by 'TK'
Percentage_of_NoTakeOver = Count_NoTakeOver/(Count_NoTakeOver+Count_TakeOver)
print("percentage of None-TakeOver, 0 = ",Percentage_of_NoTakeOver*100)
Percentage_of_TakeOver= Count_TakeOver/(Count_NoTakeOver+Count_TakeOver)
print("percentage of TakeOver, 1 = ",Percentage_of_TakeOver*100)
# the amount related to valid "TakeOver" and "None-Takeover"
Amount_TakeOver = dataset[dataset["Takeover"]== 'TK']
Amount_NoTakeOver = dataset[dataset["Takeover"]== 'NTK']
plt.figure(figsize=(10,6))
plt.subplot(121)
Amount_TakeOver.plot.hist(title="TakeOver", legend =None)
plt.subplot(122)
Amount_NoTakeOver.plot.hist(title="No-Takeover",legend =None)
# Pandas offers three correlation coefficients out of the box: 1) Pearson's 2) Spearman rank 3) Kendall Tau
pearson = dataset.corr(method='pearson')
# assume target attr is the "Takeover or -3", then remove corr with itself
corr_with_target = pearson.iloc[-3][:]
# attributes sorted from the most predictive
predictivity = corr_with_target.sort_values(ascending=False)
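# e.g. predictivity.head(10) lists the ten attributes most correlated with the target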
## STEP4========================= Preparation for Machine Learning algorithms=========================
## ====================================================================================================
# Drop useless features for ML
dataset = dataset.drop(['Timestamp','index','ID', 'Name', 'EventSource', 'ManualGear','EventW','EventN','GazeDirectionLeftY','Alarm',
'GazeDirectionLeftX', 'GazeDirectionRightX', 'GazeDirectionRightY','CurrentBrake',
'PassBy','RangeN'], axis=1) #ManualGear has only "one" value
#EventW is pretty similar to EventN
dataset.shape
#---------------------------------------------------------
# convert categorical value to the number
# convert datatype of object to int and strings
dataset['LeftLaneType'] = dataset.LeftLaneType.astype(object)
dataset['RightLaneType'] = dataset.RightLaneType.astype(object)
dataset['TOT_Class'] = dataset.TOT_Class.astype(object)
dataset['Coming_Alarm'] = dataset.Coming_Alarm.astype(object)
dataset['Takeover'] = dataset.Takeover.astype(object)
dataset['Coming_AlarmType'] = dataset.Coming_AlarmType.astype(object)
dataset['NDTask'] = dataset.NDTask.astype(object)
#****** Drop features that happen after the Alarm (anything after the alarm interrupts takeover prediction)****************
dataset = dataset.drop(['Mode','TOT_Class', 'AlarmDuration','Coming_Alarm','ReactionTime','Coming_AlarmType'], axis=1) # Coming Alarm maybe helpful for ReactionTime
# ------------------------------------------------------.
# takeover (NT, TK) is our target
input_data = dataset.iloc[:, dataset.columns != 'Takeover']
X = input_data
y = dataset[['Takeover']].values.ravel()
# ======================================= Encoding Categorical variables =========================
# # Encoding categorical variables
from sklearn.preprocessing import StandardScaler,LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer, make_column_transformer # the LabelEncoder class takes categorical variables and assigns numeric values to them
# List of all Categorical features
Cat_Features= ['LeftLaneType','RightLaneType','NDTask']
# Get the column index of the categorical features
categorical_features = []
for i in Cat_Features:
position = dataset.columns.get_loc(i)
categorical_features.append(position)
print(categorical_features)
# Get the column index of the Contin. features
conti_features = []
Cont_Filter = dataset.dtypes!=object
Cont_Filter = dataset.columns.where(Cont_Filter).tolist()
Cont_Filter_Cleaned = [name for name in Cont_Filter if str(name) !='nan']
for i in Cont_Filter_Cleaned:
position = dataset.columns.get_loc(i)
conti_features.append(position)
print(conti_features)
# How many columns will be needed for each categorical feature?
print(dataset[Cat_Features].nunique(),
'There are',"--",sum(dataset[Cat_Features].nunique().loc[:]),"--",'groups in the whole dataset')
# ===============================Create pipeline for data transformation (normalize numeric, one-hot encode categorical)
# =============================================================================
from sklearn.pipeline import make_pipeline
numeric = make_pipeline(
StandardScaler())
categorical = make_pipeline(
# handles categorical features
# sparse = False output an array not sparse matrix
OneHotEncoder(sparse=False)) # Automatically take care of Dummy Trap
# creates a simple preprocessing pipeline (that will be combined in a full prediction pipeline below)
# to scale the numerical features and one-hot encode the categorical features.
preprocess = make_column_transformer((numeric, Cont_Filter_Cleaned),
                                     (categorical, ['LeftLaneType','RightLaneType','NDTask']),  # Coming_AlarmType was dropped from the dataset above
remainder='passthrough')
# =============================================================================
# Taking care of splitting
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.20, random_state = 42)
# apply preprocess step (normalize the numeric value and one hot encoding for the categorical)
preprocess.fit_transform(X_train)
# =============================================================================
#SVM is usually optimized using two parameters gamma,C .
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] # C: the Cost parameter, Gamma: Control Bias and variance
# A High value of Gamma leads to more accuracy but biased results and vice-versa.
# Similarly, a large value of Cost parameter (C) indicates poor accuracy but low bias and vice-versa.
tuned_parameters2 = [{'kernel': ['linear'], 'C': [1, 100]}]
model = make_pipeline(
preprocess,
SVC())
##### Try Simple Version ##############
from sklearn import svm
clf = svm.SVC()
X_train = preprocess.fit_transform(X_train)
grid_result = clf.fit(X_train, y_train)
X_test = preprocess.transform(X_test)  # transform only: preprocess was already fitted on the training data
clf.predict(X_test)
## we should try this in near future: https://machinelearningmastery.com/multi-class-classification-tutorial-keras-deep-learning-library/
######################################################################
# the GridSearchCV object with pipeline and the parameter space with 5 folds cross validation.
scores = ['precision', 'recall']
best_params = []
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(
SVC(), tuned_parameters2, scoring='%s_macro' % score
)
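    # caveat: fit_transform below overwrites X_train/X_test in place, so on the second
    # iteration of this loop the already-transformed arrays would be transformed again;
    # fitting the preprocessor once before the loop avoids this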
X_train = preprocess.fit_transform(X_train)
grid_result = clf.fit(X_train, y_train)
best_params.append(grid_result.best_params_)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
    X_test = preprocess.transform(X_test)  # use the already-fitted preprocessor; refitting on the test set would leak information
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# =============================================================================
# ================= Resampling the imbalanced Label of "TakeOver" ========================================
#==========================================================================================================
# We create the preprocessing pipelines for both numeric and categorical data.
from sklearn.pipeline import Pipeline
from sklearn.utils import resample
numeric_features = Cont_Filter_Cleaned
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['LeftLaneType','RightLaneType','NDTask'] # Coming_AlarmType was dropped from the dataset above
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
# Append classifier to preprocessing pipeline.
# Separate input features and target
y = dataset.Takeover
X = dataset.drop('Takeover', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
# concatenate our training data back together
X = pd.concat([X_train, y_train], axis=1)
# separate minority and majority classes
take_over = X[X.Takeover=='TK']
not_takeover = X[X.Takeover=='NTK']
# upsample minority
not_takeover_upsampled = resample(not_takeover,
replace=True, # sample with replacement
n_samples=len(take_over), # match number in majority class
random_state=27) # reproducible results
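# note: upsampling happens after the train/test split, so the duplicated minority rows
# stay inside the training fold and cannot leak into the held-out test set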
# combine majority and upsampled minority
upsampled = pd.concat([take_over, not_takeover_upsampled])
# check new class counts
upsampled.Takeover.value_counts() #713585
# trying logistic regression again with the balanced dataset
y_train = upsampled.Takeover
X_train = upsampled.drop('Takeover', axis=1)
##### LOGISTIC REGRESSION ###############################
#########################################################
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
y_score = clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test)) # model score: 0.846
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
##### DECISION TREE ##################################
#########################################################
from sklearn.tree import DecisionTreeClassifier
clf_3 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('clf', DecisionTreeClassifier(random_state=0))])
y_score = clf_3.fit(X_train, y_train)
print("model score: %.3f" % clf_3.score(X_test, y_test)) # model score: 0.99
y_true_3, y_pred_3 = y_test, clf_3.predict(X_test)
print(classification_report(y_true_3, y_pred_3))
##### RANDOM FOREST ##################################
#########################################################
clf_2 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('clf',RandomForestClassifier(max_depth=2, random_state=0))])
y_score = clf_2.fit(X_train, y_train)
print("model score: %.3f" % clf_2.score(X_test, y_test)) # model score: 0.830
y_true_2, y_pred_2 = y_test, clf_2.predict(X_test)
print(classification_report(y_true_2, y_pred_2))
##### Regularized Greedy Forest (RGF) ##################################
############################################################################
from sklearn.utils.validation import check_random_state
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.ensemble import GradientBoostingClassifier
from rgf.sklearn import RGFClassifier
y_upsampled = upsampled.Takeover
X_upsampled = upsampled.drop('Takeover', axis=1)
clf_5 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('classifier', RGFClassifier(max_leaf=400,
algorithm="RGF_Sib",
test_interval=100,
verbose=True))])
n_folds = 5
rgf_scores = cross_val_score(clf_5,
X_upsampled,
y_upsampled,
cv=StratifiedKFold(n_folds))
rgf_score = sum(rgf_scores)/n_folds
print('RGF Classifier score: {0:.5f}'.format(rgf_score)) #RGF Classifier score: 0.92304
# XGBClassifier(class_weight='balanced')  # stray call: XGBClassifier is never imported in this
# script, and it handles class imbalance via scale_pos_weight rather than class_weight
##### Gradient Boosting #############################################
############################################################################
from sklearn.ensemble import GradientBoostingClassifier
clf_gb = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('classifier', GradientBoostingClassifier(n_estimators=20,
learning_rate=0.01,
subsample=0.6,
random_state=127))])
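# subsample=0.6 makes this stochastic gradient boosting: each tree is fitted on a
# random 60% of the training rows, which typically lowers variance at a small cost in bias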
gb_scores = cross_val_score(clf_gb,
X_upsampled,
y_upsampled,
scoring="f1_weighted",
cv=StratifiedKFold(n_folds))
gb_score = sum(gb_scores)/n_folds
print('Gradient Boosting Classifier score: {0:.5f}'.format(gb_score)) #score: 0.79832
print('>> Mean CV score is: ', round(np.mean(gb_scores),3))
pltt = sns.distplot(pd.Series(gb_scores,name='CV scores distribution (Gradient Boosting)'), color='r')
##### ADA Boost #########################################################
###########################################################################
from sklearn.ensemble import AdaBoostClassifier
clf_4 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('classifier', AdaBoostClassifier(n_estimators=100, random_state=0))])
y_score = clf_4.fit(X_train, y_train)
print("model score: %.3f" % clf_4.score(X_test, y_test)) # model score: 0.887
y_true_4, y_pred_4 = y_test, clf_4.predict(X_test)
print(classification_report(y_true_4, y_pred_4))
##### GAUSSIAN PROCESS #################################
#########################################################
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
kernel = 1.0 * RBF(1.0)
clf_3 = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('clf',GaussianProcessClassifier(kernel=kernel, random_state=0))]) # model score: 0.830
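# caveat: exact Gaussian process classification scales roughly cubically with the
# number of training samples, so fitting it on the full upsampled set is expensive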
y_score = clf_3.fit(X_train, y_train)
print("model score: %.3f" % clf_3.score(X_test, y_test)) # model score: 0.830
y_true_3, y_pred_3 = y_test, clf_3.predict(X_test)
print(classification_report(y_true_3, y_pred_3))
# # =============================================================================
# ================= DownSampling the majority imbalanced Label of "TakeOver" ======================================
#==================================================================================================================
# separate minority and majority classes
take_over = X[X.Takeover=='TK']
not_takeover = X[X.Takeover=='NTK']
# downsample majority
takeover_downsampled = resample(take_over,
replace = False, # sample without replacement
n_samples = len(not_takeover), # match minority n
random_state = 27) # reproducible results
# combine minority and downsampled majority
downsampled = pd.concat([takeover_downsampled, not_takeover])
# checking counts
downsampled.Takeover.value_counts()
# trying logistic regression again with the balanced dataset
y_train_down = downsampled.Takeover
X_train_down = downsampled.drop('Takeover', axis=1)
##### LOGISTIC REGRESSION ###############################
#########################################################
# Now we have a full prediction pipeline.
clf_down = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
y_score_down = clf_down.fit(X_train_down, y_train_down)
print("model score: %.3f" % clf_down.score(X_test, y_test)) # model score: 0.846
y_true, y_pred = y_test, clf_down.predict(X_test)
print(classification_report(y_true, y_pred))
##### ADA Boost ##################################
#########################################################
from sklearn.ensemble import AdaBoostClassifier
clf_4_down = Pipeline(steps=[('preprocessor', preprocessor),
('reduce_dim', PCA()),
('classifier', AdaBoostClassifier(n_estimators=100, random_state=0))])
y_score = clf_4_down.fit(X_train_down, y_train_down)
print("model score: %.3f" % clf_4_down.score(X_test, y_test)) # model score: 0.887
y_true_down_4, y_pred_down_4 = y_test, clf_4_down.predict(X_test)
print(classification_report(y_true_down_4, y_pred_down_4))
# # =============================================================================
# example of one hot encoding for a neural network
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
import h5py
import pytest
# Check the GPU availability
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
# Assigning values to X, Y
y = dataset.Takeover
X = dataset.drop('Takeover', axis=1)
# setting up testing and training sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
# concatenate our training data back together
X = pd.concat([X_train, y_train], axis=1)
# separate minority and majority classes
take_over = X[X.Takeover=='TK']
not_takeover = X[X.Takeover=='NTK']
# upsample minority
not_takeover_upsampled = resample(not_takeover,
replace=True, # sample with replacement
n_samples=len(take_over), # match number in majority class
random_state=27) # reproducible results
# combine majority and upsampled minority
upsampled = pd.concat([take_over, not_takeover_upsampled])
# check new class counts
upsampled.Takeover.value_counts() #713585
# trying logistic regression again with the balanced dataset
y_train = upsampled.Takeover
X_train = upsampled.drop('Takeover', axis=1)
## Preprocessing
# Get the column index of the Contin. features
conti_features = []
Cont_Filter = dataset.dtypes!=object
Cont_Filter = dataset.columns.where(Cont_Filter).tolist()
Cont_Filter_Cleaned = [name for name in Cont_Filter if str(name) !='nan']
for i in Cont_Filter_Cleaned:
position = dataset.columns.get_loc(i)
conti_features.append(position)
print(conti_features)
numeric_features = Cont_Filter_Cleaned
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['LeftLaneType','RightLaneType','NDTask']
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
# prepare input data
def prepare_inputs(X_train, X_test):
X_train_enc = preprocessor.fit_transform(X_train)
    X_test_enc = preprocessor.transform(X_test)  # transform only, reusing the encoding fitted on the training data
return X_train_enc, X_test_enc
# prepare target
def prepare_targets(y_train, y_test):
le = LabelEncoder()
le.fit(y_train)
y_train_enc = le.transform(y_train)
y_test_enc = le.transform(y_test)
return y_train_enc, y_test_enc
# load the dataset
X = dataset.drop('Takeover', axis=1)
y = dataset.Takeover
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=27)
# concatenate our training data back together
X = pd.concat([X_train, y_train], axis=1)
# separate minority and majority classes
take_over = X[X.Takeover=='TK']
not_takeover = X[X.Takeover=='NTK']
# upsample minority
not_takeover_upsampled = resample(not_takeover,
replace=True, # sample with replacement
n_samples=len(take_over), # match number in majority class
random_state=27) # reproducible results
# combine majority and upsampled minority
upsampled = pd.concat([take_over, not_takeover_upsampled])
# check new class counts
upsampled.Takeover.value_counts() #713585
# trying logistic regression again with the balanced dataset
y_train = upsampled.Takeover
X_train = upsampled.drop('Takeover', axis=1)
# prepare input data
X_train_enc, X_test_enc = prepare_inputs(X_train, X_test)
# prepare output data
y_train_enc, y_test_enc = prepare_targets(y_train, y_test)
# define the model
model = Sequential()
model.add(Dense(23, input_dim=X_train_enc.shape[1], activation='relu', kernel_initializer='he_normal'))
model.add(Dense(14, activation='relu'))
model.add(Dense(8, activation='relu'))
#logits layer
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
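# binary cross-entropy pairs with the single sigmoid output unit defined above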
# simple early stopping
#set early stopping monitor so the model stops training when it won't improve anymore
# checkpoint
filepath="best-model-{epoch:02d}-{val_loss:.2f}.hdf5"
keras_callbacks = [
EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=8),
ModelCheckpoint(filepath, monitor='val_loss', mode='min', verbose=1, save_best_only=True)
]
# fit the keras model on the dataset
history = model.fit(X_train_enc, y_train_enc, validation_split=0.10, epochs=30,
batch_size=16, verbose=2, callbacks=keras_callbacks) #val_split: Fraction of the training data to be used as validation data
# load the saved best model
saved_model = load_model('best-model-02-0.04.hdf5')
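# the checkpoint filename encodes the epoch and val_loss reached in a given run,
# so this hard-coded name must match a file actually produced by ModelCheckpoint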
# list all data in history
print(history.history.keys())
# evaluate the model
_, train_acc = saved_model.evaluate(X_train_enc, y_train_enc, verbose=2)
_, test_acc = saved_model.evaluate(X_test_enc, y_test_enc, verbose=1)
print('Accuracy of test: %.2f' % (test_acc*100))
print('Accuracy of the: '+'1) Train: %.3f, 2) Test: %.3f' % (train_acc, test_acc)) # test: 91.04
# plot training history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend(['train', 'test'], loc='upper left')
plt.ylabel('Loss')
plt.show()
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
[
"sklearn.preprocessing.LabelEncoder",
"tensorflow.python.client.device_lib.list_local_devices",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.model_selection.StratifiedKFold",
"keras.layers.Dense",
"numpy.mean",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.gaussian_process.GaussianProcessClassifier",
"sklearn.compose.ColumnTransformer",
"keras.callbacks.EarlyStopping",
"sklearn.gaussian_process.kernels.RBF",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.compose.make_column_transformer",
"keras.models.Sequential",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.title",
"sklearn.ensemble.GradientBoostingClassifier",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"sklearn.svm.SVC",
"pandas.Series",
"keras.models.load_model",
"keras.callbacks.ModelCheckpoint",
"sklearn.preprocessing.OneHotEncoder",
"rgf.sklearn.RGFClassifier",
"os.getcwd",
"os.chdir",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.figure",
"sklearn.linear_model.LogisticRegression",
"sklearn.impute.SimpleImputer",
"seaborn.countplot",
"pandas.concat"
] |
[((1638, 1678), 'os.chdir', 'os.chdir', (['"""\\\\ML4TakeOver\\\\Data\\\\RawData"""'], {}), "('\\\\ML4TakeOver\\\\Data\\\\RawData')\n", (1646, 1678), False, 'import os\n'), ((1692, 1703), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1701, 1703), False, 'import os\n'), ((1827, 1872), 'pandas.read_csv', 'pd.read_csv', (['"""takeover4ML.csv"""'], {'index_col': '[0]'}), "('takeover4ML.csv', index_col=[0])\n", (1838, 1872), True, 'import pandas as pd\n'), ((3219, 3258), 'seaborn.countplot', 'sns.countplot', (['"""Takeover"""'], {'data': 'dataset'}), "('Takeover', data=dataset)\n", (3232, 3258), True, 'import seaborn as sns\n'), ((3937, 3964), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (3947, 3964), True, 'import matplotlib.pyplot as plt\n'), ((3965, 3981), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (3976, 3981), True, 'import matplotlib.pyplot as plt\n'), ((4042, 4058), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (4053, 4058), True, 'import matplotlib.pyplot as plt\n'), ((8233, 8402), 'sklearn.compose.make_column_transformer', 'make_column_transformer', (['(numeric, Cont_Filter_Cleaned)', "(categorical, ['LeftLaneType', 'RightLaneType', 'Coming_AlarmType', 'NDTask'])"], {'remainder': '"""passthrough"""'}), "((numeric, Cont_Filter_Cleaned), (categorical, [\n 'LeftLaneType', 'RightLaneType', 'Coming_AlarmType', 'NDTask']),\n remainder='passthrough')\n", (8256, 8402), False, 'from sklearn.compose import ColumnTransformer, make_column_transformer\n'), ((8701, 8755), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (8717, 8755), False, 'from sklearn.model_selection import train_test_split\n'), ((9704, 9713), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (9711, 9713), False, 'from sklearn import svm\n'), ((12432, 12572), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', ([], {'transformers': "[('num', numeric_transformer, numeric_features), ('cat',\n categorical_transformer, categorical_features)]"}), "(transformers=[('num', numeric_transformer,\n numeric_features), ('cat', categorical_transformer, categorical_features)])\n", (12449, 12572), False, 'from sklearn.compose import ColumnTransformer, make_column_transformer\n'), ((12826, 12880), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(27)'}), '(X, y, test_size=0.2, random_state=27)\n', (12842, 12880), False, 'from sklearn.model_selection import train_test_split\n'), ((12936, 12973), 'pandas.concat', 'pd.concat', (['[X_train, y_train]'], {'axis': '(1)'}), '([X_train, y_train], axis=1)\n', (12945, 12973), True, 'import pandas as pd\n'), ((13438, 13484), 'pandas.concat', 'pd.concat', (['[take_over, not_takeover_upsampled]'], {}), '([take_over, not_takeover_upsampled])\n', (13447, 13484), True, 'import pandas as pd\n'), ((19921, 19968), 'pandas.concat', 'pd.concat', (['[takeover_downsampled, not_takeover]'], {}), '([takeover_downsampled, not_takeover])\n', (19930, 19968), True, 'import pandas as pd\n'), ((22006, 22037), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (22035, 22037), False, 'from tensorflow.python.client import device_lib\n'), ((22208, 22262), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(27)'}), '(X, y, test_size=0.2, random_state=27)\n', (22224, 22262), False, 'from sklearn.model_selection import train_test_split\n'), ((22318, 22355), 'pandas.concat', 'pd.concat', (['[X_train, y_train]'], {'axis': '(1)'}), '([X_train, y_train], axis=1)\n', (22327, 22355), True, 'import pandas as pd\n'), ((22820, 22866), 'pandas.concat', 'pd.concat', (['[take_over, not_takeover_upsampled]'], {}), '([take_over, not_takeover_upsampled])\n', (22829, 22866), True, 'import pandas as pd\n'), ((23910, 24050), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', ([], {'transformers': "[('num', numeric_transformer, numeric_features), ('cat',\n categorical_transformer, categorical_features)]"}), "(transformers=[('num', numeric_transformer,\n numeric_features), ('cat', categorical_transformer, categorical_features)])\n", (23927, 24050), False, 'from sklearn.compose import ColumnTransformer, make_column_transformer\n'), ((24640, 24694), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(27)'}), '(X, y, test_size=0.2, random_state=27)\n', (24656, 24694), False, 'from sklearn.model_selection import train_test_split\n'), ((24750, 24787), 'pandas.concat', 'pd.concat', (['[X_train, y_train]'], {'axis': '(1)'}), '([X_train, y_train], axis=1)\n', (24759, 24787), True, 'import pandas as pd\n'), ((25252, 25298), 'pandas.concat', 'pd.concat', (['[take_over, not_takeover_upsampled]'], {}), '([take_over, not_takeover_upsampled])\n', (25261, 25298), True, 'import pandas as pd\n'), ((25708, 25720), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (25718, 25720), False, 'from keras.models import Sequential\n'), ((26742, 26779), 'keras.models.load_model', 'load_model', (['"""best-model-02-0.04.hdf5"""'], {}), "('best-model-02-0.04.hdf5')\n", (26752, 26779), False, 'from keras.models import load_model\n'), ((27186, 27234), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {'label': '"""train"""'}), "(history.history['loss'], label='train')\n", (27194, 27234), True, 'import matplotlib.pyplot as plt\n'), ((27236, 27287), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {'label': '"""test"""'}), "(history.history['val_loss'], label='test')\n", (27244, 27287), True, 'import matplotlib.pyplot as plt\n'), ((27289, 27336), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (27299, 27336), True, 'import matplotlib.pyplot as plt\n'), ((27338, 27356), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (27348, 27356), True, 'import matplotlib.pyplot as plt\n'), ((27358, 27368), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27366, 27368), True, 'import matplotlib.pyplot as plt\n'), ((27408, 27445), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (27416, 27445), True, 'import matplotlib.pyplot as plt\n'), ((27447, 27488), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (27455, 27488), True, 'import matplotlib.pyplot as plt\n'), ((27490, 27517), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (27499, 27517), True, 'import matplotlib.pyplot as plt\n'), ((27519, 27541), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (27529, 27541), True, 'import matplotlib.pyplot as plt\n'), ((27543, 27562), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (27553, 27562), True, 'import matplotlib.pyplot as plt\n'), ((27564, 27611), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (27574, 27611), True, 'import matplotlib.pyplot as plt\n'), ((27613, 27623), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27621, 27623), True, 'import matplotlib.pyplot as plt\n'), ((27655, 27688), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (27663, 27688), True, 'import matplotlib.pyplot as plt\n'), ((27690, 27727), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (27698, 27727), True, 'import matplotlib.pyplot as plt\n'), ((27729, 27752), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (27738, 27752), True, 'import matplotlib.pyplot as plt\n'), ((27754, 27772), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (27764, 27772), True, 'import matplotlib.pyplot as plt\n'), ((27774, 27793), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (27784, 27793), True, 'import matplotlib.pyplot as plt\n'), ((27795, 27842), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (27805, 27842), True, 'import matplotlib.pyplot as plt\n'), ((27844, 27854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27852, 27854), True, 'import matplotlib.pyplot as plt\n'), ((7824, 7840), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7838, 7840), False, 'from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder\n'), ((7962, 7989), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (7975, 7989), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((9620, 9625), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (9623, 9625), False, 'from sklearn.svm import SVC\n'), ((14159, 14196), 'sklearn.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (14180, 14196), False, 'from sklearn.metrics import confusion_matrix, recall_score, precision_recall_curve, auc, roc_curve, roc_auc_score, classification_report\n'), ((14737, 14778), 'sklearn.metrics.classification_report', 'classification_report', (['y_true_3', 'y_pred_3'], {}), '(y_true_3, y_pred_3)\n', (14758, 14778), False, 'from sklearn.metrics import confusion_matrix, recall_score, precision_recall_curve, auc, roc_curve, roc_auc_score, classification_report\n'), ((15286, 15327), 'sklearn.metrics.classification_report', 'classification_report', (['y_true_2', 'y_pred_2'], {}), '(y_true_2, y_pred_2)\n', (15307, 15327), False, 'from sklearn.metrics import confusion_matrix, recall_score, precision_recall_curve, auc, roc_curve, roc_auc_score, classification_report\n'), ((17675, 17746), 'pandas.Series', 'pd.Series', (['gb_scores'], {'name': '"""CV scores distribution(Gradiaent Boosting)"""'}), "(gb_scores, name='CV scores distribution(Gradiaent Boosting)')\n", (17684, 17746), True, 'import pandas as pd\n'), ((18350, 18391), 'sklearn.metrics.classification_report', 'classification_report', (['y_true_4', 'y_pred_4'], {}), '(y_true_4, y_pred_4)\n', (18371, 18391), False, 'from sklearn.metrics import confusion_matrix, recall_score, precision_recall_curve, auc, roc_curve, roc_auc_score, classification_report\n'), ((18651, 18659), 'sklearn.gaussian_process.kernels.RBF', 'RBF', (['(1.0)'], {}), '(1.0)\n', (18654, 18659), False, 'from sklearn.gaussian_process.kernels import RBF\n'), ((19063, 19104), 'sklearn.metrics.classification_report', 'classification_report', (['y_true_3', 'y_pred_3'], {}), '(y_true_3, y_pred_3)\n', (19084, 19104), False, 'from sklearn.metrics import confusion_matrix, recall_score, precision_recall_curve, auc, roc_curve, roc_auc_score, classification_report\n'), ((20673, 20710), 'sklearn.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (20694, 20710), False, 'from sklearn.metrics import confusion_matrix, recall_score, precision_recall_curve, auc, roc_curve, roc_auc_score, classification_report\n'), ((21304, 21355), 'sklearn.metrics.classification_report', 'classification_report', (['y_true_down_4', 'y_pred_down_4'], {}), '(y_true_down_4, y_pred_down_4)\n', (21325, 21355), False, 'from sklearn.metrics import confusion_matrix, recall_score, precision_recall_curve, auc, roc_curve, roc_auc_score, classification_report\n'), ((24348, 24362), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (24360, 24362), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((25732, 25828), 'keras.layers.Dense', 'Dense', (['(23)'], {'input_dim': 'X_train_enc.shape[1]', 'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(23, input_dim=X_train_enc.shape[1], activation='relu',\n kernel_initializer='he_normal')\n", (25737, 25828), False, 'from keras.layers import Dense\n'), ((25837, 25865), 'keras.layers.Dense', 'Dense', (['(14)'], {'activation': '"""relu"""'}), "(14, activation='relu')\n", (25842, 25865), False, 'from keras.layers import Dense\n'), ((25878, 25905), 'keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (25883, 25905), False, 'from keras.layers import Dense\n'), ((25933, 25963), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (25938, 25963), False, 'from keras.layers import Dense\n'), ((26259, 26327), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'patience': '(8)'}), "(monitor='val_loss', mode='min', verbose=1, patience=8)\n", (26272, 26327), False, 'from keras.callbacks import EarlyStopping\n'), ((26336, 26429), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(filepath, monitor='val_loss', mode='min', verbose=1,\n save_best_only=True)\n", (26351, 26429), False, 'from keras.callbacks import ModelCheckpoint\n'), ((10509, 10514), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (10512, 10514), False, 'from sklearn.svm import SVC\n'), ((11465, 11502), 'sklearn.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (11486, 11502), False, 'from sklearn.metrics import confusion_matrix, recall_score, precision_recall_curve, auc, roc_curve, roc_auc_score, classification_report\n'), ((16367, 16391), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['n_folds'], {}), '(n_folds)\n', (16382, 16391), False, 'from sklearn.model_selection import StratifiedKFold, cross_val_score\n'), ((17443, 17467),
'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['n_folds'], {}), '(n_folds)\n', (17458, 17467), False, 'from sklearn.model_selection import StratifiedKFold, cross_val_score\n'), ((17631, 17649), 'numpy.mean', 'np.mean', (['gb_scores'], {}), '(gb_scores)\n', (17638, 17649), True, 'import numpy as np\n'), ((12078, 12110), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (12091, 12110), False, 'from sklearn.impute import SimpleImputer\n'), ((12129, 12145), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (12143, 12145), False, 'from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder\n'), ((12297, 12353), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""constant"""', 'fill_value': '"""missing"""'}), "(strategy='constant', fill_value='missing')\n", (12310, 12353), False, 'from sklearn.impute import SimpleImputer\n'), ((12372, 12410), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (12385, 12410), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((13964, 13984), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (13982, 13984), False, 'from sklearn.linear_model import LogisticRegression\n'), ((14470, 14475), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (14473, 14475), False, 'from sklearn.decomposition import PCA\n'), ((14511, 14549), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (14533, 14549), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((15005, 15010), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (15008, 15010), False, 'from sklearn.decomposition import PCA\n'), ((15045, 15096), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(2)', 'random_state': '(0)'}), '(max_depth=2, random_state=0)\n', (15067, 15096), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((15905, 15910), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (15908, 15910), False, 'from sklearn.decomposition import PCA\n'), ((15953, 16039), 'rgf.sklearn.RGFClassifier', 'RGFClassifier', ([], {'max_leaf': '(400)', 'algorithm': '"""RGF_Sib"""', 'test_interval': '(100)', 'verbose': '(True)'}), "(max_leaf=400, algorithm='RGF_Sib', test_interval=100, verbose\n =True)\n", (15966, 16039), False, 'from rgf.sklearn import RGFClassifier\n'), ((16885, 16890), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (16888, 16890), False, 'from sklearn.decomposition import PCA\n'), ((16934, 17035), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'n_estimators': '(20)', 'learning_rate': '(0.01)', 'subsample': '(0.6)', 'random_state': '(127)'}), '(n_estimators=20, learning_rate=0.01, subsample=\n 0.6, random_state=127)\n', (16960, 17035), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((18067, 18072), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (18070, 18072), False, 'from sklearn.decomposition import PCA\n'), ((18113, 18165), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'n_estimators': '(100)', 'random_state': '(0)'}), '(n_estimators=100, random_state=0)\n', (18131, 18165), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((18757, 18762), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (18760, 18762), False, 
'from sklearn.decomposition import PCA\n'), ((18797, 18853), 'sklearn.gaussian_process.GaussianProcessClassifier', 'GaussianProcessClassifier', ([], {'kernel': 'kernel', 'random_state': '(0)'}), '(kernel=kernel, random_state=0)\n', (18822, 18853), False, 'from sklearn.gaussian_process import GaussianProcessClassifier\n'), ((20448, 20468), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (20466, 20468), False, 'from sklearn.linear_model import LogisticRegression\n'), ((20986, 20991), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (20989, 20991), False, 'from sklearn.decomposition import PCA\n'), ((21032, 21084), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'n_estimators': '(100)', 'random_state': '(0)'}), '(n_estimators=100, random_state=0)\n', (21050, 21084), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((23575, 23607), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (23588, 23607), False, 'from sklearn.impute import SimpleImputer\n'), ((23626, 23642), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (23640, 23642), False, 'from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder\n'), ((23775, 23831), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""constant"""', 'fill_value': '"""missing"""'}), "(strategy='constant', fill_value='missing')\n", (23788, 23831), False, 'from sklearn.impute import SimpleImputer\n'), ((23850, 23888), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (23863, 23888), False, 'from sklearn.preprocessing import OneHotEncoder\n')]
|
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# @author: <NAME>
import os
from typing import Dict, Optional, Tuple, cast
import gym
import hydra.utils
from mbrl.env.offline_data import load_dataset_and_env
import numpy as np
import omegaconf
from omegaconf import read_write
import torch
import mbrl.constants
import mbrl.models
import mbrl.planning
import mbrl.third_party.pytorch_sac as pytorch_sac
import mbrl.types
import mbrl.util
import mbrl.util.common
import mbrl.util.math
from mbrl.planning.sac_wrapper import SACAgent
MBPO_LOG_FORMAT = mbrl.constants.EVAL_LOG_FORMAT + [
("epoch", "E", "int"),
("rollout_length", "RL", "int"),
]
def create_dataloader_from_dict(cfg: omegaconf.DictConfig, env: gym.Env, dataset: Dict) -> mbrl.util.ReplayBuffer:
dataset_length = len(dataset["observations"])
assert cfg.overrides.num_steps >= dataset_length, \
f"Buffer must be large enough for pretraining dataset, trying to fit {dataset_length} into {cfg.overrides.num_steps} steps."
rng = np.random.default_rng(seed=cfg.seed)
dtype = np.float32
obs_shape = env.observation_space.shape
act_shape = env.action_space.shape
replay_buffer = mbrl.util.common.create_replay_buffer(
cfg,
obs_shape,
act_shape,
rng=rng,
obs_type=dtype,
action_type=dtype,
reward_type=dtype,
)
observations = dataset["observations"]
actions = dataset["actions"]
rewards = dataset["rewards"]
next_observations = dataset["next_observations"]
dones = dataset["terminals"]
if "timeouts" in dataset.keys():
dones = np.logical_or(dataset["terminals"], dataset["timeouts"])
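        # NOTE: folding timeouts into `done` marks time-limit truncations as
        # terminal transitions; whether that is desirable depends on how the
        # downstream model or agent bootstraps at episode boundaries.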
for (obs, act, rew, obs_next, done) in zip(observations, actions, rewards, next_observations, dones):
replay_buffer.add(obs, act, obs_next, rew, done)
return replay_buffer
def train(
env: gym.Env,
cfg: omegaconf.DictConfig,
) -> None:
# ------------------- Initialization -------------------
assert cfg.model_pretraining.train_dataset == cfg.overrides.env, "Dataset for pretraining must come from the training env."
debug_mode = cfg.get("debug_mode", False)
train_dataset, _ = load_dataset_and_env(cfg.model_pretraining.train_dataset)
test_dataset, _ = load_dataset_and_env(cfg.model_pretraining.test_dataset)
train_dataset = create_dataloader_from_dict(cfg, env, train_dataset)
test_dataset = create_dataloader_from_dict(cfg, env, test_dataset)
obs_shape = env.observation_space.shape
act_shape = env.action_space.shape
# mbrl.planning.complete_agent_cfg(env, cfg.algorithm.agent)
# agent = hydra.utils.instantiate(cfg.algorithm.agent)
work_dir = os.getcwd()
logger = mbrl.util.Logger(work_dir, enable_back_compatible=True)
logger.register_group(
mbrl.constants.RESULTS_LOG_NAME,
MBPO_LOG_FORMAT,
color="green",
dump_frequency=1,
)
torch_generator = torch.Generator(device=cfg.device)
if cfg.seed is not None:
torch_generator.manual_seed(cfg.seed)
dynamics_model = mbrl.util.common.create_one_dim_tr_model(cfg, obs_shape, act_shape)
# ---------------------------------------------------------
# --------------------- Training Loop ---------------------
model_trainer = mbrl.models.ModelTrainer(
dynamics_model,
optim_lr=cfg.overrides.model_lr,
weight_decay=cfg.overrides.model_wd,
logger=logger,
)
mbrl.util.common.train_model_and_save_model_and_data(
dynamics_model,
model_trainer,
cfg.overrides,
train_dataset,
work_dir=work_dir,
)
    return dynamics_model, model_trainer, train_dataset
    # NOTE: everything below this return is currently unreachable placeholder
    # scaffolding, kept for the TODOs that follow.
# ---------------------------------------------------------
# -------------- Evaluate on test dataset -----------------
pass
#TODO: implement more mature testing logic here and maybe some nice viz
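    # A hypothetical evaluation sketch (currently unreachable because of the
    # early `return` above). It assumes mbrl-lib's
    # mbrl.util.common.get_basic_buffer_iterators and ModelTrainer.evaluate,
    # plus a cfg.overrides.model_batch_size entry; verify these names against
    # the installed mbrl version before relying on them.
    _, test_iter = mbrl.util.common.get_basic_buffer_iterators(
        test_dataset, cfg.overrides.model_batch_size, val_ratio=1.0
    )
    test_score = model_trainer.evaluate(test_iter)
    print(f"Held-out evaluation score: {test_score}")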
# ---------------------------------------------------------
    # -------------- Create initial overrides dataset --------------
pass
#TODO: implement robust saving so we can use this model for more experiments
# down the line
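    # A hypothetical saving sketch (also unreachable as written). mbrl-lib
    # models and replay buffers expose `save(save_dir)`; treat these calls as
    # assumptions to check against the installed version.
    dynamics_model.save(work_dir)
    train_dataset.save(work_dir)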
|
[
"numpy.random.default_rng",
"mbrl.env.offline_data.load_dataset_and_env",
"numpy.logical_or",
"os.getcwd",
"torch.Generator"
] |
[((1098, 1134), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'cfg.seed'}), '(seed=cfg.seed)\n', (1119, 1134), True, 'import numpy as np\n'), ((2285, 2342), 'mbrl.env.offline_data.load_dataset_and_env', 'load_dataset_and_env', (['cfg.model_pretraining.train_dataset'], {}), '(cfg.model_pretraining.train_dataset)\n', (2305, 2342), False, 'from mbrl.env.offline_data import load_dataset_and_env\n'), ((2365, 2421), 'mbrl.env.offline_data.load_dataset_and_env', 'load_dataset_and_env', (['cfg.model_pretraining.test_dataset'], {}), '(cfg.model_pretraining.test_dataset)\n', (2385, 2421), False, 'from mbrl.env.offline_data import load_dataset_and_env\n'), ((2792, 2803), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2801, 2803), False, 'import os\n'), ((3045, 3079), 'torch.Generator', 'torch.Generator', ([], {'device': 'cfg.device'}), '(device=cfg.device)\n', (3060, 3079), False, 'import torch\n'), ((1705, 1761), 'numpy.logical_or', 'np.logical_or', (["dataset['terminals']", "dataset['timeouts']"], {}), "(dataset['terminals'], dataset['timeouts'])\n", (1718, 1761), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
from collections import defaultdict
import numpy as np
from scipy.spatial import distance
from tqdm import tqdm
np.set_printoptions(threshold=np.inf, suppress=True)
def main(args):
num_batches = args.num_batches
bert_data = defaultdict(list)
s_or_e_bert_data = defaultdict(list)
print('loading data...')
for para_idx in range(num_batches):
bert_filename = os.path.join(args.in_dir, 'bert_b{}.npz'.format(para_idx + 1))
bert_outputs = np.load(bert_filename)
for k, v in bert_outputs.items():
bert_data[k].append(v)
sbert_filename = os.path.join(args.in_dir, '{}_b{}.npz'.format(args.model, para_idx + 1))
sbert_outputs = np.load(sbert_filename)
for k, v in sbert_outputs.items():
s_or_e_bert_data[k].append(v)
print('stacking all examples of both bert and {}...'.format(args.model))
for k, v in s_or_e_bert_data.items():
s_or_e_bert_data[k] = np.concatenate(v) # stack along batch dim
for k, v in bert_data.items():
bert_data[k] = np.concatenate(v) # stack along batch dim
print('begin computing...')
all_para_distances = [[] for _ in range(12)]
all_q_distances = [[] for _ in range(12)]
# 500 examples paragraphs
for para_idx in tqdm(range(500)):
in_ids = bert_data['input_ids'][para_idx]
seg_ids = bert_data['segment_ids'][para_idx]
feature_ids = bert_data['feature_id'][para_idx]
q_ids = s_or_e_bert_data["question_ids"][para_idx]
c_ids = s_or_e_bert_data["context_ids"][para_idx]
q_length = np.sum(q_ids.astype(np.bool))
c_length = np.sum(c_ids.astype(np.bool))
sequence_length = np.sum(in_ids.astype(np.bool))
second_length = np.sum(seg_ids.astype(np.bool))
first_length = sequence_length - second_length
if not (c_length == second_length):
print('shifted paragraphs:', feature_ids, c_length, second_length)
continue
if not (q_length == first_length):
print('shifted questions:', feature_ids, q_length, first_length)
continue
for l in range(12):
b_layer_vectors = bert_data['layer{}'.format(l)][para_idx]
s_layer_vectors = s_or_e_bert_data['layer{}'.format(l)][para_idx]
            # b_pvs / s_pvs: this layer's paragraph token vectors from BERT
            # and from the comparison model
b_pvs = b_layer_vectors[first_length:second_length]
s_pvs = s_layer_vectors[len(q_ids):len(q_ids) + c_length]
            # mean cosine distance between the paired BERT and comparison-model
            # paragraph token vectors for this layer
p_dist = np.mean([distance.cosine(b_p, s_p) for b_p, s_p in zip(b_pvs, s_pvs)])
all_para_distances[l].append(p_dist)
            # b_qvs / s_qvs: this layer's question token vectors from BERT
            # and from the comparison model
b_qvs = b_layer_vectors[:first_length]
s_qvs = s_layer_vectors[:q_length]
q_dist = np.mean([distance.cosine(b_q, s_q) for b_q, s_q in zip(b_qvs, s_qvs)])
all_q_distances[l].append(q_dist)
    # all_para_distances holds 12 lists (one per layer), each containing the
    # mean BERT-vs-comparison-model distance for every example processed above.
    all_para_mean_distances = [np.mean(v) for v in all_para_distances]
    all_q_mean_distances = [np.mean(v) for v in all_q_distances]
    print(all_para_mean_distances)
    print(all_q_mean_distances)
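

# Optional visualization helper, not part of the original script: plots the
# per-layer mean cosine distances computed in main(), e.g.
# plot_layer_distances(all_para_mean_distances, all_q_mean_distances).
def plot_layer_distances(para_means, q_means, out_file='layer_distances.png'):
    import matplotlib.pyplot as plt
    layers = list(range(1, len(para_means) + 1))
    plt.plot(layers, para_means, marker='o', label='paragraph tokens')
    plt.plot(layers, q_means, marker='s', label='question tokens')
    plt.xlabel('layer')
    plt.ylabel('mean cosine distance to BERT')
    plt.legend()
    plt.savefig(out_file)
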
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('in_dir', type=str, default=None)
parser.add_argument('-n', '--num_batches', type=int, default=20)
parser.add_argument('-m', '--model', type=str, default='sbert', choices=('ebert', 'sbert'),
help='choose which model compare distance')
main(parser.parse_args())
|
[
"numpy.mean",
"scipy.spatial.distance.cosine",
"argparse.ArgumentParser",
"collections.defaultdict",
"numpy.concatenate",
"numpy.load",
"numpy.set_printoptions"
] |
[((188, 240), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf', 'suppress': '(True)'}), '(threshold=np.inf, suppress=True)\n', (207, 240), True, 'import numpy as np\n'), ((310, 327), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (321, 327), False, 'from collections import defaultdict\n'), ((352, 369), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (363, 369), False, 'from collections import defaultdict\n'), ((3408, 3433), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3431, 3433), False, 'import argparse\n'), ((549, 571), 'numpy.load', 'np.load', (['bert_filename'], {}), '(bert_filename)\n', (556, 571), True, 'import numpy as np\n'), ((772, 795), 'numpy.load', 'np.load', (['sbert_filename'], {}), '(sbert_filename)\n', (779, 795), True, 'import numpy as np\n'), ((1031, 1048), 'numpy.concatenate', 'np.concatenate', (['v'], {}), '(v)\n', (1045, 1048), True, 'import numpy as np\n'), ((1133, 1150), 'numpy.concatenate', 'np.concatenate', (['v'], {}), '(v)\n', (1147, 1150), True, 'import numpy as np\n'), ((3194, 3204), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (3201, 3204), True, 'import numpy as np\n'), ((3262, 3272), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (3269, 3272), True, 'import numpy as np\n'), ((2690, 2715), 'scipy.spatial.distance.cosine', 'distance.cosine', (['b_p', 's_p'], {}), '(b_p, s_p)\n', (2705, 2715), False, 'from scipy.spatial import distance\n'), ((2993, 3018), 'scipy.spatial.distance.cosine', 'distance.cosine', (['b_q', 's_q'], {}), '(b_q, s_q)\n', (3008, 3018), False, 'from scipy.spatial import distance\n')]
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions to calculate metrics on forecasts."""
import numpy as np
def check_shape_wrapper(func):
"""Wrapper that checks the shapes of predictions and ground truth."""
def wrapped_func(predictions, ground_truth):
assert predictions.shape == ground_truth.shape, (
f"Predictions array has shape {predictions.shape}, ground truth has "
f"shape {ground_truth.shape}")
assert predictions.ndim == 3, (
"Metrics calculation expects rank 3 predictions and ground truth.")
assert predictions.shape[-1] == 1, (
"Metrics calculation expects a single target")
return func(predictions, ground_truth)
wrapped_func.__name__ = func.__name__
return wrapped_func
@check_shape_wrapper
def rmse(predictions: np.ndarray, ground_truth: np.ndarray) -> float:
"""Gets the RMSE averaged over time and sites for the given predictions."""
squared_error = (predictions - ground_truth) ** 2
return np.sqrt(np.mean(squared_error))
@check_shape_wrapper
def mae(predictions: np.ndarray, ground_truth: np.ndarray) -> float:
"""Gets MAE averaged over time and sites for the given predictions."""
return np.mean(np.abs(predictions - ground_truth))
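

# A minimal usage sketch (not part of the original module). The metrics expect
# rank-3 arrays with a single trailing target channel, e.g. [sites, times, 1].
if __name__ == "__main__":
  rng = np.random.default_rng(0)
  preds = rng.normal(size=(4, 7, 1))
  truth = rng.normal(size=(4, 7, 1))
  print("RMSE:", rmse(preds, truth))
  print("MAE:", mae(preds, truth))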
|
[
"numpy.mean",
"numpy.abs"
] |
[((1677, 1699), 'numpy.mean', 'np.mean', (['squared_error'], {}), '(squared_error)\n', (1684, 1699), True, 'import numpy as np\n'), ((1883, 1917), 'numpy.abs', 'np.abs', (['(predictions - ground_truth)'], {}), '(predictions - ground_truth)\n', (1889, 1917), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import argparse
from torch.utils.data import Dataset
import sys
def net_block(n_in, n_out):
    """Linear -> BatchNorm1d -> ReLU building block of the net."""
block = nn.Sequential(nn.Linear(n_in, n_out),
nn.BatchNorm1d(n_out),
nn.ReLU())
return block
class Model(nn.Module):
def __init__(self, n_input, n_hidden, num_class, opt, toplevel=False):
super(Model, self).__init__()
self.opt = opt
self.toplevel = toplevel
self.block1 = net_block(n_input, n_hidden)
self.dropout = nn.Dropout(p=0.1)
if (opt.glove or opt.sift or opt.prefix10m):
#if include skip connection:
#self.block_mid = net_block(n_hidden + n_input, n_hidden)
self.block_mid = net_block(n_hidden, n_hidden)
if toplevel:
self.block2 = net_block(n_hidden, n_hidden)
self.fc1 = nn.Linear(n_hidden, num_class)
        self.softmax = nn.Softmax(dim=-1)  # kept for optional inference-time use
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0.01)
def forward(self, x):
y = self.block1(x)
#y = self.dropout(x1)
if self.opt.glove or self.opt.sift or self.opt.prefix10m:
#if include skip connection:
#y = self.block_mid(torch.cat([x, y], dim=1))
y = self.block_mid(y)
if self.toplevel:
y = self.block2(y)
y = self.dropout(y)
out = self.fc1(y)
        # Return raw logits: nn.CrossEntropyLoss applies log-softmax itself,
        # so applying self.softmax here would shrink the training gradients.
return out
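
# Shape summary for Model.forward (descriptive note, inferred from the code):
#   x: (batch, n_input) -> block1 -> (batch, n_hidden); block_mid/block2 keep
#   (batch, n_hidden); fc1 -> (batch, num_class) raw logits.
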
def get_dataset(data, shuffle, param_batch_size):
X, y = data
dset = torch.utils.data.TensorDataset(X.float(), y)
loader = torch.utils.data.DataLoader(dataset=dset, batch_size=param_batch_size,
shuffle=shuffle)
return loader
def write_results(result, output):
result = (-result.detach().numpy()).argsort(axis=1)
for i in range(result.shape[0]):
output.write(" ".join([str(x) for x in result[i]]) + "\n")
def run(param_feat, param_lr, param_batch_size):
print("RUNNING WITH: features="+str(param_feat)+"; lr="+str(param_lr)+"; batch_size="+str(param_batch_size))
input_dim = 100
# read data
X, y = torch.load('./data/parts64/data.path')
import numpy as np
dataset = np.load('./data/parts64/dataset.npy')
queries = np.load('./data/parts64/queries.npy')
n_data = X.size(0)
split = int(n_data * 0.95)
trainloader = get_dataset((X[:split], y[:split]), shuffle=True, param_batch_size=param_batch_size)
valloader = get_dataset((X[split:], y[split:]), shuffle=False, param_batch_size=param_batch_size)
# build model
m = Model
    # The constructor expects (n_input, n_hidden, num_class, opt); the original
    # call used mismatched keyword names (input_dim/feat_dim/args) and would
    # crash. The flag defaults below are illustrative assumptions.
    opt = argparse.Namespace(glove=False, sift=False, prefix10m=False)
    model = m(n_input=input_dim, n_hidden=param_feat, num_class=64, opt=opt).cuda()
# criterion
crit = nn.CrossEntropyLoss().cuda()
# optimizer
# optimizer = torch.optim.RMSprop(model.parameters(), args.lr)
lr = param_lr
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=10**(-4))
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 30, 35, 38, 39], gamma=0.1)
# start training!
losses = []
iterations = 40
for ep in range(1, iterations + 1):
print("==="+str(ep)+"===")
loss_sum = 0.
train_acc_tot = 0
train_n_tot = 0
for i, (X, y) in enumerate(trainloader):
y_pred = model(X.cuda())
loss = crit(y_pred, y.cuda())
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_sum += loss.item()
train_acc_tot += (y_pred.argmax(dim=1).cpu() == y).sum().item()
train_n_tot += X.size(0)
print("loss:", loss_sum)
print("train acc:", train_acc_tot*1. / train_n_tot)
        losses.append(loss_sum / len(trainloader))
        # Step the LR scheduler once per epoch, after the optimizer updates
        # (stepping it before any optimizer.step() is deprecated in PyTorch).
        scheduler.step()
acc_tot = 0
n_tot = 0.
for i, (X, y) in enumerate(valloader):
y_pred = model(X.cuda())
acc_tot += (y_pred.argmax(dim=1).cpu() == y).sum().item()
n_tot += X.size(0)
print("val acc:", acc_tot / n_tot)
print("Doing inference and writing result files...")
# inference on data
batch_size = 10000
param_str = "_".join(sys.argv[1:])
with open("./data/parts64/data_prediction"+param_str+".txt","w") as output:
for b in range(0, n_data, batch_size):
data_batch_results = model(torch.from_numpy(dataset[b:b+batch_size]).float().cuda()).cpu()
write_results(data_batch_results, output)
# inference on queries
query_results = model(torch.from_numpy(queries).float().cuda()).cpu()
with open("./data/parts64/queries_prediction"+param_str+".txt","w") as output:
write_results(query_results, output)
if __name__ == "__main__":
run(int(sys.argv[1]), float(sys.argv[2]), int(sys.argv[3]))
|
[
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.Softmax",
"torch.nn.CrossEntropyLoss",
"torch.nn.init.constant_",
"torch.load",
"torch.from_numpy",
"torch.nn.init.xavier_normal_",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"numpy.load"
] |
[((1855, 1946), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dset', 'batch_size': 'param_batch_size', 'shuffle': 'shuffle'}), '(dataset=dset, batch_size=param_batch_size,\n shuffle=shuffle)\n', (1882, 1946), False, 'import torch\n'), ((2418, 2456), 'torch.load', 'torch.load', (['"""./data/parts64/data.path"""'], {}), "('./data/parts64/data.path')\n", (2428, 2456), False, 'import torch\n'), ((2494, 2531), 'numpy.load', 'np.load', (['"""./data/parts64/dataset.npy"""'], {}), "('./data/parts64/dataset.npy')\n", (2501, 2531), True, 'import numpy as np\n'), ((2546, 2583), 'numpy.load', 'np.load', (['"""./data/parts64/queries.npy"""'], {}), "('./data/parts64/queries.npy')\n", (2553, 2583), True, 'import numpy as np\n'), ((3222, 3317), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': '[20, 30, 35, 38, 39]', 'gamma': '(0.1)'}), '(optimizer, milestones=[20, 30, 35, 38,\n 39], gamma=0.1)\n', (3258, 3317), False, 'import torch\n'), ((181, 203), 'torch.nn.Linear', 'nn.Linear', (['n_in', 'n_out'], {}), '(n_in, n_out)\n', (190, 203), True, 'import torch.nn as nn\n'), ((231, 252), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_out'], {}), '(n_out)\n', (245, 252), True, 'import torch.nn as nn\n'), ((280, 289), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (287, 289), True, 'import torch.nn as nn\n'), ((589, 606), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.1)'}), '(p=0.1)\n', (599, 606), True, 'import torch.nn as nn\n'), ((968, 998), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', 'num_class'], {}), '(n_hidden, num_class)\n', (977, 998), True, 'import torch.nn as nn\n'), ((1023, 1041), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (1033, 1041), True, 'import torch.nn as nn\n'), ((2992, 3013), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3011, 3013), True, 'import torch.nn as nn\n'), ((1141, 1173), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['m.weight'], {}), '(m.weight)\n', (1163, 1173), True, 'import torch.nn as nn\n'), ((1190, 1221), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0.01)'], {}), '(m.bias, 0.01)\n', (1207, 1221), True, 'import torch.nn as nn\n'), ((4809, 4834), 'torch.from_numpy', 'torch.from_numpy', (['queries'], {}), '(queries)\n', (4825, 4834), False, 'import torch\n'), ((4637, 4680), 'torch.from_numpy', 'torch.from_numpy', (['dataset[b:b + batch_size]'], {}), '(dataset[b:b + batch_size])\n', (4653, 4680), False, 'import torch\n')]
|
import numpy as np
import cv2
import glob
import PIL.ExifTags
import PIL.Image
from tqdm import tqdm
import os
import matplotlib.pyplot as plt
from pyntcloud import PyntCloud
import open3d as o3d
def create_output(vertices, colors, filename):
colors = colors.reshape(-1,3)
vertices = np.hstack([vertices.reshape(-1,3),colors])
ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
with open(filename, 'w') as f:
f.write(ply_header %dict(vert_num=len(vertices)))
np.savetxt(f,vertices,'%f %f %f %d %d %d')
def generateDisparityMap(left,right,win_size,min_disp,max_disp,blockSize,uniquenessRatio,speckleSize,speckleRange,f):
f = f/100.0
ret = np.load(os.path.join("camera_params","ret.npy"))
K = np.load(os.path.join("camera_params","K.npy"))
dist = np.load(os.path.join("camera_params","dist.npy"))
img_1 = cv2.imread(left)
img_2 = cv2.imread(right)
img_1 = cv2.resize(img_1,(int(img_1.shape[1]/4),int(img_1.shape[0]/4)))
img_2 = cv2.resize(img_2,(int(img_2.shape[1]/4),int(img_2.shape[0]/4)))
h,w = img_2.shape[:2]
new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K,dist,(w,h),1,(w,h))
    # Use the optimal new camera matrix computed above; it was previously
    # computed but never passed to cv2.undistort.
    img_1_undistorted = cv2.undistort(img_1, K, dist, None, new_camera_matrix)
    img_2_undistorted = cv2.undistort(img_2, K, dist, None, new_camera_matrix)
num_disp = max_disp - min_disp
stereo = cv2.StereoSGBM_create(minDisparity= min_disp,
numDisparities = num_disp,
blockSize = blockSize,
uniquenessRatio = uniquenessRatio,
speckleWindowSize = speckleSize,
speckleRange = speckleRange,
                                   P1 = 8*3*win_size**2,
                                   P2 = 32*3*win_size**2)
#Compute disparity map
print ("\nComputing the disparity map...")
disparity_map = stereo.compute(img_1_undistorted, img_2_undistorted)
plt.imsave('disparity_map.jpg',disparity_map)
#Generate point cloud.
print ("\nGenerating the 3D map...")
focal_length = np.load(os.path.join("camera_params","FocalLength.npy"), allow_pickle=True)
Q2 = np.float32([[1,0,0,0],
[0,-1,0,0],
[0,0,focal_length*f,0], #Focal length multiplication obtained experimentally.
[0,0,0,1]])
#Reproject points into 3D
points_3D = cv2.reprojectImageTo3D(disparity_map, Q2)
#Get color points
colors = cv2.cvtColor(img_1_undistorted, cv2.COLOR_BGR2RGB)
#Get rid of points with value 0 (i.e no depth)
mask_map = disparity_map > disparity_map.min()
#Mask colors and points.
output_points = points_3D[mask_map]
output_colors = colors[mask_map]
#Define name for output file
output_file = 'reconstructed.ply'
#Generate point cloud
print ("\n Creating the output file... \n")
create_output(output_points, output_colors, output_file)
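

# Illustrative usage sketch: the file names and SGBM parameter values below are
# placeholders, not from the original script, and the camera_params/*.npy
# calibration files must already exist. Note num_disp = max_disp - min_disp
# should stay divisible by 16 for StereoSGBM.
if __name__ == '__main__':
    generateDisparityMap(left='left.jpg', right='right.jpg',
                         win_size=5, min_disp=0, max_disp=64, blockSize=5,
                         uniquenessRatio=10, speckleSize=100, speckleRange=32,
                         f=100)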
|
[
"matplotlib.pyplot.imsave",
"cv2.undistort",
"cv2.reprojectImageTo3D",
"os.path.join",
"cv2.getOptimalNewCameraMatrix",
"cv2.StereoSGBM_create",
"cv2.cvtColor",
"numpy.savetxt",
"cv2.imread",
"numpy.float32"
] |
[((997, 1013), 'cv2.imread', 'cv2.imread', (['left'], {}), '(left)\n', (1007, 1013), False, 'import cv2\n'), ((1026, 1043), 'cv2.imread', 'cv2.imread', (['right'], {}), '(right)\n', (1036, 1043), False, 'import cv2\n'), ((1253, 1310), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['K', 'dist', '(w, h)', '(1)', '(w, h)'], {}), '(K, dist, (w, h), 1, (w, h))\n', (1282, 1310), False, 'import cv2\n'), ((1330, 1368), 'cv2.undistort', 'cv2.undistort', (['img_1', 'K', 'dist', 'None', 'K'], {}), '(img_1, K, dist, None, K)\n', (1343, 1368), False, 'import cv2\n'), ((1393, 1431), 'cv2.undistort', 'cv2.undistort', (['img_2', 'K', 'dist', 'None', 'K'], {}), '(img_2, K, dist, None, K)\n', (1406, 1431), False, 'import cv2\n'), ((1482, 1730), 'cv2.StereoSGBM_create', 'cv2.StereoSGBM_create', ([], {'minDisparity': 'min_disp', 'numDisparities': 'num_disp', 'blockSize': 'blockSize', 'uniquenessRatio': 'uniquenessRatio', 'speckleWindowSize': 'speckleSize', 'speckleRange': 'speckleRange', 'P1': '(8 * 3 * win_size ** 2)', 'P2': '(32 * 3 * win_size ** 2)'}), '(minDisparity=min_disp, numDisparities=num_disp,\n blockSize=blockSize, uniquenessRatio=uniquenessRatio, speckleWindowSize\n =speckleSize, speckleRange=speckleRange, P1=8 * 3 * win_size ** 2, P2=\n 32 * 3 * win_size ** 2)\n', (1503, 1730), False, 'import cv2\n'), ((1935, 1981), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""disparity_map.jpg"""', 'disparity_map'], {}), "('disparity_map.jpg', disparity_map)\n", (1945, 1981), True, 'import matplotlib.pyplot as plt\n'), ((2156, 2244), 'numpy.float32', 'np.float32', (['[[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, focal_length * f, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, focal_length * f, 0], [0, 0,\n 0, 1]])\n', (2166, 2244), True, 'import numpy as np\n'), ((2352, 2393), 'cv2.reprojectImageTo3D', 'cv2.reprojectImageTo3D', (['disparity_map', 'Q2'], {}), '(disparity_map, Q2)\n', (2374, 2393), False, 'import cv2\n'), ((2429, 2479), 'cv2.cvtColor', 'cv2.cvtColor', (['img_1_undistorted', 'cv2.COLOR_BGR2RGB'], {}), '(img_1_undistorted, cv2.COLOR_BGR2RGB)\n', (2441, 2479), False, 'import cv2\n'), ((629, 673), 'numpy.savetxt', 'np.savetxt', (['f', 'vertices', '"""%f %f %f %d %d %d"""'], {}), "(f, vertices, '%f %f %f %d %d %d')\n", (639, 673), True, 'import numpy as np\n'), ((827, 867), 'os.path.join', 'os.path.join', (['"""camera_params"""', '"""ret.npy"""'], {}), "('camera_params', 'ret.npy')\n", (839, 867), False, 'import os\n'), ((884, 922), 'os.path.join', 'os.path.join', (['"""camera_params"""', '"""K.npy"""'], {}), "('camera_params', 'K.npy')\n", (896, 922), False, 'import os\n'), ((942, 983), 'os.path.join', 'os.path.join', (['"""camera_params"""', '"""dist.npy"""'], {}), "('camera_params', 'dist.npy')\n", (954, 983), False, 'import os\n'), ((2079, 2127), 'os.path.join', 'os.path.join', (['"""camera_params"""', '"""FocalLength.npy"""'], {}), "('camera_params', 'FocalLength.npy')\n", (2091, 2127), False, 'import os\n')]
|
# Copyright (c) 2016 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ^
# / \
# |
# |
#
# License included because this module is a heavily modified version based on
# Paulo's implementation of dynamic t-SNE.
# (https://github.com/paulorauber/thesne)
import math
import numpy as np
import theano
import theano.tensor as T
from sklearn.utils import check_random_state
from scipy.spatial.distance import pdist
from modules.layout_io import save_drawing
epsilon = 1e-16
floath = np.float32
class SigmaTooLowException(Exception):
pass
class NaNException(Exception):
pass
# Squared Euclidean distance between all pairs of row-vectors
def sqeuclidean_var(X):
N = X.shape[0]
ss = (X ** 2).sum(axis=1)
return ss.reshape((N, 1)) + ss.reshape((1, N)) - 2 * X.dot(X.T)
# Euclidean distance between all pairs of row-vectors
def euclidean_var(X):
return T.maximum(sqeuclidean_var(X), epsilon) ** 0.5
# Conditional probabilities of picking (ordered) pairs in high-dim space.
def p_ij_conditional_var(X, sigma):
N = X.shape[0]
sqdistance = X**2
esqdistance = T.exp(-sqdistance / ((2 * (sigma**2)).reshape((N, 1))))
esqdistance_zd = T.fill_diagonal(esqdistance, 0)
row_sum = T.sum(esqdistance_zd, axis=1).reshape((N, 1))
    return esqdistance_zd / row_sum  # divides by zero if a row of weights sums to 0
# Symmetrized probabilities of picking pairs in high-dim space.
def p_ij_sym_var(p_ij_conditional):
return (p_ij_conditional + p_ij_conditional.T) / (2 * p_ij_conditional.shape[0])
# Probabilities of picking pairs in low-dim space (using Student
# t-distribution).
def q_ij_student_t_var(Y):
sqdistance = sqeuclidean_var(Y)
one_over = T.fill_diagonal(1 / (sqdistance + 1), 0)
return one_over / one_over.sum()
# Probabilities of picking pairs in low-dim space (using Gaussian).
def q_ij_gaussian_var(Y):
sqdistance = sqeuclidean_var(Y)
gauss = T.fill_diagonal(T.exp(-sqdistance), 0)
return gauss / gauss.sum()
# Per point cost function
def cost_var(X, Y, sigma, Adj, l_kl, l_e, l_c, l_r, r_eps):
N = X.shape[0]
num_edges = 0.5 * T.sum(Adj)
# Used to normalize s.t. the l_*'s sum up to one.
l_sum = l_kl + l_e + l_c + l_r
p_ij_conditional = p_ij_conditional_var(X, sigma)
p_ij = p_ij_sym_var(p_ij_conditional)
q_ij = q_ij_student_t_var(Y)
p_ij_safe = T.maximum(p_ij, epsilon)
q_ij_safe = T.maximum(q_ij, epsilon)
# Kullback-Leibler term
kl = T.sum(p_ij * T.log(p_ij_safe / q_ij_safe), axis=1)
# Edge contraction term
edge_contraction = (1 / (2 * num_edges)) * T.sum(Adj * sqeuclidean_var(Y), axis=1)
# Compression term
compression = (1 / (2 * N)) * T.sum(Y**2, axis=1)
# Repulsion term
# repulsion = (1 / (2 * N**2)) * T.sum(T.fill_diagonal(1 / (euclidean_var(Y) + r_eps), 0), axis=1)
repulsion = -(1 / (2 * N**2)) * T.sum(T.fill_diagonal(T.log(euclidean_var(Y) + r_eps), 0), axis=1)
cost = (l_kl / l_sum) * kl + (l_e / l_sum) * edge_contraction + (l_c / l_sum) * compression + (l_r / l_sum) * repulsion
return cost
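
# Descriptive summary of the per-point cost above:
#   cost_i = (l_kl*KL_i + l_e*E_i + l_c*C_i + l_r*R_i) / (l_kl + l_e + l_c + l_r)
# where KL_i is the Kullback-Leibler term, E_i contracts edges, C_i compresses
# points toward the origin, and R_i is a log repulsion softened by r_eps.
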
# Binary search on sigma for a given perplexity
def find_sigma(X_shared, sigma_shared, N, perplexity, sigma_iters, verbose=0):
X = T.fmatrix('X')
sigma = T.fvector('sigma')
target = np.log(perplexity)
P = T.maximum(p_ij_conditional_var(X, sigma), epsilon)
entropy = -T.sum(P * T.log(P), axis=1)
# Setting update for binary search interval
sigmin_shared = theano.shared(np.full(N, np.sqrt(epsilon), dtype=floath))
sigmax_shared = theano.shared(np.full(N, np.inf, dtype=floath))
sigmin = T.fvector('sigmin')
sigmax = T.fvector('sigmax')
upmin = T.switch(T.lt(entropy, target), sigma, sigmin)
upmax = T.switch(T.gt(entropy, target), sigma, sigmax)
givens = {X: X_shared, sigma: sigma_shared, sigmin: sigmin_shared,
sigmax: sigmax_shared}
updates = [(sigmin_shared, upmin), (sigmax_shared, upmax)]
update_intervals = theano.function([], entropy, givens=givens, updates=updates)
# Setting update for sigma according to search interval
upsigma = T.switch(T.isinf(sigmax), sigma * 2, (sigmin + sigmax) / 2.)
givens = {sigma: sigma_shared, sigmin: sigmin_shared,
sigmax: sigmax_shared}
updates = [(sigma_shared, upsigma)]
update_sigma = theano.function([], sigma, givens=givens, updates=updates)
for i in range(sigma_iters):
e = update_intervals()
update_sigma()
if verbose:
print('[find_sigma] Iteration {0}: Perplexities in [{1:.4f}, {2:.4f}].'.format(i + 1, np.exp(e.min()), np.exp(e.max())), end='\r')
if verbose:
print('\n[find_sigma] Done! Perplexities in [{0:.4f}, {1:.4f}].'.format(np.exp(e.min()), np.exp(e.max())))
if np.any(np.isnan(np.exp(e))):
raise SigmaTooLowException('Invalid sigmas. The perplexity is probably too low.')
# Receives vectors in Y, and moves co-located vertices in opposite directions,
# to assist in the repulsion of vertices.
def switch_shake(Y, magnitude=1e-5):
N = Y.shape[0]
# Auxiliary functions for translating from square to condensed indexing
# of the distance matrix.
def calc_row_idx(k, n):
return int(math.ceil((1 / 2.) * (- (-8 * k + 4 * n**2 - 4 * n - 7)**0.5 + 2 * n - 1) - 1))
def elem_in_i_rows(i, n):
return i * (n - 1 - i) + (i * (i + 1)) / 2
def calc_col_idx(k, i, n):
return int(n - elem_in_i_rows(i + 1, n) + k)
def condensed_to_square(k, n):
i = calc_row_idx(k, n)
j = calc_col_idx(k, i, n)
return i, j
euclid_dist = pdist(Y)
max_dist = euclid_dist.max()
for idx in np.where(euclid_dist <= np.finfo(np.float32).eps)[0]:
(i, j) = condensed_to_square(idx, N)
nudge = np.random.normal(0, max_dist * magnitude, 2)
# v_i and v_j are co-located. Move v_i in a direction, and move v_j in
# the opposite direction.
Y[i, :] += nudge
Y[j, :] -= nudge
return Y
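
# Worked example of the condensed indexing used above: for n = 5, pdist orders
# the pairs (0,1), (0,2), (0,3), (0,4), (1,2), ...; condensed index k = 4 maps
# back to the square indices (1, 2), matching condensed_to_square(4, 5).
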
# Perform momentum-based gradient descent on the cost function with the given
# parameters. Return the vertex coordinates and per-vertex cost.
def find_Y(X_shared, Y_shared, sigma_shared, N, output_dims, n_epochs,
initial_lr, final_lr, lr_switch, init_stdev, initial_momentum,
final_momentum, momentum_switch,
initial_l_kl, final_l_kl, l_kl_switch,
initial_l_e, final_l_e, l_e_switch,
initial_l_c, final_l_c, l_c_switch,
initial_l_r, final_l_r, l_r_switch,
r_eps,
Adj_shared, g=None, save_every=None, output_folder=None, verbose=0):
# Optimization hyperparameters
initial_lr = np.array(initial_lr, dtype=floath)
final_lr = np.array(final_lr, dtype=floath)
initial_momentum = np.array(initial_momentum, dtype=floath)
final_momentum = np.array(final_momentum, dtype=floath)
# Hyperparameters used within Theano
lr = T.fscalar('lr')
lr_shared = theano.shared(initial_lr)
momentum = T.fscalar('momentum')
momentum_shared = theano.shared(initial_momentum)
# Cost parameters
initial_l_kl = np.array(initial_l_kl, dtype=floath)
final_l_kl = np.array(final_l_kl, dtype=floath)
initial_l_e = np.array(initial_l_e, dtype=floath)
final_l_e = np.array(final_l_e, dtype=floath)
initial_l_c = np.array(initial_l_c, dtype=floath)
final_l_c = np.array(final_l_c, dtype=floath)
initial_l_r = np.array(initial_l_r, dtype=floath)
final_l_r = np.array(final_l_r, dtype=floath)
# Cost parameters used within Theano
l_kl = T.fscalar('l_kl')
l_kl_shared = theano.shared(initial_l_kl)
l_e = T.fscalar('l_e')
l_e_shared = theano.shared(initial_l_e)
l_c = T.fscalar('l_c')
l_c_shared = theano.shared(initial_l_c)
l_r = T.fscalar('l_r')
l_r_shared = theano.shared(initial_l_r)
    # High-dimensional observations: X is treated as a matrix of pairwise
    # vertex distances (p_ij_conditional_var squares its entries directly).
X = T.fmatrix('X')
# 2D projection (coordinates of vertices)
Y = T.fmatrix('Y')
# Adjacency matrix
Adj = T.fmatrix('Adj')
# Standard deviations used for Gaussians to attain perplexity
sigma = T.fvector('sigma')
# Y velocities (for momentum-based descent)
Yv = T.fmatrix('Yv')
Yv_shared = theano.shared(np.zeros((N, output_dims), dtype=floath))
# Function for retrieving cost for all individual data points
costs = cost_var(X, Y, sigma, Adj, l_kl, l_e, l_c, l_r, r_eps)
# Sum of all costs (scalar)
cost = T.sum(costs)
# Gradient of the cost w.r.t. Y
grad_Y = T.grad(cost, Y)
# Update step for velocity
update_Yv = theano.function(
[], None,
givens={
X: X_shared,
sigma: sigma_shared,
Y: Y_shared,
Yv: Yv_shared,
Adj: Adj_shared,
lr: lr_shared,
momentum: momentum_shared,
l_kl: l_kl_shared,
l_e: l_e_shared,
l_c: l_c_shared,
l_r: l_r_shared
},
updates=[
(Yv_shared, momentum * Yv - lr * grad_Y)
]
)
# Gradient descent step
update_Y = theano.function(
[], [],
givens={
Y: Y_shared, Yv: Yv_shared
},
updates=[
(Y_shared, Y + Yv)
]
)
# Build function to retrieve cost
get_cost = theano.function(
[], cost,
givens={
X: X_shared,
sigma: sigma_shared,
Y: Y_shared,
Adj: Adj_shared,
l_kl: l_kl_shared,
l_e: l_e_shared,
l_c: l_c_shared,
l_r: l_r_shared
}
)
# Build function to retrieve per-vertex cost
get_costs = theano.function(
[], costs,
givens={
X: X_shared,
sigma: sigma_shared,
Y: Y_shared,
Adj: Adj_shared,
l_kl: l_kl_shared,
l_e: l_e_shared,
l_c: l_c_shared,
l_r: l_r_shared
}
)
# Optimization loop
for epoch in range(n_epochs):
# Switch parameter if a switching point is reached.
if epoch == lr_switch:
lr_shared.set_value(final_lr)
if epoch == momentum_switch:
momentum_shared.set_value(final_momentum)
if epoch == l_kl_switch:
l_kl_shared.set_value(final_l_kl)
if epoch == l_e_switch:
l_e_shared.set_value(final_l_e)
if epoch == l_c_switch:
l_c_shared.set_value(final_l_c)
if epoch == l_r_switch:
l_r_shared.set_value(final_l_r)
if final_l_r != 0:
# Give a nudge to co-located vertices in the epoch before the
# repulsion kicks in (otherwise they don't feel any).
Y_shared.set_value(switch_shake(Y_shared.get_value()))
# Do update step for velocity
update_Yv()
# Do a gradient descent step
update_Y()
c = get_cost()
if np.isnan(float(c)):
raise NaNException('Encountered NaN for cost.')
if verbose:
print('[tsne] Epoch: {0}. Cost: {1:.6f}.'.format(epoch + 1, float(c)), end='\r')
if output_folder is not None and g is not None and save_every is not None and epoch % save_every == 0:
# Get per-vertex cost for colour-coding
cs = get_costs()
# Save a snapshot
save_drawing(output_folder, g, Y_shared.get_value().T, 'tsne_snap_' + str(epoch).zfill(5), formats=['jpg'], verbose=False, edge_colors="rgb", draw_vertices=False, opacity=0.3)
# Get per-vertex cost
cs = get_costs()
if verbose:
print('\n[tsne] Done! ')
return np.array(Y_shared.get_value()), cs
def tsne(X, perplexity=30, Y=None, output_dims=2, n_epochs=1000,
initial_lr=10, final_lr=4, lr_switch=None, init_stdev=1e-4,
sigma_iters=50, initial_momentum=0.5, final_momentum=0.8,
momentum_switch=250,
initial_l_kl=None, final_l_kl=None, l_kl_switch=None,
initial_l_e=None, final_l_e=None, l_e_switch=None,
initial_l_c=None, final_l_c=None, l_c_switch=None,
initial_l_r=None, final_l_r=None, l_r_switch=None,
r_eps=1, random_state=None, Adj=None, g=None,
save_every=None, snaps_output_folder=None, verbose=1):
random_state = check_random_state(random_state)
N = X.shape[0]
X_shared = theano.shared(np.asarray(X, dtype=floath))
sigma_shared = theano.shared(np.ones(N, dtype=floath))
if Y is None:
Y = random_state.normal(0, init_stdev, size=(N, output_dims))
Y_shared = theano.shared(np.asarray(Y, dtype=floath))
# Find sigmas to attain the given perplexity.
find_sigma(X_shared, sigma_shared, N, perplexity, sigma_iters, verbose)
# Do the optimization to find Y (the vertex coordinates).
Y, costs = find_Y(X_shared, Y_shared, sigma_shared, N, output_dims, n_epochs,
initial_lr, final_lr, lr_switch, init_stdev, initial_momentum,
final_momentum, momentum_switch,
initial_l_kl, final_l_kl, l_kl_switch,
initial_l_e, final_l_e, l_e_switch,
initial_l_c, final_l_c, l_c_switch,
initial_l_r, final_l_r, l_r_switch,
r_eps,
Adj, g, save_every,
snaps_output_folder, verbose)
# Return the vertex coordinates and the per-vertex costs.
return Y, costs
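

# Hypothetical usage sketch, not part of the original module; all values are
# illustrative. The l_* cost weights must be passed explicitly (their None
# defaults are not valid), and X is a matrix of pairwise vertex distances. If
# your Theano version rejects plain arrays in `givens`, wrap Adj via
# theano.shared before calling.
def _example_run():
    from scipy.spatial.distance import squareform
    rng = np.random.RandomState(0)
    n = 20
    pts = rng.rand(n, 5)
    X = squareform(pdist(pts)).astype(floath)   # pairwise distances
    Adj = (rng.rand(n, n) < 0.2).astype(floath)      # random adjacency
    Adj = np.maximum(Adj, Adj.T)
    np.fill_diagonal(Adj, 0)
    return tsne(X, perplexity=5, n_epochs=200, Adj=Adj,
                initial_l_kl=1.0, final_l_kl=1.0, l_kl_switch=100,
                initial_l_e=0.1, final_l_e=0.1, l_e_switch=100,
                initial_l_c=0.01, final_l_c=0.01, l_c_switch=100,
                initial_l_r=0.0, final_l_r=0.0, l_r_switch=100,
                verbose=0)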
|
[
"theano.tensor.exp",
"theano.tensor.gt",
"numpy.sqrt",
"numpy.log",
"numpy.array",
"theano.shared",
"theano.function",
"numpy.asarray",
"theano.tensor.fvector",
"numpy.exp",
"theano.tensor.fill_diagonal",
"numpy.random.normal",
"sklearn.utils.check_random_state",
"theano.tensor.maximum",
"numpy.ones",
"theano.tensor.sum",
"scipy.spatial.distance.pdist",
"theano.tensor.fscalar",
"theano.tensor.fmatrix",
"numpy.finfo",
"theano.tensor.isinf",
"theano.tensor.grad",
"theano.tensor.lt",
"math.ceil",
"numpy.zeros",
"numpy.full",
"theano.tensor.log"
] |
[((2250, 2281), 'theano.tensor.fill_diagonal', 'T.fill_diagonal', (['esqdistance', '(0)'], {}), '(esqdistance, 0)\n', (2265, 2281), True, 'import theano.tensor as T\n'), ((2753, 2793), 'theano.tensor.fill_diagonal', 'T.fill_diagonal', (['(1 / (sqdistance + 1))', '(0)'], {}), '(1 / (sqdistance + 1), 0)\n', (2768, 2793), True, 'import theano.tensor as T\n'), ((3422, 3446), 'theano.tensor.maximum', 'T.maximum', (['p_ij', 'epsilon'], {}), '(p_ij, epsilon)\n', (3431, 3446), True, 'import theano.tensor as T\n'), ((3463, 3487), 'theano.tensor.maximum', 'T.maximum', (['q_ij', 'epsilon'], {}), '(q_ij, epsilon)\n', (3472, 3487), True, 'import theano.tensor as T\n'), ((4278, 4292), 'theano.tensor.fmatrix', 'T.fmatrix', (['"""X"""'], {}), "('X')\n", (4287, 4292), True, 'import theano.tensor as T\n'), ((4305, 4323), 'theano.tensor.fvector', 'T.fvector', (['"""sigma"""'], {}), "('sigma')\n", (4314, 4323), True, 'import theano.tensor as T\n'), ((4338, 4356), 'numpy.log', 'np.log', (['perplexity'], {}), '(perplexity)\n', (4344, 4356), True, 'import numpy as np\n'), ((4670, 4689), 'theano.tensor.fvector', 'T.fvector', (['"""sigmin"""'], {}), "('sigmin')\n", (4679, 4689), True, 'import theano.tensor as T\n'), ((4703, 4722), 'theano.tensor.fvector', 'T.fvector', (['"""sigmax"""'], {}), "('sigmax')\n", (4712, 4722), True, 'import theano.tensor as T\n'), ((5038, 5098), 'theano.function', 'theano.function', (['[]', 'entropy'], {'givens': 'givens', 'updates': 'updates'}), '([], entropy, givens=givens, updates=updates)\n', (5053, 5098), False, 'import theano\n'), ((5391, 5449), 'theano.function', 'theano.function', (['[]', 'sigma'], {'givens': 'givens', 'updates': 'updates'}), '([], sigma, givens=givens, updates=updates)\n', (5406, 5449), False, 'import theano\n'), ((6679, 6687), 'scipy.spatial.distance.pdist', 'pdist', (['Y'], {}), '(Y)\n', (6684, 6687), False, 'from scipy.spatial.distance import pdist\n'), ((7748, 7782), 'numpy.array', 'np.array', (['initial_lr'], {'dtype': 'floath'}), '(initial_lr, dtype=floath)\n', (7756, 7782), True, 'import numpy as np\n'), ((7798, 7830), 'numpy.array', 'np.array', (['final_lr'], {'dtype': 'floath'}), '(final_lr, dtype=floath)\n', (7806, 7830), True, 'import numpy as np\n'), ((7854, 7894), 'numpy.array', 'np.array', (['initial_momentum'], {'dtype': 'floath'}), '(initial_momentum, dtype=floath)\n', (7862, 7894), True, 'import numpy as np\n'), ((7916, 7954), 'numpy.array', 'np.array', (['final_momentum'], {'dtype': 'floath'}), '(final_momentum, dtype=floath)\n', (7924, 7954), True, 'import numpy as np\n'), ((8006, 8021), 'theano.tensor.fscalar', 'T.fscalar', (['"""lr"""'], {}), "('lr')\n", (8015, 8021), True, 'import theano.tensor as T\n'), ((8038, 8063), 'theano.shared', 'theano.shared', (['initial_lr'], {}), '(initial_lr)\n', (8051, 8063), False, 'import theano\n'), ((8079, 8100), 'theano.tensor.fscalar', 'T.fscalar', (['"""momentum"""'], {}), "('momentum')\n", (8088, 8100), True, 'import theano.tensor as T\n'), ((8123, 8154), 'theano.shared', 'theano.shared', (['initial_momentum'], {}), '(initial_momentum)\n', (8136, 8154), False, 'import theano\n'), ((8197, 8233), 'numpy.array', 'np.array', (['initial_l_kl'], {'dtype': 'floath'}), '(initial_l_kl, dtype=floath)\n', (8205, 8233), True, 'import numpy as np\n'), ((8251, 8285), 'numpy.array', 'np.array', (['final_l_kl'], {'dtype': 'floath'}), '(final_l_kl, dtype=floath)\n', (8259, 8285), True, 'import numpy as np\n'), ((8304, 8339), 'numpy.array', 'np.array', (['initial_l_e'], {'dtype': 'floath'}), '(initial_l_e, dtype=floath)\n', (8312, 8339), True, 'import numpy as np\n'), ((8356, 8389), 'numpy.array', 'np.array', (['final_l_e'], {'dtype': 'floath'}), '(final_l_e, dtype=floath)\n', (8364, 8389), True, 'import numpy as np\n'), ((8408, 8443), 'numpy.array', 'np.array', (['initial_l_c'], {'dtype': 'floath'}), '(initial_l_c, dtype=floath)\n', (8416, 8443), True, 'import numpy as np\n'), ((8460, 8493), 'numpy.array', 'np.array', (['final_l_c'], {'dtype': 'floath'}), '(final_l_c, dtype=floath)\n', (8468, 8493), True, 'import numpy as np\n'), ((8512, 8547), 'numpy.array', 'np.array', (['initial_l_r'], {'dtype': 'floath'}), '(initial_l_r, dtype=floath)\n', (8520, 8547), True, 'import numpy as np\n'), ((8564, 8597), 'numpy.array', 'np.array', (['final_l_r'], {'dtype': 'floath'}), '(final_l_r, dtype=floath)\n', (8572, 8597), True, 'import numpy as np\n'), ((8651, 8668), 'theano.tensor.fscalar', 'T.fscalar', (['"""l_kl"""'], {}), "('l_kl')\n", (8660, 8668), True, 'import theano.tensor as T\n'), ((8687, 8714), 'theano.shared', 'theano.shared', (['initial_l_kl'], {}), '(initial_l_kl)\n', (8700, 8714), False, 'import theano\n'), ((8725, 8741), 'theano.tensor.fscalar', 'T.fscalar', (['"""l_e"""'], {}), "('l_e')\n", (8734, 8741), True, 'import theano.tensor as T\n'), ((8759, 8785), 'theano.shared', 'theano.shared', (['initial_l_e'], {}), '(initial_l_e)\n', (8772, 8785), False, 'import theano\n'), ((8796, 8812), 'theano.tensor.fscalar', 'T.fscalar', (['"""l_c"""'], {}), "('l_c')\n", (8805, 8812), True, 'import theano.tensor as T\n'), ((8830, 8856), 'theano.shared', 'theano.shared', (['initial_l_c'], {}), '(initial_l_c)\n', (8843, 8856), False, 'import theano\n'), ((8867, 8883), 'theano.tensor.fscalar', 'T.fscalar', (['"""l_r"""'], {}), "('l_r')\n", (8876, 8883), True, 'import theano.tensor as T\n'), ((8901, 8927), 'theano.shared', 'theano.shared', (['initial_l_r'], {}), '(initial_l_r)\n', (8914, 8927), False, 'import theano\n'), ((9002, 9016), 'theano.tensor.fmatrix', 'T.fmatrix', (['"""X"""'], {}), "('X')\n", (9011, 9016), True, 'import theano.tensor as T\n'), ((9071, 9085), 'theano.tensor.fmatrix', 'T.fmatrix', (['"""Y"""'], {}), "('Y')\n", (9080, 9085), True, 'import theano.tensor as T\n'), ((9120, 9136), 'theano.tensor.fmatrix', 'T.fmatrix', (['"""Adj"""'], {}), "('Adj')\n", (9129, 9136), True, 'import theano.tensor as T\n'), ((9216, 9234), 'theano.tensor.fvector', 'T.fvector', (['"""sigma"""'], {}), "('sigma')\n", (9225, 9234), True, 'import theano.tensor as T\n'), ((9293, 9308), 'theano.tensor.fmatrix', 'T.fmatrix', (['"""Yv"""'], {}), "('Yv')\n", (9302, 9308), True, 'import theano.tensor as T\n'), ((9559, 9571), 'theano.tensor.sum', 'T.sum', (['costs'], {}), '(costs)\n', (9564, 9571), True, 'import theano.tensor as T\n'), ((9622, 9637), 'theano.tensor.grad', 'T.grad', (['cost', 'Y'], {}), '(cost, Y)\n', (9628, 9637), True, 'import theano.tensor as T\n'), ((9686, 9975), 'theano.function', 'theano.function', (['[]', 'None'], {'givens': '{X: X_shared, sigma: sigma_shared, Y: Y_shared, Yv: Yv_shared, Adj:\n Adj_shared, lr: lr_shared, momentum: momentum_shared, l_kl: l_kl_shared,\n l_e: l_e_shared, l_c: l_c_shared, l_r: l_r_shared}', 'updates': '[(Yv_shared, momentum * Yv - lr * grad_Y)]'}), '([], None, givens={X: X_shared, sigma: sigma_shared, Y:\n Y_shared, Yv: Yv_shared, Adj: Adj_shared, lr: lr_shared, momentum:\n momentum_shared, l_kl: l_kl_shared, l_e: l_e_shared, l_c: l_c_shared,\n l_r: l_r_shared}, updates=[(Yv_shared, momentum * Yv - lr * grad_Y)])\n', (9701, 9975), False, 'import theano\n'), ((10202, 10297), 'theano.function', 'theano.function', (['[]', '[]'], {'givens': '{Y: Y_shared, Yv: Yv_shared}', 'updates': '[(Y_shared, Y + Yv)]'}), '([], [], givens={Y: Y_shared, Yv: Yv_shared}, updates=[(\n Y_shared, Y + Yv)])\n', (10217, 10297), False, 'import theano\n'), ((10421, 10597), 'theano.function', 'theano.function', (['[]', 'cost'], {'givens': '{X: X_shared, sigma: sigma_shared, Y: Y_shared, Adj: Adj_shared, l_kl:\n l_kl_shared, l_e: l_e_shared, l_c: l_c_shared, l_r: l_r_shared}'}), '([], cost, givens={X: X_shared, sigma: sigma_shared, Y:\n Y_shared, Adj: Adj_shared, l_kl: l_kl_shared, l_e: l_e_shared, l_c:\n l_c_shared, l_r: l_r_shared})\n', (10436, 10597), False, 'import theano\n'), ((10784, 10961), 'theano.function', 'theano.function', (['[]', 'costs'], {'givens': '{X: X_shared, sigma: sigma_shared, Y: Y_shared, Adj: Adj_shared, l_kl:\n l_kl_shared, l_e: l_e_shared, l_c: l_c_shared, l_r: l_r_shared}'}), '([], costs, givens={X: X_shared, sigma: sigma_shared, Y:\n Y_shared, Adj: Adj_shared, l_kl: l_kl_shared, l_e: l_e_shared, l_c:\n l_c_shared, l_r: l_r_shared})\n', (10799, 10961), False, 'import theano\n'), ((13438, 13470), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (13456, 13470), False, 'from sklearn.utils import check_random_state\n'), ((2991, 3009), 'theano.tensor.exp', 'T.exp', (['(-sqdistance)'], {}), '(-sqdistance)\n', (2996, 3009), True, 'import theano.tensor as T\n'), ((3174, 3184), 'theano.tensor.sum', 'T.sum', (['Adj'], {}), '(Adj)\n', (3179, 3184), True, 'import theano.tensor as T\n'), ((3751, 3772), 'theano.tensor.sum', 'T.sum', (['(Y ** 2)'], {'axis': '(1)'}), '(Y ** 2, axis=1)\n', (3756, 3772), True, 'import theano.tensor as T\n'), ((4622, 4654), 'numpy.full', 'np.full', (['N', 'np.inf'], {'dtype': 'floath'}), '(N, np.inf, dtype=floath)\n', (4629, 4654), True, 'import numpy as np\n'), ((4745, 4766), 'theano.tensor.lt', 'T.lt', (['entropy', 'target'], {}), '(entropy, target)\n', (4749, 4766), True, 'import theano.tensor as T\n'), ((4804, 4825), 'theano.tensor.gt', 'T.gt', (['entropy', 'target'], {}), '(entropy, target)\n', (4808, 4825), True, 'import theano.tensor as T\n'), ((5183, 5198), 'theano.tensor.isinf', 'T.isinf', (['sigmax'], {}), '(sigmax)\n', (5190, 5198), True, 'import theano.tensor as T\n'), ((6851, 6895), 'numpy.random.normal', 'np.random.normal', (['(0)', '(max_dist * magnitude)', '(2)'], {}), '(0, max_dist * magnitude, 2)\n', (6867, 6895), True, 'import numpy as np\n'), ((9339, 9379), 'numpy.zeros', 'np.zeros', (['(N, output_dims)'], {'dtype': 'floath'}), '((N, output_dims), dtype=floath)\n', (9347, 9379), True, 'import numpy as np\n'), ((13521, 13548), 'numpy.asarray', 'np.asarray', (['X'], {'dtype': 'floath'}), '(X, dtype=floath)\n', (13531, 13548), True, 'import numpy as np\n'), ((13583, 13607), 'numpy.ones', 'np.ones', (['N'], {'dtype': 'floath'}), '(N, dtype=floath)\n', (13590, 13607), True, 'import numpy as np\n'), ((13727, 13754), 'numpy.asarray', 'np.asarray', (['Y'], {'dtype': 'floath'}), '(Y, dtype=floath)\n', (13737, 13754), True, 'import numpy as np\n'), ((2297, 2326), 'theano.tensor.sum', 'T.sum', (['esqdistance_zd'], {'axis': '(1)'}), '(esqdistance_zd, axis=1)\n', (2302, 2326), True, 'import theano.tensor as T\n'), ((3539, 3567), 'theano.tensor.log', 'T.log', (['(p_ij_safe / q_ij_safe)'], {}), '(p_ij_safe / q_ij_safe)\n', (3544, 3567), True, 'import theano.tensor as T\n'), ((4555, 4571), 'numpy.sqrt', 'np.sqrt', (['epsilon'], {}), '(epsilon)\n', (4562, 4571), True, 'import numpy as np\n'), ((5856, 5865), 'numpy.exp', 'np.exp', (['e'], {}), '(e)\n', (5862, 5865), True, 'import numpy as np\n'), ((6292, 6377), 'math.ceil', 'math.ceil', (['(1 / 2.0 * (-(-8 * k + 4 * n ** 2 - 4 * n - 7) ** 0.5 + 2 * n - 1) - 1)'], {}), '(1 / 2.0 * (-(-8 * k + 4 * n ** 2 - 4 * n - 7) ** 0.5 + 2 * n - 1) - 1\n )\n', (6301, 6377), False, 'import math\n'), ((4443, 4451), 'theano.tensor.log', 'T.log', (['P'], {}), '(P)\n', (4448, 4451), True, 'import theano.tensor as T\n'), ((6760, 6780), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (6768, 6780), True, 'import numpy as np\n')]
|
import io
from PIL import Image, ImageDraw, ImageFont
import tensorflow as tf
import numpy as np
from matplotlib import cm
from matplotlib.colors import ListedColormap
default_color = 'blue'
highlight_color = 'red'
class SemanticSegmentationOverlay:
def __init__(self, args):
self.segmap_key = args.segmap_key
self.segmap_format_key = args.segmap_format_key
self.segmap_colormap_file = args.segmap_colormap_file
self.font = ImageFont.truetype("./fonts/OpenSans-Regular.ttf", 12)
self.segmap_raw_divisor_key = args.segmap_raw_divisor_key
if self.segmap_colormap_file is None:
self.colormap_function = cm.gist_earth
else:
self.colormap_function = self.load_colormap()
def apply_overlay(self, image_bytes, example):
"""Apply segmentation overlay over input image.
Args:
image_bytes: JPEG image.
          example: TF Record Example containing the segmentation features.
Returns:
image_bytes_with_overlay: JPEG image with segmentation overlay.
"""
img = Image.open(io.BytesIO(image_bytes))
draw = ImageDraw.Draw(img)
width, height = img.size
segmap = self.get_segmap(example, height, width)
segmap = self.apply_colormap(segmap)
segmap_img = Image.fromarray(segmap).convert('RGB')
out_img = Image.blend(img, segmap_img, 0.5)
with io.BytesIO() as output:
out_img.save(output, format="JPEG")
image_bytes_with_overlay = output.getvalue()
return image_bytes_with_overlay
def load_colormap(self):
colormap = np.zeros((256, 3), dtype=np.uint8)
with open(self.segmap_colormap_file, 'rt') as f:
for i, line in enumerate(f):
colormap[i] = np.fromstring(line, sep=",", dtype=int)
listed_colormap = ListedColormap(colormap/255)
return listed_colormap
def apply_colormap(self, segmap):
cm_array = self.colormap_function(segmap/255)
return np.uint8(cm_array*255)
def get_segmap(self, example, im_height, im_width):
""" From a TF Record Feature, get the image/class label.
Args:
feature: TF Record Feature
Returns:
mask (numpy.ndarray): image segmentation mask (0-255)
"""
segmap_format = example.features.feature[self.segmap_format_key].bytes_list.value[0].decode("utf-8")
example = example.SerializeToString()
string_feature = tf.io.FixedLenFeature((), tf.string)
keys_to_features = {self.segmap_key : string_feature, self.segmap_format_key: string_feature}
parsed_tensors = tf.io.parse_single_example(
example, features=keys_to_features)
        label_shape = tf.stack([im_height, im_width, 1])
if segmap_format == "raw":
flattened_label = tf.io.decode_raw(
parsed_tensors[self.segmap_key], out_type=tf.int32)
mask = tf.reshape(flattened_label, label_shape).numpy()[:,:,0] // self.segmap_raw_divisor_key
elif segmap_format == "png":
label = tf.io.decode_image(parsed_tensors[self.segmap_key], channels=1)
mask = label.numpy()[:,:,0]
else:
raise ValueError("Unknown format: "+segmap_format)
return mask
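# Minimal usage sketch (not part of the original module). The feature keys and
# the argparse-style `args` namespace below are assumptions; only
# SemanticSegmentationOverlay itself comes from the code above.
#
#   import argparse
#   args = argparse.Namespace(
#       segmap_key="image/segmentation/class/encoded",
#       segmap_format_key="image/segmentation/class/format",
#       segmap_colormap_file=None,
#       segmap_raw_divisor_key=1)
#   overlay = SemanticSegmentationOverlay(args)
#   for record in tf.data.TFRecordDataset("examples.tfrecord"):
#       example = tf.train.Example.FromString(record.numpy())
#       image_bytes = example.features.feature["image/encoded"].bytes_list.value[0]
#       jpeg_with_overlay = overlay.apply_overlay(image_bytes, example)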
|
[
"numpy.uint8",
"PIL.Image.fromarray",
"tensorflow.io.decode_image",
"tensorflow.io.parse_single_example",
"PIL.Image.blend",
"io.BytesIO",
"PIL.ImageFont.truetype",
"matplotlib.colors.ListedColormap",
"PIL.ImageDraw.Draw",
"numpy.zeros",
"tensorflow.io.FixedLenFeature",
"tensorflow.io.decode_raw",
"tensorflow.reshape",
"numpy.fromstring",
"tensorflow.stack"
] |
[((459, 513), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""./fonts/OpenSans-Regular.ttf"""', '(12)'], {}), "('./fonts/OpenSans-Regular.ttf', 12)\n", (477, 513), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1067, 1086), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (1081, 1086), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1288, 1321), 'PIL.Image.blend', 'Image.blend', (['img', 'segmap_img', '(0.5)'], {}), '(img, segmap_img, 0.5)\n', (1299, 1321), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1533, 1567), 'numpy.zeros', 'np.zeros', (['(256, 3)'], {'dtype': 'np.uint8'}), '((256, 3), dtype=np.uint8)\n', (1541, 1567), True, 'import numpy as np\n'), ((1748, 1778), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['(colormap / 255)'], {}), '(colormap / 255)\n', (1762, 1778), False, 'from matplotlib.colors import ListedColormap\n'), ((1908, 1932), 'numpy.uint8', 'np.uint8', (['(cm_array * 255)'], {}), '(cm_array * 255)\n', (1916, 1932), True, 'import numpy as np\n'), ((2350, 2386), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['()', 'tf.string'], {}), '((), tf.string)\n', (2371, 2386), True, 'import tensorflow as tf\n'), ((2507, 2569), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example'], {'features': 'keys_to_features'}), '(example, features=keys_to_features)\n', (2533, 2569), True, 'import tensorflow as tf\n'), ((2598, 2632), 'tensorflow.stack', 'tf.stack', (['[im_height, im_width, 1]'], {}), '([im_height, im_width, 1])\n', (2606, 2632), True, 'import tensorflow as tf\n'), ((1031, 1054), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (1041, 1054), False, 'import io\n'), ((1333, 1345), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1343, 1345), False, 'import io\n'), ((2706, 2774), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (['parsed_tensors[self.segmap_key]'], {'out_type': 'tf.int32'}), '(parsed_tensors[self.segmap_key], out_type=tf.int32)\n', (2722, 2774), True, 'import tensorflow as tf\n'), ((1229, 1252), 'PIL.Image.fromarray', 'Image.fromarray', (['segmap'], {}), '(segmap)\n', (1244, 1252), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1684, 1723), 'numpy.fromstring', 'np.fromstring', (['line'], {'sep': '""","""', 'dtype': 'int'}), "(line, sep=',', dtype=int)\n", (1697, 1723), True, 'import numpy as np\n'), ((2933, 2996), 'tensorflow.io.decode_image', 'tf.io.decode_image', (['parsed_tensors[self.segmap_key]'], {'channels': '(1)'}), '(parsed_tensors[self.segmap_key], channels=1)\n', (2951, 2996), True, 'import tensorflow as tf\n'), ((2799, 2839), 'tensorflow.reshape', 'tf.reshape', (['flattened_label', 'label_shape'], {}), '(flattened_label, label_shape)\n', (2809, 2839), True, 'import tensorflow as tf\n')]
|
'''
Authors: <NAME> and <NAME>
Date: July 10, 2017
Pre-cnmf-e processing of videos in chunks:
- Downsampling
- Motion Correction
'''
from os import path, system
import pims
import av
import numpy as np
import math
from tqdm import tqdm
from skimage import img_as_uint
from motion import align_video
import skimage.io
import skimage.filters
from skimage.morphology import square
import h5py as hd
def process_chunk(filename, start, stop, reference, save_name, xlims = None, ylims = None, fps= 20, ds_factor=4, correct_motion=True, thresh=1.8, cutoff=0.05, clean_pixels=False, pixel_thresh=1.1, format='tiff'):
'''
Process one chunk of a video read in from pims and save as .tiff
Input:
- filename: video path
- start: start frame
- stop: stop frame
- reference: reference frame
- xlims: tuple of 2 ints, crop limits on x-axis
- ylims: tuple of 2 ints, crop limits on y-axis
- fps: int, output frames per second
- ds_factor: int, downsample factor, default=4
- correct_motion: bool, correct motion, default=True
        - thresh: flt, threshold for motion correction, default=1.8
- cutoff: flt, cutoff for motion correction, default=0.05
- format: str, format to save chunk as (tiff, avi, hdf5), default='tiff'
Output:
- None, saves processed chunk as .tiff or .avi
'''
    chunk = stop // (stop - start)  # integer chunk index; float division would break the zero-padding below
video = pims.ImageIOReader(filename)
frame_rate = fps # video.frame_rate
video_chunk = video[start:stop]
print("Processing frames {} to {} of {}".format(start, stop, len(video)))
video_chunk_ds = downsample(video_chunk, ds_factor, xlims, ylims)
#in order to have 01, 02 for file sorting and concatenation of chunks
if chunk < 10:
chunk = '0' + str(chunk)
if clean_pixels:
remove_dead_pixels(video_chunk_ds, pixel_thresh)
if correct_motion:
video_chunk_ds = align_video(video_chunk_ds, reference, thresh, cutoff)
if format == 'tiff':
skimage.io.imsave(save_name + '_temp_{}.tiff'.format(chunk), img_as_uint(video_chunk_ds/2**16))
elif format == 'avi':
save_to_avi(video_chunk_ds, fps = frame_rate / ds_factor, filename = save_name + '_temp_{}.avi'.format(chunk))
elif format == 'hdf5':
save_to_hdf(video_chunk_ds, filename = save_name + '_temp_{}.hdf5'.format(chunk))
def downsample(vid, ds_factor, xlims=None, ylims=None):
'''
Downsample video by ds_factor.
If xlims and ylims are not None, crop video to these limits also
Input:
- vid: numpy array, video
- ds_factor: int, downsample factor
- xlims (optional): tuple of ints, x-index of crop limits
- ylims (optional): tuple of ints: y-index of crop limits
Output:
- vid_ds: numpy array, downsampled video
'''
dims = vid[0].shape
if xlims is not None:
xs, xe = xlims
else:
xs = 0
xe = dims[1] - 1
if ylims is not None:
ys, ye = ylims
else:
ys = 0
ye = dims[0] - 1
vid_ds = np.zeros((int(len(vid)/ds_factor), ye-ys, xe-xs))
frame_ds = 0
for frame in tqdm(range(0, len(vid), ds_factor), desc='Downsampling'):
if frame + ds_factor <= len(vid):
stack = np.array(vid[frame:frame+ds_factor])[:,ys:ye,xs:xe,0]
vid_ds[frame_ds, :, :] = np.round(np.mean(stack, axis=0))
frame_ds += 1
else:
continue
return vid_ds
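# Worked toy example (illustrative values, not from the original pipeline):
# with ds_factor=2, frames [f0, f1, f2, f3] become [mean(f0, f1), mean(f2, f3)],
# and a trailing group shorter than ds_factor is dropped by the guard above.
#
#   toy = np.stack([np.full((4, 4, 3), v, dtype=np.uint8) for v in (0, 10, 20, 30)])
#   downsample(toy, ds_factor=2)   # two output frames with values ~5 and ~25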
def get_crop_lims(vid, crop_thresh=40):
'''
Find x,y limits where the mean fluorescence is always above a defined threshold value
Input:
- vid: numpy array, video
- crop_thresh: int, fluorescence threshold to find x,y limits to crop to
Output:
- xlims: tuple of 2 ints, x-axis pixels to crop to
- ylims: tuple of 2 ints, y-axis pixels to crop to
'''
dims = vid[0].shape
xs = np.inf
xe = 0
ys = np.inf
ye = 0
y = np.arange(dims[0])
x = np.arange(dims[1])
for frame in vid:
frame = np.array(frame)[:,:,0]
xf = frame.mean(axis=0)
yf = frame.mean(axis=1)
x_thresh = x[xf>=crop_thresh]
y_thresh = y[yf>=crop_thresh]
if x_thresh[0] < xs:
xs = x_thresh[0]
if x_thresh[-1] > xe:
xe = x_thresh[-1]
if y_thresh[0] < ys:
ys = y_thresh[0]
if y_thresh[-1] > ye:
ye = y_thresh[-1]
return (xs, xe), (ys, ye)
def remove_dead_pixels(vid, thresh=1.1):
for frame in tqdm(range(vid.shape[0]), desc='Removing Dead Pixels'):
med = skimage.filters.median(vid[frame, :, :], square(10)).ravel()
img = vid[frame, :, :].ravel()
img[img>thresh*med] = med[img>thresh*med]
vid[frame, :, :] = img.reshape(vid.shape[1], vid.shape[2])
def save_to_avi(vid, fps, filename):
total_frames, height, width = vid.shape
container = av.open(filename, 'w')
stream = container.add_stream('rawvideo', rate=fps)
stream.height = height
stream.width = width
stream.pix_fmt = 'bgr24'
for frame in vid:
# Convert frame to RGB uint8 values
frame = frame.astype('uint8')
frame = np.repeat(np.reshape(frame, newshape=(frame.shape[0], frame.shape[1], 1)), repeats=3, axis=2)
# Encode frame into stream
frame = av.VideoFrame.from_ndarray(frame, format='bgr24')
for packet in stream.encode(frame):
container.mux(packet)
# Flush Stream
for packet in stream.encode():
container.mux(packet)
# Close file
container.close()
def save_to_hdf(Y, filename):
# Author: <NAME>
# Y is a numpy array of dimensions (T_dim, y_dim, x_dim)
# FramesxHxW
dirname = path.dirname(filename)
basename = path.basename(filename)
filename_new = path.splitext(filename)[0] + '.hdf5'
if path.exists(filename_new):
system('rm %s'%filename_new)
file = hd.File(filename_new)
    tdim, ydim, xdim = Y.shape  # Y is (T_dim, y_dim, x_dim), per the docstring above
movie = file.create_dataset('original', shape = (tdim, xdim*ydim), chunks = True)
file.attrs['folder'] = dirname
file.attrs['filename'] = basename
file['original'].attrs['duration'] = tdim
file['original'].attrs['dims'] = (ydim, xdim) # 2D np.arrays are (row X cols) --> (ydim X xdim)
movie[:] = Y.reshape((tdim, xdim*ydim))
return file
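# Hedged end-to-end sketch: the file name, chunk bounds and reference-frame
# choice below are illustrative assumptions, not values from the original run.
if __name__ == '__main__':
    vid = pims.ImageIOReader('session.avi')      # hypothetical input video
    reference = downsample(vid[:200], 4)[0]     # reference frame for alignment
    process_chunk('session.avi', start=0, stop=1000, reference=reference,
                  save_name='session', ds_factor=4, format='tiff')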
|
[
"os.path.exists",
"numpy.mean",
"numpy.reshape",
"skimage.img_as_uint",
"skimage.morphology.square",
"os.path.splitext",
"h5py.File",
"av.VideoFrame.from_ndarray",
"os.path.dirname",
"av.open",
"numpy.array",
"os.path.basename",
"pims.ImageIOReader",
"motion.align_video",
"os.system",
"numpy.arange"
] |
[((1431, 1459), 'pims.ImageIOReader', 'pims.ImageIOReader', (['filename'], {}), '(filename)\n', (1449, 1459), False, 'import pims\n'), ((4013, 4031), 'numpy.arange', 'np.arange', (['dims[0]'], {}), '(dims[0])\n', (4022, 4031), True, 'import numpy as np\n'), ((4040, 4058), 'numpy.arange', 'np.arange', (['dims[1]'], {}), '(dims[1])\n', (4049, 4058), True, 'import numpy as np\n'), ((4985, 5007), 'av.open', 'av.open', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (4992, 5007), False, 'import av\n'), ((5812, 5834), 'os.path.dirname', 'path.dirname', (['filename'], {}), '(filename)\n', (5824, 5834), False, 'from os import path, system\n'), ((5850, 5873), 'os.path.basename', 'path.basename', (['filename'], {}), '(filename)\n', (5863, 5873), False, 'from os import path, system\n'), ((5938, 5963), 'os.path.exists', 'path.exists', (['filename_new'], {}), '(filename_new)\n', (5949, 5963), False, 'from os import path, system\n'), ((6014, 6035), 'h5py.File', 'hd.File', (['filename_new'], {}), '(filename_new)\n', (6021, 6035), True, 'import h5py as hd\n'), ((1940, 1994), 'motion.align_video', 'align_video', (['video_chunk_ds', 'reference', 'thresh', 'cutoff'], {}), '(video_chunk_ds, reference, thresh, cutoff)\n', (1951, 1994), False, 'from motion import align_video\n'), ((5413, 5462), 'av.VideoFrame.from_ndarray', 'av.VideoFrame.from_ndarray', (['frame'], {'format': '"""bgr24"""'}), "(frame, format='bgr24')\n", (5439, 5462), False, 'import av\n'), ((5973, 6003), 'os.system', 'system', (["('rm %s' % filename_new)"], {}), "('rm %s' % filename_new)\n", (5979, 6003), False, 'from os import path, system\n'), ((2090, 2127), 'skimage.img_as_uint', 'img_as_uint', (['(video_chunk_ds / 2 ** 16)'], {}), '(video_chunk_ds / 2 ** 16)\n', (2101, 2127), False, 'from skimage import img_as_uint\n'), ((4098, 4113), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (4106, 4113), True, 'import numpy as np\n'), ((5276, 5339), 'numpy.reshape', 'np.reshape', (['frame'], {'newshape': '(frame.shape[0], frame.shape[1], 1)'}), '(frame, newshape=(frame.shape[0], frame.shape[1], 1))\n', (5286, 5339), True, 'import numpy as np\n'), ((5893, 5916), 'os.path.splitext', 'path.splitext', (['filename'], {}), '(filename)\n', (5906, 5916), False, 'from os import path, system\n'), ((3317, 3355), 'numpy.array', 'np.array', (['vid[frame:frame + ds_factor]'], {}), '(vid[frame:frame + ds_factor])\n', (3325, 3355), True, 'import numpy as np\n'), ((3417, 3439), 'numpy.mean', 'np.mean', (['stack'], {'axis': '(0)'}), '(stack, axis=0)\n', (3424, 3439), True, 'import numpy as np\n'), ((4709, 4719), 'skimage.morphology.square', 'square', (['(10)'], {}), '(10)\n', (4715, 4719), False, 'from skimage.morphology import square\n')]
|
import numpy as np
import pytest
import pytoolkit as tk
def test_load_voc_od_split(data_dir):
ds = tk.datasets.load_voc_od_split(data_dir / "od", split="train")
assert len(ds) == 3
assert tuple(ds.metadata["class_names"]) == ("~", "〇")
ann = ds.labels[0]
assert ann.path == (data_dir / "od" / "JPEGImages" / "無題.jpg")
assert ann.width == 768
assert ann.height == 614
assert len(ann.classes) == 1
assert ann.classes[0] == 0
assert (ann.difficults == np.array([False])).all()
assert ann.bboxes[0] == pytest.approx(
np.array([203 - 1, 255 - 1, 601 - 1, 355 - 1]) / [768, 614, 768, 614]
)
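    # The expected bbox above is (xmin, ymin, xmax, ymax): the -1 shifts VOC's
    # 1-based pixel coordinates to 0-based, and dividing by
    # (width, height, width, height) = (768, 614, 768, 614) normalizes to [0, 1].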
|
[
"numpy.array",
"pytoolkit.datasets.load_voc_od_split"
] |
[((106, 167), 'pytoolkit.datasets.load_voc_od_split', 'tk.datasets.load_voc_od_split', (["(data_dir / 'od')"], {'split': '"""train"""'}), "(data_dir / 'od', split='train')\n", (135, 167), True, 'import pytoolkit as tk\n'), ((493, 510), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (501, 510), True, 'import numpy as np\n'), ((569, 615), 'numpy.array', 'np.array', (['[203 - 1, 255 - 1, 601 - 1, 355 - 1]'], {}), '([203 - 1, 255 - 1, 601 - 1, 355 - 1])\n', (577, 615), True, 'import numpy as np\n')]
|
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inconel600
"""
import numpy
from armi.utils.units import getTc
from armi.materials.material import Material
class Inconel600(Material):
name = "Inconel600"
references = {
"mass fractions": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"density": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"thermalConductivity": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"specific heat": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"linear expansion percent": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"linear expansion": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
}
def __init__(self):
Material.__init__(self)
self.p.refTempK = 294.15
self.p.refDens = 8.47 # g/cc
# Only density measurement presented in the reference.
# Presumed to be performed at 21C since this was the reference temperature for linear expansion measurements.
def setDefaultMassFracs(self):
massFracs = {
"NI": 0.7541,
"CR": 0.1550,
"FE": 0.0800,
"C": 0.0008,
"MN55": 0.0050,
"S": 0.0001,
"SI": 0.0025,
"CU": 0.0025,
}
for element, massFrac in massFracs.items():
self.setMassFrac(element, massFrac)
def polyfitThermalConductivity(self, power=2):
r"""
Calculates the coefficients of a polynomial fit for thermalConductivity.
Based on data from http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf
Fits a polynomial to the data set and returns the coefficients.
Parameters
----------
power : int, optional
power of the polynomial fit equation
Returns
-------
list of length 'power' containing the polynomial fit coefficients for thermal conductivity.
"""
Tc = [20.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0]
k = [14.9, 15.9, 17.3, 19.0, 20.5, 22.1, 23.9, 25.7, 27.5]
return numpy.polyfit(numpy.array(Tc), numpy.array(k), power).tolist()
def thermalConductivity(self, Tk=None, Tc=None):
r"""
Returns the thermal conductivity of Inconel600.
Parameters
----------
Tk : float, optional
temperature in (K)
Tc : float, optional
Temperature in (C)
Returns
-------
thermalCond : float
thermal conductivity in W/m/C
"""
Tc = getTc(Tc, Tk)
self.checkTempRange(20.0, 800.0, Tc, "thermal conductivity")
thermalCond = 3.4938e-6 * Tc ** 2 + 1.3403e-2 * Tc + 14.572
return thermalCond # W/m-C
def polyfitHeatCapacity(self, power=2):
r"""
Calculates the coefficients of a polynomial fit for heatCapacity.
Based on data from http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf
Fits a polynomial to the data set and returns the coefficients.
Parameters
----------
power : int, optional
power of the polynomial fit equation
Returns
-------
list of length 'power' containing the polynomial fit coefficients for heat capacity.
"""
Tc = [20.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0]
cp = [444.0, 465.0, 486.0, 502.0, 519.0, 536.0, 578.0, 595.0, 611.0, 628.0]
return numpy.polyfit(numpy.array(Tc), numpy.array(cp), power).tolist()
def heatCapacity(self, Tk=None, Tc=None):
r"""
Returns the specific heat capacity of Inconel600.
Parameters
----------
Tk : float, optional
Temperature in Kelvin.
Tc : float, optional
Temperature in degrees Celsius.
Returns
-------
heatCapacity : float
heat capacity in J/kg/C
"""
Tc = getTc(Tc, Tk)
self.checkTempRange(20, 900, Tc, "heat capacity")
heatCapacity = 7.4021e-6 * Tc ** 2 + 0.20573 * Tc + 441.3
return heatCapacity # J/kg-C
def polyfitLinearExpansionPercent(self, power=2):
r"""
Calculates the coefficients of a polynomial fit for linearExpansionPercent.
Based on data from http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf
Uses mean CTE values to find percent thermal strain values. Fits a polynomial
to the data set and returns the coefficients.
Parameters
----------
power : int, optional
power of the polynomial fit equation
Returns
-------
list of length 'power' containing the polynomial fit coefficients for linearExpansionPercent
"""
refTempC = getTc(None, Tk=self.p.refTempK)
Tc = [100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0]
alpha_mean = [
1.33e-05,
1.38e-05,
1.42e-05,
1.45e-05,
1.49e-05,
1.53e-05,
1.58e-05,
1.61e-05,
1.64e-05,
]
linExpPercent = [0.0]
for i, alpha in enumerate(alpha_mean):
linExpPercentVal = 100.0 * alpha * (Tc[i] - refTempC)
linExpPercent.append(linExpPercentVal)
Tc.insert(0, refTempC)
return numpy.polyfit(
numpy.array(Tc), numpy.array(linExpPercent), power
).tolist()
def linearExpansionPercent(self, Tk=None, Tc=None):
r"""
Returns percent linear expansion of Inconel600.
Parameters
----------
Tk : float
temperature in (K)
Tc : float
Temperature in (C)
Returns
-------
linExpPercent in %-m/m/C
"""
Tc = getTc(Tc, Tk)
self.checkTempRange(21.0, 900.0, Tc, "linear expansion percent")
linExpPercent = 3.722e-7 * Tc ** 2 + 1.303e-3 * Tc - 2.863e-2
return linExpPercent
def linearExpansion(self, Tk=None, Tc=None):
r"""
From http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf
Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert
from percent strain to strain, then differentiated with respect to temperature to find the correlation
for instantaneous linear expansion.
i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion correlation is 2*a/100*Tc + b/100
2*(3.722e-7/100.0)*Tc + 1.303e-3/100.0
Parameters
----------
Tk : float
temperature in (K)
Tc : float
Temperature in (C)
Returns
-------
linExp in m/m/C
"""
Tc = getTc(Tc, Tk)
self.checkTempRange(21.0, 900.0, Tc, "linear expansion")
linExp = 7.444e-9 * Tc + 1.303e-5
return linExp
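# Hedged usage sketch (500 degC is an arbitrary evaluation temperature, and it
# is assumed the armi Material base class can be instantiated directly):
if __name__ == "__main__":
    alloy = Inconel600()
    print(alloy.thermalConductivity(Tc=500.0))     # ~22.1 W/m-C from the correlation above
    print(alloy.heatCapacity(Tc=500.0))            # ~546 J/kg-C
    print(alloy.linearExpansionPercent(Tc=500.0))  # percent thermal strain at 500 degC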
|
[
"numpy.array",
"armi.utils.units.getTc",
"armi.materials.material.Material.__init__"
] |
[((1378, 1401), 'armi.materials.material.Material.__init__', 'Material.__init__', (['self'], {}), '(self)\n', (1395, 1401), False, 'from armi.materials.material import Material\n'), ((3236, 3249), 'armi.utils.units.getTc', 'getTc', (['Tc', 'Tk'], {}), '(Tc, Tk)\n', (3241, 3249), False, 'from armi.utils.units import getTc\n'), ((4640, 4653), 'armi.utils.units.getTc', 'getTc', (['Tc', 'Tk'], {}), '(Tc, Tk)\n', (4645, 4653), False, 'from armi.utils.units import getTc\n'), ((5485, 5516), 'armi.utils.units.getTc', 'getTc', (['None'], {'Tk': 'self.p.refTempK'}), '(None, Tk=self.p.refTempK)\n', (5490, 5516), False, 'from armi.utils.units import getTc\n'), ((6522, 6535), 'armi.utils.units.getTc', 'getTc', (['Tc', 'Tk'], {}), '(Tc, Tk)\n', (6527, 6535), False, 'from armi.utils.units import getTc\n'), ((7520, 7533), 'armi.utils.units.getTc', 'getTc', (['Tc', 'Tk'], {}), '(Tc, Tk)\n', (7525, 7533), False, 'from armi.utils.units import getTc\n'), ((2776, 2791), 'numpy.array', 'numpy.array', (['Tc'], {}), '(Tc)\n', (2787, 2791), False, 'import numpy\n'), ((2793, 2807), 'numpy.array', 'numpy.array', (['k'], {}), '(k)\n', (2804, 2807), False, 'import numpy\n'), ((4172, 4187), 'numpy.array', 'numpy.array', (['Tc'], {}), '(Tc)\n', (4183, 4187), False, 'import numpy\n'), ((4189, 4204), 'numpy.array', 'numpy.array', (['cp'], {}), '(cp)\n', (4200, 4204), False, 'import numpy\n'), ((6095, 6110), 'numpy.array', 'numpy.array', (['Tc'], {}), '(Tc)\n', (6106, 6110), False, 'import numpy\n'), ((6112, 6138), 'numpy.array', 'numpy.array', (['linExpPercent'], {}), '(linExpPercent)\n', (6123, 6138), False, 'import numpy\n')]
|
import os
from utils import check_dir, make_sentences
import numpy as np
import pandas as pd
def transform(source_path):
rows = []
sentence_count = 1
    for root, __subFolders, files in os.walk(source_path):
        for file in files:
            if file.endswith('.tags'):
                for line in open(os.path.join(root, file), encoding='utf-8'):
                    line = line.split()
                    if len(line) >= 5:
                        # keep the token, POS tag and NER tag columns
                        rows.append([sentence_count, line[0], line[1], line[4]])
                    else:
                        # a blank line marks a sentence boundary
                        sentence_count += 1
return rows, sentence_count
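# Illustrative .tags layout (an assumption inferred from the indexing above:
# whitespace-separated columns with the token in column 0, the POS tag in
# column 1 and the NER tag in column 4, and blank lines between sentences):
#
#   Obama    NNP   ...   ...   B-per
#   visited  VBD   ...   ...   O
#
# which transform() turns into rows like [sentence_idx, 'Obama', 'NNP', 'B-per'].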
def main():
source_path = "./data/gmb-1.0.0"
columns = ["sentence_idx", "Word", "POS", "Tag"]
rows, sentence_count = transform(source_path)
sentence_idx = np.array(range(sentence_count))
# split into train and test files. this will help with keeping the generators simple,
# plus this should really be done at the ETL stage of the pipeline anyway!
test_idx = np.random.choice(np.array(range(sentence_count)), size=int(sentence_count*0.2), replace=False)
train_idx = np.setdiff1d(sentence_idx,test_idx)
# check that the directory to store the data exists, if not create it.
check_dir("./data/processed_data/gmb/")
df_train = pd.DataFrame(data=[s for s in rows if s[0] in train_idx], columns=columns)
train_sentences, train_labels = make_sentences(df_train, group_col="sentence_idx", word_col="Word", tag_col="Tag")
train_sentences.to_csv("./data/processed_data/gmb/train.sentences.csv", index=False, header=False)
train_labels.to_csv("./data/processed_data/gmb/train.labels.csv", index=False, header=False)
vocab = df_train["Word"].unique() # TODO change this to be a full list and add a frequency filter.
tags = sorted(df_train["Tag"].unique(), reverse=True)
with open("./data/processed_data/gmb/vocabulary.txt", "w", newline="") as f:
f.write("\n".join(vocab))
with open("./data/processed_data/gmb/tags.txt", "w", newline="") as f:
f.write("\n".join(tags))
del (df_train, train_sentences, train_labels, vocab, tags)
check_dir("./data/processed_data/gmb/")
df_test = pd.DataFrame(data=[s for s in rows if s[0] in test_idx], columns=columns)
test_sentences, test_labels = make_sentences(df_test, group_col="sentence_idx", word_col="Word", tag_col="Tag")
test_sentences.to_csv("./data/processed_data/gmb/test.sentences.csv", index=False, header=False)
test_labels.to_csv("./data/processed_data/gmb/test.labels.csv", index=False, header=False)
del (df_test, test_sentences, test_labels)
if __name__ == "__main__":
main()
|
[
"utils.make_sentences",
"os.path.join",
"numpy.setdiff1d",
"pandas.DataFrame",
"utils.check_dir",
"os.walk"
] |
[((230, 250), 'os.walk', 'os.walk', (['source_path'], {}), '(source_path)\n', (237, 250), False, 'import os\n'), ((1461, 1497), 'numpy.setdiff1d', 'np.setdiff1d', (['sentence_idx', 'test_idx'], {}), '(sentence_idx, test_idx)\n', (1473, 1497), True, 'import numpy as np\n'), ((1577, 1616), 'utils.check_dir', 'check_dir', (['"""./data/processed_data/gmb/"""'], {}), "('./data/processed_data/gmb/')\n", (1586, 1616), False, 'from utils import check_dir, make_sentences\n'), ((1632, 1706), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[s for s in rows if s[0] in train_idx]', 'columns': 'columns'}), '(data=[s for s in rows if s[0] in train_idx], columns=columns)\n', (1644, 1706), True, 'import pandas as pd\n'), ((1743, 1830), 'utils.make_sentences', 'make_sentences', (['df_train'], {'group_col': '"""sentence_idx"""', 'word_col': '"""Word"""', 'tag_col': '"""Tag"""'}), "(df_train, group_col='sentence_idx', word_col='Word', tag_col\n ='Tag')\n", (1757, 1830), False, 'from utils import check_dir, make_sentences\n'), ((2482, 2521), 'utils.check_dir', 'check_dir', (['"""./data/processed_data/gmb/"""'], {}), "('./data/processed_data/gmb/')\n", (2491, 2521), False, 'from utils import check_dir, make_sentences\n'), ((2536, 2609), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[s for s in rows if s[0] in test_idx]', 'columns': 'columns'}), '(data=[s for s in rows if s[0] in test_idx], columns=columns)\n', (2548, 2609), True, 'import pandas as pd\n'), ((2644, 2730), 'utils.make_sentences', 'make_sentences', (['df_test'], {'group_col': '"""sentence_idx"""', 'word_col': '"""Word"""', 'tag_col': '"""Tag"""'}), "(df_test, group_col='sentence_idx', word_col='Word', tag_col=\n 'Tag')\n", (2658, 2730), False, 'from utils import check_dir, make_sentences\n'), ((351, 375), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (363, 375), False, 'import os\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def find_template(signal, rr):
    # Placeholder: return a fixed window of the signal as the template;
    # the rr argument is currently unused.
    return signal[200:400]
def conv(signal, template):
scores = []
template_length = len(template)
signal_length = len(signal)
for ind in range(signal_length-template_length):
score = np.dot(signal[ind:ind+template_length], template)
score = np.sqrt(score / template_length) - 300
scores.append(score)
return scores
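# A roughly equivalent vectorized form (a sketch; the sqrt scaling and the
# -300 offset are copied verbatim from the loop above):
#
#   def conv_vectorized(signal, template):
#       n = len(template)
#       raw = np.correlate(signal, template, mode='valid')[:-1]  # same windows as the loop
#       return np.sqrt(raw / n) - 300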
def findpeaks(signal):
pass
if __name__ == "__main__":
pass
|
[
"numpy.dot",
"numpy.sqrt"
] |
[((374, 425), 'numpy.dot', 'np.dot', (['signal[ind:ind + template_length]', 'template'], {}), '(signal[ind:ind + template_length], template)\n', (380, 425), True, 'import numpy as np\n'), ((440, 472), 'numpy.sqrt', 'np.sqrt', (['(score / template_length)'], {}), '(score / template_length)\n', (447, 472), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
try:
import QENSmodels
except ImportError:
print('Module QENSmodels not found')
def hwhmChudleyElliotDiffusion(q, D=0.23, L=1.0):
""" Returns some characteristics of `ChudleyElliotDiffusion` as functions
of the momentum transfer `q`:
the half-width half-maximum (`hwhm`), the elastic incoherent structure
factor (`eisf`), and the quasi-elastic incoherent structure factor (`qisf`)
Parameters
----------
q: float, list or :class:`~numpy:numpy.ndarray`
momentum transfer (non-fitting, in 1/Angstrom)
D: float
diffusion coefficient (in Angstrom^2/ps). Default to 0.23.
L: float
jump length (in Angstrom). Default to 1.0.
Returns
-------
hwhm: :class:`~numpy:numpy.ndarray`
half-width half maximum
eisf: :class:`~numpy:numpy.ndarray`
elastic incoherent structure factor
qisf: :class:`~numpy:numpy.ndarray`
quasi-elastic incoherent structure factor
Examples
--------
>>> hwhm, eisf, qisf = hwhmChudleyElliotDiffusion([1., 2.], 0.5, 1.5)
>>> round(hwhm[0], 3), round(hwhm[1], 3)
(1.616, 1.333)
>>> eisf
array([0., 0.])
>>> qisf
array([1., 1.])
"""
# input validation
if D <= 0:
raise ValueError('The diffusion coefficient should be positive')
if L <= 0:
raise ValueError('L, the jump length, should be positive')
q = np.asarray(q, dtype=np.float32)
eisf = np.zeros(q.size)
qisf = np.ones(q.size)
    # NB: np.sinc is numpy's normalized sinc, np.sinc(x) = sin(pi*x)/(pi*x)
    hwhm = 6. * D * (1. - np.sinc(q * L)) / L**2
# Force hwhm to be numpy array, even if single value
hwhm = np.asarray(hwhm, dtype=np.float32)
hwhm = np.reshape(hwhm, hwhm.size)
return hwhm, eisf, qisf
def sqwChudleyElliotDiffusion(w, q, scale=1, center=0, D=0.23,
L=1.0):
r""" Lorentzian model with half width half maximum equal to
:math:`\frac{6D}{L^2}(1 - \frac{sin(QL)}{QL})`
It is a model originally developed for jump diffusion in
liquids. But it can also be applied to diffusion in
crystalline lattices.
Atoms or molecules are `caged` by other atoms and jump into
a neighbouring cage from time to time.
The jump length `L` is identical for all sites.
Parameters
----------
w: float, list or :class:`~numpy:numpy.ndarray`
energy transfer (in 1/ps)
q: float, list or :class:`~numpy:numpy.ndarray`
momentum transfer (non-fitting, in 1/Angstrom).
scale: float
scale factor. Default to 1.
center: float
center of peak. Default to 0.
D: float
diffusion coefficient (in Angstrom^2/ps). Default to 0.23.
L: float
jump distance (in Angstrom). Default to 1.0.
Return
------
:class:`~numpy:numpy.ndarray`
output array
Examples
--------
>>> sqw = sqwChudleyElliotDiffusion([1, 2, 3], 1, 1, 0, 1, 1)
>>> round(sqw[0], 3)
0.052
>>> round(sqw[1], 3)
0.048
>>> round(sqw[2], 3)
0.042
>>> sqw = sqwChudleyElliotDiffusion(1, 1, 1, 0, 1, 1)
>>> round(sqw[0], 3)
0.052
Notes
-----
* The `sqwChudleyElliotDiffusion` is expressed as
.. math::
S(q, \omega) = \text{Lorentzian}(\omega, \text{scale}, \text{center},
\frac{6D}{l^2}(1 - \frac{sin(Ql)}{Ql}))
* Note that an equivalent expression is
.. math::
S(q, \omega) = \text{Lorentzian}(\omega, \text{scale}, \text{center},
\frac{1}{\tau}(1 - \frac{sin(Ql)}{Ql}))
with :math:`\tau=\frac{l^2}{6D}`.
References
----------
* <NAME>, Quasielastic Neutron Scattering and Solid State Diffusion
(Oxford, 2000).
* <NAME> and <NAME>, *Proc. Phys. Soc.* **77**,
353-361 (1961)
`link <https://iopscience.iop.org/article/10.1088/0370-1328/77/2/319/meta>`_
"""
# Input validation
w = np.asarray(w)
q = np.asarray(q, dtype=np.float32)
# Create output array
sqw = np.zeros((q.size, w.size))
# Get widths, EISFs and QISFs of model
hwhm, eisf, qisf = hwhmChudleyElliotDiffusion(q, D, L)
# Model
for i in range(q.size):
sqw[i, :] = QENSmodels.lorentzian(w, scale, center, hwhm[i])
# For Bumps use (needed for final plotting)
# Using a 'Curve' in bumps for each Q --> needs vector array
if q.size == 1:
sqw = np.reshape(sqw, w.size)
return sqw
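# Hedged sketch of evaluating a small S(q, w) map (grid values are arbitrary):
#
#   w = np.linspace(-2., 2., 5)        # energy transfers (1/ps)
#   q = np.array([0.5, 1.0, 2.0])      # momentum transfers (1/Angstrom)
#   sqw = sqwChudleyElliotDiffusion(w, q, scale=1., center=0., D=0.23, L=1.0)
#   # sqw.shape == (3, 5): one Lorentzian per q value, with widths from
#   # hwhmChudleyElliotDiffusion(q, D, L)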
if __name__ == "__main__":
import doctest
doctest.testmod()
|
[
"numpy.reshape",
"numpy.ones",
"QENSmodels.lorentzian",
"numpy.asarray",
"numpy.sinc",
"numpy.zeros",
"doctest.testmod"
] |
[((1468, 1499), 'numpy.asarray', 'np.asarray', (['q'], {'dtype': 'np.float32'}), '(q, dtype=np.float32)\n', (1478, 1499), True, 'import numpy as np\n'), ((1512, 1528), 'numpy.zeros', 'np.zeros', (['q.size'], {}), '(q.size)\n', (1520, 1528), True, 'import numpy as np\n'), ((1540, 1555), 'numpy.ones', 'np.ones', (['q.size'], {}), '(q.size)\n', (1547, 1555), True, 'import numpy as np\n'), ((1674, 1708), 'numpy.asarray', 'np.asarray', (['hwhm'], {'dtype': 'np.float32'}), '(hwhm, dtype=np.float32)\n', (1684, 1708), True, 'import numpy as np\n'), ((1720, 1747), 'numpy.reshape', 'np.reshape', (['hwhm', 'hwhm.size'], {}), '(hwhm, hwhm.size)\n', (1730, 1747), True, 'import numpy as np\n'), ((3935, 3948), 'numpy.asarray', 'np.asarray', (['w'], {}), '(w)\n', (3945, 3948), True, 'import numpy as np\n'), ((3958, 3989), 'numpy.asarray', 'np.asarray', (['q'], {'dtype': 'np.float32'}), '(q, dtype=np.float32)\n', (3968, 3989), True, 'import numpy as np\n'), ((4027, 4053), 'numpy.zeros', 'np.zeros', (['(q.size, w.size)'], {}), '((q.size, w.size))\n', (4035, 4053), True, 'import numpy as np\n'), ((4507, 4524), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (4522, 4524), False, 'import doctest\n'), ((4218, 4266), 'QENSmodels.lorentzian', 'QENSmodels.lorentzian', (['w', 'scale', 'center', 'hwhm[i]'], {}), '(w, scale, center, hwhm[i])\n', (4239, 4266), False, 'import QENSmodels\n'), ((4415, 4438), 'numpy.reshape', 'np.reshape', (['sqw', 'w.size'], {}), '(sqw, w.size)\n', (4425, 4438), True, 'import numpy as np\n'), ((1582, 1596), 'numpy.sinc', 'np.sinc', (['(q * L)'], {}), '(q * L)\n', (1589, 1596), True, 'import numpy as np\n')]
|