code | apis | extract_api
---|---|---|
import numpy as np
import pytest
import emcee
import os
from lenstronomy.Cosmo.lens_cosmo import LensCosmo
from hierarc.Sampling.mcmc_sampling import MCMCSampler
from astropy.cosmology import FlatLambdaCDM
class TestMCMCSampling(object):
def setup(self):
np.random.seed(seed=41)
self.z_L = 0.8
self.z_S = 3.0
self.H0_true = 70
self.omega_m_true = 0.3
self.cosmo = FlatLambdaCDM(H0=self.H0_true, Om0=self.omega_m_true, Ob0=0.05)
lensCosmo = LensCosmo(self.z_L, self.z_S, cosmo=self.cosmo)
self.Dd_true = lensCosmo.dd
self.D_dt_true = lensCosmo.ddt
self.sigma_Dd = 100
self.sigma_Ddt = 100
num_samples = 10000
self.D_dt_samples = np.random.normal(self.D_dt_true, self.sigma_Ddt, num_samples)
self.D_d_samples = np.random.normal(self.Dd_true, self.sigma_Dd, num_samples)
def test_mcmc_emcee(self):
n_walkers = 6
n_run = 2
n_burn = 2
kwargs_mean_start = {'kwargs_cosmo': {'h0': self.H0_true}}
kwargs_fixed = {'om': self.omega_m_true}
kwargs_sigma_start = {'kwargs_cosmo': {'h0': 5}}
kwargs_lower = {'h0': 10}
kwargs_upper = {'h0': 200}
kwargs_likelihood_list = [{'z_lens': self.z_L, 'z_source': self.z_S, 'likelihood_type': 'DdtDdKDE',
'dd_samples': self.D_d_samples, 'ddt_samples': self.D_dt_samples,
'kde_type': 'scipy_gaussian', 'bandwidth': 1}]
cosmology = 'FLCDM'
kwargs_bounds = {'kwargs_fixed_cosmo': kwargs_fixed, 'kwargs_lower_cosmo': kwargs_lower,
'kwargs_upper_cosmo': kwargs_upper}
path = os.getcwd()
backup_filename = 'test_emcee.h5'
try:
os.remove(os.path.join(path, backup_filename))  # remove any leftover backup file from a previous run
except OSError:
pass
backend = emcee.backends.HDFBackend(backup_filename)
kwargs_emcee = {'backend': backend}
mcmc_sampler = MCMCSampler(kwargs_likelihood_list=kwargs_likelihood_list, cosmology=cosmology,
kwargs_bounds=kwargs_bounds, ppn_sampling=False,
lambda_mst_sampling=False, lambda_mst_distribution='delta', anisotropy_sampling=False,
anisotropy_model='OM', custom_prior=None, interpolate_cosmo=True, num_redshift_interp=100,
cosmo_fixed=None)
samples, log_prob = mcmc_sampler.mcmc_emcee(n_walkers, n_burn, n_run, kwargs_mean_start, kwargs_sigma_start, **kwargs_emcee)
assert len(samples) == n_walkers*n_run
assert len(log_prob) == n_walkers*n_run
n_burn_2 = 0
n_run_2 = 2
samples, log_prob = mcmc_sampler.mcmc_emcee(n_walkers, n_burn_2, n_run_2, kwargs_mean_start, kwargs_sigma_start,
continue_from_backend=True, **kwargs_emcee)
assert len(samples) == n_walkers * (n_run + n_run_2 + n_burn)
assert len(log_prob) == n_walkers * (n_run + n_run_2 + n_burn)
name_list = mcmc_sampler.param_names(latex_style=False)
assert len(name_list) == 1
os.remove(os.path.join(path, backup_filename))  # just remove the backup file created above
if __name__ == '__main__':
pytest.main()
|
[
"astropy.cosmology.FlatLambdaCDM",
"numpy.random.seed",
"lenstronomy.Cosmo.lens_cosmo.LensCosmo",
"os.getcwd",
"emcee.backends.HDFBackend",
"hierarc.Sampling.mcmc_sampling.MCMCSampler",
"pytest.main",
"numpy.random.normal",
"os.path.join"
] |
[((3310, 3323), 'pytest.main', 'pytest.main', ([], {}), '()\n', (3321, 3323), False, 'import pytest\n'), ((271, 294), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(41)'}), '(seed=41)\n', (285, 294), True, 'import numpy as np\n'), ((421, 484), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {'H0': 'self.H0_true', 'Om0': 'self.omega_m_true', 'Ob0': '(0.05)'}), '(H0=self.H0_true, Om0=self.omega_m_true, Ob0=0.05)\n', (434, 484), False, 'from astropy.cosmology import FlatLambdaCDM\n'), ((505, 552), 'lenstronomy.Cosmo.lens_cosmo.LensCosmo', 'LensCosmo', (['self.z_L', 'self.z_S'], {'cosmo': 'self.cosmo'}), '(self.z_L, self.z_S, cosmo=self.cosmo)\n', (514, 552), False, 'from lenstronomy.Cosmo.lens_cosmo import LensCosmo\n'), ((742, 803), 'numpy.random.normal', 'np.random.normal', (['self.D_dt_true', 'self.sigma_Ddt', 'num_samples'], {}), '(self.D_dt_true, self.sigma_Ddt, num_samples)\n', (758, 803), True, 'import numpy as np\n'), ((831, 889), 'numpy.random.normal', 'np.random.normal', (['self.Dd_true', 'self.sigma_Dd', 'num_samples'], {}), '(self.Dd_true, self.sigma_Dd, num_samples)\n', (847, 889), True, 'import numpy as np\n'), ((1704, 1715), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1713, 1715), False, 'import os\n'), ((1927, 1969), 'emcee.backends.HDFBackend', 'emcee.backends.HDFBackend', (['backup_filename'], {}), '(backup_filename)\n', (1952, 1969), False, 'import emcee\n'), ((2038, 2379), 'hierarc.Sampling.mcmc_sampling.MCMCSampler', 'MCMCSampler', ([], {'kwargs_likelihood_list': 'kwargs_likelihood_list', 'cosmology': 'cosmology', 'kwargs_bounds': 'kwargs_bounds', 'ppn_sampling': '(False)', 'lambda_mst_sampling': '(False)', 'lambda_mst_distribution': '"""delta"""', 'anisotropy_sampling': '(False)', 'anisotropy_model': '"""OM"""', 'custom_prior': 'None', 'interpolate_cosmo': '(True)', 'num_redshift_interp': '(100)', 'cosmo_fixed': 'None'}), "(kwargs_likelihood_list=kwargs_likelihood_list, cosmology=\n cosmology, kwargs_bounds=kwargs_bounds, ppn_sampling=False,\n lambda_mst_sampling=False, lambda_mst_distribution='delta',\n anisotropy_sampling=False, anisotropy_model='OM', custom_prior=None,\n interpolate_cosmo=True, num_redshift_interp=100, cosmo_fixed=None)\n", (2049, 2379), False, 'from hierarc.Sampling.mcmc_sampling import MCMCSampler\n'), ((3196, 3231), 'os.path.join', 'os.path.join', (['path', 'backup_filename'], {}), '(path, backup_filename)\n', (3208, 3231), False, 'import os\n'), ((1793, 1828), 'os.path.join', 'os.path.join', (['path', 'backup_filename'], {}), '(path, backup_filename)\n', (1805, 1828), False, 'import os\n')]
|
import argparse
import joblib as jl
import numpy as np
import basty.project.experiment_processing as experiment_processing
parser = argparse.ArgumentParser(
description="Report details about active and dormant masks."
)
parser.add_argument(
"--main-cfg-path",
type=str,
required=True,
help="Path to the main configuration file.",
)
parser.add_argument(
"--evaluate-active-bouts",
action=argparse.BooleanOptionalAction,
)
parser.add_argument(
"--evaluate-dormant-epochs",
action=argparse.BooleanOptionalAction,
)
parser.add_argument(
"--silent",
action=argparse.BooleanOptionalAction,
)
args = parser.parse_args()
def get_recall_report(recall_dict, behavior_domain):
report = ""
for behavior in behavior_domain:
if recall_dict[behavior] is None:
report += f"\t\t- {behavior} is not observed.\n"
else:
report += (
f"\t\t- {round(recall_dict[behavior], 2)} of {behavior} is observed.\n"
)
return report
def get_evaluation_report(evaluation_dict, behavior_domain):
report = ""
report += (
f"\t= {round(evaluation_dict['masked_percent'], 2)} of frames are masked.\n"
)
report += get_recall_report(evaluation_dict["recall_dict"], behavior_domain)
return report
def get_recall_scores(masked_annotation_counts, annotation_counts, expt_record):
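# Recall per behavior: the fraction of all annotated frames of that behavior that remain after masking.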
all_unique, all_counts = annotation_counts
masked_unique, masked_counts = masked_annotation_counts
all_count_dict = {}
masked_count_dict = {}
for idx, label in enumerate(all_unique):
behavior = expt_record.label_to_behavior[label]
all_count_dict[behavior] = all_counts[idx]
for idx, label in enumerate(masked_unique):
behavior = expt_record.label_to_behavior[label]
masked_count_dict[behavior] = masked_counts[idx]
recall_dict = {}
for behavior in list(expt_record.label_to_behavior.values()):
all_count = all_count_dict.get(behavior, 0)
masked_count = masked_count_dict.get(behavior, 0)
recall_dict[behavior] = masked_count / all_count if all_count else None
return recall_dict
def active_bout_evaluation(annotations, annotation_counts, expt_record):
maskA = expt_record.mask_active
masked_annotation_counts = np.unique(annotations[maskA], return_counts=True)
masked_percent = round(np.count_nonzero(maskA) / annotations.shape[0], 2)
recall_dict = get_recall_scores(
masked_annotation_counts, annotation_counts, expt_record
)
evaluation_dict = {"recall_dict": recall_dict, "masked_percent": masked_percent}
return evaluation_dict
def dormant_epoch_evaluation(annotations, annotation_counts, expt_record):
maskD = expt_record.mask_dormant
masked_annotation_counts = np.unique(annotations[maskD], return_counts=True)
masked_percent = round(np.count_nonzero(maskD) / annotations.shape[0], 2)
recall_dict = get_recall_scores(
masked_annotation_counts, annotation_counts, expt_record
)
evaluation_dict = {"recall_dict": recall_dict, "masked_percent": masked_percent}
return evaluation_dict
def active_bout_in_dormant_epoch_evaluation(
annotations, annotation_counts, expt_record
):
maskDA = np.logical_and(expt_record.mask_active, expt_record.mask_dormant)
masked_annotation_counts = np.unique(annotations[maskDA], return_counts=True)
masked_percent = round(np.count_nonzero(maskDA) / annotations.shape[0], 2)
recall_dict = get_recall_scores(
masked_annotation_counts, annotation_counts, expt_record
)
evaluation_dict = {"recall_dict": recall_dict, "masked_percent": masked_percent}
return evaluation_dict
def evaluate_predicted_masks(
project_obj,
evaluate_active_bouts=False,
evaluate_dormant_epochs=False,
verbose=True,
):
all_expt_names = list(project_obj.expt_path_dict.keys())
for expt_name in all_expt_names:
expt_path = project_obj.expt_path_dict[expt_name]
expt_record = jl.load(expt_path / "expt_record.z")
report = ""
if expt_record.has_annotation:
report += "============================================================\n"
report += f"Evaluation for {expt_name};\n"
behavior_domain = list(expt_record.label_to_behavior.values())
annotations = np.load(expt_path / "annotations.npy")
annotation_counts = np.unique(annotations, return_counts=True)
if evaluate_active_bouts:
report += "- Performance report for active bouts:\n"
evaluation_dict = active_bout_evaluation(
annotations, annotation_counts, expt_record
)
report += get_evaluation_report(evaluation_dict, behavior_domain)
if evaluate_dormant_epochs:
report += "- Performance report for dormant epochs:\n"
evaluation_dict = dormant_epoch_evaluation(
annotations, annotation_counts, expt_record
)
report += get_evaluation_report(evaluation_dict, behavior_domain)
if evaluate_dormant_epochs and evaluate_active_bouts:
report += "- Performance report for active bouts in dormant epochs:\n"
evaluation_dict = active_bout_in_dormant_epoch_evaluation(
annotations, annotation_counts, expt_record
)
report += get_evaluation_report(evaluation_dict, behavior_domain)
report += "============================================================\n"
if verbose:
print(report)
if __name__ == "__main__":
project = experiment_processing.Project(
args.main_cfg_path,
)
evaluate_predicted_masks(
project,
evaluate_active_bouts=args.evaluate_active_bouts,
evaluate_dormant_epochs=args.evaluate_dormant_epochs,
verbose=not args.silent,
)
|
[
"numpy.load",
"numpy.count_nonzero",
"argparse.ArgumentParser",
"numpy.logical_and",
"basty.project.experiment_processing.Project",
"joblib.load",
"numpy.unique"
] |
[((135, 225), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Report details about active and dormant masks."""'}), "(description=\n 'Report details about active and dormant masks.')\n", (158, 225), False, 'import argparse\n'), ((2314, 2363), 'numpy.unique', 'np.unique', (['annotations[maskA]'], {'return_counts': '(True)'}), '(annotations[maskA], return_counts=True)\n', (2323, 2363), True, 'import numpy as np\n'), ((2810, 2859), 'numpy.unique', 'np.unique', (['annotations[maskD]'], {'return_counts': '(True)'}), '(annotations[maskD], return_counts=True)\n', (2819, 2859), True, 'import numpy as np\n'), ((3272, 3337), 'numpy.logical_and', 'np.logical_and', (['expt_record.mask_active', 'expt_record.mask_dormant'], {}), '(expt_record.mask_active, expt_record.mask_dormant)\n', (3286, 3337), True, 'import numpy as np\n'), ((3369, 3419), 'numpy.unique', 'np.unique', (['annotations[maskDA]'], {'return_counts': '(True)'}), '(annotations[maskDA], return_counts=True)\n', (3378, 3419), True, 'import numpy as np\n'), ((5728, 5777), 'basty.project.experiment_processing.Project', 'experiment_processing.Project', (['args.main_cfg_path'], {}), '(args.main_cfg_path)\n', (5757, 5777), True, 'import basty.project.experiment_processing as experiment_processing\n'), ((4039, 4075), 'joblib.load', 'jl.load', (["(expt_path / 'expt_record.z')"], {}), "(expt_path / 'expt_record.z')\n", (4046, 4075), True, 'import joblib as jl\n'), ((2392, 2415), 'numpy.count_nonzero', 'np.count_nonzero', (['maskA'], {}), '(maskA)\n', (2408, 2415), True, 'import numpy as np\n'), ((2888, 2911), 'numpy.count_nonzero', 'np.count_nonzero', (['maskD'], {}), '(maskD)\n', (2904, 2911), True, 'import numpy as np\n'), ((3448, 3472), 'numpy.count_nonzero', 'np.count_nonzero', (['maskDA'], {}), '(maskDA)\n', (3464, 3472), True, 'import numpy as np\n'), ((4379, 4417), 'numpy.load', 'np.load', (["(expt_path / 'annotations.npy')"], {}), "(expt_path / 'annotations.npy')\n", (4386, 4417), True, 'import numpy as np\n'), ((4450, 4492), 'numpy.unique', 'np.unique', (['annotations'], {'return_counts': '(True)'}), '(annotations, return_counts=True)\n', (4459, 4492), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from numpy.distutils.core import Extension, setup
setup(name='hw',
description='Simple example on calling F77 from Python',
author='<NAME>',
author_email='<EMAIL>',
ext_modules=[Extension(name='hw', sources=['../hw.f'])],
)
|
[
"numpy.distutils.core.Extension"
] |
[((225, 266), 'numpy.distutils.core.Extension', 'Extension', ([], {'name': '"""hw"""', 'sources': "['../hw.f']"}), "(name='hw', sources=['../hw.f'])\n", (234, 266), False, 'from numpy.distutils.core import Extension, setup\n')]
|
import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import matplotlib
matplotlib.use('Agg')
from tqdm import tqdm
import time
import numpy as np
from utils.Config import opt
from models.faster_rcnn_vgg16 import FasterRCNNVGG16
from models.faster_rcnn_resnet import FasterRCNNResNet50
from torch.autograd import Variable
from trainer import FasterRCNNTrainer
from utils import array_tool as at
from utils.vis_tool import visdom_bbox, rescale_back, save_gt_pred, save_pred_fig
from data.dataset import inverse_normalize, get_train_loader, get_test_loader, get_train_val_loader
from skimage import io, transform
from data.data_utils import resize_bbox
import pandas as pd
from collections import OrderedDict
from utils.eval_tool import eval_mAP
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
def pred_test():
faster_rcnn = FasterRCNNVGG16()
# faster_rcnn = FasterRCNNResNet50()
trainer = FasterRCNNTrainer(faster_rcnn).cuda()
# trainer.load('./checkpoints/RSNA_skip_09111650_0.19862726') # 0.062
# trainer.load('./checkpoints/fasterrcnn_09102119_0.2340059') # 0
# trainer.load('./checkpoints/RSNA_skip_09100834_0.16078612') # 0
# trainer.load('./checkpoints/RSNA_skip_10252107_0.22194205') # 0.041
trainer.load('./checkpoints/RSNA_no_skip_09131705_0.33119902') # 0.089
# trainer.load('./checkpoints/RSNA_no_skip_09162308_0.21759672') # 0.015
# trainer.load('./checkpoints/RSNA_skip_10011111') # 0
# trainer.load('./checkpoints/RSNA_skip_10270402')
opt.caffe_pretrain = True # this model was trained from caffe-pretrained model
# Plot examples on training set
print('load data')
testloader = get_test_loader(opt.test_dir, batch_size=opt.batch_size,
shuffle=opt.shuffle, num_workers=opt.num_workers,
pin_memory=opt.pin_memory)
patientId = []
PredictionString = []
for ii, sample in tqdm(enumerate(testloader)):
img_id, img, bbox, scale, label = sample['img_id'], sample['image'], np.zeros((1, 0, 4)), \
sample['scale'], np.zeros((1, 0, 1))
scale = at.scalar(scale)
img = at.tonumpy(img)[0]
# plot predicti bboxes
img = inverse_normalize(at.tonumpy(img[0]))
pred_boxes, pred_labels, pred_scores = trainer.faster_rcnn.predict([img], visualize=True)
pred_boxes = at.tonumpy(pred_boxes[0])
pred_labels = at.tonumpy(pred_labels[0]).reshape(-1)
pred_scores = at.tonumpy(pred_scores[0])
# Rescale back
img, bbox, pred_boxes = rescale_back(img, at.tonumpy(bbox[0]), pred_boxes, scale)
save_path = os.path.join(opt.result_dir, 'pred_on_test_skip', img_id[0] + '.png')
save_pred_fig(img, pred_boxes, pred_scores, img_id, save_path)
# Save Info
patientId.append(img_id[0])
tmp = []
for i in range(pred_boxes.shape[0]):
# prediction string format: confidence x0 y0 width height, one entry per detected box
y0, x0, y1, x1 = pred_boxes[i][0], pred_boxes[i][1], pred_boxes[i][2], pred_boxes[i][3]
h = y1 - y0
w = x1 - x0
tmp.append([str(round(pred_scores[i], 2)), ' ', str(int(x0)), ' ', str(int(y0)), ' ', str(int(w)), ' ', str(int(h)), ' '])
pre_str = ''.join([item for sublist in tmp for item in sublist])
PredictionString.append(pre_str[:-1])
df = pd.DataFrame(OrderedDict((('patientId', pd.Series(patientId)), ('PredictionString', pd.Series(PredictionString)))))
df.to_csv(os.path.join(opt.result_dir, 'pred_on_test_skip.csv'), index=False)
def pred_train():
faster_rcnn = FasterRCNNVGG16()
# faster_rcnn = FasterRCNNResNet50()
trainer = FasterRCNNTrainer(faster_rcnn).cuda()
trainer.load('./checkpoints/RSNA_skip_11061805_0.27202734')
opt.caffe_pretrain = True # this model was trained from caffe-pretrained model
# Plot examples on training set
print('load data')
train_loader, val_loader = get_train_val_loader(opt.root_dir, batch_size=opt.batch_size, val_ratio=0.1,
shuffle=opt.shuffle, num_workers=opt.num_workers,
pin_memory=opt.pin_memory)
patientId = []
PredictionString = []
for ii, sample in tqdm(enumerate(val_loader)):
if len(sample.keys()) == 5:
img_id, img, bbox_, scale, label_ = sample['img_id'], sample['image'], sample['bbox'], sample['scale'], \
sample['label']
img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
img, bbox, label = Variable(img), Variable(bbox), Variable(label)
else:
img_id, img, bbox, scale, label = sample['img_id'], sample['image'], np.zeros((1, 0, 4)), \
sample['scale'], np.zeros((1, 0, 1))
img = img.cuda().float()
img = Variable(img)
scale = at.scalar(scale)
img = inverse_normalize(at.tonumpy(img[0]))
pred_boxes, pred_labels, pred_scores = trainer.faster_rcnn.predict([img], visualize=True)
pred_boxes = at.tonumpy(pred_boxes[0])
pred_labels = at.tonumpy(pred_labels[0]).reshape(-1)
pred_scores = at.tonumpy(pred_scores[0])
# Rescale back
img, bbox, pred_boxes = rescale_back(img, at.tonumpy(bbox[0]), pred_boxes, scale)
# Save predicted images
save_path = os.path.join(opt.result_dir, 'pred_on_val_skip', img_id[0] + '.png')
save_gt_pred(img, bbox, pred_boxes, pred_scores, img_id, save_path)
# Save Info
patientId.append(img_id[0])
tmp = []
for i in range(pred_boxes.shape[0]):
y0, x0, y1, x1 = pred_boxes[i][0], pred_boxes[i][1], pred_boxes[i][2], pred_boxes[i][3]
h = y1 - y0
w = x1 - x0
tmp.append([str(pred_scores[i]), ' ', str(x0), ' ', str(y0), ' ', str(w), ' ', str(h), ' '])
pre_str = ''.join([item for sublist in tmp for item in sublist])
PredictionString.append(pre_str[:-1])
df = pd.DataFrame(OrderedDict((('patientId', pd.Series(patientId)), ('PredictionString', pd.Series(PredictionString)))))
df.to_csv(os.path.join(opt.result_dir, 'pred_on_val_skip.csv'), index=False)
if __name__ == '__main__':
start = time.time()
pred_train() # Predict the result on training set
# pred_test() # Predict the result on testing set
end = time.time()
print('total time: ', (end-start)/3600., ' hours')
|
[
"utils.array_tool.tonumpy",
"data.dataset.get_test_loader",
"data.dataset.get_train_val_loader",
"utils.array_tool.scalar",
"utils.vis_tool.save_gt_pred",
"models.faster_rcnn_vgg16.FasterRCNNVGG16",
"numpy.zeros",
"torch.autograd.Variable",
"time.time",
"utils.vis_tool.save_pred_fig",
"matplotlib.use",
"pandas.Series",
"trainer.FasterRCNNTrainer",
"os.path.join"
] |
[((71, 92), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (85, 92), False, 'import matplotlib\n'), ((1220, 1237), 'models.faster_rcnn_vgg16.FasterRCNNVGG16', 'FasterRCNNVGG16', ([], {}), '()\n', (1235, 1237), False, 'from models.faster_rcnn_vgg16 import FasterRCNNVGG16\n'), ((2053, 2191), 'data.dataset.get_test_loader', 'get_test_loader', (['opt.test_dir'], {'batch_size': 'opt.batch_size', 'shuffle': 'opt.shuffle', 'num_workers': 'opt.num_workers', 'pin_memory': 'opt.pin_memory'}), '(opt.test_dir, batch_size=opt.batch_size, shuffle=opt.\n shuffle, num_workers=opt.num_workers, pin_memory=opt.pin_memory)\n', (2068, 2191), False, 'from data.dataset import inverse_normalize, get_train_loader, get_test_loader, get_train_val_loader\n'), ((3973, 3990), 'models.faster_rcnn_vgg16.FasterRCNNVGG16', 'FasterRCNNVGG16', ([], {}), '()\n', (3988, 3990), False, 'from models.faster_rcnn_vgg16 import FasterRCNNVGG16\n'), ((4323, 4485), 'data.dataset.get_train_val_loader', 'get_train_val_loader', (['opt.root_dir'], {'batch_size': 'opt.batch_size', 'val_ratio': '(0.1)', 'shuffle': 'opt.shuffle', 'num_workers': 'opt.num_workers', 'pin_memory': 'opt.pin_memory'}), '(opt.root_dir, batch_size=opt.batch_size, val_ratio=0.1,\n shuffle=opt.shuffle, num_workers=opt.num_workers, pin_memory=opt.pin_memory\n )\n', (4343, 4485), False, 'from data.dataset import inverse_normalize, get_train_loader, get_test_loader, get_train_val_loader\n'), ((6715, 6726), 'time.time', 'time.time', ([], {}), '()\n', (6724, 6726), False, 'import time\n'), ((6847, 6858), 'time.time', 'time.time', ([], {}), '()\n', (6856, 6858), False, 'import time\n'), ((2546, 2562), 'utils.array_tool.scalar', 'at.scalar', (['scale'], {}), '(scale)\n', (2555, 2562), True, 'from utils import array_tool as at\n'), ((2800, 2825), 'utils.array_tool.tonumpy', 'at.tonumpy', (['pred_boxes[0]'], {}), '(pred_boxes[0])\n', (2810, 2825), True, 'from utils import array_tool as at\n'), ((2909, 2935), 'utils.array_tool.tonumpy', 'at.tonumpy', (['pred_scores[0]'], {}), '(pred_scores[0])\n', (2919, 2935), True, 'from utils import array_tool as at\n'), ((3071, 3140), 'os.path.join', 'os.path.join', (['opt.result_dir', '"""pred_on_test_skip"""', "(img_id[0] + '.png')"], {}), "(opt.result_dir, 'pred_on_test_skip', img_id[0] + '.png')\n", (3083, 3140), False, 'import os\n'), ((3149, 3211), 'utils.vis_tool.save_pred_fig', 'save_pred_fig', (['img', 'pred_boxes', 'pred_scores', 'img_id', 'save_path'], {}), '(img, pred_boxes, pred_scores, img_id, save_path)\n', (3162, 3211), False, 'from utils.vis_tool import visdom_bbox, rescale_back, save_gt_pred, save_pred_fig\n'), ((3868, 3921), 'os.path.join', 'os.path.join', (['opt.result_dir', '"""pred_on_test_skip.csv"""'], {}), "(opt.result_dir, 'pred_on_test_skip.csv')\n", (3880, 3921), False, 'import os\n'), ((5342, 5358), 'utils.array_tool.scalar', 'at.scalar', (['scale'], {}), '(scale)\n', (5351, 5358), True, 'from utils import array_tool as at\n'), ((5532, 5557), 'utils.array_tool.tonumpy', 'at.tonumpy', (['pred_boxes[0]'], {}), '(pred_boxes[0])\n', (5542, 5557), True, 'from utils import array_tool as at\n'), ((5641, 5667), 'utils.array_tool.tonumpy', 'at.tonumpy', (['pred_scores[0]'], {}), '(pred_scores[0])\n', (5651, 5667), True, 'from utils import array_tool as at\n'), ((5835, 5903), 'os.path.join', 'os.path.join', (['opt.result_dir', '"""pred_on_val_skip"""', "(img_id[0] + '.png')"], {}), "(opt.result_dir, 'pred_on_val_skip', img_id[0] + '.png')\n", (5847, 5903), False, 'import os\n'), ((5912, 5979), 
'utils.vis_tool.save_gt_pred', 'save_gt_pred', (['img', 'bbox', 'pred_boxes', 'pred_scores', 'img_id', 'save_path'], {}), '(img, bbox, pred_boxes, pred_scores, img_id, save_path)\n', (5924, 5979), False, 'from utils.vis_tool import visdom_bbox, rescale_back, save_gt_pred, save_pred_fig\n'), ((6607, 6659), 'os.path.join', 'os.path.join', (['opt.result_dir', '"""pred_on_val_skip.csv"""'], {}), "(opt.result_dir, 'pred_on_val_skip.csv')\n", (6619, 6659), False, 'import os\n'), ((1293, 1323), 'trainer.FasterRCNNTrainer', 'FasterRCNNTrainer', (['faster_rcnn'], {}), '(faster_rcnn)\n', (1310, 1323), False, 'from trainer import FasterRCNNTrainer\n'), ((2428, 2447), 'numpy.zeros', 'np.zeros', (['(1, 0, 4)'], {}), '((1, 0, 4))\n', (2436, 2447), True, 'import numpy as np\n'), ((2510, 2529), 'numpy.zeros', 'np.zeros', (['(1, 0, 1)'], {}), '((1, 0, 1))\n', (2518, 2529), True, 'import numpy as np\n'), ((2578, 2593), 'utils.array_tool.tonumpy', 'at.tonumpy', (['img'], {}), '(img)\n', (2588, 2593), True, 'from utils import array_tool as at\n'), ((2661, 2679), 'utils.array_tool.tonumpy', 'at.tonumpy', (['img[0]'], {}), '(img[0])\n', (2671, 2679), True, 'from utils import array_tool as at\n'), ((3010, 3029), 'utils.array_tool.tonumpy', 'at.tonumpy', (['bbox[0]'], {}), '(bbox[0])\n', (3020, 3029), True, 'from utils import array_tool as at\n'), ((4046, 4076), 'trainer.FasterRCNNTrainer', 'FasterRCNNTrainer', (['faster_rcnn'], {}), '(faster_rcnn)\n', (4063, 4076), False, 'from trainer import FasterRCNNTrainer\n'), ((5311, 5324), 'torch.autograd.Variable', 'Variable', (['img'], {}), '(img)\n', (5319, 5324), False, 'from torch.autograd import Variable\n'), ((5392, 5410), 'utils.array_tool.tonumpy', 'at.tonumpy', (['img[0]'], {}), '(img[0])\n', (5402, 5410), True, 'from utils import array_tool as at\n'), ((5742, 5761), 'utils.array_tool.tonumpy', 'at.tonumpy', (['bbox[0]'], {}), '(bbox[0])\n', (5752, 5761), True, 'from utils import array_tool as at\n'), ((2848, 2874), 'utils.array_tool.tonumpy', 'at.tonumpy', (['pred_labels[0]'], {}), '(pred_labels[0])\n', (2858, 2874), True, 'from utils import array_tool as at\n'), ((5005, 5018), 'torch.autograd.Variable', 'Variable', (['img'], {}), '(img)\n', (5013, 5018), False, 'from torch.autograd import Variable\n'), ((5020, 5034), 'torch.autograd.Variable', 'Variable', (['bbox'], {}), '(bbox)\n', (5028, 5034), False, 'from torch.autograd import Variable\n'), ((5036, 5051), 'torch.autograd.Variable', 'Variable', (['label'], {}), '(label)\n', (5044, 5051), False, 'from torch.autograd import Variable\n'), ((5148, 5167), 'numpy.zeros', 'np.zeros', (['(1, 0, 4)'], {}), '((1, 0, 4))\n', (5156, 5167), True, 'import numpy as np\n'), ((5236, 5255), 'numpy.zeros', 'np.zeros', (['(1, 0, 1)'], {}), '((1, 0, 1))\n', (5244, 5255), True, 'import numpy as np\n'), ((5580, 5606), 'utils.array_tool.tonumpy', 'at.tonumpy', (['pred_labels[0]'], {}), '(pred_labels[0])\n', (5590, 5606), True, 'from utils import array_tool as at\n'), ((3778, 3798), 'pandas.Series', 'pd.Series', (['patientId'], {}), '(patientId)\n', (3787, 3798), True, 'import pandas as pd\n'), ((3822, 3849), 'pandas.Series', 'pd.Series', (['PredictionString'], {}), '(PredictionString)\n', (3831, 3849), True, 'import pandas as pd\n'), ((6517, 6537), 'pandas.Series', 'pd.Series', (['patientId'], {}), '(patientId)\n', (6526, 6537), True, 'import pandas as pd\n'), ((6561, 6588), 'pandas.Series', 'pd.Series', (['PredictionString'], {}), '(PredictionString)\n', (6570, 6588), True, 'import pandas as pd\n')]
|
# Copyright (c) 2018 Copyright holder of the paper Generative Adversarial Model Learning
# submitted to NeurIPS 2019 for review
# All rights reserved.
from rllab.misc.instrument import run_experiment_custom
from rllab.dynamic_models.cartpole_model import CartPoleModel
from rllab.torch.models.nn_discriminator import NNDiscriminator
from rllab.torch.models.gaussian_upper_level_policy import GaussianUpperLevelPolicy
from rllab.torch.algos.gaml_episode_based_modellearning import GAMLEpisodeBasedModelLearning
from rllab.torch.utils.misc import str2bool
import numpy as np
import joblib
import argparse
import torch
def run_task(v):
# load policy for gaussian noise for sigma^2: 0.05 and 0.01
# data = joblib.load(
data = joblib.load(v["expert_policy_path"])
policy = data['policy']
policy.normalized_input = [False, False, False, False]
policy.normalized_output = [True]
file_name = v["expert_data_path"]
expert_paths = joblib.load(file_name)
# we add 1 to the observation dimension because we use sin and cos of theta, plus 1 more for the timestep
action_dim = 1
observation_dim = 4
discriminator = NNDiscriminator(input_dim=action_dim + (observation_dim + 1) * 2 + 1)
# we initialize the model randomly to get the same initialization as we would have with MLE, and use
# its parameters to initialize the upper-level policy
imitation_env = CartPoleModel(initRandom=True, init_std=np.sqrt([v["var_x"], v["var_theta"]]))
theta = imitation_env.theta.detach().numpy()
init_std = imitation_env.std.detach().numpy()
mean = torch.from_numpy(np.concatenate([theta, init_std])).float()
covariance = torch.from_numpy(np.diag([0.05, 0.05, 0.05, 0.005, 0.005])).float()
upper_level_policy = GaussianUpperLevelPolicy(mean, covariance)
algo = GAMLEpisodeBasedModelLearning(policy,
expert_paths,
imitation_env,
discriminator,
upper_level_policy,
n_itr=v["n_itr"],
n_traj=v["n_traj"],
n_samples=v["n_samples"],
max_path_length=v["max_path_length"],
use_timesteps=v["use_timestep"],
use_state_diff_in_discriminator=v["use_state_diff_discrim"],
discount=0.995,
discriminator_updates_per_itr=v["discriminator_updates_per_itr"],
)
algo.train()
def run_GAML_training(dataset_num_traj, seed):
from pathlib import Path
mainDir = Path(__file__).parents[1]
n_itr = 200
n_traj = 25
n_samples = 50
var_x = 0.01
var_theta = 0.01
discriminator_updates_per_itr = 5
expert_policy_path = str(Path.joinpath(mainDir, "datasets/policy_cartpole_swingup.pkl"))
expert_data_path = str(Path.joinpath(mainDir, "datasets/expert_paths_n_traj_100_max_path_500_selected_" + str(dataset_num_traj) + ".pkl"))
max_path_length = 500
discount = 0.995
resultsPath = str(Path.joinpath(mainDir, "results/"))
run_experiment_custom(
run_task,
# Number of parallel workers for sampling
n_parallel=1,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="gap",
snapshot_gap=5,
# Specifies the seed for the experiment. If this is not provided, a random seed
# will be used
seed=seed,
log_dir=resultsPath,
log_debug_log_only=True,
log_tabular_only=True,
exp_prefix="GAML_cartpole_swingup",
variant={'n_itr': n_itr,
'n_traj': n_traj,
'var_x': var_x,
'var_theta': var_theta,
"expert_policy_path": expert_policy_path,
"expert_data_path": expert_data_path,
"discriminator_updates_per_itr": discriminator_updates_per_itr,
"max_path_length": max_path_length,
"discount":discount,
"n_samples":n_samples,
"use_timestep":True,
"use_state_diff_discrim":False,
},
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=1, type=int, help="seed for the run")
parser.add_argument('--tempdir', default='/tmp/', type=str, help='temp directory where result are written first')
parser.add_argument('--exp_prefix', default='', type=str, help='prefix folder ')
parser.add_argument('--n_itr', default=201, type=int, help="Number of Iterations")
parser.add_argument('--n_traj', default=25, type=int, help='Number of used expert trajectories')
parser.add_argument('--n_samples', default=50, type=int, help='Number of used expert trajectories')
parser.add_argument('--var_x', default=0.01, type=float, help='variance of x')
parser.add_argument('--var_theta', default=0.01, type=float, help='variance of theta')
parser.add_argument('--use_timestep', default="True", type=str, help='if we use the timestep as input for the discriminator')
parser.add_argument('--use_forward_KL', default="False", type=str, help='if we forward or the reverse KL')
parser.add_argument('--use_state_diff_discrim', default="False", type=str, help='Use state diff or next_state for the discriminator')
parser.add_argument('--discriminator_updates_per_itr', default=5, type=int,
help="number of updates for the discriminator we want to do in one iteration")
parser.add_argument('--expert_policy_path',
default="/home/thh2rng/Documents/gaml/datasets/policy_cartpole_swingup.pkl",
type=str, help="filepath of the expert policy")
parser.add_argument('--expert_data_path',
default="/home/thh2rng/Documents/gaml/datasets/expert_paths_n_traj_100_max_path_500_selected_100.pkl",
type=str, help="filepath of the expert data rollouts")
parser.add_argument('--surrogateDiscriminator', default="ExpertDataDiscriminator", type=str)
parser.add_argument('--max_path_length', default=500, type=int)
parser.add_argument('--discount', default=0.995, type=float, help='discount factor')
args = parser.parse_args()
run_experiment_custom(
run_task,
# Number of parallel workers for sampling
n_parallel=1,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="gap",
snapshot_gap=10,
# Specifies the seed for the experiment. If this is not provided, a random seed
# will be used
seed=args.seed,
log_dir=args.tempdir,
#log_debug_log_only=True,
#log_tabular_only=True,
exp_prefix=args.exp_prefix,
variant={'n_itr': args.n_itr,
'n_traj': args.n_traj,
'var_x': args.var_x,
'var_theta': args.var_theta,
'use_timestep': str2bool(args.use_timestep),
'use_forward_KL': str2bool(args.use_forward_KL),
'use_state_diff_discrim': str2bool(args.use_state_diff_discrim),
"expert_policy_path": args.expert_policy_path,
"expert_data_path": args.expert_data_path,
"discriminator_updates_per_itr": args.discriminator_updates_per_itr,
"surrogateDiscriminator": args.surrogateDiscriminator,
"max_path_length": args.max_path_length,
"discount":args.discount,
"n_samples":args.n_samples,
},
)
|
[
"rllab.torch.models.nn_discriminator.NNDiscriminator",
"argparse.ArgumentParser",
"numpy.concatenate",
"rllab.torch.utils.misc.str2bool",
"rllab.misc.instrument.run_experiment_custom",
"rllab.torch.algos.gaml_episode_based_modellearning.GAMLEpisodeBasedModelLearning",
"pathlib.Path",
"pathlib.Path.joinpath",
"numpy.diag",
"joblib.load",
"rllab.torch.models.gaussian_upper_level_policy.GaussianUpperLevelPolicy",
"numpy.sqrt"
] |
[((736, 772), 'joblib.load', 'joblib.load', (["v['expert_policy_path']"], {}), "(v['expert_policy_path'])\n", (747, 772), False, 'import joblib\n'), ((956, 978), 'joblib.load', 'joblib.load', (['file_name'], {}), '(file_name)\n', (967, 978), False, 'import joblib\n'), ((1142, 1211), 'rllab.torch.models.nn_discriminator.NNDiscriminator', 'NNDiscriminator', ([], {'input_dim': '(action_dim + (observation_dim + 1) * 2 + 1)'}), '(input_dim=action_dim + (observation_dim + 1) * 2 + 1)\n', (1157, 1211), False, 'from rllab.torch.models.nn_discriminator import NNDiscriminator\n'), ((1766, 1808), 'rllab.torch.models.gaussian_upper_level_policy.GaussianUpperLevelPolicy', 'GaussianUpperLevelPolicy', (['mean', 'covariance'], {}), '(mean, covariance)\n', (1790, 1808), False, 'from rllab.torch.models.gaussian_upper_level_policy import GaussianUpperLevelPolicy\n'), ((1821, 2221), 'rllab.torch.algos.gaml_episode_based_modellearning.GAMLEpisodeBasedModelLearning', 'GAMLEpisodeBasedModelLearning', (['policy', 'expert_paths', 'imitation_env', 'discriminator', 'upper_level_policy'], {'n_itr': "v['n_itr']", 'n_traj': "v['n_traj']", 'n_samples': "v['n_samples']", 'max_path_length': "v['max_path_length']", 'use_timesteps': "v['use_timestep']", 'use_state_diff_in_discriminator': "v['use_state_diff_discrim']", 'discount': '(0.995)', 'discriminator_updates_per_itr': "v['discriminator_updates_per_itr']"}), "(policy, expert_paths, imitation_env,\n discriminator, upper_level_policy, n_itr=v['n_itr'], n_traj=v['n_traj'],\n n_samples=v['n_samples'], max_path_length=v['max_path_length'],\n use_timesteps=v['use_timestep'], use_state_diff_in_discriminator=v[\n 'use_state_diff_discrim'], discount=0.995,\n discriminator_updates_per_itr=v['discriminator_updates_per_itr'])\n", (1850, 2221), False, 'from rllab.torch.algos.gaml_episode_based_modellearning import GAMLEpisodeBasedModelLearning\n'), ((3345, 3941), 'rllab.misc.instrument.run_experiment_custom', 'run_experiment_custom', (['run_task'], {'n_parallel': '(1)', 'snapshot_mode': '"""gap"""', 'snapshot_gap': '(5)', 'seed': 'seed', 'log_dir': 'resultsPath', 'log_debug_log_only': '(True)', 'log_tabular_only': '(True)', 'exp_prefix': '"""GAML_cartpole_swingup"""', 'variant': "{'n_itr': n_itr, 'n_traj': n_traj, 'var_x': var_x, 'var_theta': var_theta,\n 'expert_policy_path': expert_policy_path, 'expert_data_path':\n expert_data_path, 'discriminator_updates_per_itr':\n discriminator_updates_per_itr, 'max_path_length': max_path_length,\n 'discount': discount, 'n_samples': n_samples, 'use_timestep': True,\n 'use_state_diff_discrim': False}"}), "(run_task, n_parallel=1, snapshot_mode='gap',\n snapshot_gap=5, seed=seed, log_dir=resultsPath, log_debug_log_only=True,\n log_tabular_only=True, exp_prefix='GAML_cartpole_swingup', variant={\n 'n_itr': n_itr, 'n_traj': n_traj, 'var_x': var_x, 'var_theta':\n var_theta, 'expert_policy_path': expert_policy_path, 'expert_data_path':\n expert_data_path, 'discriminator_updates_per_itr':\n discriminator_updates_per_itr, 'max_path_length': max_path_length,\n 'discount': discount, 'n_samples': n_samples, 'use_timestep': True,\n 'use_state_diff_discrim': False})\n", (3366, 3941), False, 'from rllab.misc.instrument import run_experiment_custom\n'), ((4467, 4492), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4490, 4492), False, 'import argparse\n'), ((3028, 3090), 'pathlib.Path.joinpath', 'Path.joinpath', (['mainDir', '"""datasets/policy_cartpole_swingup.pkl"""'], {}), "(mainDir, 'datasets/policy_cartpole_swingup.pkl')\n", 
(3041, 3090), False, 'from pathlib import Path\n'), ((3304, 3338), 'pathlib.Path.joinpath', 'Path.joinpath', (['mainDir', '"""results/"""'], {}), "(mainDir, 'results/')\n", (3317, 3338), False, 'from pathlib import Path\n'), ((1445, 1482), 'numpy.sqrt', 'np.sqrt', (["[v['var_x'], v['var_theta']]"], {}), "([v['var_x'], v['var_theta']])\n", (1452, 1482), True, 'import numpy as np\n'), ((2845, 2859), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2849, 2859), False, 'from pathlib import Path\n'), ((1613, 1646), 'numpy.concatenate', 'np.concatenate', (['[theta, init_std]'], {}), '([theta, init_std])\n', (1627, 1646), True, 'import numpy as np\n'), ((1690, 1731), 'numpy.diag', 'np.diag', (['[0.05, 0.05, 0.05, 0.005, 0.005]'], {}), '([0.05, 0.05, 0.05, 0.005, 0.005])\n', (1697, 1731), True, 'import numpy as np\n'), ((7263, 7290), 'rllab.torch.utils.misc.str2bool', 'str2bool', (['args.use_timestep'], {}), '(args.use_timestep)\n', (7271, 7290), False, 'from rllab.torch.utils.misc import str2bool\n'), ((7327, 7356), 'rllab.torch.utils.misc.str2bool', 'str2bool', (['args.use_forward_KL'], {}), '(args.use_forward_KL)\n', (7335, 7356), False, 'from rllab.torch.utils.misc import str2bool\n'), ((7401, 7438), 'rllab.torch.utils.misc.str2bool', 'str2bool', (['args.use_state_diff_discrim'], {}), '(args.use_state_diff_discrim)\n', (7409, 7438), False, 'from rllab.torch.utils.misc import str2bool\n')]
|
import numpy as np
from keras.models import load_model
from PIL import Image
from keras.applications import mobilenet_v2
from keras.utils.data_utils import get_file
import frederic.utils.general
import frederic.utils.image
BASE_MODEL_URL = 'https://github.com/zylamarek/frederic-models/raw/master/models/'
class Predictor:
def __init__(self, bbox_model_path=None, landmarks_model_path=None, lazy=True):
self.bbox_model_path = bbox_model_path
self.landmarks_model_path = landmarks_model_path
self.lazy = lazy
self.loaded = False
self.bbox_model = None
self.landmarks_model = None
if not lazy:
self.load_models()
def load_models(self):
if not self.loaded:
if self.bbox_model_path is None:
model_name = 'frederic_bbox.h5'
self.bbox_model_path = get_file(model_name, BASE_MODEL_URL + model_name, cache_subdir='models')
if self.landmarks_model_path is None:
model_name = 'frederic_landmarks.h5'
self.landmarks_model_path = get_file(model_name, BASE_MODEL_URL + model_name, cache_subdir='models')
dummy_loss_fn = frederic.utils.general.get_loss_fn('bbox', 'iou_and_mse_landmarks', 1e-5)
custom_objects = frederic.utils.general.get_custom_objects('iou_and_mse_landmarks', dummy_loss_fn)
self.bbox_model = load_model(self.bbox_model_path, custom_objects=custom_objects)
self.landmarks_model = load_model(self.landmarks_model_path, custom_objects=custom_objects)
self.loaded = True
def predict(self, img, dtype='float'):
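# Two-stage prediction: first regress the bounding box on the full image, then predict landmarks inside the predicted box.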
self.load_models()
img_bbox, img_landmarks = img.copy(), img.copy()
# predict bounding box
img_bbox, _ = frederic.utils.image.resize(img_bbox, 0, sampling_method=Image.LANCZOS)
x = np.expand_dims(mobilenet_v2.preprocess_input(np.asarray(img_bbox)), axis=0)
bbox = self.bbox_model.predict(x, verbose=0)[0, :4]
# scale and translate predicted bounding box
bbox = frederic.utils.image.postprocess_bounding_box(bbox, img.size)
# predict landmarks inside predicted bounding box
img_landmarks, _ = frederic.utils.image.crop(img_landmarks, 0, bbox)
img_landmarks, _ = frederic.utils.image.resize(img_landmarks, 0, sampling_method=Image.LANCZOS)
x = np.expand_dims(mobilenet_v2.preprocess_input(np.asarray(img_landmarks)), axis=0)
y_pred = self.landmarks_model.predict(x, verbose=0)[0]
# scale and translate predicted landmarks
bb_size = np.max((bbox[2] - bbox[0], bbox[3] - bbox[1]))
ratio = bb_size / frederic.utils.general.IMG_SIZE
predicted_landmarks = (y_pred * ratio).reshape((-1, 2)) + bbox[:2]
if 'int' in dtype:
predicted_landmarks = np.round(predicted_landmarks)
return predicted_landmarks.astype(dtype)
|
[
"keras.models.load_model",
"numpy.asarray",
"keras.utils.data_utils.get_file",
"numpy.max",
"numpy.round"
] |
[((2611, 2657), 'numpy.max', 'np.max', (['(bbox[2] - bbox[0], bbox[3] - bbox[1])'], {}), '((bbox[2] - bbox[0], bbox[3] - bbox[1]))\n', (2617, 2657), True, 'import numpy as np\n'), ((1413, 1476), 'keras.models.load_model', 'load_model', (['self.bbox_model_path'], {'custom_objects': 'custom_objects'}), '(self.bbox_model_path, custom_objects=custom_objects)\n', (1423, 1476), False, 'from keras.models import load_model\n'), ((1512, 1580), 'keras.models.load_model', 'load_model', (['self.landmarks_model_path'], {'custom_objects': 'custom_objects'}), '(self.landmarks_model_path, custom_objects=custom_objects)\n', (1522, 1580), False, 'from keras.models import load_model\n'), ((2853, 2882), 'numpy.round', 'np.round', (['predicted_landmarks'], {}), '(predicted_landmarks)\n', (2861, 2882), True, 'import numpy as np\n'), ((876, 948), 'keras.utils.data_utils.get_file', 'get_file', (['model_name', '(BASE_MODEL_URL + model_name)'], {'cache_subdir': '"""models"""'}), "(model_name, BASE_MODEL_URL + model_name, cache_subdir='models')\n", (884, 948), False, 'from keras.utils.data_utils import get_file\n'), ((1096, 1168), 'keras.utils.data_utils.get_file', 'get_file', (['model_name', '(BASE_MODEL_URL + model_name)'], {'cache_subdir': '"""models"""'}), "(model_name, BASE_MODEL_URL + model_name, cache_subdir='models')\n", (1104, 1168), False, 'from keras.utils.data_utils import get_file\n'), ((1924, 1944), 'numpy.asarray', 'np.asarray', (['img_bbox'], {}), '(img_bbox)\n', (1934, 1944), True, 'import numpy as np\n'), ((2443, 2468), 'numpy.asarray', 'np.asarray', (['img_landmarks'], {}), '(img_landmarks)\n', (2453, 2468), True, 'import numpy as np\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import copy
from time import gmtime, strftime
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
import nni
from nni.compression.pytorch import ModelSpeedup
from nni.algorithms.compression.pytorch.pruning import (
LevelPruner,
SlimPruner,
FPGMPruner,
TaylorFOWeightFilterPruner,
L1FilterPruner,
L2FilterPruner,
AGPPruner,
ActivationMeanRankFilterPruner,
ActivationAPoZRankFilterPruner,
AMCPruner
)
from utils import *
os.environ["CUDA_VISIBLE_DEVICES"]="0"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_type = 'mobilenet_v2_torchhub' # 'mobilenet_v1' 'mobilenet_v2' 'mobilenet_v2_torchhub'
pretrained = False # load imagenet weight (only for 'mobilenet_v2_torchhub')
experiment_dir = './experiments/pretrained_mobilenet_v2_best/'
log_name_additions = ''
checkpoint = experiment_dir + '/checkpoint_best.pt'
input_size = 224
n_classes = 120
# reduce CPU usage
train_dataset, train_dataloader = None, None
# train_dataset_for_pruner, train_dataloader_for_pruner = None, None
valid_dataset, valid_dataloader = None, None
test_dataset, test_dataloader = None, None
# optimization parameters (for finetuning)
batch_size = 32
n_epochs = 30
learning_rate = 1e-4 # 1e-4 for finetuning, 1e-3 (?) for training from scratch
pruner_type_to_class = {'level': LevelPruner,
'l1': L1FilterPruner,
'l2': L2FilterPruner,
'slim': SlimPruner,
'fpgm': FPGMPruner,
'taylor': TaylorFOWeightFilterPruner,
'agp': AGPPruner,
'activationmeanrank': ActivationMeanRankFilterPruner,
'apoz': ActivationAPoZRankFilterPruner,
'amc': AMCPruner}
def run_test(model):
model.eval()
loss_func = nn.CrossEntropyLoss()
acc_list, loss_list = [], []
with torch.no_grad():
for i, (inputs, labels) in enumerate(tqdm(test_dataloader)):
inputs, labels = inputs.float().to(device), labels.to(device)
preds= model(inputs)
pred_idx = preds.max(1).indices
acc = (pred_idx == labels).sum().item() / labels.size(0)
acc_list.append(acc)
loss = loss_func(preds, labels).item()
loss_list.append(loss)
final_loss = np.array(loss_list).mean()
final_acc = np.array(acc_list).mean()
return final_loss, final_acc
def run_validation(model, dataloader):
model.eval()
loss_func = nn.CrossEntropyLoss()
acc_list, loss_list = [], []
with torch.no_grad():
for i, (inputs, labels) in enumerate(tqdm(dataloader)):
inputs, labels = inputs.float().to(device), labels.to(device)
preds= model(inputs)
pred_idx = preds.max(1).indices
acc = (pred_idx == labels).sum().item() / labels.size(0)
acc_list.append(acc)
loss = loss_func(preds, labels).item()
loss_list.append(loss)
valid_loss = np.array(loss_list).mean()
valid_acc = np.array(acc_list).mean()
return valid_loss, valid_acc
def validator(dataloader, model):
_, acc = run_validation(model, dataloader)
return acc
def run_finetune(model, log):
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
best_valid_acc = 0.0
best_model = None
for epoch in range(n_epochs):
print('Start training epoch {}'.format(epoch))
loss_list = []
# train
model.train()
for i, (inputs, labels) in enumerate(tqdm(train_dataloader)):
optimizer.zero_grad()
inputs, labels = inputs.float().to(device), labels.to(device)
preds = model(inputs)
loss = criterion(preds, labels)
loss_list.append(loss.item())
loss.backward()
optimizer.step()
# validation
valid_loss, valid_acc = run_validation(model, valid_dataloader)
train_loss = np.array(loss_list).mean()
print('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}'.format
(epoch, train_loss, valid_loss, valid_acc))
log.write('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}\n'.format
(epoch, train_loss, valid_loss, valid_acc))
if valid_acc > best_valid_acc:
best_valid_acc = valid_acc
best_model = copy.deepcopy(model).to(device)
log.write("Best validation accuracy: {}".format(best_valid_acc))
model = best_model
return model
def trainer_helper(model, criterion, optimizer):
print("Running trainer in tuner")
for epoch in range(1):
model.train()
for i, (inputs, labels) in enumerate(tqdm(train_dataloader_for_pruner)):
optimizer.zero_grad()
inputs, labels = inputs.float().to(device), labels.to(device)
preds = model(inputs)
loss = criterion(preds, labels)
loss.backward()
optimizer.step()
def main(pruner_type):
log = open(experiment_dir + '/prune_{}_{}{}.log'.format(pruner_type, strftime("%Y%m%d%H%M", gmtime()), log_name_additions), 'w')
model = create_model(model_type=model_type, pretrained=pretrained, n_classes=n_classes,
input_size=input_size, checkpoint=checkpoint)
model = model.to(device)
print(model)
# evaluation before pruning
count_flops(model, log)
initial_loss, initial_acc = run_test(model)
print('Before Pruning:\nLoss: {}\nAccuracy: {}'.format(initial_loss, initial_acc))
log.write('Before Pruning:\nLoss: {}\nAccuracy: {}\n'.format(initial_loss, initial_acc))
# pruning
config_list = [{
'op_types': ['Conv2d'],
}]
kwargs = {
'evaluator': validator,
'val_loader': valid_dataloader,
'flops_ratio': 0.5,
'lbound': 0.3,
'rbound': 0.8,
'model_type': 'mobilenetv2',
'train_episode': 800
}
pruner = pruner_type_to_class[pruner_type](model, config_list, **kwargs)
pruner.compress()
pruner.export_model('./model_temp.pth', './mask_temp.pth')
# model speedup
dummy_input = torch.rand(1,3,224,224).cuda()
pruner._unwrap_model()
ms = ModelSpeedup(model, dummy_input, './mask_temp.pth')
ms.speedup_model()
print(model)
count_flops(model, log)
intermediate_loss, intermediate_acc = run_test(model)
print('Before Finetuning:\nLoss: {}\nAccuracy: {}'.format(intermediate_loss, intermediate_acc))
log.write('Before Finetuning:\nLoss: {}\nAccuracy: {}\n'.format(intermediate_loss, intermediate_acc))
# finetuning
model = run_finetune(model, log)
# final evaluation
final_loss, final_acc = run_test(model)
print('After Pruning:\nLoss: {}\nAccuracy: {}'.format(final_loss, final_acc))
log.write('After Pruning:\nLoss: {}\nAccuracy: {}'.format(final_loss, final_acc))
# clean up
filePaths = ['./model_temp.pth', './mask_temp.pth']  # match the filenames exported above
for f in filePaths:
if os.path.exists(f):
os.remove(f)
log.close()
if __name__ == '__main__':
# create here and reuse
train_dataset = TrainDataset('./data/stanford-dogs/Processed/train')
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# train_dataset_for_pruner = EvalDataset('./data/stanford-dogs/Processed/train')
# train_dataloader_for_pruner = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)
valid_dataset = EvalDataset('./data/stanford-dogs/Processed/valid')
valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
test_dataset = EvalDataset('./data/stanford-dogs/Processed/test')
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
torch.set_num_threads(16)
main('amc')
|
[
"tqdm.tqdm",
"os.remove",
"copy.deepcopy",
"nni.compression.pytorch.ModelSpeedup",
"torch.utils.data.DataLoader",
"time.gmtime",
"torch.nn.CrossEntropyLoss",
"os.path.exists",
"torch.set_num_threads",
"torch.cuda.is_available",
"numpy.array",
"torch.rand",
"torch.no_grad"
] |
[((2044, 2065), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2063, 2065), True, 'import torch.nn as nn\n'), ((2733, 2754), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2752, 2754), True, 'import torch.nn as nn\n'), ((3492, 3513), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3511, 3513), True, 'import torch.nn as nn\n'), ((6652, 6703), 'nni.compression.pytorch.ModelSpeedup', 'ModelSpeedup', (['model', 'dummy_input', '"""./mask_temp.pth"""'], {}), "(model, dummy_input, './mask_temp.pth')\n", (6664, 6703), False, 'from nni.compression.pytorch import ModelSpeedup\n'), ((7662, 7724), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=True)\n', (7672, 7724), False, 'from torch.utils.data import DataLoader\n'), ((8005, 8083), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(valid_dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n', (8015, 8083), False, 'from torch.utils.data import DataLoader\n'), ((8176, 8238), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(test_dataset, batch_size=batch_size, shuffle=False)\n', (8186, 8238), False, 'from torch.utils.data import DataLoader\n'), ((8244, 8269), 'torch.set_num_threads', 'torch.set_num_threads', (['(16)'], {}), '(16)\n', (8265, 8269), False, 'import torch\n'), ((683, 708), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (706, 708), False, 'import torch\n'), ((2108, 2123), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2121, 2123), False, 'import torch\n'), ((2797, 2812), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2810, 2812), False, 'import torch\n'), ((7436, 7453), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (7450, 7453), False, 'import os\n'), ((2170, 2191), 'tqdm.tqdm', 'tqdm', (['test_dataloader'], {}), '(test_dataloader)\n', (2174, 2191), False, 'from tqdm import tqdm\n'), ((2551, 2570), 'numpy.array', 'np.array', (['loss_list'], {}), '(loss_list)\n', (2559, 2570), True, 'import numpy as np\n'), ((2594, 2612), 'numpy.array', 'np.array', (['acc_list'], {}), '(acc_list)\n', (2602, 2612), True, 'import numpy as np\n'), ((2859, 2875), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (2863, 2875), False, 'from tqdm import tqdm\n'), ((3235, 3254), 'numpy.array', 'np.array', (['loss_list'], {}), '(loss_list)\n', (3243, 3254), True, 'import numpy as np\n'), ((3278, 3296), 'numpy.array', 'np.array', (['acc_list'], {}), '(acc_list)\n', (3286, 3296), True, 'import numpy as np\n'), ((3916, 3938), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {}), '(train_dataloader)\n', (3920, 3938), False, 'from tqdm import tqdm\n'), ((5112, 5145), 'tqdm.tqdm', 'tqdm', (['train_dataloader_for_pruner'], {}), '(train_dataloader_for_pruner)\n', (5116, 5145), False, 'from tqdm import tqdm\n'), ((6585, 6611), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (6595, 6611), False, 'import torch\n'), ((7467, 7479), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (7476, 7479), False, 'import os\n'), ((4353, 4372), 'numpy.array', 'np.array', (['loss_list'], {}), '(loss_list)\n', (4361, 4372), True, 'import numpy as np\n'), ((4786, 4806), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (4799, 4806), 
False, 'import copy\n'), ((5516, 5524), 'time.gmtime', 'gmtime', ([], {}), '()\n', (5522, 5524), False, 'from time import gmtime, strftime\n')]
|
# Get dependencies
import sys
import dependencies
sys.path.append('yolo')
sys.path.append('core')
import math
import glob
import os
import time
import cv2
import numpy as np
from PIL import Image
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from raft import RAFT
from utils import flow_viz
from utils.utils import InputPadder
from inference import post_process
import argparse
from model import YoloNetV3
import matplotlib.pyplot as plt
from datetime import datetime
#-------------------------------------------------------------------------------
# Parameters
#-------------------------------------------------------------------------------
# Set Input type - image, video
media_type = 'image'
# Location of image folder or video file
location = 'dataset/01/'
# Set xml to True if the ground truth data is in XML format, or False if it is in a txt file
xml = False
# Export results in video file
video_out = True
# Show Result in a pop up window frame by frame
preview_result = False
# Calculate and output metrics
metrics_out = True
# Initialize regions of no interest
regions = []
#---------------------------------------------------------------
# Global Variables
#---------------------------------------------------------------
# Detector IoU treshold
IOU_THRESHOLD = 0.4
# Metrics IoU Treshold
EVAL_TRESHOLD = 0.5
# Select GPU as target
DEVICE = 'cuda'
#---------------------------------------------------------------
# Input Image Sequence Handler Class
#---------------------------------------------------------------
class InputData:
# Initializer handling both image and video as input data
def __init__(self, media_type, location):
self.type = media_type
if self.type == 'image':
# get image file list and sort by filename
self.images = glob.glob(os.path.join(location, '*.png'))
self.images = sorted(self.images)
if self.type == 'video':
# Start video object
self.images = cv2.VideoCapture(location)
# Helper function to get the next frame in the image sequence or video
def get_next_frame(self):
try:
if self.type == 'image':
imfile = self.images[FRAME_NUMBER]
image = np.array(Image.open(imfile)).astype(np.uint8)
#image = cv2.resize(image, (320,240), interpolation = cv2.INTER_AREA)
return image
if self.type == 'video':
_, image = self.images.read()
#image = cv2.resize(image, (320,240), interpolation = cv2.INTER_AREA)
return image
# Return False once the end of the sequence is reached
except:
return False
#---------------------------------------------------------------
#---------------------------------------------------------------
#---------------------------------------------------------------
class Vehicle:
def __init__(self,frame_number, detection, flow):
global ID
global FRAME_NUMBER
self.x, self.y, self.w, self.h = detection
self.x_dot, self.y_dot = flow
self.first_frame = FRAME_NUMBER
self.gt_id = []
self.last_seen = 0
self.veh_id = ID
ID = ID + 1
def update_full(self, frame_number, detection, flow):
x, y, self.w, self.h = detection
self.last_seen = 0
self.x_dot = x - self.x
self.y_dot = y - self.y
self.x = x
self.y = y
def update_partial(self, flow):
self.last_seen += 1
#self.x_dot = self.x_dot *0.1
#self.y_dot = self.y_dot *0.1 +flow[1] *0.9
def predict(self):
self.x = self.x + self.x_dot
self.y = self.y + self.y_dot
def bounds(self):
for region in regions:
if iou(region, [self.x, self.y, self.x+self.w, self.y+self.h])>0.7:
return False
if self.last_seen > 5:
return False
if self.x < IMG_X_MAX and self.x > 0 and self.y > 0 and self.y < IMG_Y_MAX:
return True
else:
return False
def check_id(self, gt_id):
if self.gt_id == []:
self.gt_id.append(gt_id)
return 0
if gt_id in self.gt_id:
return 0
else:
self.gt_id.append(gt_id)
return 1
#---------------------------------------------------------------
#---------------------------------------------------------------
#---------------------------------------------------------------
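# One tracking step: match detections to the previous frame's vehicles by IoU,
# spawn new tracks for unmatched detections, coast unmatched tracks, then predict
# each surviving track's position for the next frame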
class Frame:
def __init__(self, detection, flow , vehicles):
global FRAME_NUMBER
self.frame_number = FRAME_NUMBER
self.bounding_boxes = detection
self.optical_flow = flow
self.prior_vehicles = vehicles
self.measurement = {}
self.update_veh = []
self.predict_veh = []
FRAME_NUMBER = FRAME_NUMBER + 1
def match(self):
iou_matrix = np.zeros((len(self.bounding_boxes),len(self.prior_vehicles)))
i = 0
j = 0
for box in self.bounding_boxes:
for vehicle in self.prior_vehicles:
vehicle_box = [vehicle.x, vehicle.y, vehicle.w + vehicle.x, vehicle.h + vehicle.y]
detection_box = [box[0], box[1], box[0]+box[2], box[1]+box[3]]
iou_matrix[i][j] = iou(detection_box , vehicle_box)
j += 1
i += 1
j = 0
full = 0
initialize = 0
partial = 0
for i in range(len(self.bounding_boxes)):
if max(iou_matrix[i]) > 0.5:
full += 1
idx = int(np.where(iou_matrix[i] == iou_matrix[i].max())[0][0])
self.measurement[self.prior_vehicles[idx].veh_id] = ['full',self.bounding_boxes[i], self.average_flow(self.bounding_boxes[i]), self.prior_vehicles[idx]]
else:
initialize += 1
vehicle = Vehicle(self.frame_number, self.bounding_boxes[i], self.average_flow(self.bounding_boxes[i]))
self.measurement[vehicle.veh_id] = ['initialize', 0, 0, vehicle]
for vehicle in self.prior_vehicles:
if vehicle.veh_id not in self.measurement:
partial += 1
bbox = [vehicle.x, vehicle.y, vehicle.w, vehicle.h]
self.measurement[vehicle.veh_id] = ['partial', 0, self.average_flow(bbox), vehicle]
#print('Full: ' + str(full) + ' Partial: ' + str(partial) + ' New: ' + str(initialize))
def update(self):
for item in self.measurement.values():
if item[0] == 'full':
item[3].update_full(self.frame_number, item[1], item[2])
if item[0] == 'partial':
item[3].update_partial(item[2])
if item[3].bounds():
self.update_veh.append(item[3])
def predict(self):
for vehicle in self.update_veh:
vehicle.predict()
if vehicle.bounds():
self.predict_veh.append(vehicle)
def average_flow(self, bbox):
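        # Mean flow vector over the pixels inside the box; used as the track's velocity estimate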
x, y, w, h = bbox
direction_x = np.average(self.optical_flow[int(y) : int(y +h) , int(x) : int(x+w), 0])
direction_y = np.average(self.optical_flow[int(y) : int(y +h) , int(x):int(x+w) , 1])
return [direction_x, direction_y]
def get_bbox(self):
bbox = []
for vehicle in self.update_veh:
bbox.append([int(vehicle.x), int(vehicle.y), int(vehicle.x + vehicle.w), int(vehicle.y + vehicle.h)])
return bbox
#---------------------------------------------------------------
#---------------------------------------------------------------
#---------------------------------------------------------------
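# Detection wrapper: torchvision Faster R-CNN or YOLOv3 ('sinet' is a stub);
# inference() stores boxes as [x, y, w, h] in self.result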
class Detector:
def __init__(self, type):
self.type = type
if type == 'frcnn':
# load faster r-cnn
self.detector = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# send model to gpu
self.detector.to(DEVICE)
# set model to inference mode
self.detector.eval()
# set transformation to prepare image for network input
self.transform = transforms.Compose([transforms.ToTensor()])
if type == 'yolo':
weight_path = 'weights/yolov3_original.pt'
# load faster r-cnn
self.detector = YoloNetV3(nms=False)
# load weights
self.detector.load_state_dict(torch.load(weight_path))
# send model to gpu
self.detector.to(DEVICE)
# set model to inference mode
self.detector.eval()
# set transformation to prepare image for network input
self.transform = transforms.Compose([transforms.ToTensor()])
if type == 'sinet':
print('SINet')
def inference(self, image):
if self.type == 'frcnn':
# convert image to torch tensor
input = self.transform(image)
# send input data to GPU
input = input.to(DEVICE)
# process inference and get detections
detections = self.detector([input])
boxes = detections[0]['boxes']
confidence = detections[0]['scores']
class_id = detections[0]['labels']
self.result = self.filter_detection(boxes, confidence, class_id)
if self.type == 'yolo':
# convert image to torch tensor
im = Image.fromarray(image)
input = self.transform(im.resize((IMG_X_MAX,IMG_X_MAX),Image.ANTIALIAS))
input = input.unsqueeze(0)
# send input data to GPU
input = input.to(DEVICE)
# process inference and get detections
with torch.no_grad():
detections = self.detector(input)
detections = post_process(detections, True, SCORE_THRESHOLD, IOU_THRESHOLD)
for detection in detections:
detection[..., :4] = untransform_bboxes(detection[..., :4])
cxcywh_to_xywh(detection)
boxes = detections[0][..., :4]
self.result = boxes.detach().cpu().numpy()
if self.type == 'sinet':
# convert image to torch tensor
input = self.transform(image)
# send input data to GPU
input = input.to(DEVICE)
# process inference and get detections
detections = self.detector([input])
boxes = detections[0]['boxes']
confidence = detections[0]['scores']
class_id = detections[0]['labels']
def filter_detection(self, detections, confidence, class_id):
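        # Greedy non-maximum suppression over the raw detections, then keep only
        # confident boxes whose COCO class is vehicle-like (bicycle/car/motorcycle/bus/train/truck)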
x1 = detections[:, 0].detach().cpu().numpy()
y1 = detections[:, 1].detach().cpu().numpy()
x2 = detections[:, 2].detach().cpu().numpy()
y2 = detections[:, 3].detach().cpu().numpy()
scores = confidence.detach().cpu().numpy()
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
j = order[0]
keep.append(j)
xx1 = np.maximum(x1[j], x1[order[1:]])
yy1 = np.maximum(y1[j], y1[order[1:]])
xx2 = np.minimum(x2[j], x2[order[1:]])
yy2 = np.minimum(y2[j], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[j] + areas[order[1:]] - inter)
inds = np.where(ovr <= IOU_THRESHOLD)[0]
order = order[inds + 1]
filter = []
for i in keep:
if confidence[i] >= SCORE_THRESHOLD:
if class_id[i] in [2,3,4,6, 7, 8]:
filter.append([int(x1[i]), int(y1[i]), int(x2[i]-x1[i]), int(y2[i]-y1[i])])
return filter
#---------------------------------------------------------------
#---------------------------------------------------------------
#---------------------------------------------------------------
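# Dense optical flow wrapper: RAFT (learned) or OpenCV Farneback; inference() stores a
# per-pixel (dx, dy) field in self.result and toimage() renders it for visualization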
class OpticalFlow:
def __init__(self, type):
self.type = type
if type == 'farneback':
self.type = 'farneback'
if type == 'raft':
parser = argparse.ArgumentParser()
parser.add_argument('--model', nargs='?', const='raft-models/raft-things.pth', type=str, help="restore checkpoint")
parser.add_argument('--path', nargs='?', const='frames', type=int, help="dataset for evaluation")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
            parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
args = parser.parse_args()
args.model = 'raft-models/raft-things.pth'
self.flow_model = torch.nn.DataParallel(RAFT(args))
self.flow_model.load_state_dict(torch.load(args.model))
self.flow_model = self.flow_model.module
self.flow_model.to(DEVICE)
self.flow_model.eval()
if type == 'flownet':
print('Flownet')
def inference(self,image1,image2):
if self.type == 'farneback':
self.mask = np.zeros_like(image1)
gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(gray1, gray2, flow=None,
pyr_scale=0.5, levels=10, winsize=15,
iterations=10, poly_n=7, poly_sigma=1.5,
flags=cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
self.result = flow
if self.type == 'raft':
image1 = torch.from_numpy(image1).permute(2, 0, 1).float()
image1 = image1[None].to(DEVICE)
image2 = torch.from_numpy(image2).permute(2, 0, 1).float()
image2 = image2[None].to(DEVICE)
padder = InputPadder(image2.shape)
image1, image2 = padder.pad(image1, image2)
_, flow_up = self.flow_model(image1, image2, iters=5, test_mode=True)
self.result = flow_up[0].permute(1,2,0).detach().cpu().numpy()
if self.type == 'flownet':
self.flow = True
def toimage(self):
if self.type == 'raft':
image = flow_viz.flow_to_image(self.result)
return image
if self.type == 'farneback':
magnitude, angle = cv2.cartToPolar(self.result[..., 0], self.result[..., 1])
mask = self.mask
mask[..., 1] = 255
mask[..., 0] = angle * 180 / np.pi / 2
mask[..., 2] = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
image = cv2.cvtColor(mask, cv2.COLOR_HSV2BGR)
return image
#---------------------------------------------------------------
# Helper functions
#---------------------------------------------------------------
# draw vehicle bounding box on input image
def draw_bbox(image, bboxes):
copy = np.copy(image)
for bbox in bboxes:
cv2.rectangle(copy, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), np.random.uniform(0, 255) , 2)
return copy
# create optical flow + detection mask
def flow_mask(flow, bboxes):
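    # Render the flow field and black out everything outside the detected boxes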
image = flow.toimage()
mask = np.full(image.shape[:2], 0, dtype=np.uint8)
for bbox in bboxes:
cv2.rectangle(mask, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255,255,255), -1)
image = cv2.bitwise_or(np.array(image).astype(np.uint8), np.array(image).astype(np.uint8), mask=mask)
return image
# create side by side images
def visuzalization(input, bbox, flow, expected):
bbox_img = cv2.resize(draw_evaluation(input, expected, bbox), (IMG_X_MAX, IMG_Y_MAX), interpolation = cv2.INTER_AREA)
flow_visualization = cv2.resize(flow.toimage(), (IMG_X_MAX, IMG_Y_MAX), interpolation = cv2.INTER_AREA)
mask = cv2.resize(flow_mask(flow, bbox), (IMG_X_MAX, IMG_Y_MAX), interpolation = cv2.INTER_AREA)
img1 = np.concatenate((input,bbox_img), axis = 1)
img2 = np.concatenate((flow_visualization, mask), axis = 1)
merge_img = np.concatenate((img1,img2), axis = 1)
return merge_img
# update output image window
def display_result(image):
cv2.imshow('image',image)
cv2.waitKey(0)
def iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
if interArea == 0:
return 0
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = abs((boxA[2] - boxA[0]) * (boxA[3] - boxA[1]))
boxBArea = abs((boxB[2] - boxB[0]) * (boxB[3] - boxB[1]))
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
def evaluate(gt , result):
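    # Frame-level metrics at IoU >= EVAL_TRESHOLD:
    # returns [true positives, false negatives, false positives, ID switches]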
if len(result) == 0:
return [0,len(gt),0, 0]
iou_result = np.zeros((len(gt),len(result)), dtype=float)
for i in range(len(gt)):
for j in range(len(result)):
box1 = [gt[i][2], gt[i][3], gt[i][4] , gt[i][5]]
box2 = [result[j].x, result[j].y, result[j].w +result[j].x, result[j].h+result[j].y]
iou_result[i,j] = iou(box1, box2)
tp = 0
fn = 0
fp = 0
ids = 0
for i in range(len(gt)):
if max(iou_result[i,:]) >= EVAL_TRESHOLD:
tp += 1
idx = np.where(iou_result[i] == iou_result[i].max())
ids += result[int(idx[0][0])].check_id(gt[i][0])
else:
fn += 1
for j in range(len(result)):
if max(iou_result[:,j]) < EVAL_TRESHOLD:
fp += 1
return [tp, fn, fp, ids]
def draw_evaluation(input, gt, result):
iou_result = np.zeros((len(gt),len(result)), dtype=float)
for i in range(len(gt)):
for j in range(len(result)):
box1 = [gt[i][2], gt[i][3], gt[i][4] , gt[i][5] ]
box2 = [result[j][0], result[j][1], result[j][2] , result[j][3]]
iou_result[i,j] = iou(box1, box2)
copy = np.copy(input)
for i in range(len(gt)):
if max(iou_result[i,:]) >= EVAL_TRESHOLD:
idx = int(np.where(iou_result[i] == iou_result[i].max())[0][0])
cv2.rectangle(copy, (int(result[idx][0]), int(result[idx][1])), (int(result[idx][2]), int(result[idx][3])), (255,0,0) , 2)
else:
cv2.rectangle(copy, (int(gt[i][2]), int(gt[i][3])), (int(gt[i][4]), int(gt[i][5])), (0,255,0), 2)
for j in range(len(result)):
if max(iou_result[:,j]) < EVAL_TRESHOLD:
cv2.rectangle(copy, (int(result[j][0]), int(result[j][1])), (int(result[j][2]), int(result[j][3])), (0,0,255), 2)
return copy
def untransform_bboxes(bboxes):
"""transform the bounding box from the scaled image back to the unscaled image."""
x = bboxes[..., 0]
y = bboxes[..., 1]
w = bboxes[..., 2]
h = bboxes[..., 3]
# x, y, w, h = bbs
x /= 1
y /= IMG_X_MAX/IMG_Y_MAX
w /= 1
h /= IMG_X_MAX/IMG_Y_MAX
return bboxes
def cxcywh_to_xywh(bbox):
bbox[..., 0] -= bbox[..., 2] / 2
bbox[..., 1] -= bbox[..., 3] / 2
return bbox
if metrics_out:
now = datetime.now()
now = now.strftime('%Y%m%d_%H-%M')
result_text = open(location + now + '.txt','w')
line = 'DETECTOR' + ','+'FLOW' + ',' + 'SCORE_THRESHOLD' + ',' + 'PRECISION' + ',' + 'RECALL' + ',' + 'MOTA' + ',' + 'FPS' + '\n'
result_text.writelines((line))
# open csv file
results = []
for detector_type in ['frcnn', 'yolo']:
for flow_type in ['raft', 'farneback']:
metrics_all = []
print('---------------------------------------------------------------')
print('Detector: ' + detector_type)
print('Flow: ' + flow_type)
print('---------------------------------------------------------------')
for SCORE_THRESHOLD in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
performance = [0,0,0,0]
            print('Score Threshold: ' + str(SCORE_THRESHOLD))
if detector_type == 'yolo' and SCORE_THRESHOLD ==0:
SCORE_THRESHOLD = 0.001
print('---------------------------------------------------------------')
ID = 1
FRAME_NUMBER = 2
#-------------------------------------------------------------------------------
# INITIALIZE INPUT DATA --------------------------------------------------------
#-------------------------------------------------------------------------------
input = InputData(media_type, location)
current_frame = input.get_next_frame()
# INITIALIZE DETECTOR
detector = Detector(detector_type)
detector.inference(current_frame)
inital_veh = []
for detection in detector.result:
vehicle = Vehicle(FRAME_NUMBER, detection, [0 ,0])
inital_veh.append(vehicle)
# INITIALIZE Optical Flow
flow = OpticalFlow(flow_type)
#-------------------------------------------------------------------------------
if video_out:
h, w = current_frame.shape[:2]
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
name = detector_type+'-'+flow_type+ '-' + str(SCORE_THRESHOLD) + '.mp4'
out = cv2.VideoWriter(os.path.join(location, name), fourcc, 5.0, (4*w, h))
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if metrics_out:
if xml:
import xml.etree.ElementTree as ET
root = ET.parse(os.path.join(location, 'gt.xml')).getroot()
gt = []
for frame in root.findall('frame'):
frame_id = frame.get('num')
vehicles = frame.find('target_list')
for vehicle in vehicles:
veh_id = vehicle.get('id')
x = vehicle.find('box').get('left')
y = vehicle.find('box').get('top')
w = vehicle.find('box').get('width')
h = vehicle.find('box').get('height')
gt.append([int(frame_id), int(veh_id), float(x), float(y), float(w)+float(x), float(h)+float(y)])
for region in root.find('ignored_region').findall('box'):
regions.append([float(region.get('left')),float(region.get('top')),float(region.get('left'))+float(region.get('width')),float(region.get('top'))+float(region.get('height'))])
else:
gt_text = open(location + 'gt.txt','r')
gt_text = gt_text.readlines()
gt = []
for line in gt_text:
data = line.split(',')
gt.append([int(data[0]), int(data[1]), float(data[2]), float(data[3]), float(data[4])+ float(data[2]), float(data[5]) + float(data[3])])
# open csv file
#-------------------------------------------------------------------------------
if metrics_out:
times = []
#-------------------------------------------------------------------------------
# MAIN LOOP
#-------------------------------------------------------------------------------
while(current_frame is not False):
#print('Frame No: ' + str(FRAME_NUMBER) + ' Veh. No.: ' + str(ID))
# read Image
if metrics_out:
start = time.time()
# get image pair
previous_frame = current_frame
current_frame = input.get_next_frame()
# check if reached end frame
if current_frame is False:
break
IMG_Y_MAX, IMG_X_MAX, _ = current_frame.shape
# run detection
detector.inference(current_frame)
# run flow
flow.inference(current_frame, previous_frame)
# create frame
if FRAME_NUMBER == 2: # first pair
frame = Frame(detector.result, flow.result, inital_veh)
else:
frame.predict()
frame = Frame(detector.result, flow.result, frame.predict_veh)
# match
frame.match()
# update
frame.update()
#############################################################################################################################
# LOGGING RESULTS
#############################################################################################################################
if metrics_out:
processing_time = time.time()-start
times.append(processing_time)
#print('elapsed time: {}'.format(processing_time))
expected = [item for item in gt if item[0] == FRAME_NUMBER]
#result = []
#for vehicle in frame.update_veh:
# result.append([vehicle.veh_id , vehicle.x , vehicle.y, vehicle.w +vehicle.x , vehicle.h+vehicle.y])
tp, fn , fp, ids = evaluate(expected , frame.update_veh)
performance[0] += tp
performance[1] += fn
performance[2] += fp
performance[3] += ids
#print(str(FRAME_NUMBER) + ' | TP : ' + str(tp) + ' | FN : ' + str(fn) + ' | FP : ' + str(fp))
if video_out:
expected = [item for item in gt if item[0] == FRAME_NUMBER]
image = visuzalization(current_frame, frame.get_bbox(), flow, expected)
out.write(image)
if preview_result:
expected = [item for item in gt if item[0] == FRAME_NUMBER]
image = visuzalization(current_frame, frame.get_bbox(), flow, expected)
display_result(image)
###################################################################################################################################
# HANDLING CLOSURES
###################################################################################################################################
if media_type == 'video':
input.images.release()
if preview_result:
cv2.destroyAllWindows()
if metrics_out:
#print('RESULTS @ IoU Treshold ' + str(EVAL_TRESHOLD))
try:
precision = performance[0] / (performance[0] + performance[2])
except:
precision = 0.0
try:
recall = performance[0] / (performance[0] + performance[1])
except:
recall = 0.0
try:
mota = 1 - (performance[1] + performance[2] + performance[3])/ (performance[0] + performance[1])
except:
mota = 0.0
metrics_all.append([recall,precision,mota])
average = 1/np.average(times)
print ('Average Frames Per Second: ' + str(average))
print ('Precision: ' + str(precision))
print ('Recall: ' + str(recall))
print ('MOTA: ' + str(mota))
print ('###############################################################')
line = detector_type + ','+ flow_type + ',' + str(SCORE_THRESHOLD) + ',' + str(precision) + ',' + str(recall) + ',' + str(mota) + ',' + str(average) + '\n'
result_text.writelines((line))
if metrics_out:
axis = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]
metrics_all.sort()
metrics_all = np.array(metrics_all)
corrected_precision = np.interp(axis,metrics_all[:,0],metrics_all[:,1] )
AP = np.average(corrected_precision)
print ('Average Precision: ' + str(AP))
print ('###############################################################')
print ('###############################################################')
import matplotlib.pyplot as plt
plt.scatter(axis, corrected_precision)
plt.plot(axis,corrected_precision)
plt.title(detector_type + ' - ' + flow_type)
plt.xlabel("recall")
plt.ylabel("Precision")
plt.show()
if metrics_out:
result_text.close()
|
[
"matplotlib.pyplot.title",
"numpy.maximum",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"cv2.calcOpticalFlowFarneback",
"cv2.normalize",
"numpy.interp",
"cv2.imshow",
"os.path.join",
"torch.no_grad",
"sys.path.append",
"numpy.full",
"inference.post_process",
"numpy.zeros_like",
"numpy.copy",
"cv2.cvtColor",
"torch.load",
"utils.flow_viz.flow_to_image",
"torchvision.models.detection.fasterrcnn_resnet50_fpn",
"utils.utils.InputPadder",
"cv2.destroyAllWindows",
"datetime.datetime.now",
"numpy.minimum",
"numpy.average",
"matplotlib.pyplot.show",
"cv2.waitKey",
"matplotlib.pyplot.ylabel",
"raft.RAFT",
"numpy.concatenate",
"torch.from_numpy",
"numpy.random.uniform",
"cv2.cartToPolar",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"time.time",
"cv2.VideoCapture",
"PIL.Image.open",
"numpy.where",
"numpy.array",
"PIL.Image.fromarray",
"matplotlib.pyplot.xlabel",
"model.YoloNetV3",
"torchvision.transforms.ToTensor"
] |
[((50, 73), 'sys.path.append', 'sys.path.append', (['"""yolo"""'], {}), "('yolo')\n", (65, 73), False, 'import sys\n'), ((74, 97), 'sys.path.append', 'sys.path.append', (['"""core"""'], {}), "('core')\n", (89, 97), False, 'import sys\n'), ((15219, 15233), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (15226, 15233), True, 'import numpy as np\n'), ((15500, 15543), 'numpy.full', 'np.full', (['image.shape[:2]', '(0)'], {'dtype': 'np.uint8'}), '(image.shape[:2], 0, dtype=np.uint8)\n', (15507, 15543), True, 'import numpy as np\n'), ((16220, 16261), 'numpy.concatenate', 'np.concatenate', (['(input, bbox_img)'], {'axis': '(1)'}), '((input, bbox_img), axis=1)\n', (16234, 16261), True, 'import numpy as np\n'), ((16274, 16324), 'numpy.concatenate', 'np.concatenate', (['(flow_visualization, mask)'], {'axis': '(1)'}), '((flow_visualization, mask), axis=1)\n', (16288, 16324), True, 'import numpy as np\n'), ((16343, 16379), 'numpy.concatenate', 'np.concatenate', (['(img1, img2)'], {'axis': '(1)'}), '((img1, img2), axis=1)\n', (16357, 16379), True, 'import numpy as np\n'), ((16463, 16489), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (16473, 16489), False, 'import cv2\n'), ((16493, 16507), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (16504, 16507), False, 'import cv2\n'), ((18604, 18618), 'numpy.copy', 'np.copy', (['input'], {}), '(input)\n', (18611, 18618), True, 'import numpy as np\n'), ((19741, 19755), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19753, 19755), False, 'from datetime import datetime\n'), ((2005, 2031), 'cv2.VideoCapture', 'cv2.VideoCapture', (['location'], {}), '(location)\n', (2021, 2031), False, 'import cv2\n'), ((7918, 7975), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'models.detection.fasterrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (7958, 7975), True, 'import torchvision.models as models\n'), ((8403, 8423), 'model.YoloNetV3', 'YoloNetV3', ([], {'nms': '(False)'}), '(nms=False)\n', (8412, 8423), False, 'from model import YoloNetV3\n'), ((9493, 9515), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (9508, 9515), False, 'from PIL import Image\n'), ((9874, 9936), 'inference.post_process', 'post_process', (['detections', '(True)', 'SCORE_THRESHOLD', 'IOU_THRESHOLD'], {}), '(detections, True, SCORE_THRESHOLD, IOU_THRESHOLD)\n', (9886, 9936), False, 'from inference import post_process\n'), ((11161, 11193), 'numpy.maximum', 'np.maximum', (['x1[j]', 'x1[order[1:]]'], {}), '(x1[j], x1[order[1:]])\n', (11171, 11193), True, 'import numpy as np\n'), ((11212, 11244), 'numpy.maximum', 'np.maximum', (['y1[j]', 'y1[order[1:]]'], {}), '(y1[j], y1[order[1:]])\n', (11222, 11244), True, 'import numpy as np\n'), ((11263, 11295), 'numpy.minimum', 'np.minimum', (['x2[j]', 'x2[order[1:]]'], {}), '(x2[j], x2[order[1:]])\n', (11273, 11295), True, 'import numpy as np\n'), ((11314, 11346), 'numpy.minimum', 'np.minimum', (['y2[j]', 'y2[order[1:]]'], {}), '(y2[j], y2[order[1:]])\n', (11324, 11346), True, 'import numpy as np\n'), ((11364, 11394), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (11374, 11394), True, 'import numpy as np\n'), ((11411, 11441), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (11421, 11441), True, 'import numpy as np\n'), ((12270, 12295), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12293, 12295), False, 'import argparse\n'), 
((13361, 13382), 'numpy.zeros_like', 'np.zeros_like', (['image1'], {}), '(image1)\n', (13374, 13382), True, 'import numpy as np\n'), ((13403, 13443), 'cv2.cvtColor', 'cv2.cvtColor', (['image1', 'cv2.COLOR_BGR2GRAY'], {}), '(image1, cv2.COLOR_BGR2GRAY)\n', (13415, 13443), False, 'import cv2\n'), ((13464, 13504), 'cv2.cvtColor', 'cv2.cvtColor', (['image2', 'cv2.COLOR_BGR2GRAY'], {}), '(image2, cv2.COLOR_BGR2GRAY)\n', (13476, 13504), False, 'import cv2\n'), ((13524, 13704), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['gray1', 'gray2'], {'flow': 'None', 'pyr_scale': '(0.5)', 'levels': '(10)', 'winsize': '(15)', 'iterations': '(10)', 'poly_n': '(7)', 'poly_sigma': '(1.5)', 'flags': 'cv2.OPTFLOW_FARNEBACK_GAUSSIAN'}), '(gray1, gray2, flow=None, pyr_scale=0.5, levels\n =10, winsize=15, iterations=10, poly_n=7, poly_sigma=1.5, flags=cv2.\n OPTFLOW_FARNEBACK_GAUSSIAN)\n', (13552, 13704), False, 'import cv2\n'), ((14128, 14153), 'utils.utils.InputPadder', 'InputPadder', (['image2.shape'], {}), '(image2.shape)\n', (14139, 14153), False, 'from utils.utils import InputPadder\n'), ((14508, 14543), 'utils.flow_viz.flow_to_image', 'flow_viz.flow_to_image', (['self.result'], {}), '(self.result)\n', (14530, 14543), False, 'from utils import flow_viz\n'), ((14637, 14694), 'cv2.cartToPolar', 'cv2.cartToPolar', (['self.result[..., 0]', 'self.result[..., 1]'], {}), '(self.result[..., 0], self.result[..., 1])\n', (14652, 14694), False, 'import cv2\n'), ((14833, 14888), 'cv2.normalize', 'cv2.normalize', (['magnitude', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(magnitude, None, 0, 255, cv2.NORM_MINMAX)\n', (14846, 14888), False, 'import cv2\n'), ((14909, 14946), 'cv2.cvtColor', 'cv2.cvtColor', (['mask', 'cv2.COLOR_HSV2BGR'], {}), '(mask, cv2.COLOR_HSV2BGR)\n', (14921, 14946), False, 'import cv2\n'), ((15346, 15371), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(255)'], {}), '(0, 255)\n', (15363, 15371), True, 'import numpy as np\n'), ((28872, 28893), 'numpy.array', 'np.array', (['metrics_all'], {}), '(metrics_all)\n', (28880, 28893), True, 'import numpy as np\n'), ((28928, 28981), 'numpy.interp', 'np.interp', (['axis', 'metrics_all[:, 0]', 'metrics_all[:, 1]'], {}), '(axis, metrics_all[:, 0], metrics_all[:, 1])\n', (28937, 28981), True, 'import numpy as np\n'), ((28996, 29027), 'numpy.average', 'np.average', (['corrected_precision'], {}), '(corrected_precision)\n', (29006, 29027), True, 'import numpy as np\n'), ((29309, 29347), 'matplotlib.pyplot.scatter', 'plt.scatter', (['axis', 'corrected_precision'], {}), '(axis, corrected_precision)\n', (29320, 29347), True, 'import matplotlib.pyplot as plt\n'), ((29360, 29395), 'matplotlib.pyplot.plot', 'plt.plot', (['axis', 'corrected_precision'], {}), '(axis, corrected_precision)\n', (29368, 29395), True, 'import matplotlib.pyplot as plt\n'), ((29407, 29451), 'matplotlib.pyplot.title', 'plt.title', (["(detector_type + ' - ' + flow_type)"], {}), "(detector_type + ' - ' + flow_type)\n", (29416, 29451), True, 'import matplotlib.pyplot as plt\n'), ((29464, 29484), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""recall"""'], {}), "('recall')\n", (29474, 29484), True, 'import matplotlib.pyplot as plt\n'), ((29497, 29520), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (29507, 29520), True, 'import matplotlib.pyplot as plt\n'), ((29533, 29543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29541, 29543), True, 'import matplotlib.pyplot as plt\n'), ((1833, 1864), 'os.path.join', 
'os.path.join', (['location', '"""*.png"""'], {}), "(location, '*.png')\n", (1845, 1864), False, 'import os\n'), ((8493, 8516), 'torch.load', 'torch.load', (['weight_path'], {}), '(weight_path)\n', (8503, 8516), False, 'import torch\n'), ((9782, 9797), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9795, 9797), False, 'import torch\n'), ((11552, 11582), 'numpy.where', 'np.where', (['(ovr <= IOU_THRESHOLD)'], {}), '(ovr <= IOU_THRESHOLD)\n', (11560, 11582), True, 'import numpy as np\n'), ((12991, 13001), 'raft.RAFT', 'RAFT', (['args'], {}), '(args)\n', (12995, 13001), False, 'from raft import RAFT\n'), ((13047, 13069), 'torch.load', 'torch.load', (['args.model'], {}), '(args.model)\n', (13057, 13069), False, 'import torch\n'), ((15702, 15717), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (15710, 15717), True, 'import numpy as np\n'), ((15736, 15751), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (15744, 15751), True, 'import numpy as np\n'), ((21790, 21821), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (21812, 21821), False, 'import cv2\n'), ((27403, 27426), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (27424, 27426), False, 'import cv2\n'), ((8237, 8258), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8256, 8258), True, 'import torchvision.transforms as transforms\n'), ((8779, 8800), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8798, 8800), True, 'import torchvision.transforms as transforms\n'), ((21948, 21976), 'os.path.join', 'os.path.join', (['location', 'name'], {}), '(location, name)\n', (21960, 21976), False, 'import os\n'), ((24369, 24380), 'time.time', 'time.time', ([], {}), '()\n', (24378, 24380), False, 'import time\n'), ((28170, 28187), 'numpy.average', 'np.average', (['times'], {}), '(times)\n', (28180, 28187), True, 'import numpy as np\n'), ((25665, 25676), 'time.time', 'time.time', ([], {}), '()\n', (25674, 25676), False, 'import time\n'), ((2267, 2285), 'PIL.Image.open', 'Image.open', (['imfile'], {}), '(imfile)\n', (2277, 2285), False, 'from PIL import Image\n'), ((13894, 13918), 'torch.from_numpy', 'torch.from_numpy', (['image1'], {}), '(image1)\n', (13910, 13918), False, 'import torch\n'), ((14011, 14035), 'torch.from_numpy', 'torch.from_numpy', (['image2'], {}), '(image2)\n', (14027, 14035), False, 'import torch\n'), ((22343, 22375), 'os.path.join', 'os.path.join', (['location', '"""gt.xml"""'], {}), "(location, 'gt.xml')\n", (22355, 22375), False, 'import os\n')]
|
from ipywidgets import widgets, Layout, ValueWidget, link, HBox
from ipywidgets.widgets.widget_description import DescriptionWidget
import numpy as np
from hdmf.common import DynamicTable
from .utils.dynamictable import group_and_sort, infer_categorical_columns
from .utils.pynwb import robust_unique
from typing import Iterable
from tqdm.notebook import tqdm as tqdm_notebook
class RangeController(widgets.HBox, ValueWidget, DescriptionWidget):
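    """Range slider with buttons to step or jump a (lo, hi) value window within [vmin, vmax]."""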
def __init__(self, vmin, vmax, start_value=None, dtype='float', description='time window (s)',
orientation='horizontal', **kwargs):
if orientation not in ('horizontal', 'vertical'):
            raise ValueError('Unrecognized orientation: {}'.format(orientation))
self.vmin = vmin
self.vmax = vmax
self.start_value = start_value
self.orientation = orientation
self.dtype = dtype
super().__init__()
self.slider = self.make_range_slider(description=description, **kwargs)
[link((self.slider, attr), (self, attr)) for attr in ('value', 'description')]
if self.orientation == 'horizontal':
self.to_start_button = widgets.Button(description='◀◀', layout=Layout(width='55px'))
self.backwards_button = widgets.Button(description='◀', layout=Layout(width='40px'))
self.forward_button = widgets.Button(description='▶', layout=Layout(width='40px'))
self.to_end_button = widgets.Button(description='▶▶', layout=Layout(width='55px'))
else: # vertical
self.to_end_button = widgets.Button(description='▲▲', layout=Layout(width='50px'))
self.forward_button = widgets.Button(description='▲', layout=Layout(width='50px'))
self.backwards_button = widgets.Button(description='▼', layout=Layout(width='50px'))
self.to_start_button = widgets.Button(description='▼▼', layout=Layout(width='50px'))
self.to_start_button.on_click(self.move_start)
self.backwards_button.on_click(self.move_down)
self.forward_button.on_click(self.move_up)
self.to_end_button.on_click(self.move_end)
self.children = self.get_children()
def get_children(self):
if self.orientation == 'horizontal':
return [
self.slider,
self.to_start_button,
self.backwards_button,
self.forward_button,
self.to_end_button
]
elif self.orientation == 'vertical':
return [widgets.VBox([
self.slider,
self.to_end_button,
self.forward_button,
self.backwards_button,
self.to_start_button,
],
layout=widgets.Layout(display='flex',
flex_flow='column',
align_items='center')
)]
else:
raise ValueError('Unrecognized orientation: {}'.format(self.orientation))
def make_range_slider(self, **kwargs):
"""
Parameters
----------
kwargs: passed into RangeSlider constructor
Returns
-------
"""
slider_kwargs = dict(
value=self.start_value,
min=self.vmin,
max=self.vmax,
continuous_update=False,
readout=True,
style={'description_width': 'initial'},
orientation=self.orientation
)
if self.dtype == 'float':
slider_kwargs.update(
readout_format='.1f',
step=0.1,
description='time window (s)',
layout=Layout(width='100%')
)
slider_kwargs.update(kwargs)
return widgets.FloatRangeSlider(**slider_kwargs)
elif self.dtype == 'int':
slider_kwargs.update(
description='unit window',
layout=Layout(height='100%')
)
slider_kwargs.update(kwargs)
return widgets.IntRangeSlider(**slider_kwargs)
else:
raise ValueError('Unrecognized dtype: {}'.format(self.dtype))
def move_up(self, change):
value_range = self.value[1] - self.value[0]
if self.value[1] + value_range < self.vmax:
self.value = (self.value[0] + value_range, self.value[1] + value_range)
else:
self.move_end(change)
def move_down(self, change):
value_range = self.value[1] - self.value[0]
if self.value[0] - value_range > self.vmin:
self.value = (self.value[0] - value_range, self.value[1] - value_range)
else:
self.move_start(change)
def move_start(self, change):
value_range = self.value[1] - self.value[0]
self.value = (self.vmin, self.vmin + value_range)
def move_end(self, change):
value_range = self.value[1] - self.value[0]
self.value = (self.vmax - value_range, self.vmax)
class StartAndDurationController(HBox, ValueWidget, DescriptionWidget):
"""
Can be used in place of the RangeController.
"""
def __init__(self, tmax, tmin=0, start_value=None, duration=1., dtype='float', description='window (s)',
**kwargs):
self.tmin = tmin
self.tmax = tmax
self.start_value = start_value
self.dtype = dtype
self.slider = widgets.FloatSlider(
value=start_value,
min=tmin,
max=tmax,
step=0.01,
description=description,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
style={'description_width': 'initial'},
layout=Layout(width='100%'))
self.duration = widgets.BoundedFloatText(
value=duration,
min=0,
max=tmax - tmin,
step=0.1,
description='duration (s):',
style={'description_width': 'initial'},
layout=Layout(max_width='140px')
)
super().__init__()
link((self.slider, 'description'), (self, 'description'))
self.value = (self.slider.value, self.slider.value + self.duration.value)
self.forward_button = widgets.Button(description='▶', layout=Layout(width='50px'))
self.forward_button.on_click(self.move_up)
self.backwards_button = widgets.Button(description='◀', layout=Layout(width='50px'))
self.backwards_button.on_click(self.move_down)
self.children = [self.slider, self.duration, self.backwards_button, self.forward_button]
self.slider.observe(self.monitor_slider)
self.duration.observe(self.monitor_duration)
def monitor_slider(self, change):
if 'new' in change:
if isinstance(change['new'], dict):
if 'value' in change['new']:
value = change['new']['value']
else:
return
else:
value = change['new']
if value + self.duration.value > self.tmax:
self.slider.value = self.tmax - self.duration.value
else:
self.value = (value, value + self.duration.value)
def monitor_duration(self, change):
if 'new' in change:
if isinstance(change['new'], dict):
if 'value' in change['new']:
value = change['new']['value']
if self.slider.value + value > self.tmax:
self.slider.value = self.tmax - value
self.value = (self.slider.value, self.slider.value + value)
def move_up(self, change):
if self.slider.value + 2 * self.duration.value < self.tmax:
self.slider.value += self.duration.value
else:
self.slider.value = self.tmax - self.duration.value
def move_down(self, change):
if self.slider.value - self.duration.value > self.tmin:
self.slider.value -= self.duration.value
else:
self.slider.value = self.tmin
class AbstractGroupAndSortController(widgets.VBox, ValueWidget):
"""
Defines the abstract type for GroupAndSortController objects. These classes take in a DynamicTable objects
and broadcast a `value` of the form
dict(
order=array-like(uint),
group_inds=array-like(uint) | None,
labels=array-like(str) | None
)
"""
def __init__(self, dynamic_table: DynamicTable):
super().__init__()
self.dynamic_table = dynamic_table
self.nitems = len(self.dynamic_table.id)
self.group_vals = None
self.group_by = None
self.group_select = None
self.limit = None
self.desc = False
self.order_by = None
self.order_vals = None
self.window = None
class GroupAndSortController(AbstractGroupAndSortController):
def __init__(self, dynamic_table: DynamicTable, group_by=None, window=None, start_discard_rows=None):
"""
Parameters
----------
dynamic_table
group_by
window: None or bool,
"""
super().__init__(dynamic_table)
groups = self.get_groups()
self.discard_rows = start_discard_rows
self.limit_bit = widgets.BoundedIntText(value=50, min=0, max=99999, disabled=True,
layout=Layout(max_width='70px'))
self.limit_bit.observe(self.limit_bit_observer)
self.limit_cb = widgets.Checkbox(description='limit', style={'description_width': 'initial'}, disabled=True,
indent=False, layout=Layout(max_width='70px'))
self.limit_cb.observe(self.limit_cb_observer)
self.order_dd = widgets.Dropdown(options=[None] + list(groups), description='order by',
layout=Layout(max_width='120px'), style={'description_width': 'initial'})
self.order_dd.observe(self.order_dd_observer)
self.ascending_dd = widgets.Dropdown(options=['ASC', 'DESC'], disabled=True,
layout=Layout(max_width='70px'))
self.ascending_dd.observe(self.ascending_dd_observer)
range_controller_max = min(30, self.nitems)
if window is None:
self.range_controller = RangeController(0, self.nitems, start_value=(0, range_controller_max), dtype='int', description='units',
orientation='vertical')
self.range_controller.observe(self.range_controller_observer)
self.window = self.range_controller.value
elif window is False:
self.window = (0, self.nitems)
self.range_controller = widgets.HTML('')
self.group_sm = widgets.SelectMultiple(layout=Layout(max_width='100px'), disabled=True, rows=1)
self.group_sm.observe(self.group_sm_observer)
if group_by is None:
self.group_dd = widgets.Dropdown(options=[None] + list(groups), description='group by',
style={'description_width': 'initial'}, layout=Layout(width='90%'))
self.group_dd.observe(self.group_dd_observer)
else:
self.group_dd = None
self.set_group_by(group_by)
self.children = self.get_children()
self.layout = Layout(width='280px')
self.update_value()
def get_children(self):
children = [
widgets.HBox(children=(self.group_sm, self.range_controller)),
widgets.HBox(children=(self.limit_cb, self.limit_bit), layout=Layout(max_width='90%')),
widgets.HBox(children=(self.order_dd, self.ascending_dd), layout=Layout(max_width='90%')),
]
if self.group_dd:
children.insert(0, self.group_dd)
return children
def set_group_by(self, group_by):
self.group_by = group_by
self.group_vals = self.get_group_vals(by=group_by)
group_vals = self.group_vals
if self.discard_rows is not None:
group_vals = group_vals[~np.isin(np.arange(len(group_vals), dtype='int'), self.discard_rows)]
        if self.group_vals.dtype == float:
group_vals = group_vals[~np.isnan(group_vals)]
groups = np.unique(group_vals)
self.group_sm.options = tuple(groups[::-1])
self.group_sm.value = self.group_sm.options
self.group_sm.disabled = False
self.group_sm.rows = min(len(groups), 20)
self.limit_cb.disabled = False
self.group_and_sort()
def group_dd_observer(self, change):
"""group dropdown observer"""
if change['name'] == 'value':
group_by = change['new']
if group_by in ('None', '', None):
self.limit_bit.disabled = True
self.limit_cb.disabled = True
self.group_vals = None
self.group_by = None
self.limit = None
self.limit_cb.value = False
if hasattr(self.range_controller, 'slider'):
self.range_controller.slider.max = len(self.dynamic_table) - len(self.discard_rows)
else:
self.set_group_by(group_by)
self.update_value()
def limit_bit_observer(self, change):
"""limit bounded int text observer"""
if change['name'] == 'value':
limit = self.limit_bit.value
self.limit = limit
self.update_value()
def limit_cb_observer(self, change):
"""limit checkbox observer"""
if change['name'] == 'value':
if self.limit_cb.value and self.group_by is not None:
self.limit_bit.disabled = False
self.limit = self.limit_bit.value
else:
self.limit_bit.disabled = True
self.limit = None
self.update_value()
def order_dd_observer(self, change):
"""order dropdown observer"""
if change['name'] == 'value':
self.order_by = self.order_dd.value
order_vals = self.get_group_vals(by=self.order_by)
# convert to ints. This is mainly for handling strings
_, order_vals = np.unique(order_vals, return_inverse=True)
if self.desc: # if descend is on, invert order.
order_vals *= -1
self.order_vals = order_vals
self.ascending_dd.disabled = self.order_dd.value is None
self.update_value()
def ascending_dd_observer(self, change):
"""ascending dropdown observer"""
if change['name'] == 'value':
if change['new'] == 'ASC':
self.desc = False
self.order_vals *= -1
else:
self.desc = True
self.order_vals *= -1
self.update_value()
def group_sm_observer(self, change):
"""group SelectMultiple observer"""
if change['name'] == 'value' and not self.group_sm.disabled:
self.group_select = change['new']
value_before = self.window
self.group_and_sort()
if hasattr(self.range_controller, 'slider') and not self.range_controller.slider.value == value_before:
pass # do nothing, value was updated automatically
else:
self.update_value()
def range_controller_observer(self, change):
self.window = self.range_controller.value
self.update_value()
def get_groups(self):
return infer_categorical_columns(self.dynamic_table)
def get_group_vals(self, by, units_select=()):
"""Get the values of the group_by variable
Parameters
----------
by
units_select
Returns
-------
"""
if by is None:
return None
elif by in self.dynamic_table:
return self.dynamic_table[by][:][units_select]
else:
raise ValueError('column {} not in DynamicTable {}'.format(by, self.dynamic_table))
def get_orderable_cols(self):
        candidate_cols = [x for x in self.dynamic_table.colnames
                          if not isinstance(self.dynamic_table[x][0], Iterable) or
                          isinstance(self.dynamic_table[x][0], str)]
        return [x for x in candidate_cols if len(robust_unique(self.dynamic_table[x][:])) > 1]
def group_and_sort(self):
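        # Compute display order (plus optional group indices/labels) from the current
        # group/order/limit settings, then crop the result to the visible window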
if self.group_vals is None and self.order_vals is None:
self.order_vals = np.arange(self.nitems).astype('int')
order, group_inds, labels = group_and_sort(
group_vals=self.group_vals,
group_select=self.group_select,
discard_rows=self.discard_rows,
order_vals=self.order_vals,
limit=self.limit
)
if hasattr(self.range_controller, 'slider'):
self.range_controller.slider.max = len(order)
# apply window
if self.window is not None:
order = order[self.window[0]:self.window[1]]
if group_inds is not None:
group_inds = group_inds[self.window[0]:self.window[1]]
return order, group_inds, labels
def update_value(self):
order, group_inds, labels = self.group_and_sort()
self.value = dict(order=order, group_inds=group_inds, labels=labels)
class ProgressBar(tqdm_notebook):
def __init__(self, *arg, **kwargs):
super().__init__(*arg, **kwargs)
self.container.children[0].layout = Layout(width='80%')
def make_trial_event_controller(trials):
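    # Dropdown over the trials table's time columns (start_time, stop_time, *_time) to align to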
trial_events = ['start_time']
if not np.all(np.isnan(trials['stop_time'].data)):
trial_events.append('stop_time')
trial_events += [x.name for x in trials.columns if
(('_time' in x.name) and (x.name not in ('start_time', 'stop_time')))]
trial_event_controller = widgets.Dropdown(options=trial_events,
value='start_time',
description='align to: ')
return trial_event_controller
|
[
"ipywidgets.widgets.HTML",
"ipywidgets.widgets.HBox",
"ipywidgets.link",
"ipywidgets.widgets.Dropdown",
"numpy.isnan",
"numpy.arange",
"ipywidgets.widgets.Layout",
"ipywidgets.widgets.IntRangeSlider",
"ipywidgets.Layout",
"numpy.unique",
"ipywidgets.widgets.FloatRangeSlider"
] |
[((18058, 18147), 'ipywidgets.widgets.Dropdown', 'widgets.Dropdown', ([], {'options': 'trial_events', 'value': '"""start_time"""', 'description': '"""align to: """'}), "(options=trial_events, value='start_time', description=\n 'align to: ')\n", (18074, 18147), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((6175, 6232), 'ipywidgets.link', 'link', (["(self.slider, 'description')", "(self, 'description')"], {}), "((self.slider, 'description'), (self, 'description'))\n", (6179, 6232), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((11514, 11535), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""280px"""'}), "(width='280px')\n", (11520, 11535), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((12438, 12459), 'numpy.unique', 'np.unique', (['group_vals'], {}), '(group_vals)\n', (12447, 12459), True, 'import numpy as np\n'), ((17689, 17708), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""80%"""'}), "(width='80%')\n", (17695, 17708), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((1012, 1051), 'ipywidgets.link', 'link', (['(self.slider, attr)', '(self, attr)'], {}), '((self.slider, attr), (self, attr))\n', (1016, 1051), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((3814, 3855), 'ipywidgets.widgets.FloatRangeSlider', 'widgets.FloatRangeSlider', ([], {}), '(**slider_kwargs)\n', (3838, 3855), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((11626, 11687), 'ipywidgets.widgets.HBox', 'widgets.HBox', ([], {'children': '(self.group_sm, self.range_controller)'}), '(children=(self.group_sm, self.range_controller))\n', (11638, 11687), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((14401, 14443), 'numpy.unique', 'np.unique', (['order_vals'], {'return_inverse': '(True)'}), '(order_vals, return_inverse=True)\n', (14410, 14443), True, 'import numpy as np\n'), ((17804, 17838), 'numpy.isnan', 'np.isnan', (["trials['stop_time'].data"], {}), "(trials['stop_time'].data)\n", (17812, 17838), True, 'import numpy as np\n'), ((4086, 4125), 'ipywidgets.widgets.IntRangeSlider', 'widgets.IntRangeSlider', ([], {}), '(**slider_kwargs)\n', (4108, 4125), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((5820, 5840), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""100%"""'}), "(width='100%')\n", (5826, 5840), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((6103, 6128), 'ipywidgets.Layout', 'Layout', ([], {'max_width': '"""140px"""'}), "(max_width='140px')\n", (6109, 6128), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((6388, 6408), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""50px"""'}), "(width='50px')\n", (6394, 6408), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((6533, 6553), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""50px"""'}), "(width='50px')\n", (6539, 6553), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((9515, 9539), 'ipywidgets.Layout', 'Layout', ([], {'max_width': '"""70px"""'}), "(max_width='70px')\n", (9521, 9539), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((9777, 9801), 'ipywidgets.Layout', 'Layout', ([], {'max_width': '"""70px"""'}), "(max_width='70px')\n", (9783, 9801), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((10002, 10027), 'ipywidgets.Layout', 'Layout', 
([], {'max_width': '"""120px"""'}), "(max_width='120px')\n", (10008, 10027), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((10261, 10285), 'ipywidgets.Layout', 'Layout', ([], {'max_width': '"""70px"""'}), "(max_width='70px')\n", (10267, 10285), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((10883, 10899), 'ipywidgets.widgets.HTML', 'widgets.HTML', (['""""""'], {}), "('')\n", (10895, 10899), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((10955, 10980), 'ipywidgets.Layout', 'Layout', ([], {'max_width': '"""100px"""'}), "(max_width='100px')\n", (10961, 10980), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((1215, 1235), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""55px"""'}), "(width='55px')\n", (1221, 1235), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((1310, 1330), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""40px"""'}), "(width='40px')\n", (1316, 1330), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((1405, 1425), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""40px"""'}), "(width='40px')\n", (1411, 1425), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((1502, 1522), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""55px"""'}), "(width='55px')\n", (1508, 1522), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((1623, 1643), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""50px"""'}), "(width='50px')\n", (1629, 1643), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((1716, 1736), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""50px"""'}), "(width='50px')\n", (1722, 1736), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((1813, 1833), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""50px"""'}), "(width='50px')\n", (1819, 1833), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((1912, 1932), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""50px"""'}), "(width='50px')\n", (1918, 1932), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((3719, 3739), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""100%"""'}), "(width='100%')\n", (3725, 3739), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((11281, 11300), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""90%"""'}), "(width='90%')\n", (11287, 11300), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((11763, 11786), 'ipywidgets.Layout', 'Layout', ([], {'max_width': '"""90%"""'}), "(max_width='90%')\n", (11769, 11786), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((11866, 11889), 'ipywidgets.Layout', 'Layout', ([], {'max_width': '"""90%"""'}), "(max_width='90%')\n", (11872, 11889), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((12399, 12419), 'numpy.isnan', 'np.isnan', (['group_vals'], {}), '(group_vals)\n', (12407, 12419), True, 'import numpy as np\n'), ((16684, 16706), 'numpy.arange', 'np.arange', (['self.nitems'], {}), '(self.nitems)\n', (16693, 16706), True, 'import numpy as np\n'), ((3990, 4011), 'ipywidgets.Layout', 'Layout', ([], {'height': '"""100%"""'}), "(height='100%')\n", (3996, 4011), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n'), ((2772, 2844), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'display': 
'"""flex"""', 'flex_flow': '"""column"""', 'align_items': '"""center"""'}), "(display='flex', flex_flow='column', align_items='center')\n", (2786, 2844), False, 'from ipywidgets import widgets, Layout, ValueWidget, link, HBox\n')]
|
import numpy as np
from td import TD
import time
class Sarsa(TD):
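    """Tabular Sarsa: on-policy TD control with an epsilon-greedy policy."""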
def __init__(self, env, step_size=0.1, gamma=1, eps=0.1, pol_deriv=None):
super().__init__(env, None, step_size, gamma)
self.pol_deriv = pol_deriv if pol_deriv is not None else self.eps_gre(eps)
self.reset()
#print(f"step size={self.step_size}")
#print(f"epsilon={eps}")
#print(f"gamma={self.gamma}")
def best_action(self, moves, vals):
return moves[np.random.choice(np.flatnonzero(vals == vals.max()))]
def random_move(self, s):
return self.env.moves_d[s][np.random.randint(len(self.env.moves_d[s]))]
def eps_gre(self, eps):
def eps_gre_pol(s):
if np.random.random() < eps:
return self.random_move(s)
return self.best_action(self.env.moves_d[s], np.array([self.Q[(s, a)] for a in self.env.moves_d[s]]))
return eps_gre_pol
def sarsa_update(self, s, a, r, s_p, a_p):
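        # One-step Sarsa update: Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a))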
self.Q[(s, a)] += self.step_size * (r + self.gamma * self.Q[(s_p, a_p)] - self.Q[(s, a)])
def on_policy_td_control(self, n_episodes, rews=False):
ep_per_timesteps = []
r_sum_l = []
for ep_nb in range(n_episodes):
ep_start = time.time()
s = self.env.reset()
a = self.pol_deriv(s)
r_sum = 0
while True:
ep_per_timesteps.append(ep_nb)
s_p, r, d, _ = self.env.step(a)
r_sum += r
a_p = self.pol_deriv(s_p)
self.sarsa_update(s, a, r, s_p, a_p)
if d:
r_sum_l.append(r_sum)
break
s, a = s_p, a_p
return ep_per_timesteps if not rews else r_sum_l
def reset(self):
self.Q = {(s,a): 0 for s in self.env.states for a in self.env.moves_d[s]}
|
[
"numpy.random.random",
"numpy.array",
"time.time"
] |
[((1159, 1170), 'time.time', 'time.time', ([], {}), '()\n', (1168, 1170), False, 'import time\n'), ((671, 689), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (687, 689), True, 'import numpy as np\n'), ((783, 836), 'numpy.array', 'np.array', (['[self.Q[s, a] for a in self.env.moves_d[s]]'], {}), '([self.Q[s, a] for a in self.env.moves_d[s]])\n', (791, 836), True, 'import numpy as np\n')]
|
"""
"""
import unittest
import numpy as np
from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION
class TestCell(unittest.TestCase):
def setUp(self):
self.cell = Cell(tokens=[
Token(text='hi',
bounding_box=Box(llx=-1.0, lly=-0.5, urx=1.0, ury=1.0)),
            Token(text='bye',
bounding_box=Box(llx=1.5, lly=-0.5, urx=2.5, ury=1.5))
], rowspan=1, colspan=1)
def test_str(self):
self.assertEqual(str(self.cell), 'hi bye')
def test_compute_bounding_box(self):
box = self.cell.bounding_box
self.assertEqual(box.ll.x, -1.0)
self.assertEqual(box.ll.y, -0.5)
self.assertEqual(box.ur.x, 2.5)
self.assertEqual(box.ur.y, 1.5)
class TestTable(unittest.TestCase):
def setUp(self):
self.a = Cell(tokens=[Token(text='a')], rowspan=1, colspan=1)
self.b = Cell(tokens=[Token(text='b')], rowspan=1, colspan=1)
self.c = Cell(tokens=[Token(text='c')], rowspan=1, colspan=1)
self.d = Cell(tokens=[Token(text='d')], rowspan=1, colspan=1)
self.e = Cell(tokens=[Token(text='e')], rowspan=1, colspan=1)
self.f = Cell(tokens=[Token(text='f')], rowspan=1, colspan=1)
self.easy_table = Table(caption='hi this is caption')
self.easy_table.grid = np.array([
[self.a, self.b, self.c],
[self.d, self.e, self.f]
])
self.hard_table = Table.create_from_cells(
cells=[
Cell(tokens=[Token(text='')], rowspan=2, colspan=2),
Cell(tokens=[Token(text='C')], rowspan=1, colspan=2),
Cell(tokens=[Token(text='C:1')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='C:2')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='R')], rowspan=3, colspan=1),
Cell(tokens=[Token(text='R:1')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='a')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='b')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='R:2')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='c')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='d')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='R:3')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='e')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='f')], rowspan=1, colspan=1)
], nrow=5, ncol=4, paper_id='abc', page_num=0,
caption='hi this is caption')
def test_create_from_grid(self):
self.assertEqual(Table.create_from_grid(grid=[
[self.a, self.b, self.c],
[self.d, self.e, self.f]
]), self.easy_table)
# TODO
def test_create_from_cells(self):
pass
def test_improper_table(self):
# misspecified nrow or ncol
with self.assertRaises(Exception):
Table.create_from_cells(
cells=[Cell(tokens=[Token(text='a')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='b')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='c')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='d')], rowspan=1, colspan=1)],
nrow=2, ncol=1, paper_id='', page_num=0, caption='')
with self.assertRaises(Exception):
Table.create_from_cells(
cells=[Cell(tokens=[Token(text='a')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='b')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='c')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='d')], rowspan=1, colspan=1)],
nrow=1, ncol=2, paper_id='', page_num=0, caption='')
# not enough cells to fill out table
with self.assertRaises(Exception):
Table.create_from_cells(
cells=[Cell(tokens=[Token(text='a')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='b')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='c')], rowspan=1, colspan=1)],
nrow=2, ncol=2, paper_id='', page_num=0, caption='')
with self.assertRaises(Exception):
Table.create_from_cells(
cells=[Cell(tokens=[Token(text='a')], rowspan=1, colspan=1),
Cell(tokens=[Token(text='b')], rowspan=1, colspan=1)],
nrow=2, ncol=2, paper_id='', page_num=0, caption='')
# cell juts out of table boundaries
with self.assertRaises(Exception):
Table.create_from_cells(
cells=[Cell(tokens=[Token(text='a')], rowspan=1, colspan=2)],
nrow=1, ncol=1, paper_id='', page_num=0, caption='')
def test_shape_properties(self):
self.assertEqual(self.easy_table.nrow, 2)
self.assertEqual(self.easy_table.ncol, 3)
self.assertEqual(self.easy_table.dim, (2, 3))
self.assertEqual(self.hard_table.nrow, 5)
self.assertEqual(self.hard_table.ncol, 4)
self.assertEqual(self.hard_table.dim, (5, 4))
def test_grid_indexing(self):
# single elements
self.assertEqual(self.easy_table[0, 0], self.a)
self.assertEqual(self.easy_table[-1, -1], self.f)
# full row
self.assertListEqual(self.easy_table[0, :], [self.a, self.b, self.c])
self.assertListEqual(self.easy_table[1, :], [self.d, self.e, self.f])
# partial row
self.assertListEqual(self.easy_table[0, 1:], [self.b, self.c])
self.assertListEqual(self.easy_table[0, :2], [self.a, self.b])
self.assertListEqual(self.easy_table[0, 1:2], [self.b])
# full column
self.assertListEqual(self.easy_table[:, 0], [self.a, self.d])
# partial column
self.assertListEqual(self.easy_table[1:, 0], [self.d])
self.assertListEqual(self.easy_table[:1, 0], [self.a])
self.assertListEqual(self.easy_table[1:2, 0], [self.d])
# full subgrid
self.assertEqual(self.easy_table, self.easy_table[:, :])
# partial subgrid
self.assertEqual(self.easy_table[1:2, 1:2],
Table.create_from_grid(grid=[[self.e]]))
self.assertEqual(self.easy_table[1:, 1:],
Table.create_from_grid(grid=[[self.e, self.f]]))
self.assertEqual(self.easy_table[:2, :2],
Table.create_from_grid(grid=[[self.a, self.b],
[self.d, self.e]]))
def test_str(self):
self.assertEqual(str(self.easy_table),
'a\tb\tc\nd\te\tf' + '\n' + 'hi this is caption')
t = '\t\tC\tC\n\t\tC:1\tC:2\nR\tR:1\ta\tb\nR\tR:2\tc\td\nR\tR:3\te\tf'
c = 'hithisiscaption'
self.assertEqual(str(self.hard_table).replace(' ', ''), t + '\n' + c)
def test_insert_row(self):
x = Cell(tokens=[Token(text='x')], rowspan=1, colspan=1)
y = Cell(tokens=[Token(text='y')], rowspan=1, colspan=1)
z = Cell(tokens=[Token(text='z')], rowspan=1, colspan=1)
self.assertEqual(self.easy_table.insert_row(index=0, row=[x, y, z]),
Table.create_from_grid(grid=[
[x, y, z],
[self.a, self.b, self.c],
[self.d, self.e, self.f]
]))
self.assertEqual(self.easy_table.insert_row(index=1, row=[x, y, z]),
Table.create_from_grid(grid=[
[self.a, self.b, self.c],
[x, y, z],
[self.d, self.e, self.f]
]))
with self.assertRaises(Exception):
self.easy_table.insert_row(index=1, row=[x, y])
def test_insert_column(self):
x = Cell(tokens=[Token(text='x')], rowspan=1, colspan=1)
y = Cell(tokens=[Token(text='y')], rowspan=1, colspan=1)
self.assertEqual(self.easy_table.insert_column(index=1, column=[x, y]),
Table.create_from_grid(grid=[
[self.a, x, self.b, self.c],
[self.d, y, self.e, self.f]
]))
with self.assertRaises(Exception):
self.easy_table.insert_column(index=1, column=[x, y, y])
def test_delete_row(self):
self.assertEqual(self.easy_table.delete_row(index=1),
Table.create_from_grid(grid=[
[self.a, self.b, self.c]
]))
def test_delete_column(self):
self.assertEqual(self.easy_table.delete_column(index=1),
Table.create_from_grid(grid=[
[self.a, self.c],
[self.d, self.f]
]))
def test_append_left(self):
self.assertEqual(
self.easy_table.append_left(other=Table.create_from_grid(
grid=[[self.f, self.b, self.d],
[self.c, self.e, self.a]])),
Table.create_from_grid(
grid=[[self.f, self.b, self.d, self.a, self.b, self.c],
[self.c, self.e, self.a, self.d, self.e, self.f]])
)
def test_append_right(self):
self.assertEqual(
self.easy_table.append_right(other=Table.create_from_grid(
grid=[[self.f, self.b, self.d],
[self.c, self.e, self.a]])),
Table.create_from_grid(
grid=[[self.a, self.b, self.c, self.f, self.b, self.d],
[self.d, self.e, self.f, self.c, self.e, self.a]])
)
def test_append_top(self):
self.assertEqual(
self.easy_table.append_top(other=Table.create_from_grid(
grid=[[self.f, self.b, self.d],
[self.c, self.e, self.a]])),
Table.create_from_grid(
grid=[[self.f, self.b, self.d],
[self.c, self.e, self.a],
[self.a, self.b, self.c],
[self.d, self.e, self.f]])
)
def test_append_bottom(self):
self.assertEqual(
self.easy_table.append_bottom(other=Table.create_from_grid(
grid=[[self.f, self.b, self.d],
[self.c, self.e, self.a]])),
Table.create_from_grid(
grid=[[self.a, self.b, self.c],
[self.d, self.e, self.f],
[self.f, self.b, self.d],
[self.c, self.e, self.a]])
)
def test_compute_bounding_box(self):
table = Table.create_from_cells(
cells=[
Cell(tokens=[Token(text='e')], rowspan=1, colspan=1,
bounding_box=Box(llx=-1.0, lly=-0.5, urx=1.0, ury=1.0)),
Cell(tokens=[Token(text='e')], rowspan=1, colspan=1,
bounding_box=Box(llx=1.5, lly=-0.5, urx=2.5, ury=1.5))
],
nrow=1, ncol=2, paper_id='abc', page_num=0,
caption='hi this is caption')
box = table.bounding_box
self.assertEqual(box.ll.x, -1.0)
self.assertEqual(box.ll.y, -0.5)
self.assertEqual(box.ur.x, 2.5)
self.assertEqual(box.ur.y, 1.5)
# TODO: implement this later
def test_eq(self):
pass
|
[
"corvid.types.table.Table",
"corvid.types.table.Box",
"corvid.types.table.Token",
"numpy.array",
"corvid.types.table.Table.create_from_grid"
] |
[((1279, 1314), 'corvid.types.table.Table', 'Table', ([], {'caption': '"""hi this is caption"""'}), "(caption='hi this is caption')\n", (1284, 1314), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1346, 1408), 'numpy.array', 'np.array', (['[[self.a, self.b, self.c], [self.d, self.e, self.f]]'], {}), '([[self.a, self.b, self.c], [self.d, self.e, self.f]])\n', (1354, 1408), True, 'import numpy as np\n'), ((2667, 2752), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.a, self.b, self.c], [self.d, self.e, self.f]]'}), '(grid=[[self.a, self.b, self.c], [self.d, self.e,\n self.f]])\n', (2689, 2752), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((6279, 6318), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.e]]'}), '(grid=[[self.e]])\n', (6301, 6318), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((6395, 6442), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.e, self.f]]'}), '(grid=[[self.e, self.f]])\n', (6417, 6442), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((6519, 6584), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.a, self.b], [self.d, self.e]]'}), '(grid=[[self.a, self.b], [self.d, self.e]])\n', (6541, 6584), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((7303, 7399), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[x, y, z], [self.a, self.b, self.c], [self.d, self.e, self.f]]'}), '(grid=[[x, y, z], [self.a, self.b, self.c], [self.d,\n self.e, self.f]])\n', (7325, 7399), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((7613, 7709), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.a, self.b, self.c], [x, y, z], [self.d, self.e, self.f]]'}), '(grid=[[self.a, self.b, self.c], [x, y, z], [self.d,\n self.e, self.f]])\n', (7635, 7709), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((8194, 8286), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.a, x, self.b, self.c], [self.d, y, self.e, self.f]]'}), '(grid=[[self.a, x, self.b, self.c], [self.d, y, self.\n e, self.f]])\n', (8216, 8286), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((8599, 8654), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.a, self.b, self.c]]'}), '(grid=[[self.a, self.b, self.c]])\n', (8621, 8654), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((8837, 8902), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.a, self.c], [self.d, self.f]]'}), '(grid=[[self.a, self.c], [self.d, self.f]])\n', (8859, 8902), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((9229, 9363), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.f, self.b, self.d, self.a, self.b, self.c], [self.c, self.e, self.a,\n self.d, self.e, self.f]]'}), '(grid=[[self.f, self.b, self.d, self.a, self.b, self.\n c], [self.c, self.e, self.a, self.d, self.e, self.f]])\n', (9251, 9363), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((9650, 
9784), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.a, self.b, self.c, self.f, self.b, self.d], [self.d, self.e, self.f,\n self.c, self.e, self.a]]'}), '(grid=[[self.a, self.b, self.c, self.f, self.b, self.\n d], [self.d, self.e, self.f, self.c, self.e, self.a]])\n', (9672, 9784), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((10067, 10204), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.f, self.b, self.d], [self.c, self.e, self.a], [self.a, self.b, self.\n c], [self.d, self.e, self.f]]'}), '(grid=[[self.f, self.b, self.d], [self.c, self.e,\n self.a], [self.a, self.b, self.c], [self.d, self.e, self.f]])\n', (10089, 10204), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((10538, 10675), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.a, self.b, self.c], [self.d, self.e, self.f], [self.f, self.b, self.\n d], [self.c, self.e, self.a]]'}), '(grid=[[self.a, self.b, self.c], [self.d, self.e,\n self.f], [self.f, self.b, self.d], [self.c, self.e, self.a]])\n', (10560, 10675), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((863, 878), 'corvid.types.table.Token', 'Token', ([], {'text': '"""a"""'}), "(text='a')\n", (868, 878), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((933, 948), 'corvid.types.table.Token', 'Token', ([], {'text': '"""b"""'}), "(text='b')\n", (938, 948), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1003, 1018), 'corvid.types.table.Token', 'Token', ([], {'text': '"""c"""'}), "(text='c')\n", (1008, 1018), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1073, 1088), 'corvid.types.table.Token', 'Token', ([], {'text': '"""d"""'}), "(text='d')\n", (1078, 1088), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1143, 1158), 'corvid.types.table.Token', 'Token', ([], {'text': '"""e"""'}), "(text='e')\n", (1148, 1158), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1213, 1228), 'corvid.types.table.Token', 'Token', ([], {'text': '"""f"""'}), "(text='f')\n", (1218, 1228), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((7031, 7046), 'corvid.types.table.Token', 'Token', ([], {'text': '"""x"""'}), "(text='x')\n", (7036, 7046), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((7096, 7111), 'corvid.types.table.Token', 'Token', ([], {'text': '"""y"""'}), "(text='y')\n", (7101, 7111), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((7161, 7176), 'corvid.types.table.Token', 'Token', ([], {'text': '"""z"""'}), "(text='z')\n", (7166, 7176), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((7984, 7999), 'corvid.types.table.Token', 'Token', ([], {'text': '"""x"""'}), "(text='x')\n", (7989, 7999), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((8049, 8064), 'corvid.types.table.Token', 'Token', ([], {'text': '"""y"""'}), "(text='y')\n", (8054, 8064), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((9094, 9179), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.f, self.b, self.d], [self.c, self.e, self.a]]'}), 
'(grid=[[self.f, self.b, self.d], [self.c, self.e,\n self.a]])\n', (9116, 9179), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((9515, 9600), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.f, self.b, self.d], [self.c, self.e, self.a]]'}), '(grid=[[self.f, self.b, self.d], [self.c, self.e,\n self.a]])\n', (9537, 9600), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((9932, 10017), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.f, self.b, self.d], [self.c, self.e, self.a]]'}), '(grid=[[self.f, self.b, self.d], [self.c, self.e,\n self.a]])\n', (9954, 10017), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((10403, 10488), 'corvid.types.table.Table.create_from_grid', 'Table.create_from_grid', ([], {'grid': '[[self.f, self.b, self.d], [self.c, self.e, self.a]]'}), '(grid=[[self.f, self.b, self.d], [self.c, self.e,\n self.a]])\n', (10425, 10488), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((270, 311), 'corvid.types.table.Box', 'Box', ([], {'llx': '(-1.0)', 'lly': '(-0.5)', 'urx': '(1.0)', 'ury': '(1.0)'}), '(llx=-1.0, lly=-0.5, urx=1.0, ury=1.0)\n', (273, 311), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((382, 422), 'corvid.types.table.Box', 'Box', ([], {'llx': '(1.5)', 'lly': '(-0.5)', 'urx': '(2.5)', 'ury': '(1.5)'}), '(llx=1.5, lly=-0.5, urx=2.5, ury=1.5)\n', (385, 422), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((10971, 11012), 'corvid.types.table.Box', 'Box', ([], {'llx': '(-1.0)', 'lly': '(-0.5)', 'urx': '(1.0)', 'ury': '(1.0)'}), '(llx=-1.0, lly=-0.5, urx=1.0, ury=1.0)\n', (10974, 11012), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((11118, 11158), 'corvid.types.table.Box', 'Box', ([], {'llx': '(1.5)', 'lly': '(-0.5)', 'urx': '(2.5)', 'ury': '(1.5)'}), '(llx=1.5, lly=-0.5, urx=2.5, ury=1.5)\n', (11121, 11158), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1544, 1558), 'corvid.types.table.Token', 'Token', ([], {'text': '""""""'}), "(text='')\n", (1549, 1558), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1613, 1628), 'corvid.types.table.Token', 'Token', ([], {'text': '"""C"""'}), "(text='C')\n", (1618, 1628), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1683, 1700), 'corvid.types.table.Token', 'Token', ([], {'text': '"""C:1"""'}), "(text='C:1')\n", (1688, 1700), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1755, 1772), 'corvid.types.table.Token', 'Token', ([], {'text': '"""C:2"""'}), "(text='C:2')\n", (1760, 1772), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1827, 1842), 'corvid.types.table.Token', 'Token', ([], {'text': '"""R"""'}), "(text='R')\n", (1832, 1842), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1897, 1914), 'corvid.types.table.Token', 'Token', ([], {'text': '"""R:1"""'}), "(text='R:1')\n", (1902, 1914), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((1969, 1984), 'corvid.types.table.Token', 'Token', ([], {'text': '"""a"""'}), "(text='a')\n", (1974, 1984), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((2039, 2054), 
'corvid.types.table.Token', 'Token', ([], {'text': '"""b"""'}), "(text='b')\n", (2044, 2054), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((2109, 2126), 'corvid.types.table.Token', 'Token', ([], {'text': '"""R:2"""'}), "(text='R:2')\n", (2114, 2126), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((2181, 2196), 'corvid.types.table.Token', 'Token', ([], {'text': '"""c"""'}), "(text='c')\n", (2186, 2196), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((2251, 2266), 'corvid.types.table.Token', 'Token', ([], {'text': '"""d"""'}), "(text='d')\n", (2256, 2266), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((2321, 2338), 'corvid.types.table.Token', 'Token', ([], {'text': '"""R:3"""'}), "(text='R:3')\n", (2326, 2338), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((2393, 2408), 'corvid.types.table.Token', 'Token', ([], {'text': '"""e"""'}), "(text='e')\n", (2398, 2408), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((2463, 2478), 'corvid.types.table.Token', 'Token', ([], {'text': '"""f"""'}), "(text='f')\n", (2468, 2478), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((10897, 10912), 'corvid.types.table.Token', 'Token', ([], {'text': '"""e"""'}), "(text='e')\n", (10902, 10912), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((11044, 11059), 'corvid.types.table.Token', 'Token', ([], {'text': '"""e"""'}), "(text='e')\n", (11049, 11059), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((3052, 3067), 'corvid.types.table.Token', 'Token', ([], {'text': '"""a"""'}), "(text='a')\n", (3057, 3067), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((3129, 3144), 'corvid.types.table.Token', 'Token', ([], {'text': '"""b"""'}), "(text='b')\n", (3134, 3144), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((3206, 3221), 'corvid.types.table.Token', 'Token', ([], {'text': '"""c"""'}), "(text='c')\n", (3211, 3221), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((3283, 3298), 'corvid.types.table.Token', 'Token', ([], {'text': '"""d"""'}), "(text='d')\n", (3288, 3298), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((3511, 3526), 'corvid.types.table.Token', 'Token', ([], {'text': '"""a"""'}), "(text='a')\n", (3516, 3526), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((3588, 3603), 'corvid.types.table.Token', 'Token', ([], {'text': '"""b"""'}), "(text='b')\n", (3593, 3603), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((3665, 3680), 'corvid.types.table.Token', 'Token', ([], {'text': '"""c"""'}), "(text='c')\n", (3670, 3680), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((3742, 3757), 'corvid.types.table.Token', 'Token', ([], {'text': '"""d"""'}), "(text='d')\n", (3747, 3757), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((4015, 4030), 'corvid.types.table.Token', 'Token', ([], {'text': '"""a"""'}), "(text='a')\n", (4020, 4030), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((4092, 4107), 'corvid.types.table.Token', 'Token', ([], {'text': '"""b"""'}), "(text='b')\n", 
(4097, 4107), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((4169, 4184), 'corvid.types.table.Token', 'Token', ([], {'text': '"""c"""'}), "(text='c')\n", (4174, 4184), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((4397, 4412), 'corvid.types.table.Token', 'Token', ([], {'text': '"""a"""'}), "(text='a')\n", (4402, 4412), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((4474, 4489), 'corvid.types.table.Token', 'Token', ([], {'text': '"""b"""'}), "(text='b')\n", (4479, 4489), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n'), ((4746, 4761), 'corvid.types.table.Token', 'Token', ([], {'text': '"""a"""'}), "(text='a')\n", (4751, 4761), False, 'from corvid.types.table import Box, Token, Cell, Table, EMPTY_CAPTION\n')]
|
"""
This program grabs text from an image and compares it with 'модули иртибот'.
It returns bounding boxes for the words that match 'модули иртибот' and an empty list when nothing matches.
"""
import pytesseract
import numpy as np
import cv2
import os, sys
parent_dir = os.path.dirname(os.path.abspath(__file__))
gparent_dir = os.path.dirname(parent_dir)
ggparent_dir = os.path.dirname(gparent_dir)
sys.path += [parent_dir, gparent_dir, ggparent_dir]
from bounding_box import BoundingBox, ObjectType
class TextDetector:
def __init__(self):
self.text = 'модулииртибот'
def detect_russian_word(self, color_image, depth_image):
"""
Detect words in given image.
Returns
-------
A list of box objects that contain desired text
"""
# filter image
_, filter_image = cv2.threshold(np.mean(color_image, axis=2), 185, 255, cv2.THRESH_BINARY)
# shows what the filtered image looks like
# cv2.imshow('img', filter_image)
# cv2.waitKey(0)
## only return boxes that have text in them
## eg. find a way to check if boxes are repetitive or do not contain text
d = pytesseract.image_to_data(filter_image, output_type=pytesseract.Output.DICT, lang="uzb_cyrl")
n_boxes = len(d['level'])
box_obs = []
contents = d['text']
# print(contents)
for i in range(n_boxes):
if not contents[i]:
continue
else:
for j in contents[i]:
if j in self.text:
# print(contents[i])
(x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])
# cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
verts = [(x, y), (x + w, y), (x, y + h), (x + w, y + h)]
cv2.rectangle(filter_image, verts[0], verts[-1], (0, 255, 0), 2)
box = BoundingBox(verts, ObjectType('text'))
box_obs.append(box)
break
# cv2.imshow('img', filter_image)
# cv2.waitKey(0)
return box_obs
if __name__ == "__main__":
import time
import os
start = time.time()
color_image = cv2.imread(os.path.join('vision_images', 'text', '2020-02-23.png'))
if color_image is None:
raise FileNotFoundError("Could not read image!")
detector = TextDetector()
result = detector.detect_russian_word(color_image, None)
print(result)
print("Time:", time.time() - start)
|
[
"os.path.abspath",
"os.path.dirname",
"pytesseract.image_to_data",
"time.time",
"numpy.mean",
"cv2.rectangle",
"os.path.join",
"bounding_box.ObjectType"
] |
[((305, 332), 'os.path.dirname', 'os.path.dirname', (['parent_dir'], {}), '(parent_dir)\n', (320, 332), False, 'import os\n'), ((348, 376), 'os.path.dirname', 'os.path.dirname', (['gparent_dir'], {}), '(gparent_dir)\n', (363, 376), False, 'import os\n'), ((264, 289), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (279, 289), False, 'import os\n'), ((2259, 2270), 'time.time', 'time.time', ([], {}), '()\n', (2268, 2270), False, 'import time\n'), ((1163, 1260), 'pytesseract.image_to_data', 'pytesseract.image_to_data', (['filter_image'], {'output_type': 'pytesseract.Output.DICT', 'lang': '"""uzb_cyrl"""'}), "(filter_image, output_type=pytesseract.Output.DICT,\n lang='uzb_cyrl')\n", (1188, 1260), False, 'import pytesseract\n'), ((2301, 2356), 'os.path.join', 'os.path.join', (['"""vision_images"""', '"""text"""', '"""2020-02-23.png"""'], {}), "('vision_images', 'text', '2020-02-23.png')\n", (2313, 2356), False, 'import os\n'), ((837, 865), 'numpy.mean', 'np.mean', (['color_image'], {'axis': '(2)'}), '(color_image, axis=2)\n', (844, 865), True, 'import numpy as np\n'), ((2574, 2585), 'time.time', 'time.time', ([], {}), '()\n', (2583, 2585), False, 'import time\n'), ((1888, 1952), 'cv2.rectangle', 'cv2.rectangle', (['filter_image', 'verts[0]', 'verts[-1]', '(0, 255, 0)', '(2)'], {}), '(filter_image, verts[0], verts[-1], (0, 255, 0), 2)\n', (1901, 1952), False, 'import cv2\n'), ((2002, 2020), 'bounding_box.ObjectType', 'ObjectType', (['"""text"""'], {}), "('text')\n", (2012, 2020), False, 'from bounding_box import BoundingBox, ObjectType\n')]
|
import torch
import numpy as np
import argparse
from models import FlowNet2
from utils.frame_utils import read_gen
class Args():
fp16 = False
rgb_max = 255.
def get_flow(img1, img2, weights):
    # initialize the network
args = Args()
net = FlowNet2(args).cuda()
    # load the checkpoint and restore the trained weights
    checkpoint = torch.load(weights)
    net.load_state_dict(checkpoint["state_dict"])
# load the image pair, you can find this operation in dataset.py
pim1 = read_gen(img1)
pim2 = read_gen(img2)
images = [pim1, pim2]
images = np.array(images).transpose(3, 0, 1, 2)
im = torch.from_numpy(images.astype(np.float32)).unsqueeze(0).cuda()
    # process the image pair to obtain the flow
result = net(im).squeeze()
data = result.data.cpu().numpy().transpose(1, 2, 0)
return data
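# Example usage (a sketch; the file and checkpoint names are hypothetical placeholders):
#   flow = get_flow('frame_0001.png', 'frame_0002.png', 'FlowNet2_checkpoint.pth.tar')
# `flow` is an (H, W, 2) array holding the per-pixel displacement from img1 to img2.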
|
[
"torch.load",
"utils.frame_utils.read_gen",
"numpy.array",
"models.FlowNet2"
] |
[((326, 345), 'torch.load', 'torch.load', (['weights'], {}), '(weights)\n', (336, 345), False, 'import torch\n'), ((475, 489), 'utils.frame_utils.read_gen', 'read_gen', (['img1'], {}), '(img1)\n', (483, 489), False, 'from utils.frame_utils import read_gen\n'), ((502, 516), 'utils.frame_utils.read_gen', 'read_gen', (['img2'], {}), '(img2)\n', (510, 516), False, 'from utils.frame_utils import read_gen\n'), ((265, 279), 'models.FlowNet2', 'FlowNet2', (['args'], {}), '(args)\n', (273, 279), False, 'from models import FlowNet2\n'), ((558, 574), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (566, 574), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import json
import numpy as np
import matplotlib.pyplot as plt
import equations
data_output = 'data/simulations/single_ligand.json'
tspan = np.array([0, 120 * 60]) # 2 hour window
units = 1e9 # 1e9 for nM, 1e6 for μM, etc
L1 = 30e-9
R = 800e-9
alpha = 0.06
m = np.array([R, L1, 0]) * units
k = np.array([1e-5, 2.2e-4])
print(f"Simulated conditions: α: {alpha * 100:.1f}% Kd: {k[1]/k[0]:.3f}nM")
simulated_binding = equations.simulate_one_ligand_one_receptor_binding(k, m, tspan, alpha)
result = {
'time': simulated_binding.t.tolist(),
'available_receptor': simulated_binding.y[0,:].tolist(),
'labeled_ligand': simulated_binding.y[1,:].tolist(),
'labeled_receptor_complexes': simulated_binding.y[2,:].tolist(),
'label_kinetic_parameters': k.tolist(),
'initial_conditions': m.tolist(),
'units': 'nM',
'alpha': alpha
}
with open(data_output, 'w') as fd:
    fd.write(json.dumps(result))
|
[
"equations.simulate_one_ligand_one_receptor_binding",
"numpy.array",
"json.dumps"
] |
[((167, 190), 'numpy.array', 'np.array', (['[0, 120 * 60]'], {}), '([0, 120 * 60])\n', (175, 190), True, 'import numpy as np\n'), ((326, 352), 'numpy.array', 'np.array', (['[1e-05, 0.00022]'], {}), '([1e-05, 0.00022])\n', (334, 352), True, 'import numpy as np\n'), ((448, 518), 'equations.simulate_one_ligand_one_receptor_binding', 'equations.simulate_one_ligand_one_receptor_binding', (['k', 'm', 'tspan', 'alpha'], {}), '(k, m, tspan, alpha)\n', (498, 518), False, 'import equations\n'), ((292, 312), 'numpy.array', 'np.array', (['[R, L1, 0]'], {}), '([R, L1, 0])\n', (300, 312), True, 'import numpy as np\n'), ((935, 953), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (945, 953), False, 'import json\n')]
|
#!/usr/bin/env python
# TODO: Add type hints and doc strings
# TODO: Write Unit Tests
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
import sys
import os
import argparse
import logging
import numpy as np
from typing import Dict, List, Tuple
from functools import partial
from scipy import signal
from enum import Enum
from pyfiglet import Figlet
from PIL import Image
from pathlib import Path
import tempfile
if getattr(sys, "frozen", False):
# If the application is run as a bundle, the PyInstaller bootloader
# extends the sys module by a flag frozen=True and sets the app
# path into variable _MEIPASS'.
# application_path = Path(sys._MEIPASS).absolute()
application_path = Path(sys.executable).parent.absolute()
else:
application_path = Path(__file__).parent.absolute()
logger = logging.getLogger("pattern_gen")
logger.setLevel(logging.DEBUG)
if application_path.parent.joinpath("logs").exists():
fh = logging.FileHandler(
application_path.parent.joinpath("logs/pattern_gen.log")
)
else:
fh = logging.FileHandler(
Path(tempfile.gettempdir()).joinpath("pattern_gen.log")
)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.debug(f"application path is : {application_path}")
parser = argparse.ArgumentParser()
class CheckDirAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if not os.path.exists(values):
parser.error("Directory {} does not exist".format(values))
setattr(namespace, self.dest, values)
class MinPixAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values < 1:
parser.error(
"Minimum pixel value for {0} is 1 pixel".format(self.dest)
)
setattr(namespace, self.dest, values)
class ArgTypeMixin(Enum):
@classmethod
def argtype(cls, s: str) -> Enum:
try:
return cls[s]
except KeyError:
raise argparse.ArgumentTypeError(
f"{s!r} is not a valid {cls.__name__}"
)
def __str__(self):
return self.name
class FileFormat(ArgTypeMixin, Enum):
"""What type of format to save output images as"""
tif = "tiff"
tiff = "tiff"
jpg = "jpeg"
jpeg = "jpeg"
bmp = "bmp"
png = "png"
class BarFuncType(ArgTypeMixin, Enum):
"""What type of function to generate bars"""
solid = partial(signal.square)
sine = partial(np.sin)
triangle = partial(signal.sawtooth, width=0)
def __call__(self, *args, **kwargs) -> partial:
return self.value(*args, **kwargs)
class Orientation(ArgTypeMixin, Enum):
"""What type of orientation to use"""
horizontal = 0
vertical = 1
parser.add_argument(
"image_dims",
type=int,
nargs=2,
choices=range(256, 2049),
metavar="[256-2048]",
help="Output Dimension of the Image. H x W Muse be between 256 - 2048",
)
parser.add_argument(
"bar_size",
type=int,
nargs="?",
action=MinPixAction,
help="Size of the bars",
)
parser.add_argument(
"--phase_shifts",
type=int,
default=3,
choices=range(3, 13),
metavar="[3-12]",
help="Number of Phase Shifts",
)
parser.add_argument(
"--file_format",
type=FileFormat.argtype,
choices=FileFormat,
default="bmp",
help="File format to save image as",
)
parser.add_argument(
"--bar_func",
type=BarFuncType.argtype,
choices=BarFuncType,
default="sine",
help="Function that generates Bars",
)
parser.add_argument(
"--orientation",
type=Orientation.argtype,
choices=Orientation,
default="vertical",
help="Vertical or Horizontal Bars",
)
parser.add_argument(
"--bit_depth",
type=str,
choices=("8", "16", "32"),
default="8",
help="Bit Depth of the image",
)
parser.add_argument(
"--output_directory",
type=str,
default=application_path,
action=CheckDirAction,
metavar="",
help="Directory to save images to. Defaults to current path of __file__",
)
parser.add_argument(
"--full_depth",
type=bool,
default=True,
metavar="{True, False}",
help="Use full bit depth range",
)
class SimGenerator:
__slots__ = [
"image_dims",
"bar_size",
"phase_shifts",
"file_format",
"bar_func",
"orientation",
"bit_depth",
"output_directory",
"full_depth",
"DTYPES",
"BITLIMS",
]
image_dims: List[int]
bar_size: int
phase_shifts: int
file_format: FileFormat
bar_func: BarFuncType
orientation: Orientation
bit_depth: str
output_directory: str
full_depth: bool
DTYPES: Dict
BITLIMS: Dict
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
self.DTYPES = {"8": np.uint8, "16": np.uint16, "32": np.uint32}
self.BITLIMS = {"8": (0, 255), "16": (0, 65535), "32": (0, 4294967295)}
def __repr__(self) -> str:
return str({slot: getattr(self, slot) for slot in self.__slots__})
def __len__(self):
return len(self.__slots__)
def _what_dim(self):
"""Return Vertical or Horizontal image dimension based on Orientation"""
lv = self.image_dims[0] # Vertical
lh = self.image_dims[1] # Horizontal
return lh if self.orientation.name == "vertical" else lv
def _tile_dim(self):
"""Return Vertical or Horizontal image dimension based on Orientation"""
lv = self.image_dims[0] # Vertical
lh = self.image_dims[1] # Horizontal
return lv if self.orientation.name == "vertical" else lh
def _get_freq(self) -> float:
"""Find the frequency of the bar function"""
freq = self._what_dim() / self.bar_size / 2
logger.debug(f"Frequency: {freq} pixels")
return freq
def _gen_phi(self):
"""Create 1d signal of size n"""
phi_deg = np.linspace(0.001, 360, self.phase_shifts, endpoint=False)
return (
np.radians(deg) for deg in phi_deg
) # phase in radian generator
def _gen_signal(self, phi):
# todo: generate t outside of this function
t = np.linspace(0, 1, self._what_dim(), endpoint=False) # spacing
freq = self._get_freq()
sig = self.bar_func(2 * np.pi * freq * t + phi)
sig = self._rescale(sig, self.BITLIMS[self.bit_depth])
sig = self._cast(sig, self.DTYPES[self.bit_depth])
return sig
def _rescale(self, x, lims=(0, 1)):
new_min, new_max = lims
old_min, old_max = np.min(x), np.max(x)
return (new_max - new_min) * (
(x - old_min) / (old_max - old_min)
) + new_min
def _cast(self, x: np.array, dtype: np.dtype):
return x.astype(dtype)
def _save_images(self, images: List[Tuple[int, Image.Image]]):
"""Save the image to disk"""
for i, image in images:
_path = os.path.join(
self.output_directory,
f"phi_{i+1:02d}.{self.file_format.value}",
)
image.save(_path, self.file_format.value)
logger.info(f"Image saved to {_path}")
def _make_pattern_image(self, sig: np.array) -> Image.Image:
if self.orientation.name == "horizontal":
return Image.fromarray(
np.tile(sig[:, np.newaxis], (1, self._tile_dim()))
)
else:
return Image.fromarray(
np.tile(sig[np.newaxis, :], (self._tile_dim(), 1))
)
def run(self):
"""Run the simulation"""
phis = self._gen_phi()
images = list()
for i, phi in enumerate(phis):
sig = self._gen_signal(phi)
images.append((i, self._make_pattern_image(sig)))
self._save_images(images)
def arg_logger(args):
for arg, value in sorted(vars(args).items()):
logger.debug("Argument %s: %r", arg, value)
def main():
custom_fig = Figlet(font="colossal")
print("\n\n\n", custom_fig.renderText("SIM GEN"))
# use argparse parser to parse input args
if not sys.argv[1:]:
sys.exit(0)
logger.debug("Sim Generator executed from command line")
parsed_args = parser.parse_args(sys.argv[1:])
arg_logger(parsed_args)
sg = SimGenerator(**vars(parsed_args))
sg.run()
if __name__ == "__main__":
main()
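# Example invocation (a sketch; the script name and argument values are illustrative,
# not taken from the source):
#   python pattern_gen.py 512 512 8 --phase_shifts 3 --bar_func sine --orientation vertical
# With the default --file_format bmp this writes phi_01.bmp ... phi_03.bmp
# (one image per phase shift) into --output_directory.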
|
[
"functools.partial",
"numpy.radians",
"argparse.ArgumentParser",
"pyfiglet.Figlet",
"logging.StreamHandler",
"os.path.exists",
"tempfile.gettempdir",
"logging.Formatter",
"numpy.min",
"numpy.max",
"pathlib.Path",
"numpy.linspace",
"sys.exit",
"os.path.join",
"logging.getLogger",
"argparse.ArgumentTypeError"
] |
[((827, 859), 'logging.getLogger', 'logging.getLogger', (['"""pattern_gen"""'], {}), "('pattern_gen')\n", (844, 859), False, 'import logging\n'), ((1184, 1207), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1205, 1207), False, 'import logging\n'), ((1246, 1319), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1263, 1319), False, 'import logging\n'), ((1494, 1519), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1517, 1519), False, 'import argparse\n'), ((2696, 2718), 'functools.partial', 'partial', (['signal.square'], {}), '(signal.square)\n', (2703, 2718), False, 'from functools import partial\n'), ((2730, 2745), 'functools.partial', 'partial', (['np.sin'], {}), '(np.sin)\n', (2737, 2745), False, 'from functools import partial\n'), ((2761, 2794), 'functools.partial', 'partial', (['signal.sawtooth'], {'width': '(0)'}), '(signal.sawtooth, width=0)\n', (2768, 2794), False, 'from functools import partial\n'), ((8312, 8335), 'pyfiglet.Figlet', 'Figlet', ([], {'font': '"""colossal"""'}), "(font='colossal')\n", (8318, 8335), False, 'from pyfiglet import Figlet\n'), ((6256, 6314), 'numpy.linspace', 'np.linspace', (['(0.001)', '(360)', 'self.phase_shifts'], {'endpoint': '(False)'}), '(0.001, 360, self.phase_shifts, endpoint=False)\n', (6267, 6314), True, 'import numpy as np\n'), ((8469, 8480), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8477, 8480), False, 'import sys\n'), ((1647, 1669), 'os.path.exists', 'os.path.exists', (['values'], {}), '(values)\n', (1661, 1669), False, 'import os\n'), ((6344, 6359), 'numpy.radians', 'np.radians', (['deg'], {}), '(deg)\n', (6354, 6359), True, 'import numpy as np\n'), ((6908, 6917), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (6914, 6917), True, 'import numpy as np\n'), ((6919, 6928), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (6925, 6928), True, 'import numpy as np\n'), ((7276, 7361), 'os.path.join', 'os.path.join', (['self.output_directory', 'f"""phi_{i + 1:02d}.{self.file_format.value}"""'], {}), "(self.output_directory, f'phi_{i + 1:02d}.{self.file_format.value}'\n )\n", (7288, 7361), False, 'import os\n'), ((715, 735), 'pathlib.Path', 'Path', (['sys.executable'], {}), '(sys.executable)\n', (719, 735), False, 'from pathlib import Path\n'), ((784, 798), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (788, 798), False, 'from pathlib import Path\n'), ((2249, 2315), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""{s!r} is not a valid {cls.__name__}"""'], {}), "(f'{s!r} is not a valid {cls.__name__}')\n", (2275, 2315), False, 'import argparse\n'), ((1095, 1116), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1114, 1116), False, 'import tempfile\n')]
|
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import scipy.special
import scipy.stats
import mafipy.function
# ----------------------------------------------------------------------------
# Black scholes european call/put
# ----------------------------------------------------------------------------
def _is_d1_or_d2_infinity(underlying, strike, vol):
"""is_d1_or_d2_infinity
:param float underlying:
:param float strike:
:param float vol:
    :return: True if :math:`d_{1}` or :math:`d_{2}` would be infinite.
:rtype: bool
"""
return (np.isclose(underlying, 0.0)
or strike < 0.0
or vol < 0.0)
def func_d1(underlying, strike, rate, maturity, vol):
"""func_d1
calculate :math:`d_{1}` in black scholes formula.
See :py:func:`black_scholes_call_formula`.
:param float underlying: underlying/strike must be non-negative.
:param float strike: underlying/strike must be non-negative.
:param float rate:
:param float maturity: must be non-negative.
:param float vol: must be non-negative.
:return: :math:`d_{1}`.
:rtype: float
"""
assert(underlying / strike >= 0.0)
assert(maturity >= 0.0)
assert(vol >= 0.0)
numerator = (
math.log(underlying / strike) + (rate + vol * vol * 0.5) * maturity)
denominator = vol * math.sqrt(maturity)
return numerator / denominator
def func_d2(underlying, strike, rate, maturity, vol):
"""func_d2
calculate :math:`d_{2}` in black scholes formula.
See :py:func:`black_scholes_call_formula`.
:param float underlying: underlying/strike must be non-negative.
:param float strike: underlying/strike must be non-negative.
:param float rate:
:param float maturity: must be non-negative.
:param float vol: must be non-negative.
:return: :math:`d_{2}`.
:rtype: float.
"""
assert(underlying / strike >= 0.0)
assert(maturity >= 0.0)
assert(vol >= 0.0)
numerator = (
math.log(underlying / strike) + (rate - vol * vol * 0.5) * maturity)
denominator = vol * math.sqrt(maturity)
return numerator / denominator
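# Note: from the two definitions above, d1 - d2 = vol * sqrt(maturity); a quick check
# with illustrative (assumed) inputs:
#   func_d1(100.0, 90.0, 0.01, 1.0, 0.2) - func_d2(100.0, 90.0, 0.01, 1.0, 0.2)
#   # ~= 0.2 (= vol * sqrt(maturity)), up to floating-point error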
def d_fprime_by_strike(underlying, strike, rate, maturity, vol):
"""d_fprime_by_strike
derivative of :math:`d_{1}` with respect to :math:`K`
where :math:`K` is strike.
See :py:func:`func_d1`.
.. math::
\\frac{\partial }{\partial K} d_{1}(K)
        = - \\frac{1}{K \sigma \sqrt{T}}.
    Obviously, the derivatives of :math:`d_{1}` and :math:`d_{2}` with respect to :math:`K` are the same.
    That is
.. math::
\\frac{\partial }{\partial K} d_{1}(K)
= \\frac{\partial }{\partial K} d_{2}(K).
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: must be non-negative.
:param float vol:
:return: value of derivative.
:rtype: float
"""
assert(maturity > 0.0)
return - 1.0 / (math.sqrt(maturity) * vol * strike)
def d_fhess_by_strike(underlying, strike, rate, maturity, vol):
"""d_fhess_by_strike
second derivative of :math:`d_{i}\ (i = 1, 2)` with respect to :math:`K`,
where :math:`K` is strike.
.. math::
\\frac{\partial^{2}}{\partial K^{2}} d_{1}(K)
        = \\frac{1}{K^{2} \sigma \sqrt{T}},
    where
        :math:`K` is strike,
:math:`\sigma` is vol,
:math:`T` is maturity.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity:
:param float vol:
:return: value of second derivative of :math:`d_{1}` or :math:`d_{2}`.
:rtype: float
"""
assert(maturity > 0.0)
return 1.0 / (math.sqrt(maturity) * vol * strike * strike)
def black_scholes_call_formula(underlying, strike, rate, maturity, vol):
"""black_scholes_call_formula
calculate well known black scholes formula for call option.
.. math::
c(S, K, r, T, \sigma)
:= S N(d_{1}) - K e^{-rT} N(d_{2}),
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is vol,
:math:`N(\cdot)` is standard normal distribution,
and :math:`d_{1}` and :math:`d_{2}` are defined as follows:
.. math::
\\begin{eqnarray}
d_{1}
& = &
\\frac{\ln(S/K) + (r + \sigma^{2}/2)T}{\sigma \sqrt{T}},
            \\\\
d_{2}
& = &
\\frac{\ln(S/K) + (r - \sigma^{2}/2)T} {\sigma \sqrt{T}},
\end{eqnarray}
:param float underlying: value of underlying.
:param float strike: strike of call option.
:param float rate: risk free rate.
:param float maturity: year fraction to maturity.
:param float vol: volatility.
:return: call value.
:rtype: float
"""
d1 = func_d1(underlying, strike, rate, maturity, vol)
d2 = func_d2(underlying, strike, rate, maturity, vol)
return (underlying * scipy.special.ndtr(d1)
- strike * math.exp(-rate * maturity) * scipy.special.ndtr(d2))
def black_scholes_put_formula(underlying, strike, rate, maturity, vol):
"""black_scholes_put_formula
calculate well known black scholes formula for put option.
Here value of put option is calculated by put-call parity.
.. math::
\\begin{array}{cccl}
            & S - e^{-rT}K
            & = & c(S, K, r, T, \sigma) - p(S, K, r, T, \sigma)
            \\\\
            \iff & p(S, K, r, T, \sigma)
            & = & c(S, K, r, T, \sigma) - (S - e^{-rT}K)
\end{array}
where
:math:`c(\cdot)` denotes value of call option,
:math:`p(\cdot)` denotes value of put option,
:math:`S` is value of underlying at today,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is vol.
:math:`c(\cdot)` is calculated
by :py:func:`black_scholes_call_formula`.
:param float underlying: value of underlying.
:param float strike: strike of put option.
:param float rate: risk free rate.
:param float maturity: year fraction to maturity.
:param float vol: volatility.
:return: put value.
:rtype: float
"""
call_value = black_scholes_call_formula(
underlying, strike, rate, maturity, vol)
discount = math.exp(-rate * maturity)
return call_value - (underlying - strike * discount)
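# A quick numerical check of the put-call parity used above (the inputs are
# illustrative assumptions, not values from this module):
#   S, K, r, T, sigma = 100.0, 100.0, 0.01, 1.0, 0.2
#   c = black_scholes_call_formula(S, K, r, T, sigma)
#   p = black_scholes_put_formula(S, K, r, T, sigma)
#   c - p == S - K * math.exp(-r * T)   # holds up to floating-point error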
def black_scholes_call_value(
underlying,
strike,
rate,
maturity,
vol,
today=0.0):
"""black_scholes_call_value
calculate call value in the case of today is not zero.
(`maturity` - `today`) is treated as time to expiry.
See :py:func:`black_scholes_call_formula`.
* case :math:`S > 0, K < 0`
* return :math:`S - e^{-rT} K`
* case :math:`S < 0, K > 0`
* return 0
* case :math:`S < 0, K < 0`
* return :math:`S - e^{-rT}K + E[(-(S - K))^{+}]`
* case :math:`T \le 0`
* return 0
:param float underlying:
:param float strike:
:param float rate:
:param float maturity:
:param float vol: volatility. This must be positive.
:param float today:
:return: call value.
:rtype: float
"""
assert(vol >= 0.0)
time = maturity - today
# option is expired
if time < 0.0 or np.isclose(time, 0.0):
return 0.0
elif np.isclose(underlying, 0.0):
return math.exp(-rate * time) * max(-strike, 0.0)
elif np.isclose(strike, 0.0) and underlying > 0.0:
return math.exp(-rate * today) * underlying
elif np.isclose(strike, 0.0) and underlying < 0.0:
return 0.0
# never below strike
elif strike < 0.0 and underlying > 0.0:
return underlying - math.exp(-rate * time) * strike
# never beyond strike
elif strike > 0.0 and underlying < 0.0:
return 0.0
elif underlying < 0.0:
# max(S - K, 0) = (S - K) + max(-(S - K), 0)
value = black_scholes_call_formula(
-underlying, -strike, rate, time, vol)
return (underlying - strike) + value
return black_scholes_call_formula(
underlying, strike, rate, time, vol)
def black_scholes_put_value(
underlying,
strike,
rate,
maturity,
vol,
today=0.0):
"""black_scholes_put_value
evaluates value of put option using put-call parity so that
this function calls :py:func:`black_scholes_call_value`.
See :py:func:`black_scholes_put_formula`.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity:
:param float vol:
:param float today:
:return: put value.
:rtype: float
"""
time = maturity - today
# option is expired
if time < 0.0 or np.isclose(time, 0.0):
return 0.0
elif np.isclose(strike, 0.0) and underlying > 0.0:
return 0.0
elif np.isclose(strike, 0.0) and underlying < 0.0:
return underlying * math.exp(-rate * today)
call_value = black_scholes_call_value(
underlying, strike, rate, maturity, vol, today)
discount = math.exp(-rate * time)
return call_value - (underlying - strike * discount)
def black_scholes_call_value_fprime_by_strike(
underlying, strike, rate, maturity, vol):
"""black_scholes_call_value_fprime_by_strike
First derivative of value of call option with respect to strike
under black scholes model.
See :py:func:`black_scholes_call_formula`.
.. math::
\\frac{\partial }{\partial K} c(K; S, r, T, \sigma)
        = - e^{-rT} \Phi(d_{2}(K))
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is vol,
:math:`d_{1}, d_{2}` is defined
in :py:func:`black_scholes_call_formula`,
:math:`\Phi(\cdot)` is c.d.f. of standard normal distribution,
:math:`\phi(\cdot)` is p.d.f. of standard normal distribution.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: must be non-negative.
:param float vol: volatility. must be non-negative.
:return: value of derivative.
:rtype: float
"""
norm = scipy.stats.norm
assert(maturity > 0.0)
d2 = func_d2(underlying, strike, rate, maturity, vol)
discount = math.exp(-rate * maturity)
return -discount * norm.cdf(d2)
def black_scholes_call_value_fhess_by_strike(
underlying, strike, rate, maturity, vol):
"""black_scholes_call_value_fhess_by_strike
Second derivative of value of call option with respect to strike
under black scholes model.
See :py:func:`black_scholes_call_formula`
and :py:func:`black_scholes_call_value_fprime_by_strike`.
.. math::
\\begin{array}{ccl}
\\frac{\partial^{2}}{\partial K^{2}} c(0, S; T, K)
& = &
-e^{-rT}
\phi(d_{2}(K)) d^{\prime}(K)
\end{array}
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is vol,
:math:`d_{1}, d_{2}` is defined
in :py:func:`black_scholes_call_formula`,
:math:`\Phi(\cdot)` is c.d.f. of standard normal distribution,
:math:`\phi(\cdot)` is p.d.f. of standard normal distribution.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: non-negative.
:param float vol: volatility. non-negative.
:return: value of second derivative.
:rtype: float.
"""
norm = scipy.stats.norm
# option is expired
if maturity < 0.0 or np.isclose(maturity, 0.0):
return 0.0
# never below strike
elif strike <= 0.0 and underlying > 0.0:
return 0.0
# never beyond strike
elif strike > 0.0 and underlying < 0.0:
return 0.0
elif underlying < 0.0 and strike < 0.0:
underlying = -underlying
strike = -strike
discount = math.exp(-rate * maturity)
d2 = func_d2(underlying, strike, rate, maturity, vol)
d_fprime = d_fprime_by_strike(underlying, strike, rate, maturity, vol)
d2_density = norm.pdf(d2)
return -discount * d2_density * d_fprime
def black_scholes_call_value_third_by_strike(
underlying, strike, rate, maturity, vol):
"""black_scholes_call_value_third_by_strike
Third derivative of value of call option with respect to strike
under black scholes model.
See :py:func:`black_scholes_call_formula`
and :py:func:`black_scholes_call_value_fprime_by_strike`,
and :py:func:`black_scholes_call_value_fhess_by_strike`.
.. math::
\\begin{array}{ccl}
\\frac{\partial^{3}}{\partial K^{3}} c(0, S; T, K)
& = &
-e^{-rT}
\left(
\phi^{\prime}(d_{2}(K))(d^{\prime}(K))^{2}
+ \phi(d_{2}(K))d^{\prime\prime}(K)
\\right)
\end{array}
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is vol,
:math:`d_{1}, d_{2}` is defined
in :py:func:`black_scholes_call_formula`,
:math:`\Phi(\cdot)` is c.d.f. of standard normal distribution,
:math:`\phi(\cdot)` is p.d.f. of standard normal distribution.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: non-negative.
:param float vol: volatility. non-negative.
:return: value of third derivative.
:rtype: float.
"""
norm = scipy.stats.norm
assert(vol > 0.0)
# option is expired
if maturity < 0.0 or np.isclose(maturity, 0.0):
return 0.0
discount = math.exp(-rate * maturity)
d2 = func_d2(underlying, strike, rate, maturity, vol)
d_fprime = d_fprime_by_strike(underlying, strike, rate, maturity, vol)
d_fhess = d_fhess_by_strike(underlying, strike, rate, maturity, vol)
d2_density = norm.pdf(d2)
d2_density_fprime = mafipy.function.norm_pdf_fprime(d2)
term1 = d2_density_fprime * d_fprime * d_fprime
term2 = d2_density * d_fhess
return -discount * (term1 + term2)
# ----------------------------------------------------------------------------
# Black scholes greeks
# ----------------------------------------------------------------------------
def black_scholes_call_delta(underlying, strike, rate, maturity, vol):
"""black_scholes_call_delta
calculates black scholes delta.
.. math::
\\frac{\partial}{\partial S} c(S, K, r, T, \sigma)
= \Phi(d_{1}(S))
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is volatility,
:math:`\Phi` is standard normal c.d.f,
:math:`d_{1}` is defined in
:py:func:`func_d1`.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: if maturity <= 0, this function returns 0.
:param float vol: volatility. This must be positive.
:return: value of delta.
:rtype: float.
"""
assert(vol >= 0.0)
if maturity <= 0.0:
return 0.0
d1 = func_d1(underlying, strike, rate, maturity, vol)
return scipy.stats.norm.cdf(d1)
def black_scholes_call_gamma(underlying, strike, rate, maturity, vol):
"""black_scholes_call_gamma
calculates black scholes gamma.
.. math::
\\frac{\partial^{2}}{\partial S^{2}} c(S, K, r, T, \sigma)
        = \phi(d_{1}(S, K, r, T, \sigma))
        \\frac{1}{S\sigma\sqrt{T}}
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is volatility,
:math:`\Phi` is standard normal c.d.f,
:math:`d_{1}` is defined in
:py:func:`func_d1`.
See :py:func:`black_scholes_call_value`.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity:
if maturity is not positive, this function returns 0.0.
:param float vol: volatility. This must be positive.
:return: value of gamma.
:rtype: float.
"""
assert(vol >= 0.0)
if maturity <= 0.0:
return 0.0
d1 = func_d1(underlying, strike, rate, maturity, vol)
denominator = underlying * vol * math.sqrt(maturity)
return scipy.stats.norm.pdf(d1) / denominator
def black_scholes_call_vega(underlying, strike, rate, maturity, vol):
"""black_scholes_call_vega
calculates black scholes vega.
.. math::
\\frac{\partial}{\partial \sigma} c(S, K, r, T, \sigma)
= \sqrt{T}S\phi(d_{1}(S, K, r, T, \sigma))
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is volatility,
:math:`\phi` is standard normal p.d.f,
:math:`d_{1}` is defined in
:py:func:`func_d1`.
See :py:func:`black_scholes_call_value`.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: if maturity <= 0.0, this function returns 0.
:param float vol: volatility. This must be positive.
:return: value of vega.
:rtype: float.
"""
assert(vol >= 0.0)
if maturity <= 0.0:
return 0.0
d1 = func_d1(underlying, strike, rate, maturity, vol)
return math.sqrt(maturity) * underlying * scipy.stats.norm.pdf(d1)
def black_scholes_call_volga(underlying, strike, rate, maturity, vol):
"""black_scholes_call_volg
calculates black scholes volga.
.. math::
        \\frac{\partial^{2}}{\partial \sigma^{2}} c(S, K, r, T, \sigma)
        = S \phi^{\prime}(d_{1}(\sigma))
        \\frac{
            -\ln(S/K) + (\\frac{1}{2} \sigma^{2} - r)T
        }{
            \sigma^{2}
        }
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is volatility,
:math:`\phi` is standard normal p.d.f,
:math:`d_{1}` is defined in
:py:func:`func_d1`.
See :py:func:`black_scholes_call_value`.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: must be non-negative.
:param float vol: volatility. This must be positive.
:return: value of volga.
:rtype: float.
"""
assert(vol >= 0.0)
if maturity < 0.0:
return 0.0
d1 = func_d1(underlying, strike, rate, maturity, vol)
pdf_fprime = mafipy.function.norm_pdf_fprime(d1)
ln_moneyness = math.log(underlying / strike)
numerator = -ln_moneyness + (0.5 * vol * vol - rate) * maturity
factor = numerator / (vol * vol)
return underlying * pdf_fprime * factor
def black_scholes_call_theta(underlying, strike, rate, maturity, vol, today):
"""black_scholes_call_theta
calculates black scholes theta.
.. math::
\\frac{\partial}{\partial t} c(t, S, K, r, T, \sigma)
        = - S \phi(d_{1})
\left(
\\frac{\sigma}{2\sqrt{T - t}}
\\right)
- r e^{-r(T - t)} K \Phi(d_{2})
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is volatility,
:math:`\phi` is standard normal p.d.f,
:math:`d_{1}` is defined in
:py:func:`func_d1`.
See :py:func:`black_scholes_call_value`.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: must be non-negative.
:param float vol: volatility. This must be positive.
:return: value of theta.
:rtype: float.
"""
assert(maturity >= 0.0)
assert(vol >= 0.0)
norm = scipy.stats.norm
time = maturity - today
d1 = func_d1(underlying, strike, rate, time, vol)
d2 = func_d2(underlying, strike, rate, time, vol)
term1 = underlying * norm.pdf(d1) * (vol / (2.0 * math.sqrt(time)))
term2 = rate * math.exp(-rate * time) * strike * norm.cdf(d2)
return - term1 - term2
def black_scholes_call_rho(underlying, strike, rate, maturity, vol, today):
"""black_scholes_call_rho
calculates black scholes rho.
.. math::
        \\frac{\partial}{\partial r} c(t, S, K, r, T, \sigma)
= (T - t)
e^{-r (T - t)}
K \Phi(d_{2})
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is volatility,
:math:`\phi` is standard normal p.d.f,
:math:`d_{2}` is defined in
:py:func:`func_d2`.
See :py:func:`black_scholes_call_value`.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: must be non-negative.
:param float vol: volatility. This must be positive.
:return: value of rho.
:rtype: float.
"""
assert(maturity >= 0.0)
assert(vol >= 0.0)
norm = scipy.stats.norm
time = maturity - today
d2 = func_d2(underlying, strike, rate, time, vol)
return time * math.exp(-rate * time) * strike * norm.cdf(d2)
def black_scholes_call_vega_fprime_by_strike(
underlying, strike, rate, maturity, vol):
"""black_scholes_call_vega_fprime_by_strike
calculates derivative of black scholes vega with respect to strike.
This is required for :py:func:`sabr_pdf`.
.. math::
\\frac{\partial}{\partial K}
        \mathrm{Vega}_{\mathrm{BSCall}}(S, K, r, T, \sigma)
        =
        - S\phi^{\prime}(d_{1}(S, K, r, T, \sigma))
        \\frac{1}{\sigma K}
where
:math:`S` is underlying,
:math:`K` is strike,
:math:`r` is rate,
:math:`T` is maturity,
:math:`\sigma` is volatility,
:math:`\phi` is standard normal p.d.f,
:math:`d_{1}` is defined in
:py:func:`func_d1`.
See :py:func:`black_scholes_call_value`.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity: if maturity <= 0.0, this function returns 0.
:param float vol: volatility. This must be positive.
:return: derivative of vega with respect to strike.
:rtype: float.
"""
assert(vol >= 0.0)
if maturity <= 0.0:
return 0.0
d1 = func_d1(underlying, strike, rate, maturity, vol)
density_fprime = mafipy.function.norm_pdf_fprime(d1)
return -underlying * density_fprime / (vol * strike)
# ----------------------------------------------------------------------------
# Black scholes distributions
# ----------------------------------------------------------------------------
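# The two helpers below recover the risk-neutral distribution of the underlying
# from strike derivatives of the call price (Breeden-Litzenberger): the c.d.f.
# uses the first derivative with respect to strike, the p.d.f. the second, and
# each is rescaled by exp(rate * maturity) to undo discounting.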
def black_scholes_cdf(underlying, strike, rate, maturity, vol):
"""black_scholes_cdf
calculates value of c.d.f. of black scholes model.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity:
:param float vol: must be positive.
    :return: value of c.d.f. of black scholes model.
:rtype: float.
"""
assert(vol > 0.0)
return (1.0
+ black_scholes_call_value_fprime_by_strike(
underlying,
strike,
rate,
maturity,
vol) * math.exp(rate * maturity))
def black_scholes_pdf(underlying, strike, rate, maturity, vol):
"""black_scholes_pdf
calculates value of p.d.f. of black scholes model.
:param float underlying:
:param float strike:
:param float rate:
:param float maturity:
:param float vol: must be positive.
:return: value of p.d.f. of black scholes model.
:rtype: float.
"""
assert(vol > 0.0)
return (black_scholes_call_value_fhess_by_strike(
underlying,
strike,
rate,
maturity,
vol) * math.exp(rate * maturity))
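# ----------------------------------------------------------------------------
# Illustrative usage sketch: the inputs below are arbitrary example values (not
# taken from the library) and the calls assume the rest of the module
# (func_d1, func_d2, the strike-derivative helpers, mafipy.function) is
# importable as referenced above.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    underlying, strike, rate, maturity, vol, today = 100.0, 100.0, 0.01, 2.0, 0.3, 0.0
    print('volga:', black_scholes_call_volga(underlying, strike, rate, maturity, vol))
    print('theta:', black_scholes_call_theta(underlying, strike, rate, maturity, vol, today))
    print('rho  :', black_scholes_call_rho(underlying, strike, rate, maturity, vol, today))
    print('cdf  :', black_scholes_cdf(underlying, strike, rate, maturity, vol))
    print('pdf  :', black_scholes_pdf(underlying, strike, rate, maturity, vol))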
|
[
"math.log",
"math.exp",
"math.sqrt",
"numpy.isclose"
] |
[((6272, 6298), 'math.exp', 'math.exp', (['(-rate * maturity)'], {}), '(-rate * maturity)\n', (6280, 6298), False, 'import math\n'), ((9071, 9093), 'math.exp', 'math.exp', (['(-rate * time)'], {}), '(-rate * time)\n', (9079, 9093), False, 'import math\n'), ((10285, 10311), 'math.exp', 'math.exp', (['(-rate * maturity)'], {}), '(-rate * maturity)\n', (10293, 10311), False, 'import math\n'), ((11942, 11968), 'math.exp', 'math.exp', (['(-rate * maturity)'], {}), '(-rate * maturity)\n', (11950, 11968), False, 'import math\n'), ((13686, 13712), 'math.exp', 'math.exp', (['(-rate * maturity)'], {}), '(-rate * maturity)\n', (13694, 13712), False, 'import math\n'), ((18502, 18531), 'math.log', 'math.log', (['(underlying / strike)'], {}), '(underlying / strike)\n', (18510, 18531), False, 'import math\n'), ((608, 635), 'numpy.isclose', 'np.isclose', (['underlying', '(0.0)'], {}), '(underlying, 0.0)\n', (618, 635), True, 'import numpy as np\n'), ((1283, 1312), 'math.log', 'math.log', (['(underlying / strike)'], {}), '(underlying / strike)\n', (1291, 1312), False, 'import math\n'), ((1376, 1395), 'math.sqrt', 'math.sqrt', (['maturity'], {}), '(maturity)\n', (1385, 1395), False, 'import math\n'), ((2025, 2054), 'math.log', 'math.log', (['(underlying / strike)'], {}), '(underlying / strike)\n', (2033, 2054), False, 'import math\n'), ((2118, 2137), 'math.sqrt', 'math.sqrt', (['maturity'], {}), '(maturity)\n', (2127, 2137), False, 'import math\n'), ((7283, 7304), 'numpy.isclose', 'np.isclose', (['time', '(0.0)'], {}), '(time, 0.0)\n', (7293, 7304), True, 'import numpy as np\n'), ((7334, 7361), 'numpy.isclose', 'np.isclose', (['underlying', '(0.0)'], {}), '(underlying, 0.0)\n', (7344, 7361), True, 'import numpy as np\n'), ((8733, 8754), 'numpy.isclose', 'np.isclose', (['time', '(0.0)'], {}), '(time, 0.0)\n', (8743, 8754), True, 'import numpy as np\n'), ((11600, 11625), 'numpy.isclose', 'np.isclose', (['maturity', '(0.0)'], {}), '(maturity, 0.0)\n', (11610, 11625), True, 'import numpy as np\n'), ((13624, 13649), 'numpy.isclose', 'np.isclose', (['maturity', '(0.0)'], {}), '(maturity, 0.0)\n', (13634, 13649), True, 'import numpy as np\n'), ((16284, 16303), 'math.sqrt', 'math.sqrt', (['maturity'], {}), '(maturity)\n', (16293, 16303), False, 'import math\n'), ((23681, 23706), 'math.exp', 'math.exp', (['(rate * maturity)'], {}), '(rate * maturity)\n', (23689, 23706), False, 'import math\n'), ((8784, 8807), 'numpy.isclose', 'np.isclose', (['strike', '(0.0)'], {}), '(strike, 0.0)\n', (8794, 8807), True, 'import numpy as np\n'), ((17318, 17337), 'math.sqrt', 'math.sqrt', (['maturity'], {}), '(maturity)\n', (17327, 17337), False, 'import math\n'), ((23123, 23148), 'math.exp', 'math.exp', (['(rate * maturity)'], {}), '(rate * maturity)\n', (23131, 23148), False, 'import math\n'), ((2938, 2957), 'math.sqrt', 'math.sqrt', (['maturity'], {}), '(maturity)\n', (2947, 2957), False, 'import math\n'), ((4974, 5000), 'math.exp', 'math.exp', (['(-rate * maturity)'], {}), '(-rate * maturity)\n', (4982, 5000), False, 'import math\n'), ((7378, 7400), 'math.exp', 'math.exp', (['(-rate * time)'], {}), '(-rate * time)\n', (7386, 7400), False, 'import math\n'), ((7430, 7453), 'numpy.isclose', 'np.isclose', (['strike', '(0.0)'], {}), '(strike, 0.0)\n', (7440, 7453), True, 'import numpy as np\n'), ((8858, 8881), 'numpy.isclose', 'np.isclose', (['strike', '(0.0)'], {}), '(strike, 0.0)\n', (8868, 8881), True, 'import numpy as np\n'), ((19888, 19903), 'math.sqrt', 'math.sqrt', (['time'], {}), '(time)\n', (19897, 19903), False, 'import 
math\n'), ((19925, 19947), 'math.exp', 'math.exp', (['(-rate * time)'], {}), '(-rate * time)\n', (19933, 19947), False, 'import math\n'), ((21012, 21034), 'math.exp', 'math.exp', (['(-rate * time)'], {}), '(-rate * time)\n', (21020, 21034), False, 'import math\n'), ((3651, 3670), 'math.sqrt', 'math.sqrt', (['maturity'], {}), '(maturity)\n', (3660, 3670), False, 'import math\n'), ((7491, 7514), 'math.exp', 'math.exp', (['(-rate * today)'], {}), '(-rate * today)\n', (7499, 7514), False, 'import math\n'), ((7537, 7560), 'numpy.isclose', 'np.isclose', (['strike', '(0.0)'], {}), '(strike, 0.0)\n', (7547, 7560), True, 'import numpy as np\n'), ((8932, 8955), 'math.exp', 'math.exp', (['(-rate * today)'], {}), '(-rate * today)\n', (8940, 8955), False, 'import math\n'), ((7699, 7721), 'math.exp', 'math.exp', (['(-rate * time)'], {}), '(-rate * time)\n', (7707, 7721), False, 'import math\n')]
|
# # Chapter 4: Discrete Cosine / Wavelet Transform and Deconvolution
# Author: <NAME>
###########################################
# ## Problems
# ## 1. Template matching with Phase-Correlation in Frequency Domain
get_ipython().run_line_magic('matplotlib', 'inline')
import scipy.fftpack as fp
from skimage.io import imread
from skimage.color import rgb2gray, gray2rgb
from skimage.draw import rectangle_perimeter
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import LinearLocator, FormatStrFormatter
def plot_3d(X, Y, Z, cmap=plt.cm.seismic):
fig = plt.figure(figsize=(20,20))
ax = fig.gca(projection='3d')
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap=cmap, linewidth=5, antialiased=False)
#ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
#ax.set_zscale("log", nonposx='clip')
#ax.zaxis.set_scale('log')
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.set_xlabel('F1', size=30)
ax.set_ylabel('F2', size=30)
ax.set_zlabel('Freq Response', size=30)
#ax.set_zlim((-40,10))
# Add a color bar which maps values to colors.
fig.colorbar(surf) #, shrink=0.15, aspect=10)
#plt.title('Frequency Response of the Gaussian Kernel')
plt.show()
im = 255*rgb2gray(imread('images/Img_04_01.jpg'))
im_tm = 255*rgb2gray(imread('images/Img_04_02.png'))
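# Phase correlation: multiply the image spectrum by the conjugate of the
# template spectrum and normalize by the magnitude so that only phase
# information remains; the inverse FFT of this cross-power spectrum peaks at
# the (row, col) shift where the template best matches the image.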
F = fp.fftn(im)
F_tm = fp.fftn(im_tm, shape=im.shape)
F_cc = F * np.conj(F_tm)
c = (fp.ifftn(F_cc/np.abs(F_cc))).real
i, j = np.unravel_index(c.argmax(), c.shape)
print(i, j)
im2 = (gray2rgb(im)).astype(np.uint8)
rr, cc = rectangle_perimeter((i,j), end=(i + im_tm.shape[0], j + im_tm.shape[1]), shape=im.shape)
for x in range(-2,2):
for y in range(-2,2):
im2[rr + x, cc + y] = (255,0,0)
plt.figure(figsize=(2,3))
plt.gray()
plt.imshow(im_tm), plt.title('template', size=20), plt.axis('off')
plt.show()
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(12,7))
ax[0].imshow(im), ax[0].set_title('target', size=20)
ax[1].imshow(im2), ax[1].set_title('matched template', size=20)
for a in ax.ravel():
a.set_axis_off()
plt.tight_layout()
plt.show()
Y = np.arange(F_cc.shape[0])
X = np.arange(F_cc.shape[1])
X, Y = np.meshgrid(X, Y)
Z = c
plot_3d(X,Y,Z, cmap='YlOrRd') #PiYG
# ## 2. Image Compression with Discrete Cosine Transform (DCT)
from scipy.fftpack import dct, idct
def dct2(a):
return dct(dct(a, axis=0, norm='ortho'), axis=1, norm='ortho')
def idct2(a):
return idct(idct(a, axis=0, norm='ortho'), axis=1, norm='ortho')
im = rgb2gray(imread('images/Img_04_04.jpg'))
imF = dct2(im)
im1 = idct2(imF)
print(np.allclose(im, im1))
plt.figure(figsize=(10,5))
plt.gray()
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('original image', size=15)
plt.subplot(122), plt.imshow(im1), plt.axis('off'), plt.title('reconstructed image (DCT+IDCT)', size=15)
plt.tight_layout()
plt.show()
# ### JPEG Compression
im = rgb2gray(imread('images/Img_04_05.png'))
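# JPEG-style transform coding: apply the 2-D DCT independently to each
# non-overlapping 8x8 block, which concentrates most of a block's energy in a
# few low-frequency coefficients.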
dct_coeffs = np.zeros(im.shape)
for i in range(0, im.shape[0], 8):
for j in range(0, im.shape[1], 8):
dct_coeffs[i:(i+8),j:(j+8)] = dct2(im[i:(i+8),j:(j+8)])
index = 112
plt.figure(figsize=(10,6))
plt.gray()
plt.subplot(121), plt.imshow(im[index:index+8,index:index+8]), plt.title( "An 8x8 Image block", size=15)
plt.subplot(122), plt.imshow(dct_coeffs[index:index+8,index:index+8], vmax= np.max(dct_coeffs)*0.01, vmin = 0, extent=[0, np.pi, np.pi, 0])
plt.title("An 8x8 DCT block", size=15)
plt.show()
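# Lossy compression step: zero every DCT coefficient whose magnitude is below
# thresh (3%) of the largest coefficient over the whole image, then rebuild the
# image block-by-block with the inverse DCT further below.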
thresh = 0.03
dct_thresh = dct_coeffs * (abs(dct_coeffs) > (thresh*np.max(dct_coeffs)))
percent_nonzeros = np.sum( dct_thresh != 0.0 ) / (im.shape[0]*im.shape[1])
print ("Keeping only {}% of the DCT coefficients".format(percent_nonzeros*100.0))
plt.figure(figsize=(12,7))
plt.gray()
plt.subplot(121), plt.imshow(dct_coeffs,cmap='gray',vmax = np.max(dct_coeffs)*0.01,vmin = 0), plt.axis('off')
plt.title("8x8 DCTs of the image", size=15)
plt.subplot(122), plt.imshow(dct_thresh, vmax = np.max(dct_coeffs)*0.01, vmin = 0), plt.axis('off')
plt.title("Thresholded 8x8 DCTs of the image", size=15)
plt.tight_layout()
plt.show()
im_out = np.zeros(im.shape)
for i in range(0, im.shape[0], 8):
for j in range(0, im.shape[1], 8):
im_out[i:(i+8),j:(j+8)] = idct2( dct_thresh[i:(i+8),j:(j+8)] )
plt.figure(figsize=(15,7))
plt.gray()
plt.subplot(121), plt.imshow(im), plt.axis('off'), plt.title('original image', size=20)
plt.subplot(122), plt.imshow(im_out), plt.axis('off'), plt.title('DCT compressed image', size=20)
plt.tight_layout()
plt.show()
# ## 3. Image Denoising with Discrete Cosine Transform (DCT)
from skimage import img_as_float
from skimage.restoration import estimate_sigma
import cv2
im = img_as_float(imread('images/Img_04_06.jpg'))
sigma = 0.25
noisy = im + sigma * np.random.standard_normal(im.shape)
noisy = np.clip(noisy, 0, 1)
sigma_est = np.mean(estimate_sigma(noisy, multichannel=True))
print("estimated noise standard deviation = {}".format(sigma_est))
out = noisy.copy()
cv2.xphoto.dctDenoising(noisy, out, sigma_est)
out = np.clip(out, 0, 1)
plt.figure(figsize=(20,10))
plt.subplot(131), plt.imshow(im), plt.axis('off'), plt.title('original', size=20)
plt.subplot(132), plt.imshow(noisy), plt.axis('off'), plt.title('noisy', size=20)
plt.subplot(133), plt.imshow(out), plt.axis('off'), plt.title('denoised (DCT)', size=20)
plt.tight_layout()
plt.show()
# ## 4. Deconvolution for Image Deblurring
import SimpleITK as sitk
from skimage import restoration
from skimage.metrics import peak_signal_noise_ratio
# ### 4.1 Blur Detection
from scipy.signal import convolve2d
def convolve(im, kernel):
im1 = convolve2d(im, kernel, mode='same')
return im1 / np.max(im1)
def check_if_blurry(image, threshold):
# compute the Laplacian of the image and then return the focus
# measure, which is simply the variance of the Laplacian
var = cv2.Laplacian(image, cv2.CV_64F).var()
return 'Var Laplacian = {}\n{}'.format(round(var, 6), 'Blurry' if var < threshold else 'Not Blurry')
def plot_blurry(im, title):
plt.imshow(im), plt.axis('off'), plt.title(title, size=20)
def get_gaussian_edge_blur_kernel(sigma, sz=15):
# First create a 1-D Gaussian kernel
x = np.linspace(-10, 10, sz)
kernel_1d = np.exp(-x**2/sigma**2)
kernel_1d /= np.trapz(kernel_1d) # normalize the sum to 1.0
# create a 2-D Gaussian kernel from the 1-D kernel
kernel_2d = kernel_1d[:, np.newaxis] * kernel_1d[np.newaxis, :]
return kernel_2d
threshold = 0.01
imlist = []
im = rgb2gray(imread('images/Img_04_06.jpg'))
imlist.append((im, 'original image\n' + check_if_blurry(im, threshold)))
kernel = get_gaussian_edge_blur_kernel(3)
im1 = convolve(im, kernel)
imlist.append((im1, '(edge) blurred image\n' + check_if_blurry(im1, threshold)))
def get_motion_blur_kernel(ln, angle, sz=15):
kern = np.ones((1, ln), np.float32)
angle = -np.pi*angle/180
c, s = np.cos(angle), np.sin(angle)
A = np.float32([[c, -s, 0], [s, c, 0]])
sz2 = sz // 2
A[:,2] = (sz2, sz2) - np.dot(A[:,:2], ((ln-1)*0.5, 0))
kern = cv2.warpAffine(kern, A, (sz, sz), flags=cv2.INTER_CUBIC)
return kern
kernel = get_motion_blur_kernel(9, 45)
im1 = convolve(im, kernel)
imlist.append((im1, '(motion) blurred image\n' + check_if_blurry(im1, threshold)))
def get_out_of_focus_kernel(r, sz=15):
kern = np.zeros((sz, sz), np.uint8)
cv2.circle(kern, (sz, sz), r, 255, -1, cv2.LINE_AA, shift=1)
kern = np.float32(kern) / 255
return kern
kernel = get_out_of_focus_kernel(7)
im1 = convolve(im, kernel)
imlist.append((im1, '(out-of-focus) blurred image\n' + check_if_blurry(im1, threshold)))
plt.figure(figsize=(20,7))
plt.gray()
for i in range(len(imlist)):
im, title = imlist[i]
plt.subplot(1,4,i+1), plot_blurry(im, title)
plt.tight_layout()
plt.show()
# ### 4.2 Non-blind Deblurring with SimpleITK deconvolution filters
import SimpleITK as sitk
from scipy import signal
im = rgb2gray(imread('images/img_04_07.png'))
psf = get_out_of_focus_kernel(7, 9).astype(np.float64)
im_blur = signal.convolve2d(im, psf, 'same')
im_blur = im_blur / np.max(im_blur)
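# Non-blind deconvolution: the PSF that produced the blur is assumed known.
# Plain inverse filtering amplifies noise, the Wiener and Tikhonov variants add
# regularization, and Richardson-Lucy refines the estimate iteratively; all
# four SimpleITK filters below take the blurred image and the PSF.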
tkfilter = sitk.InverseDeconvolutionImageFilter()
tkfilter.SetNormalize(True)
im_res_IN = sitk.GetArrayFromImage(tkfilter.Execute (sitk.GetImageFromArray(im_blur), sitk.GetImageFromArray(psf)))
tkfilter = sitk.WienerDeconvolutionImageFilter()
tkfilter.SetNoiseVariance(0)
tkfilter.SetNormalize(True)
im_res_WN = sitk.GetArrayFromImage(tkfilter.Execute (sitk.GetImageFromArray(im_blur), sitk.GetImageFromArray(psf)))
tkfilter = sitk.TikhonovDeconvolutionImageFilter()
tkfilter.SetRegularizationConstant(0.008) #0.06)
tkfilter.SetNormalize(True)
im_res_TK = sitk.GetArrayFromImage(tkfilter.Execute (sitk.GetImageFromArray(im_blur), sitk.GetImageFromArray(psf)))
tkfilter = sitk.RichardsonLucyDeconvolutionImageFilter()
tkfilter.SetNumberOfIterations(100)
tkfilter.SetNormalize(True)
im_res_RL = sitk.GetArrayFromImage(tkfilter.Execute (sitk.GetImageFromArray(im_blur), sitk.GetImageFromArray(psf)))
plt.figure(figsize=(20, 60))
plt.subplots_adjust(0,0,1,1,0.07,0.07)
plt.gray()
plt.subplot(611), plt.imshow(im), plt.axis('off'), plt.title('Original Image', size=20)
plt.subplot(612), plt.imshow(im_blur), plt.axis('off'), plt.title('Blurred (out-of-focus) Image, PSNR={:.3f}'.format(peak_signal_noise_ratio(im, im_blur)), size=20)
plt.subplot(613), plt.imshow(im_res_IN, vmin=im_blur.min(), vmax=im_blur.max()), plt.axis('off')
plt.title('Deconvolution using SimpleITK (Inverse Deconv.), PSNR={:.3f}'.format(peak_signal_noise_ratio(im, im_res_IN)), size=20)
plt.subplot(614), plt.imshow(im_res_WN, vmin=im_blur.min(), vmax=im_blur.max()), plt.axis('off')
plt.title('Deconvolution using SimpleITK (Wiener Deconv.), PSNR={:.3f}'.format(peak_signal_noise_ratio(im, im_res_WN)), size=20)
plt.subplot(615), plt.imshow(im_res_RL, vmin=im_blur.min(), vmax=im_blur.max()), plt.axis('off')
plt.title('Deconvolution using SimpleITK (Richardson-Lucy), PSNR={:.3f}'.format(peak_signal_noise_ratio(im, im_res_RL)), size=20)
plt.subplot(616), plt.imshow(im_res_TK, vmin=im_blur.min(), vmax=im_blur.max()), plt.axis('off')
plt.title('Deconvolution using SimpleITK (Tikhonov Deconv.), PSNR={:.3f}'.format(peak_signal_noise_ratio(im, im_res_TK)), size=20)
plt.show()
# ### 4.3 Non-blind Deblurring with scikit-image restoration module functions
from skimage import restoration
im_res_RL = restoration.richardson_lucy(im_blur, psf, iterations=20)
plt.figure(figsize=(20, 15))
plt.subplots_adjust(0,0,1,1,0.07,0.07)
plt.gray()
plt.imshow(im_res_RL, vmin=im_blur.min(), vmax=im_blur.max()), plt.axis('off')
plt.title('Deconvolution using skimage (Richardson-Lucy), PSNR={:.3f}'.format(peak_signal_noise_ratio(im, im_res_RL)), size=20)
plt.show()
# ## 5. Image Denoising with Wavelets
import numpy as np
import pywt
from pywt._doc_utils import wavedec2_keys, draw_2d_wp_basis
from skimage.filters import threshold_otsu
from skimage import img_as_float
import matplotlib.pylab as plt
# ### 5.0 Wavelet Basics
x = pywt.data.ascent().astype(np.float32)
shape = x.shape
plt.rcParams.update({'font.size': 20})
max_lev = 3 # how many levels of decomposition to draw
label_levels = 3 # how many levels to explicitly label on the plots
fig, axes = plt.subplots(4, 2, figsize=[15, 35])
plt.subplots_adjust(0, 0, 1, 0.95, 0.05, 0.05)
for level in range(0, max_lev + 1):
if level == 0:
# show the original image before decomposition
axes[0, 0].set_axis_off()
axes[0, 1].imshow(x, cmap=plt.cm.gray)
axes[0, 1].set_title('Image')
axes[0, 1].set_axis_off()
continue
# plot subband boundaries of a standard DWT basis
draw_2d_wp_basis(shape, wavedec2_keys(level), ax=axes[level, 0],
label_levels=label_levels)
axes[level, 0].set_title('{} level\ndecomposition'.format(level))
# compute the 2D DWT
c = pywt.wavedec2(x, 'db2', mode='periodization', level=level)
# normalize each coefficient array independently for better visibility
c[0] /= np.abs(c[0]).max()
for detail_level in range(level):
c[detail_level + 1] = [d/np.abs(d).max() > threshold_otsu(d/np.abs(d).max()) for d in c[detail_level + 1]]
# show the normalized coefficients
arr, slices = pywt.coeffs_to_array(c)
axes[level, 1].imshow(arr, cmap=plt.cm.gray)
axes[level, 1].set_title('Coefficients\n({} level)'.format(level))
axes[level, 1].set_axis_off()
plt.tight_layout()
plt.show()
# ### 5.1 Image Denoising using Wavelets with pywt
image = img_as_float(imread('images/Img_04_04.jpg'))
noise_sigma = 0.25 #16.0
image += np.random.normal(0, noise_sigma, size=image.shape)
wavelet = pywt.Wavelet('haar')
levels = int(np.floor(np.log2(image.shape[0])))
print(levels)
wavelet_coeffs = pywt.wavedec2(image, wavelet, level=levels)
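# denoise() below performs wavelet shrinkage: decompose a channel with a
# multilevel 2-D DWT, soft-threshold the flattened coefficient array (shrink
# magnitudes by noise_sigma, zeroing small coefficients), then invert the
# transform.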
def denoise(image, wavelet, noise_sigma):
levels = int(np.floor(np.log2(image.shape[0])))
wc = pywt.wavedec2(image, wavelet, level=levels)
arr, coeff_slices = pywt.coeffs_to_array(wc)
arr = pywt.threshold(arr, noise_sigma, mode='soft')
nwc = pywt.array_to_coeffs(arr, coeff_slices, output_format='wavedec2')
return pywt.waverec2(nwc, wavelet)
print(pywt.wavelist(kind='discrete'))
wlts = ['bior1.5', 'coif5', 'db6', 'dmey', 'haar', 'rbio2.8', 'sym15'] # pywt.wavelist(kind='discrete')
Denoised={}
for wlt in wlts:
out = image.copy()
for i in range(3):
out[...,i] = denoise(image[...,i], wavelet=wlt, noise_sigma=3/2*noise_sigma)
Denoised[wlt] = np.clip(out, 0, 1)
print(len(Denoised))
plt.figure(figsize=(15,8))
plt.subplots_adjust(0,0,1,0.9,0.05,0.07)
plt.subplot(241), plt.imshow(np.clip(image,0,1)), plt.axis('off'), plt.title('original image', size=15)
i = 2
for wlt in Denoised:
plt.subplot(2,4,i), plt.imshow(Denoised[wlt]), plt.axis('off'), plt.title(wlt, size=15)
i += 1
plt.suptitle('Image Denoising with Wavelets', size=20)
plt.show()
# ### 5.2 Image Denoising with Wavelets using scikit-image restoration
from skimage.restoration import (denoise_wavelet, estimate_sigma)
from skimage import data, img_as_float
from skimage.util import random_noise
from skimage.metrics import peak_signal_noise_ratio
original = img_as_float(imread('images/Img_04_08.png'))[...,:3]
sigma = 0.12
noisy = random_noise(original, var=sigma**2)
sigma_est = estimate_sigma(noisy, multichannel=True, average_sigmas=True)
print(f"Estimated Gaussian noise standard deviation = {sigma_est}")
im_bayes = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True,
method='BayesShrink', mode='soft',
rescale_sigma=True)
im_visushrink = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True,
method='VisuShrink', mode='soft',
sigma=sigma_est, rescale_sigma=True)
im_visushrink2 = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True,
method='VisuShrink', mode='soft',
sigma=sigma_est/2, rescale_sigma=True)
im_visushrink4 = denoise_wavelet(noisy, multichannel=True, convert2ycbcr=True,
method='VisuShrink', mode='soft',
sigma=sigma_est/4, rescale_sigma=True)
psnr_noisy = peak_signal_noise_ratio(original, noisy)
psnr_bayes = peak_signal_noise_ratio(original, im_bayes)
psnr_visushrink = peak_signal_noise_ratio(original, im_visushrink)
psnr_visushrink2 = peak_signal_noise_ratio(original, im_visushrink2)
psnr_visushrink4 = peak_signal_noise_ratio(original, im_visushrink4)
plt.figure(figsize=(20,20))
plt.subplots_adjust(0,0,1,1,0.05,0.05)
plt.subplot(231), plt.imshow(original), plt.axis('off'), plt.title('Original', size=20)
plt.subplot(232), plt.imshow(noisy), plt.axis('off'), plt.title('Noisy\nPSNR={:0.4g}'.format(psnr_noisy), size=20)
plt.subplot(233), plt.imshow(im_bayes/im_bayes.max()), plt.axis('off'), plt.title('Wavelet denoising\n(BayesShrink)\nPSNR={:0.4f}'.format(psnr_bayes), size=20)
plt.subplot(234), plt.imshow(im_visushrink/im_visushrink.max()), plt.axis('off')
plt.title('Wavelet denoising\n' + r'(VisuShrink, $\sigma=\sigma_{est}$)' + '\nPSNR={:0.4g}'.format(psnr_visushrink), size=20)
plt.subplot(235), plt.imshow(im_visushrink2/im_visushrink2.max()), plt.axis('off')
plt.title('Wavelet denoising\n' + r'(VisuShrink, $\sigma=\sigma_{est}/2$)' + '\nPSNR={:0.4g}'.format(psnr_visushrink2), size=20)
plt.subplot(236), plt.imshow(im_visushrink4/im_visushrink4.max()), plt.axis('off')
plt.title('Wavelet denoising\n' + r'(VisuShrink, $\sigma=\sigma_{est}/4$)' + '\nPSNR={:0.4g}'.format(psnr_visushrink4), size=20)
plt.show()
# ## 6. Image Fusion with Wavelets
import pywt
import cv2
import numpy as np
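# Wavelet-domain fusion: both inputs are decomposed with wavedec2, the
# corresponding coefficients are merged by fuseCoeff() (mean/min/max), and the
# fused coefficients are inverted with waverec2 to obtain the fused image.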
def fuseCoeff(cooef1, cooef2, method):
if (method == 'mean'):
cooef = (cooef1 + cooef2) / 2
elif (method == 'min'):
cooef = np.minimum(cooef1,cooef2)
elif (method == 'max'):
cooef = np.maximum(cooef1,cooef2)
else:
cooef = []
return cooef
fusion_method = 'mean'
im1 = cv2.imread('images/Img_04_10.jpg',0)
im2 = cv2.imread('images/Img_04_11.jpg',0)
im2 = cv2.resize(im2,(im1.shape[1], im1.shape[0])) # I do this just because I used two random images
#print(im1.shape, im2.shape)
# ### The Fusion algorithm
wavelet = 'sym2' #'bior1.1' #'haar' #'db1'
cooef1 = pywt.wavedec2(im1[:,:], wavelet)
cooef2 = pywt.wavedec2(im2[:,:], wavelet)
#print(cooef1[0].shape, len(cooef1))
fused_cooef = []
for i in range(len(cooef1)):
    # The first values in each decomposition are the approximation values of the top level
if(i == 0):
fused_cooef.append(fuseCoeff(cooef1[0], cooef2[0], fusion_method))
else:
        # For the rest of the levels we have tuples with 3 coefficients
c1 = fuseCoeff(cooef1[i][0], cooef2[i][0],fusion_method)
c2 = fuseCoeff(cooef1[i][1], cooef2[i][1], fusion_method)
c3 = fuseCoeff(cooef1[i][2], cooef2[i][2], fusion_method)
fused_cooef.append((c1,c2,c3))
#print(len(fused_cooef))
fused_image = pywt.waverec2(fused_cooef, wavelet)
#fused_image = np.multiply(np.divide(fused_image - np.min(fused_image),(np.max(fused_image) - np.min(fused_image))),255)
fused_image = 255*fused_image / np.max(fused_image)
fused_image = fused_image.astype(np.uint8)
#print(fused_image.shape)
plt.figure(figsize=(20,20))
plt.subplot(221), plt.imshow(im1), plt.axis('off'), plt.title('Image1', size=20) #cv2.cvtColor(fused_image,cv2.COLOR_BGR2RGB))
plt.subplot(222), plt.imshow(im2), plt.axis('off'), plt.title('Image2', size=20) #cv2.cvtColor(fused_image,cv2.COLOR_BGR2RGB))
#print(np.max(im1), np.max(im2))
plt.subplot(223), plt.imshow(im1//2 + im2// 2), plt.axis('off'), plt.title('Average Image', size=20) #cv2.cvtColor(fused_image,cv2.COLOR_BGR2RGB))
plt.subplot(224), plt.imshow(fused_image), plt.axis('off'), plt.title('Fused Image with Wavelets', size=20) #cv2.cvtColor(fused_image,cv2.COLOR_BGR2RGB))
plt.tight_layout()
plt.show()
# ## 7. Secure Spread Spectrum Digital Watermarking with DCT
from scipy.fftpack import dct, idct
from skimage.io import imread
from skimage.color import rgb2gray
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def dct2(a):
return dct(dct(a.T, norm='ortho').T, norm='ortho')
def idct2(a):
return idct(idct(a.T, norm='ortho').T, norm='ortho')
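# Spread-spectrum watermarking (in the spirit of Cox et al.): embed() scales
# the k largest DCT coefficients (skipping the very largest, essentially the
# DC term) as v_i' = v_i * (1 + alpha * x_i) with a Gaussian watermark x;
# detect() re-estimates the watermark from a test image and returns its
# correlation with the original x, normalized by the recovered watermark's norm.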
def embed(im, k, alpha):
m, n = im.shape
d = dct2(im)
indices = np.dstack(np.unravel_index(np.argsort(d.ravel()), (m, n)))[0]
indices = indices[-(k+1):-1]
cw = d[indices[:,0], indices[:,1]]
w = np.random.randn(k)
#w = w / np.linalg.norm(w)
ci = cw * (1 + alpha * w)
d[indices[:,0], indices[:,1]] = ci
im1 = idct2(d)
return im1, indices, cw, w
def detect(test, indices, cw, w, alpha):
d = dct2(test)
testc = d[indices[:,0], indices[:,1]]
what = (testc/cw - 1) / alpha
gamma = what@w/(np.linalg.norm(what)) #*np.linalg.norm(w))
return gamma
k = 1000
alpha = 0.1
im = rgb2gray(imread('images/Img_04_04.jpg'))
im = (255*im).astype(np.uint8)
im1, indices, cw, w = embed(im, k=k, alpha=alpha)
print('mean difference={}, max difference={}'.format(np.mean(np.abs(im1-im)), np.max(np.abs(im1-im))))
similarity = detect(im1, indices, cw, w, alpha)
print('detected similarity={}'.format(similarity))
fig = plt.figure(figsize=(20,10))
plt.gray()
plt.subplots_adjust(0,0,1,0.925,0.05,0.05)
plt.subplot(131), plt.imshow(im), plt.axis('off'), plt.title('original image {}'.format(im.shape), size=20)
plt.subplot(132), plt.imshow(im1), plt.axis('off'), plt.title(r'watermarked image: $v_i^{\prime}=v_i.(1+\alpha x_i)$', size=20)
plt.subplot(133)
last_axes = plt.gca()
img = plt.imshow((np.abs(im1-im)).astype(np.uint8))
divider = make_axes_locatable(img.axes)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(img, cax=cax)
plt.sca(last_axes)
plt.axis('off'), plt.title('difference image', size=20)
plt.show()
|
[
"pywt.coeffs_to_array",
"numpy.sum",
"pywt.threshold",
"numpy.abs",
"matplotlib.pylab.imshow",
"numpy.maximum",
"numpy.allclose",
"scipy.fftpack.dct",
"numpy.ones",
"numpy.clip",
"matplotlib.pylab.axis",
"matplotlib.pylab.gca",
"cv2.warpAffine",
"numpy.sin",
"numpy.arange",
"matplotlib.pylab.suptitle",
"numpy.random.normal",
"SimpleITK.RichardsonLucyDeconvolutionImageFilter",
"numpy.exp",
"numpy.linalg.norm",
"matplotlib.pylab.title",
"matplotlib.pylab.subplots",
"pywt.array_to_coeffs",
"SimpleITK.TikhonovDeconvolutionImageFilter",
"matplotlib.pylab.figure",
"matplotlib.pylab.show",
"numpy.meshgrid",
"scipy.signal.convolve2d",
"numpy.random.randn",
"SimpleITK.WienerDeconvolutionImageFilter",
"matplotlib.pylab.gray",
"matplotlib.pylab.subplots_adjust",
"scipy.fftpack.fftn",
"numpy.max",
"numpy.linspace",
"matplotlib.ticker.FormatStrFormatter",
"scipy.fftpack.idct",
"pywt._doc_utils.wavedec2_keys",
"skimage.restoration.denoise_wavelet",
"cv2.resize",
"skimage.io.imread",
"cv2.Laplacian",
"matplotlib.pylab.sca",
"numpy.conj",
"numpy.trapz",
"cv2.circle",
"numpy.minimum",
"SimpleITK.InverseDeconvolutionImageFilter",
"numpy.log2",
"pywt.data.ascent",
"matplotlib.pylab.rcParams.update",
"numpy.random.standard_normal",
"pywt.wavedec2",
"numpy.cos",
"matplotlib.pylab.tight_layout",
"numpy.dot",
"skimage.color.gray2rgb",
"skimage.restoration.richardson_lucy",
"pywt.Wavelet",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"skimage.draw.rectangle_perimeter",
"matplotlib.pylab.subplot",
"numpy.float32",
"numpy.zeros",
"cv2.xphoto.dctDenoising",
"skimage.util.random_noise",
"cv2.imread",
"matplotlib.ticker.LinearLocator",
"pywt.wavelist",
"SimpleITK.GetImageFromArray",
"pywt.waverec2",
"skimage.metrics.peak_signal_noise_ratio",
"skimage.restoration.estimate_sigma"
] |
[((1542, 1553), 'scipy.fftpack.fftn', 'fp.fftn', (['im'], {}), '(im)\n', (1549, 1553), True, 'import scipy.fftpack as fp\n'), ((1561, 1591), 'scipy.fftpack.fftn', 'fp.fftn', (['im_tm'], {'shape': 'im.shape'}), '(im_tm, shape=im.shape)\n', (1568, 1591), True, 'import scipy.fftpack as fp\n'), ((1762, 1855), 'skimage.draw.rectangle_perimeter', 'rectangle_perimeter', (['(i, j)'], {'end': '(i + im_tm.shape[0], j + im_tm.shape[1])', 'shape': 'im.shape'}), '((i, j), end=(i + im_tm.shape[0], j + im_tm.shape[1]),\n shape=im.shape)\n', (1781, 1855), False, 'from skimage.draw import rectangle_perimeter\n'), ((1940, 1966), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(2, 3)'}), '(figsize=(2, 3))\n', (1950, 1966), True, 'import matplotlib.pylab as plt\n'), ((1966, 1976), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (1974, 1976), True, 'import matplotlib.pylab as plt\n'), ((2044, 2054), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2052, 2054), True, 'import matplotlib.pylab as plt\n'), ((2065, 2113), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(True)', 'figsize': '(12, 7)'}), '(1, 2, sharey=True, figsize=(12, 7))\n', (2077, 2113), True, 'import matplotlib.pylab as plt\n'), ((2272, 2290), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2288, 2290), True, 'import matplotlib.pylab as plt\n'), ((2291, 2301), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2299, 2301), True, 'import matplotlib.pylab as plt\n'), ((2306, 2330), 'numpy.arange', 'np.arange', (['F_cc.shape[0]'], {}), '(F_cc.shape[0])\n', (2315, 2330), True, 'import numpy as np\n'), ((2335, 2359), 'numpy.arange', 'np.arange', (['F_cc.shape[1]'], {}), '(F_cc.shape[1])\n', (2344, 2359), True, 'import numpy as np\n'), ((2367, 2384), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (2378, 2384), True, 'import numpy as np\n'), ((2807, 2834), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (2817, 2834), True, 'import matplotlib.pylab as plt\n'), ((2834, 2844), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (2842, 2844), True, 'import matplotlib.pylab as plt\n'), ((3038, 3056), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3054, 3056), True, 'import matplotlib.pylab as plt\n'), ((3057, 3067), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (3065, 3067), True, 'import matplotlib.pylab as plt\n'), ((3153, 3171), 'numpy.zeros', 'np.zeros', (['im.shape'], {}), '(im.shape)\n', (3161, 3171), True, 'import numpy as np\n'), ((3324, 3351), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (3334, 3351), True, 'import matplotlib.pylab as plt\n'), ((3351, 3361), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (3359, 3361), True, 'import matplotlib.pylab as plt\n'), ((3607, 3645), 'matplotlib.pylab.title', 'plt.title', (['"""An 8x8 DCT block"""'], {'size': '(15)'}), "('An 8x8 DCT block', size=15)\n", (3616, 3645), True, 'import matplotlib.pylab as plt\n'), ((3646, 3656), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (3654, 3656), True, 'import matplotlib.pylab as plt\n'), ((3904, 3931), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (3914, 3931), True, 'import matplotlib.pylab as plt\n'), ((3931, 3941), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (3939, 3941), True, 'import matplotlib.pylab as plt\n'), ((4052, 4095), 'matplotlib.pylab.title', 'plt.title', 
(['"""8x8 DCTs of the image"""'], {'size': '(15)'}), "('8x8 DCTs of the image', size=15)\n", (4061, 4095), True, 'import matplotlib.pylab as plt\n'), ((4196, 4251), 'matplotlib.pylab.title', 'plt.title', (['"""Thresholded 8x8 DCTs of the image"""'], {'size': '(15)'}), "('Thresholded 8x8 DCTs of the image', size=15)\n", (4205, 4251), True, 'import matplotlib.pylab as plt\n'), ((4252, 4270), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4268, 4270), True, 'import matplotlib.pylab as plt\n'), ((4271, 4281), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (4279, 4281), True, 'import matplotlib.pylab as plt\n'), ((4292, 4310), 'numpy.zeros', 'np.zeros', (['im.shape'], {}), '(im.shape)\n', (4300, 4310), True, 'import numpy as np\n'), ((4457, 4484), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 7)'}), '(figsize=(15, 7))\n', (4467, 4484), True, 'import matplotlib.pylab as plt\n'), ((4484, 4494), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (4492, 4494), True, 'import matplotlib.pylab as plt\n'), ((4681, 4699), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4697, 4699), True, 'import matplotlib.pylab as plt\n'), ((4700, 4710), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (4708, 4710), True, 'import matplotlib.pylab as plt\n'), ((4994, 5014), 'numpy.clip', 'np.clip', (['noisy', '(0)', '(1)'], {}), '(noisy, 0, 1)\n', (5001, 5014), True, 'import numpy as np\n'), ((5165, 5211), 'cv2.xphoto.dctDenoising', 'cv2.xphoto.dctDenoising', (['noisy', 'out', 'sigma_est'], {}), '(noisy, out, sigma_est)\n', (5188, 5211), False, 'import cv2\n'), ((5218, 5236), 'numpy.clip', 'np.clip', (['out', '(0)', '(1)'], {}), '(out, 0, 1)\n', (5225, 5236), True, 'import numpy as np\n'), ((5238, 5266), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (5248, 5266), True, 'import matplotlib.pylab as plt\n'), ((5519, 5537), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5535, 5537), True, 'import matplotlib.pylab as plt\n'), ((5538, 5548), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (5546, 5548), True, 'import matplotlib.pylab as plt\n'), ((7814, 7841), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 7)'}), '(figsize=(20, 7))\n', (7824, 7841), True, 'import matplotlib.pylab as plt\n'), ((7841, 7851), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (7849, 7851), True, 'import matplotlib.pylab as plt\n'), ((7956, 7974), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7972, 7974), True, 'import matplotlib.pylab as plt\n'), ((7975, 7985), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7983, 7985), True, 'import matplotlib.pylab as plt\n'), ((8216, 8250), 'scipy.signal.convolve2d', 'signal.convolve2d', (['im', 'psf', '"""same"""'], {}), "(im, psf, 'same')\n", (8233, 8250), False, 'from scipy import signal\n'), ((8299, 8337), 'SimpleITK.InverseDeconvolutionImageFilter', 'sitk.InverseDeconvolutionImageFilter', ([], {}), '()\n', (8335, 8337), True, 'import SimpleITK as sitk\n'), ((8494, 8531), 'SimpleITK.WienerDeconvolutionImageFilter', 'sitk.WienerDeconvolutionImageFilter', ([], {}), '()\n', (8529, 8531), True, 'import SimpleITK as sitk\n'), ((8717, 8756), 'SimpleITK.TikhonovDeconvolutionImageFilter', 'sitk.TikhonovDeconvolutionImageFilter', ([], {}), '()\n', (8754, 8756), True, 'import SimpleITK as sitk\n'), ((8963, 9008), 'SimpleITK.RichardsonLucyDeconvolutionImageFilter', 
'sitk.RichardsonLucyDeconvolutionImageFilter', ([], {}), '()\n', (9006, 9008), True, 'import SimpleITK as sitk\n'), ((9191, 9219), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 60)'}), '(figsize=(20, 60))\n', (9201, 9219), True, 'import matplotlib.pylab as plt\n'), ((9220, 9263), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(1)', '(0.07)', '(0.07)'], {}), '(0, 0, 1, 1, 0.07, 0.07)\n', (9239, 9263), True, 'import matplotlib.pylab as plt\n'), ((9259, 9269), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (9267, 9269), True, 'import matplotlib.pylab as plt\n'), ((10435, 10445), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (10443, 10445), True, 'import matplotlib.pylab as plt\n'), ((10571, 10627), 'skimage.restoration.richardson_lucy', 'restoration.richardson_lucy', (['im_blur', 'psf'], {'iterations': '(20)'}), '(im_blur, psf, iterations=20)\n', (10598, 10627), False, 'from skimage import restoration\n'), ((10629, 10657), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (10639, 10657), True, 'import matplotlib.pylab as plt\n'), ((10658, 10701), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(1)', '(0.07)', '(0.07)'], {}), '(0, 0, 1, 1, 0.07, 0.07)\n', (10677, 10701), True, 'import matplotlib.pylab as plt\n'), ((10697, 10707), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (10705, 10707), True, 'import matplotlib.pylab as plt\n'), ((10915, 10925), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (10923, 10925), True, 'import matplotlib.pylab as plt\n'), ((11251, 11289), 'matplotlib.pylab.rcParams.update', 'plt.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (11270, 11289), True, 'import matplotlib.pylab as plt\n'), ((11434, 11470), 'matplotlib.pylab.subplots', 'plt.subplots', (['(4)', '(2)'], {'figsize': '[15, 35]'}), '(4, 2, figsize=[15, 35])\n', (11446, 11470), True, 'import matplotlib.pylab as plt\n'), ((11471, 11517), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(0.95)', '(0.05)', '(0.05)'], {}), '(0, 0, 1, 0.95, 0.05, 0.05)\n', (11490, 11517), True, 'import matplotlib.pylab as plt\n'), ((12628, 12646), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12644, 12646), True, 'import matplotlib.pylab as plt\n'), ((12647, 12657), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (12655, 12657), True, 'import matplotlib.pylab as plt\n'), ((12799, 12849), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise_sigma'], {'size': 'image.shape'}), '(0, noise_sigma, size=image.shape)\n', (12815, 12849), True, 'import numpy as np\n'), ((12861, 12881), 'pywt.Wavelet', 'pywt.Wavelet', (['"""haar"""'], {}), "('haar')\n", (12873, 12881), False, 'import pywt\n'), ((12962, 13005), 'pywt.wavedec2', 'pywt.wavedec2', (['image', 'wavelet'], {'level': 'levels'}), '(image, wavelet, level=levels)\n', (12975, 13005), False, 'import pywt\n'), ((13738, 13765), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (13748, 13765), True, 'import matplotlib.pylab as plt\n'), ((13765, 13810), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(0.9)', '(0.05)', '(0.07)'], {}), '(0, 0, 1, 0.9, 0.05, 0.07)\n', (13784, 13810), True, 'import matplotlib.pylab as plt\n'), ((14040, 14094), 'matplotlib.pylab.suptitle', 'plt.suptitle', (['"""Image Denoising with Wavelets"""'], {'size': '(20)'}), "('Image 
Denoising with Wavelets', size=20)\n", (14052, 14094), True, 'import matplotlib.pylab as plt\n'), ((14095, 14105), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (14103, 14105), True, 'import matplotlib.pylab as plt\n'), ((14460, 14498), 'skimage.util.random_noise', 'random_noise', (['original'], {'var': '(sigma ** 2)'}), '(original, var=sigma ** 2)\n', (14472, 14498), False, 'from skimage.util import random_noise\n'), ((14510, 14571), 'skimage.restoration.estimate_sigma', 'estimate_sigma', (['noisy'], {'multichannel': '(True)', 'average_sigmas': '(True)'}), '(noisy, multichannel=True, average_sigmas=True)\n', (14524, 14571), False, 'from skimage.restoration import denoise_wavelet, estimate_sigma\n'), ((14652, 14773), 'skimage.restoration.denoise_wavelet', 'denoise_wavelet', (['noisy'], {'multichannel': '(True)', 'convert2ycbcr': '(True)', 'method': '"""BayesShrink"""', 'mode': '"""soft"""', 'rescale_sigma': '(True)'}), "(noisy, multichannel=True, convert2ycbcr=True, method=\n 'BayesShrink', mode='soft', rescale_sigma=True)\n", (14667, 14773), False, 'from skimage.restoration import denoise_wavelet, estimate_sigma\n'), ((14839, 14976), 'skimage.restoration.denoise_wavelet', 'denoise_wavelet', (['noisy'], {'multichannel': '(True)', 'convert2ycbcr': '(True)', 'method': '"""VisuShrink"""', 'mode': '"""soft"""', 'sigma': 'sigma_est', 'rescale_sigma': '(True)'}), "(noisy, multichannel=True, convert2ycbcr=True, method=\n 'VisuShrink', mode='soft', sigma=sigma_est, rescale_sigma=True)\n", (14854, 14976), False, 'from skimage.restoration import denoise_wavelet, estimate_sigma\n'), ((15054, 15195), 'skimage.restoration.denoise_wavelet', 'denoise_wavelet', (['noisy'], {'multichannel': '(True)', 'convert2ycbcr': '(True)', 'method': '"""VisuShrink"""', 'mode': '"""soft"""', 'sigma': '(sigma_est / 2)', 'rescale_sigma': '(True)'}), "(noisy, multichannel=True, convert2ycbcr=True, method=\n 'VisuShrink', mode='soft', sigma=sigma_est / 2, rescale_sigma=True)\n", (15069, 15195), False, 'from skimage.restoration import denoise_wavelet, estimate_sigma\n'), ((15272, 15413), 'skimage.restoration.denoise_wavelet', 'denoise_wavelet', (['noisy'], {'multichannel': '(True)', 'convert2ycbcr': '(True)', 'method': '"""VisuShrink"""', 'mode': '"""soft"""', 'sigma': '(sigma_est / 4)', 'rescale_sigma': '(True)'}), "(noisy, multichannel=True, convert2ycbcr=True, method=\n 'VisuShrink', mode='soft', sigma=sigma_est / 4, rescale_sigma=True)\n", (15287, 15413), False, 'from skimage.restoration import denoise_wavelet, estimate_sigma\n'), ((15487, 15527), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['original', 'noisy'], {}), '(original, noisy)\n', (15510, 15527), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((15541, 15584), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['original', 'im_bayes'], {}), '(original, im_bayes)\n', (15564, 15584), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((15603, 15651), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['original', 'im_visushrink'], {}), '(original, im_visushrink)\n', (15626, 15651), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((15671, 15720), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['original', 'im_visushrink2'], {}), '(original, im_visushrink2)\n', (15694, 15720), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((15740, 15789), 'skimage.metrics.peak_signal_noise_ratio', 
'peak_signal_noise_ratio', (['original', 'im_visushrink4'], {}), '(original, im_visushrink4)\n', (15763, 15789), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((15791, 15819), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (15801, 15819), True, 'import matplotlib.pylab as plt\n'), ((15819, 15862), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(1)', '(0.05)', '(0.05)'], {}), '(0, 0, 1, 1, 0.05, 0.05)\n', (15838, 15862), True, 'import matplotlib.pylab as plt\n'), ((16852, 16862), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (16860, 16862), True, 'import matplotlib.pylab as plt\n'), ((17267, 17304), 'cv2.imread', 'cv2.imread', (['"""images/Img_04_10.jpg"""', '(0)'], {}), "('images/Img_04_10.jpg', 0)\n", (17277, 17304), False, 'import cv2\n'), ((17310, 17347), 'cv2.imread', 'cv2.imread', (['"""images/Img_04_11.jpg"""', '(0)'], {}), "('images/Img_04_11.jpg', 0)\n", (17320, 17347), False, 'import cv2\n'), ((17354, 17399), 'cv2.resize', 'cv2.resize', (['im2', '(im1.shape[1], im1.shape[0])'], {}), '(im2, (im1.shape[1], im1.shape[0]))\n', (17364, 17399), False, 'import cv2\n'), ((17560, 17593), 'pywt.wavedec2', 'pywt.wavedec2', (['im1[:, :]', 'wavelet'], {}), '(im1[:, :], wavelet)\n', (17573, 17593), False, 'import pywt\n'), ((17602, 17635), 'pywt.wavedec2', 'pywt.wavedec2', (['im2[:, :]', 'wavelet'], {}), '(im2[:, :], wavelet)\n', (17615, 17635), False, 'import pywt\n'), ((18256, 18291), 'pywt.waverec2', 'pywt.waverec2', (['fused_cooef', 'wavelet'], {}), '(fused_cooef, wavelet)\n', (18269, 18291), False, 'import pywt\n'), ((18536, 18564), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (18546, 18564), True, 'import matplotlib.pylab as plt\n'), ((19153, 19171), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19169, 19171), True, 'import matplotlib.pylab as plt\n'), ((19172, 19182), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (19180, 19182), True, 'import matplotlib.pylab as plt\n'), ((20561, 20589), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (20571, 20589), True, 'import matplotlib.pylab as plt\n'), ((20589, 20599), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (20597, 20599), True, 'import matplotlib.pylab as plt\n'), ((20600, 20647), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(0.925)', '(0.05)', '(0.05)'], {}), '(0, 0, 1, 0.925, 0.05, 0.05)\n', (20619, 20647), True, 'import matplotlib.pylab as plt\n'), ((20879, 20895), 'matplotlib.pylab.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (20890, 20895), True, 'import matplotlib.pylab as plt\n'), ((20908, 20917), 'matplotlib.pylab.gca', 'plt.gca', ([], {}), '()\n', (20915, 20917), True, 'import matplotlib.pylab as plt\n'), ((20980, 21009), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['img.axes'], {}), '(img.axes)\n', (20999, 21009), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((21093, 21111), 'matplotlib.pylab.sca', 'plt.sca', (['last_axes'], {}), '(last_axes)\n', (21100, 21111), True, 'import matplotlib.pylab as plt\n'), ((21168, 21178), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (21176, 21178), True, 'import matplotlib.pylab as plt\n'), ((710, 738), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (720, 738), True, 'import matplotlib.pylab 
as plt\n'), ((1418, 1428), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1426, 1428), True, 'import matplotlib.pylab as plt\n'), ((1604, 1617), 'numpy.conj', 'np.conj', (['F_tm'], {}), '(F_tm)\n', (1611, 1617), True, 'import numpy as np\n'), ((1977, 1994), 'matplotlib.pylab.imshow', 'plt.imshow', (['im_tm'], {}), '(im_tm)\n', (1987, 1994), True, 'import matplotlib.pylab as plt\n'), ((1996, 2026), 'matplotlib.pylab.title', 'plt.title', (['"""template"""'], {'size': '(20)'}), "('template', size=20)\n", (2005, 2026), True, 'import matplotlib.pylab as plt\n'), ((2028, 2043), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2036, 2043), True, 'import matplotlib.pylab as plt\n'), ((2712, 2742), 'skimage.io.imread', 'imread', (['"""images/Img_04_04.jpg"""'], {}), "('images/Img_04_04.jpg')\n", (2718, 2742), False, 'from skimage.io import imread\n'), ((2784, 2804), 'numpy.allclose', 'np.allclose', (['im', 'im1'], {}), '(im, im1)\n', (2795, 2804), True, 'import numpy as np\n'), ((2845, 2861), 'matplotlib.pylab.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2856, 2861), True, 'import matplotlib.pylab as plt\n'), ((2863, 2877), 'matplotlib.pylab.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (2873, 2877), True, 'import matplotlib.pylab as plt\n'), ((2879, 2894), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2887, 2894), True, 'import matplotlib.pylab as plt\n'), ((2896, 2932), 'matplotlib.pylab.title', 'plt.title', (['"""original image"""'], {'size': '(15)'}), "('original image', size=15)\n", (2905, 2932), True, 'import matplotlib.pylab as plt\n'), ((2933, 2949), 'matplotlib.pylab.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2944, 2949), True, 'import matplotlib.pylab as plt\n'), ((2951, 2966), 'matplotlib.pylab.imshow', 'plt.imshow', (['im1'], {}), '(im1)\n', (2961, 2966), True, 'import matplotlib.pylab as plt\n'), ((2968, 2983), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2976, 2983), True, 'import matplotlib.pylab as plt\n'), ((2985, 3037), 'matplotlib.pylab.title', 'plt.title', (['"""reconstructed image (DCT+IDCT)"""'], {'size': '(15)'}), "('reconstructed image (DCT+IDCT)', size=15)\n", (2994, 3037), True, 'import matplotlib.pylab as plt\n'), ((3107, 3137), 'skimage.io.imread', 'imread', (['"""images/Img_04_05.png"""'], {}), "('images/Img_04_05.png')\n", (3113, 3137), False, 'from skimage.io import imread\n'), ((3362, 3378), 'matplotlib.pylab.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (3373, 3378), True, 'import matplotlib.pylab as plt\n'), ((3380, 3428), 'matplotlib.pylab.imshow', 'plt.imshow', (['im[index:index + 8, index:index + 8]'], {}), '(im[index:index + 8, index:index + 8])\n', (3390, 3428), True, 'import matplotlib.pylab as plt\n'), ((3425, 3465), 'matplotlib.pylab.title', 'plt.title', (['"""An 8x8 Image block"""'], {'size': '(15)'}), "('An 8x8 Image block', size=15)\n", (3434, 3465), True, 'import matplotlib.pylab as plt\n'), ((3467, 3483), 'matplotlib.pylab.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (3478, 3483), True, 'import matplotlib.pylab as plt\n'), ((3765, 3790), 'numpy.sum', 'np.sum', (['(dct_thresh != 0.0)'], {}), '(dct_thresh != 0.0)\n', (3771, 3790), True, 'import numpy as np\n'), ((3942, 3958), 'matplotlib.pylab.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (3953, 3958), True, 'import matplotlib.pylab as plt\n'), ((4036, 4051), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4044, 4051), True, 'import matplotlib.pylab as 
plt\n'), ((4096, 4112), 'matplotlib.pylab.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (4107, 4112), True, 'import matplotlib.pylab as plt\n'), ((4180, 4195), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4188, 4195), True, 'import matplotlib.pylab as plt\n'), ((4495, 4511), 'matplotlib.pylab.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (4506, 4511), True, 'import matplotlib.pylab as plt\n'), ((4513, 4527), 'matplotlib.pylab.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (4523, 4527), True, 'import matplotlib.pylab as plt\n'), ((4529, 4544), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4537, 4544), True, 'import matplotlib.pylab as plt\n'), ((4546, 4582), 'matplotlib.pylab.title', 'plt.title', (['"""original image"""'], {'size': '(20)'}), "('original image', size=20)\n", (4555, 4582), True, 'import matplotlib.pylab as plt\n'), ((4583, 4599), 'matplotlib.pylab.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (4594, 4599), True, 'import matplotlib.pylab as plt\n'), ((4601, 4619), 'matplotlib.pylab.imshow', 'plt.imshow', (['im_out'], {}), '(im_out)\n', (4611, 4619), True, 'import matplotlib.pylab as plt\n'), ((4621, 4636), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4629, 4636), True, 'import matplotlib.pylab as plt\n'), ((4638, 4680), 'matplotlib.pylab.title', 'plt.title', (['"""DCT compressed image"""'], {'size': '(20)'}), "('DCT compressed image', size=20)\n", (4647, 4680), True, 'import matplotlib.pylab as plt\n'), ((4884, 4914), 'skimage.io.imread', 'imread', (['"""images/Img_04_06.jpg"""'], {}), "('images/Img_04_06.jpg')\n", (4890, 4914), False, 'from skimage.io import imread\n'), ((5036, 5076), 'skimage.restoration.estimate_sigma', 'estimate_sigma', (['noisy'], {'multichannel': '(True)'}), '(noisy, multichannel=True)\n', (5050, 5076), False, 'from skimage.restoration import denoise_wavelet, estimate_sigma\n'), ((5266, 5282), 'matplotlib.pylab.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (5277, 5282), True, 'import matplotlib.pylab as plt\n'), ((5284, 5298), 'matplotlib.pylab.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (5294, 5298), True, 'import matplotlib.pylab as plt\n'), ((5300, 5315), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5308, 5315), True, 'import matplotlib.pylab as plt\n'), ((5317, 5347), 'matplotlib.pylab.title', 'plt.title', (['"""original"""'], {'size': '(20)'}), "('original', size=20)\n", (5326, 5347), True, 'import matplotlib.pylab as plt\n'), ((5348, 5364), 'matplotlib.pylab.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (5359, 5364), True, 'import matplotlib.pylab as plt\n'), ((5366, 5383), 'matplotlib.pylab.imshow', 'plt.imshow', (['noisy'], {}), '(noisy)\n', (5376, 5383), True, 'import matplotlib.pylab as plt\n'), ((5385, 5400), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5393, 5400), True, 'import matplotlib.pylab as plt\n'), ((5402, 5429), 'matplotlib.pylab.title', 'plt.title', (['"""noisy"""'], {'size': '(20)'}), "('noisy', size=20)\n", (5411, 5429), True, 'import matplotlib.pylab as plt\n'), ((5430, 5446), 'matplotlib.pylab.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (5441, 5446), True, 'import matplotlib.pylab as plt\n'), ((5448, 5463), 'matplotlib.pylab.imshow', 'plt.imshow', (['out'], {}), '(out)\n', (5458, 5463), True, 'import matplotlib.pylab as plt\n'), ((5465, 5480), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5473, 5480), True, 'import 
matplotlib.pylab as plt\n'), ((5482, 5518), 'matplotlib.pylab.title', 'plt.title', (['"""denoised (DCT)"""'], {'size': '(20)'}), "('denoised (DCT)', size=20)\n", (5491, 5518), True, 'import matplotlib.pylab as plt\n'), ((5803, 5838), 'scipy.signal.convolve2d', 'convolve2d', (['im', 'kernel'], {'mode': '"""same"""'}), "(im, kernel, mode='same')\n", (5813, 5838), False, 'from scipy.signal import convolve2d\n'), ((6381, 6405), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', 'sz'], {}), '(-10, 10, sz)\n', (6392, 6405), True, 'import numpy as np\n'), ((6422, 6450), 'numpy.exp', 'np.exp', (['(-x ** 2 / sigma ** 2)'], {}), '(-x ** 2 / sigma ** 2)\n', (6428, 6450), True, 'import numpy as np\n'), ((6462, 6481), 'numpy.trapz', 'np.trapz', (['kernel_1d'], {}), '(kernel_1d)\n', (6470, 6481), True, 'import numpy as np\n'), ((6698, 6728), 'skimage.io.imread', 'imread', (['"""images/Img_04_06.jpg"""'], {}), "('images/Img_04_06.jpg')\n", (6704, 6728), False, 'from skimage.io import imread\n'), ((7012, 7040), 'numpy.ones', 'np.ones', (['(1, ln)', 'np.float32'], {}), '((1, ln), np.float32)\n', (7019, 7040), True, 'import numpy as np\n'), ((7118, 7153), 'numpy.float32', 'np.float32', (['[[c, -s, 0], [s, c, 0]]'], {}), '([[c, -s, 0], [s, c, 0]])\n', (7128, 7153), True, 'import numpy as np\n'), ((7242, 7298), 'cv2.warpAffine', 'cv2.warpAffine', (['kern', 'A', '(sz, sz)'], {'flags': 'cv2.INTER_CUBIC'}), '(kern, A, (sz, sz), flags=cv2.INTER_CUBIC)\n', (7256, 7298), False, 'import cv2\n'), ((7516, 7544), 'numpy.zeros', 'np.zeros', (['(sz, sz)', 'np.uint8'], {}), '((sz, sz), np.uint8)\n', (7524, 7544), True, 'import numpy as np\n'), ((7549, 7609), 'cv2.circle', 'cv2.circle', (['kern', '(sz, sz)', 'r', '(255)', '(-1)', 'cv2.LINE_AA'], {'shift': '(1)'}), '(kern, (sz, sz), r, 255, -1, cv2.LINE_AA, shift=1)\n', (7559, 7609), False, 'import cv2\n'), ((8120, 8150), 'skimage.io.imread', 'imread', (['"""images/img_04_07.png"""'], {}), "('images/img_04_07.png')\n", (8126, 8150), False, 'from skimage.io import imread\n'), ((8271, 8286), 'numpy.max', 'np.max', (['im_blur'], {}), '(im_blur)\n', (8277, 8286), True, 'import numpy as np\n'), ((9271, 9287), 'matplotlib.pylab.subplot', 'plt.subplot', (['(611)'], {}), '(611)\n', (9282, 9287), True, 'import matplotlib.pylab as plt\n'), ((9289, 9303), 'matplotlib.pylab.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (9299, 9303), True, 'import matplotlib.pylab as plt\n'), ((9305, 9320), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9313, 9320), True, 'import matplotlib.pylab as plt\n'), ((9322, 9358), 'matplotlib.pylab.title', 'plt.title', (['"""Original Image"""'], {'size': '(20)'}), "('Original Image', size=20)\n", (9331, 9358), True, 'import matplotlib.pylab as plt\n'), ((9359, 9375), 'matplotlib.pylab.subplot', 'plt.subplot', (['(612)'], {}), '(612)\n', (9370, 9375), True, 'import matplotlib.pylab as plt\n'), ((9377, 9396), 'matplotlib.pylab.imshow', 'plt.imshow', (['im_blur'], {}), '(im_blur)\n', (9387, 9396), True, 'import matplotlib.pylab as plt\n'), ((9398, 9413), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9406, 9413), True, 'import matplotlib.pylab as plt\n'), ((9524, 9540), 'matplotlib.pylab.subplot', 'plt.subplot', (['(613)'], {}), '(613)\n', (9535, 9540), True, 'import matplotlib.pylab as plt\n'), ((9605, 9620), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9613, 9620), True, 'import matplotlib.pylab as plt\n'), ((9752, 9768), 'matplotlib.pylab.subplot', 'plt.subplot', (['(614)'], {}), 
'(614)\n', (9763, 9768), True, 'import matplotlib.pylab as plt\n'), ((9833, 9848), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9841, 9848), True, 'import matplotlib.pylab as plt\n'), ((9979, 9995), 'matplotlib.pylab.subplot', 'plt.subplot', (['(615)'], {}), '(615)\n', (9990, 9995), True, 'import matplotlib.pylab as plt\n'), ((10060, 10075), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10068, 10075), True, 'import matplotlib.pylab as plt\n'), ((10206, 10222), 'matplotlib.pylab.subplot', 'plt.subplot', (['(616)'], {}), '(616)\n', (10217, 10222), True, 'import matplotlib.pylab as plt\n'), ((10287, 10302), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10295, 10302), True, 'import matplotlib.pylab as plt\n'), ((10771, 10786), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10779, 10786), True, 'import matplotlib.pylab as plt\n'), ((12074, 12132), 'pywt.wavedec2', 'pywt.wavedec2', (['x', '"""db2"""'], {'mode': '"""periodization"""', 'level': 'level'}), "(x, 'db2', mode='periodization', level=level)\n", (12087, 12132), False, 'import pywt\n'), ((12449, 12472), 'pywt.coeffs_to_array', 'pywt.coeffs_to_array', (['c'], {}), '(c)\n', (12469, 12472), False, 'import pywt\n'), ((12732, 12762), 'skimage.io.imread', 'imread', (['"""images/Img_04_04.jpg"""'], {}), "('images/Img_04_04.jpg')\n", (12738, 12762), False, 'from skimage.io import imread\n'), ((13110, 13153), 'pywt.wavedec2', 'pywt.wavedec2', (['image', 'wavelet'], {'level': 'levels'}), '(image, wavelet, level=levels)\n', (13123, 13153), False, 'import pywt\n'), ((13178, 13202), 'pywt.coeffs_to_array', 'pywt.coeffs_to_array', (['wc'], {}), '(wc)\n', (13198, 13202), False, 'import pywt\n'), ((13213, 13258), 'pywt.threshold', 'pywt.threshold', (['arr', 'noise_sigma'], {'mode': '"""soft"""'}), "(arr, noise_sigma, mode='soft')\n", (13227, 13258), False, 'import pywt\n'), ((13269, 13334), 'pywt.array_to_coeffs', 'pywt.array_to_coeffs', (['arr', 'coeff_slices'], {'output_format': '"""wavedec2"""'}), "(arr, coeff_slices, output_format='wavedec2')\n", (13289, 13334), False, 'import pywt\n'), ((13346, 13373), 'pywt.waverec2', 'pywt.waverec2', (['nwc', 'wavelet'], {}), '(nwc, wavelet)\n', (13359, 13373), False, 'import pywt\n'), ((13381, 13411), 'pywt.wavelist', 'pywt.wavelist', ([], {'kind': '"""discrete"""'}), "(kind='discrete')\n", (13394, 13411), False, 'import pywt\n'), ((13697, 13715), 'numpy.clip', 'np.clip', (['out', '(0)', '(1)'], {}), '(out, 0, 1)\n', (13704, 13715), True, 'import numpy as np\n'), ((13806, 13822), 'matplotlib.pylab.subplot', 'plt.subplot', (['(241)'], {}), '(241)\n', (13817, 13822), True, 'import matplotlib.pylab as plt\n'), ((13856, 13871), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (13864, 13871), True, 'import matplotlib.pylab as plt\n'), ((13873, 13909), 'matplotlib.pylab.title', 'plt.title', (['"""original image"""'], {'size': '(15)'}), "('original image', size=15)\n", (13882, 13909), True, 'import matplotlib.pylab as plt\n'), ((15858, 15874), 'matplotlib.pylab.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (15869, 15874), True, 'import matplotlib.pylab as plt\n'), ((15876, 15896), 'matplotlib.pylab.imshow', 'plt.imshow', (['original'], {}), '(original)\n', (15886, 15896), True, 'import matplotlib.pylab as plt\n'), ((15898, 15913), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (15906, 15913), True, 'import matplotlib.pylab as plt\n'), ((15915, 15945), 
'matplotlib.pylab.title', 'plt.title', (['"""Original"""'], {'size': '(20)'}), "('Original', size=20)\n", (15924, 15945), True, 'import matplotlib.pylab as plt\n'), ((15946, 15962), 'matplotlib.pylab.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (15957, 15962), True, 'import matplotlib.pylab as plt\n'), ((15964, 15981), 'matplotlib.pylab.imshow', 'plt.imshow', (['noisy'], {}), '(noisy)\n', (15974, 15981), True, 'import matplotlib.pylab as plt\n'), ((15983, 15998), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (15991, 15998), True, 'import matplotlib.pylab as plt\n'), ((16061, 16077), 'matplotlib.pylab.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (16072, 16077), True, 'import matplotlib.pylab as plt\n'), ((16116, 16131), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (16124, 16131), True, 'import matplotlib.pylab as plt\n'), ((16221, 16237), 'matplotlib.pylab.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (16232, 16237), True, 'import matplotlib.pylab as plt\n'), ((16286, 16301), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (16294, 16301), True, 'import matplotlib.pylab as plt\n'), ((16428, 16444), 'matplotlib.pylab.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (16439, 16444), True, 'import matplotlib.pylab as plt\n'), ((16495, 16510), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (16503, 16510), True, 'import matplotlib.pylab as plt\n'), ((16640, 16656), 'matplotlib.pylab.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (16651, 16656), True, 'import matplotlib.pylab as plt\n'), ((16707, 16722), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (16715, 16722), True, 'import matplotlib.pylab as plt\n'), ((18446, 18465), 'numpy.max', 'np.max', (['fused_image'], {}), '(fused_image)\n', (18452, 18465), True, 'import numpy as np\n'), ((18564, 18580), 'matplotlib.pylab.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (18575, 18580), True, 'import matplotlib.pylab as plt\n'), ((18582, 18597), 'matplotlib.pylab.imshow', 'plt.imshow', (['im1'], {}), '(im1)\n', (18592, 18597), True, 'import matplotlib.pylab as plt\n'), ((18599, 18614), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (18607, 18614), True, 'import matplotlib.pylab as plt\n'), ((18616, 18644), 'matplotlib.pylab.title', 'plt.title', (['"""Image1"""'], {'size': '(20)'}), "('Image1', size=20)\n", (18625, 18644), True, 'import matplotlib.pylab as plt\n'), ((18691, 18707), 'matplotlib.pylab.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (18702, 18707), True, 'import matplotlib.pylab as plt\n'), ((18709, 18724), 'matplotlib.pylab.imshow', 'plt.imshow', (['im2'], {}), '(im2)\n', (18719, 18724), True, 'import matplotlib.pylab as plt\n'), ((18726, 18741), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (18734, 18741), True, 'import matplotlib.pylab as plt\n'), ((18743, 18771), 'matplotlib.pylab.title', 'plt.title', (['"""Image2"""'], {'size': '(20)'}), "('Image2', size=20)\n", (18752, 18771), True, 'import matplotlib.pylab as plt\n'), ((18851, 18867), 'matplotlib.pylab.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (18862, 18867), True, 'import matplotlib.pylab as plt\n'), ((18869, 18900), 'matplotlib.pylab.imshow', 'plt.imshow', (['(im1 // 2 + im2 // 2)'], {}), '(im1 // 2 + im2 // 2)\n', (18879, 18900), True, 'import matplotlib.pylab as plt\n'), ((18899, 18914), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", 
(18907, 18914), True, 'import matplotlib.pylab as plt\n'), ((18916, 18951), 'matplotlib.pylab.title', 'plt.title', (['"""Average Image"""'], {'size': '(20)'}), "('Average Image', size=20)\n", (18925, 18951), True, 'import matplotlib.pylab as plt\n'), ((18999, 19015), 'matplotlib.pylab.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (19010, 19015), True, 'import matplotlib.pylab as plt\n'), ((19017, 19040), 'matplotlib.pylab.imshow', 'plt.imshow', (['fused_image'], {}), '(fused_image)\n', (19027, 19040), True, 'import matplotlib.pylab as plt\n'), ((19042, 19057), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (19050, 19057), True, 'import matplotlib.pylab as plt\n'), ((19059, 19106), 'matplotlib.pylab.title', 'plt.title', (['"""Fused Image with Wavelets"""'], {'size': '(20)'}), "('Fused Image with Wavelets', size=20)\n", (19068, 19106), True, 'import matplotlib.pylab as plt\n'), ((19817, 19835), 'numpy.random.randn', 'np.random.randn', (['k'], {}), '(k)\n', (19832, 19835), True, 'import numpy as np\n'), ((20239, 20269), 'skimage.io.imread', 'imread', (['"""images/Img_04_04.jpg"""'], {}), "('images/Img_04_04.jpg')\n", (20245, 20269), False, 'from skimage.io import imread\n'), ((20643, 20659), 'matplotlib.pylab.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (20654, 20659), True, 'import matplotlib.pylab as plt\n'), ((20661, 20675), 'matplotlib.pylab.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (20671, 20675), True, 'import matplotlib.pylab as plt\n'), ((20677, 20692), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (20685, 20692), True, 'import matplotlib.pylab as plt\n'), ((20751, 20767), 'matplotlib.pylab.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (20762, 20767), True, 'import matplotlib.pylab as plt\n'), ((20769, 20784), 'matplotlib.pylab.imshow', 'plt.imshow', (['im1'], {}), '(im1)\n', (20779, 20784), True, 'import matplotlib.pylab as plt\n'), ((20786, 20801), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (20794, 20801), True, 'import matplotlib.pylab as plt\n'), ((20803, 20879), 'matplotlib.pylab.title', 'plt.title', (['"""watermarked image: $v_i^{\\\\prime}=v_i.(1+\\\\alpha x_i)$"""'], {'size': '(20)'}), "('watermarked image: $v_i^{\\\\prime}=v_i.(1+\\\\alpha x_i)$', size=20)\n", (20812, 20879), True, 'import matplotlib.pylab as plt\n'), ((21112, 21127), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (21120, 21127), True, 'import matplotlib.pylab as plt\n'), ((21129, 21167), 'matplotlib.pylab.title', 'plt.title', (['"""difference image"""'], {'size': '(20)'}), "('difference image', size=20)\n", (21138, 21167), True, 'import matplotlib.pylab as plt\n'), ((1035, 1052), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (1048, 1052), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((1087, 1114), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (1105, 1114), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((1452, 1482), 'skimage.io.imread', 'imread', (['"""images/Img_04_01.jpg"""'], {}), "('images/Img_04_01.jpg')\n", (1458, 1482), False, 'from skimage.io import imread\n'), ((1505, 1535), 'skimage.io.imread', 'imread', (['"""images/Img_04_02.png"""'], {}), "('images/Img_04_02.png')\n", (1511, 1535), False, 'from skimage.io import imread\n'), ((1722, 1734), 'skimage.color.gray2rgb', 'gray2rgb', (['im'], {}), '(im)\n', (1730, 1734), False, 
'from skimage.color import rgb2gray, gray2rgb\n'), ((2557, 2585), 'scipy.fftpack.dct', 'dct', (['a'], {'axis': '(0)', 'norm': '"""ortho"""'}), "(a, axis=0, norm='ortho')\n", (2560, 2585), False, 'from scipy.fftpack import dct, idct\n'), ((2640, 2669), 'scipy.fftpack.idct', 'idct', (['a'], {'axis': '(0)', 'norm': '"""ortho"""'}), "(a, axis=0, norm='ortho')\n", (2644, 2669), False, 'from scipy.fftpack import dct, idct\n'), ((4950, 4985), 'numpy.random.standard_normal', 'np.random.standard_normal', (['im.shape'], {}), '(im.shape)\n', (4975, 4985), True, 'import numpy as np\n'), ((5856, 5867), 'numpy.max', 'np.max', (['im1'], {}), '(im1)\n', (5862, 5867), True, 'import numpy as np\n'), ((6223, 6237), 'matplotlib.pylab.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (6233, 6237), True, 'import matplotlib.pylab as plt\n'), ((6239, 6254), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6247, 6254), True, 'import matplotlib.pylab as plt\n'), ((6256, 6281), 'matplotlib.pylab.title', 'plt.title', (['title'], {'size': '(20)'}), '(title, size=20)\n', (6265, 6281), True, 'import matplotlib.pylab as plt\n'), ((7081, 7094), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (7087, 7094), True, 'import numpy as np\n'), ((7096, 7109), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (7102, 7109), True, 'import numpy as np\n'), ((7198, 7235), 'numpy.dot', 'np.dot', (['A[:, :2]', '((ln - 1) * 0.5, 0)'], {}), '(A[:, :2], ((ln - 1) * 0.5, 0))\n', (7204, 7235), True, 'import numpy as np\n'), ((7621, 7637), 'numpy.float32', 'np.float32', (['kern'], {}), '(kern)\n', (7631, 7637), True, 'import numpy as np\n'), ((7911, 7935), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(4)', '(i + 1)'], {}), '(1, 4, i + 1)\n', (7922, 7935), True, 'import matplotlib.pylab as plt\n'), ((8419, 8450), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['im_blur'], {}), '(im_blur)\n', (8441, 8450), True, 'import SimpleITK as sitk\n'), ((8452, 8479), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['psf'], {}), '(psf)\n', (8474, 8479), True, 'import SimpleITK as sitk\n'), ((8642, 8673), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['im_blur'], {}), '(im_blur)\n', (8664, 8673), True, 'import SimpleITK as sitk\n'), ((8675, 8702), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['psf'], {}), '(psf)\n', (8697, 8702), True, 'import SimpleITK as sitk\n'), ((8888, 8919), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['im_blur'], {}), '(im_blur)\n', (8910, 8919), True, 'import SimpleITK as sitk\n'), ((8921, 8948), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['psf'], {}), '(psf)\n', (8943, 8948), True, 'import SimpleITK as sitk\n'), ((9127, 9158), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['im_blur'], {}), '(im_blur)\n', (9149, 9158), True, 'import SimpleITK as sitk\n'), ((9160, 9187), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['psf'], {}), '(psf)\n', (9182, 9187), True, 'import SimpleITK as sitk\n'), ((9702, 9740), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['im', 'im_res_IN'], {}), '(im, im_res_IN)\n', (9725, 9740), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((9929, 9967), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['im', 'im_res_WN'], {}), '(im, im_res_WN)\n', (9952, 9967), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((10156, 10194), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['im', 
'im_res_RL'], {}), '(im, im_res_RL)\n', (10179, 10194), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((10384, 10422), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['im', 'im_res_TK'], {}), '(im, im_res_TK)\n', (10407, 10422), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((10865, 10903), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['im', 'im_res_RL'], {}), '(im, im_res_RL)\n', (10888, 10903), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((11196, 11214), 'pywt.data.ascent', 'pywt.data.ascent', ([], {}), '()\n', (11212, 11214), False, 'import pywt\n'), ((11881, 11901), 'pywt._doc_utils.wavedec2_keys', 'wavedec2_keys', (['level'], {}), '(level)\n', (11894, 11901), False, 'from pywt._doc_utils import wavedec2_keys, draw_2d_wp_basis\n'), ((12905, 12928), 'numpy.log2', 'np.log2', (['image.shape[0]'], {}), '(image.shape[0])\n', (12912, 12928), True, 'import numpy as np\n'), ((13835, 13855), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (13842, 13855), True, 'import numpy as np\n'), ((13941, 13961), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(4)', 'i'], {}), '(2, 4, i)\n', (13952, 13961), True, 'import matplotlib.pylab as plt\n'), ((13961, 13986), 'matplotlib.pylab.imshow', 'plt.imshow', (['Denoised[wlt]'], {}), '(Denoised[wlt])\n', (13971, 13986), True, 'import matplotlib.pylab as plt\n'), ((13988, 14003), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (13996, 14003), True, 'import matplotlib.pylab as plt\n'), ((14005, 14028), 'matplotlib.pylab.title', 'plt.title', (['wlt'], {'size': '(15)'}), '(wlt, size=15)\n', (14014, 14028), True, 'import matplotlib.pylab as plt\n'), ((14399, 14429), 'skimage.io.imread', 'imread', (['"""images/Img_04_08.png"""'], {}), "('images/Img_04_08.png')\n", (14405, 14429), False, 'from skimage.io import imread\n'), ((20143, 20163), 'numpy.linalg.norm', 'np.linalg.norm', (['what'], {}), '(what)\n', (20157, 20163), True, 'import numpy as np\n'), ((1637, 1649), 'numpy.abs', 'np.abs', (['F_cc'], {}), '(F_cc)\n', (1643, 1649), True, 'import numpy as np\n'), ((3725, 3743), 'numpy.max', 'np.max', (['dct_coeffs'], {}), '(dct_coeffs)\n', (3731, 3743), True, 'import numpy as np\n'), ((6046, 6078), 'cv2.Laplacian', 'cv2.Laplacian', (['image', 'cv2.CV_64F'], {}), '(image, cv2.CV_64F)\n', (6059, 6078), False, 'import cv2\n'), ((9476, 9512), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['im', 'im_blur'], {}), '(im, im_blur)\n', (9499, 9512), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((12220, 12232), 'numpy.abs', 'np.abs', (['c[0]'], {}), '(c[0])\n', (12226, 12232), True, 'import numpy as np\n'), ((13075, 13098), 'numpy.log2', 'np.log2', (['image.shape[0]'], {}), '(image.shape[0])\n', (13082, 13098), True, 'import numpy as np\n'), ((17092, 17118), 'numpy.minimum', 'np.minimum', (['cooef1', 'cooef2'], {}), '(cooef1, cooef2)\n', (17102, 17118), True, 'import numpy as np\n'), ((19482, 19504), 'scipy.fftpack.dct', 'dct', (['a.T'], {'norm': '"""ortho"""'}), "(a.T, norm='ortho')\n", (19485, 19504), False, 'from scipy.fftpack import dct, idct\n'), ((19553, 19576), 'scipy.fftpack.idct', 'idct', (['a.T'], {'norm': '"""ortho"""'}), "(a.T, norm='ortho')\n", (19557, 19576), False, 'from scipy.fftpack import dct, idct\n'), ((20413, 20429), 'numpy.abs', 'np.abs', (['(im1 - im)'], {}), '(im1 - im)\n', (20419, 20429), True, 'import numpy as np\n'), ((20437, 20453), 'numpy.abs', 
'np.abs', (['(im1 - im)'], {}), '(im1 - im)\n', (20443, 20453), True, 'import numpy as np\n'), ((20936, 20952), 'numpy.abs', 'np.abs', (['(im1 - im)'], {}), '(im1 - im)\n', (20942, 20952), True, 'import numpy as np\n'), ((3543, 3561), 'numpy.max', 'np.max', (['dct_coeffs'], {}), '(dct_coeffs)\n', (3549, 3561), True, 'import numpy as np\n'), ((4001, 4019), 'numpy.max', 'np.max', (['dct_coeffs'], {}), '(dct_coeffs)\n', (4007, 4019), True, 'import numpy as np\n'), ((4144, 4162), 'numpy.max', 'np.max', (['dct_coeffs'], {}), '(dct_coeffs)\n', (4150, 4162), True, 'import numpy as np\n'), ((17162, 17188), 'numpy.maximum', 'np.maximum', (['cooef1', 'cooef2'], {}), '(cooef1, cooef2)\n', (17172, 17188), True, 'import numpy as np\n'), ((12310, 12319), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (12316, 12319), True, 'import numpy as np\n'), ((12345, 12354), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (12351, 12354), True, 'import numpy as np\n')]
|
"""
mark domains/boundaries with dolfin MeshFunctions
"""
from dolfin import *
from importlib import import_module
from .params_geo import *
import numpy
synonymes = {
"pore":{"poretop", "porecenter", "porebottom"},
"fluid":{"bulkfluid","pore"},
"sin":"membranesin",
"au":"membraneau",
"sam":"membranesam",
"membrane":{"sin","au","sam"},
"solid":{"membrane", "molecule"},
"ions":"fluid",
"bulk":{"upperb","lowerb"}, #"rightfluidb"},
"samb":{"outersamb", "uppersamb", "centersamb", "lowersamb"},
"aub":"loweraub",
"sinb":{"lowersinb", "outersinb"},
"outermembraneb":{"outersamb", "outersinb"},
"innersamb": {"uppersamb", "centersamb", "lowersamb", },
"chargedsamb":"innersamb",
"chargedsinb":{"loweraub", "lowersinb"},
"membraneb":{"samb", "aub", "sinb"},
"noslip":{"membraneb","moleculeb"},
"nopressure":{"upperb","lowerb"},
"ground":"upperb",
"bV":"lowerb",
}
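# "synonymes" maps composite region names (e.g. "fluid", "membrane", "bulk")
# to the elementary subdomain/boundary names defined by the classes below, so
# one name can address a whole group of regions.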
def norm2(x, y):
return numpy.sqrt(x**2 + y**2)
# lists containing subdomain classes, ordering is important: fluid first, molecule last
def subdomain_list(**params):
globals().update(params)
def over_line(x, y, tan, tolc):
return tan*y >= x - tolc
def under_line(x, y, tan, tolc):
return tan*y <= x + tolc
#define additional variables
Ry = Rz
Rx = R
sam = None if lsam < tolc or lsam is None else True
l0 = lsam + lsin + lau
angle2 = angle/2.0
tan = numpy.tan(angle2*numpy.pi/180)
cos = numpy.cos(angle2*numpy.pi/180)
r1 = r0 + l0*tan
rsam = r0 + lsam/cos
rsin = rsam + rlau
# subdomains
class BulkFluid(SubDomain):
def inside(self, x, on_boundary):
return True # other domains will overwrite
class Molecule(SubDomain):
def inside(self, x, on_boundary):
if x0 is not None:
return norm2(x[0], x[1]-x0[2]) <= rMolecule +tolc
else:
return False
# partion membrane into three subdomains
class MembraneSAM(SubDomain):
def inside(self, x, on_boundary):
if lsam < tolc:
return False
else:
return under_line(x[0]-r0, x[1]+l0/2, tan, tolc) \
and between(x[1], (-l0/2 -tolc, l0/2 +tolc))
class MembraneAu(SubDomain):
def inside(self, x, on_boundary):
return under_line(x[0]-rsam, x[1]+l0/2, tan, tolc) \
and between(x[1], ( -l0/2 -tolc, -lsam +l0/2 +tolc))
class MembraneSiN(SubDomain):
def inside(self, x, on_boundary):
return under_line(x[0]-rsin, x[1]+l0/2, tan, tolc) \
and between(x[1], ( -l0/2 -tolc, -l0/2 +lsin +tolc))
# partion pore into three subdomains
class PoreTop(SubDomain):
def inside(self, x, on_boundary):
return over_line(x[0]-r0, x[1]+l0/2, tan, tolc) \
and between(x[1], (l0/6 -tolc, l0/2 +tolc))
class PoreCenter(SubDomain):
def inside(self, x, on_boundary):
return over_line(x[0]-r0, x[1]+l0/2, tan, tolc) \
and between(x[1], (-l0/6 -tolc, l0/6 +tolc))
class PoreBottom(SubDomain):
def inside(self, x, on_boundary):
return over_line(x[0]-r0, x[1]+l0/2, tan, tolc) \
and between(x[1], (-l0/2 -tolc, -l0/6 +tolc))
return [BulkFluid(), PoreTop(), PoreCenter(), PoreBottom(),
MembraneSAM(), MembraneAu(), MembraneSiN(), Molecule(),]
def boundaries_list(**params):
globals().update(params)
Ry = Rz
Rx = R
#define additional variables
innerfrac = 1 - outerfrac
sam = None if lsam < tolc or lsam is None else True
l0 = lsam + lsin + lau
angle2 = angle/2
tan = numpy.tan(angle2*numpy.pi/180)
cos = numpy.cos(angle2*numpy.pi/180)
r1 = r0 + l0*tan
rsam = r0 + lsam/cos
rsin = rsam + rlau
# exterior fluid boundaries
class UpperB(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[1], Ry)
class LowerB(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[1], -Ry)
class LeftFluidB(SubDomain):
def inside(self, x, on_boundary):
if x0 is not None:
return on_boundary and near(x[0], 0) and \
not between(x[1], (x0[2] - rMolecule +tolc, x0[2] + rMolecule -tolc))
else:
return on_boundary and near(x[0], 0)
class RightFluidB(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[0], Rx) \
and (x[1] < -l0/2 +tolc or x[1] > l0/2 -tolc)
# Molecule boundaries
class MoleculeB(SubDomain):
def inside(self, x, on_boundary):
if x0 is not None:
return between(norm2(x[0], x[1]-x0[2]), (rMolecule -tolc, rMolecule +tolc) )
else:
return False
# Membrane boundaries
class UpperSAMB(SubDomain):
def inside(self, x, on_boundary):
return between(x[0], (r1 -tolc, Rx +tolc)) \
and near(x[1], l0/2)
class CenterSAMB(SubDomain):
def inside(self, x, on_boundary):
return between(tan*(x[1]+l0/2) - (x[0]-r0), (-tolc , +tolc)) \
and between(x[1], (-l0/2 -tolc, l0/2 +tolc))
class LowerSAMB(SubDomain):
def inside(self, x, on_boundary):
return between(x[0], (r0 -tolc, rsam +tolc)) and near(x[1], -l0/2)
class LowerAuB(SubDomain):
def inside(self, x, on_boundary):
return between(x[0], (rsam -tolc, rsin +tolc)) and near(x[1], -l0/2)
class LowerSiNB(SubDomain):
def inside(self, x, on_boundary):
return between(x[0], (rsin -tolc, Rx +tolc)) \
and near(x[1], -l0/2)
class OuterSiNB(SubDomain):
def inside(self, x, on_boundary):
return between(x[0], (Rx*innerfrac -tolc, Rx +tolc)) \
and near(x[1], -l0/2)
class OuterSAMB(SubDomain):
def inside(self, x, on_boundary):
return between(x[0], (Rx*innerfrac -tolc, Rx +tolc)) \
and near(x[1], l0/2)
return [UpperB(), LowerB(), LeftFluidB(), RightFluidB(),
UpperSAMB(), CenterSAMB(), LowerSAMB(), LowerAuB(), LowerSiNB(),
OuterSiNB(), OuterSAMB(), MoleculeB(),]
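# Minimal usage sketch (assumed helper, not part of the original module): the
# lists above can be used to mark dolfin MeshFunctions on a mesh supplied by
# the caller; the exact MeshFunction signature may vary with the dolfin version.
def mark_mesh(mesh, **params):
    subdomains = MeshFunction("size_t", mesh, mesh.topology().dim(), 0)
    boundaries = MeshFunction("size_t", mesh, mesh.topology().dim() - 1, 0)
    for i, sub in enumerate(subdomain_list(**params)):
        sub.mark(subdomains, i + 1)
    for i, bnd in enumerate(boundaries_list(**params)):
        bnd.mark(boundaries, i + 1)
    return subdomains, boundaries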
|
[
"numpy.tan",
"numpy.cos",
"numpy.sqrt"
] |
[((976, 1003), 'numpy.sqrt', 'numpy.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (986, 1003), False, 'import numpy\n'), ((1463, 1497), 'numpy.tan', 'numpy.tan', (['(angle2 * numpy.pi / 180)'], {}), '(angle2 * numpy.pi / 180)\n', (1472, 1497), False, 'import numpy\n'), ((1504, 1538), 'numpy.cos', 'numpy.cos', (['(angle2 * numpy.pi / 180)'], {}), '(angle2 * numpy.pi / 180)\n', (1513, 1538), False, 'import numpy\n'), ((3764, 3798), 'numpy.tan', 'numpy.tan', (['(angle2 * numpy.pi / 180)'], {}), '(angle2 * numpy.pi / 180)\n', (3773, 3798), False, 'import numpy\n'), ((3805, 3839), 'numpy.cos', 'numpy.cos', (['(angle2 * numpy.pi / 180)'], {}), '(angle2 * numpy.pi / 180)\n', (3814, 3839), False, 'import numpy\n')]
|
import os
import math
import codecs
import numpy as np
from PIL import Image, ImageEnhance
from config import train_parameters
def resize_img(img, target_size):
"""
    Forcibly resize the image to the target size.
:param img:
:param target_size:
:return:
"""
img = img.resize((target_size[1], target_size[2]), Image.BILINEAR)
return img
def random_crop(img, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.]):
aspect_ratio = math.sqrt(np.random.uniform(*ratio))
w = 1. * aspect_ratio
h = 1. / aspect_ratio
bound = min((float(img.size[0]) / img.size[1]) / (w**2),
(float(img.size[1]) / img.size[0]) / (h**2))
scale_max = min(scale[1], bound)
scale_min = min(scale[0], bound)
target_area = img.size[0] * img.size[1] * np.random.uniform(scale_min,
scale_max)
target_size = math.sqrt(target_area)
w = int(target_size * w)
h = int(target_size * h)
i = np.random.randint(0, img.size[0] - w + 1)
j = np.random.randint(0, img.size[1] - h + 1)
img = img.crop((i, j, i + w, j + h))
img = img.resize((train_parameters['input_size'][1], train_parameters['input_size'][2]), Image.BILINEAR)
return img
def rotate_image(img):
"""
    Image augmentation: rotate by a random angle.
"""
angle = np.random.randint(-14, 15)
img = img.rotate(angle)
return img
def random_brightness(img):
"""
    Image augmentation: adjust brightness.
:param img:
:return:
"""
prob = np.random.uniform(0, 1)
if prob < train_parameters['image_enhance_strategy']['brightness_prob']:
brightness_delta = train_parameters['image_enhance_strategy']['brightness_delta']
delta = np.random.uniform(-brightness_delta, brightness_delta) + 1
img = ImageEnhance.Brightness(img).enhance(delta)
return img
def random_contrast(img):
"""
    Image augmentation: adjust contrast.
:param img:
:return:
"""
prob = np.random.uniform(0, 1)
if prob < train_parameters['image_enhance_strategy']['contrast_prob']:
contrast_delta = train_parameters['image_enhance_strategy']['contrast_delta']
delta = np.random.uniform(-contrast_delta, contrast_delta) + 1
img = ImageEnhance.Contrast(img).enhance(delta)
return img
def random_saturation(img):
"""
    Image augmentation: adjust saturation.
:param img:
:return:
"""
prob = np.random.uniform(0, 1)
if prob < train_parameters['image_enhance_strategy']['saturation_prob']:
saturation_delta = train_parameters['image_enhance_strategy']['saturation_delta']
delta = np.random.uniform(-saturation_delta, saturation_delta) + 1
img = ImageEnhance.Color(img).enhance(delta)
return img
def random_hue(img):
"""
    Image augmentation: adjust hue.
:param img:
:return:
"""
prob = np.random.uniform(0, 1)
if prob < train_parameters['image_enhance_strategy']['hue_prob']:
hue_delta = train_parameters['image_enhance_strategy']['hue_delta']
delta = np.random.uniform(-hue_delta, hue_delta)
img_hsv = np.array(img.convert('HSV'))
img_hsv[:, :, 0] = img_hsv[:, :, 0] + delta
img = Image.fromarray(img_hsv, mode='HSV').convert('RGB')
return img
def distort_color(img):
"""
    Probabilistic image augmentation: apply the color distortions in one of two random orders (or skip them).
:param img:
:return:
"""
prob = np.random.uniform(0, 1)
# Apply different distort order
if prob < 0.35:
img = random_brightness(img)
img = random_contrast(img)
img = random_saturation(img)
img = random_hue(img)
elif prob < 0.7:
img = random_brightness(img)
img = random_saturation(img)
img = random_hue(img)
img = random_contrast(img)
return img
def custom_image_reader(file_list, data_dir, mode):
"""
    Custom image reader; first initializes the image classes and counts.
:param file_list:
:param data_dir:
:param mode:
:return:
"""
with codecs.open(file_list) as flist:
lines = [line.strip() for line in flist]
def reader():
np.random.shuffle(lines)
for line in lines:
if mode == 'train':
img_path, label = line.split()
img = Image.open(img_path)
try:
if img.mode != 'RGB':
img = img.convert('RGB')
if train_parameters['image_enhance_strategy']['need_distort']:
img = distort_color(img)
if train_parameters['image_enhance_strategy']['need_rotate']:
img = rotate_image(img)
if train_parameters['image_enhance_strategy']['need_crop']:
img = random_crop(img, train_parameters['input_size'])
if train_parameters['image_enhance_strategy']['need_flip']:
mirror = int(np.random.uniform(0, 2))
if mirror == 1:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
# HWC--->CHW && normalized
img = np.array(img).astype('float32')
img -= train_parameters['mean_rgb']
img = img.transpose((2, 0, 1)) # HWC to CHW
                    img *= 0.007843  # normalize pixel values (0.007843 ≈ 1/127.5)
yield img, int(label)
except Exception as e:
                    pass  # guard against occasional image read/processing errors
if mode == 'val':
img_path, label = line.split()
img = Image.open(img_path)
if img.mode != 'RGB':
img = img.convert('RGB')
img = resize_img(img, train_parameters['input_size'])
# HWC--->CHW && normalized
img = np.array(img).astype('float32')
img -= train_parameters['mean_rgb']
img = img.transpose((2, 0, 1)) # HWC to CHW
                img *= 0.007843  # normalize pixel values
yield img, int(label)
elif mode == 'test':
img_path, _ = line.split()
img = Image.open(img_path)
if img.mode != 'RGB':
img = img.convert('RGB')
img = resize_img(img, train_parameters['input_size'])
# HWC--->CHW && normalized
img = np.array(img).astype('float32')
img -= train_parameters['mean_rgb']
img = img.transpose((2, 0, 1)) # HWC to CHW
                img *= 0.007843  # normalize pixel values
yield img
return reader
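# Minimal usage sketch (file names assumed for illustration): custom_image_reader
# returns a generator factory, e.g.
#
#     train_reader = custom_image_reader('train_list.txt', 'data/', 'train')
#     for img, label in train_reader():
#         ...  # img is a normalized float32 CHW array, label an int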
|
[
"numpy.random.uniform",
"PIL.ImageEnhance.Brightness",
"codecs.open",
"math.sqrt",
"PIL.ImageEnhance.Color",
"PIL.ImageEnhance.Contrast",
"PIL.Image.open",
"numpy.random.randint",
"numpy.array",
"PIL.Image.fromarray",
"numpy.random.shuffle"
] |
[((872, 894), 'math.sqrt', 'math.sqrt', (['target_area'], {}), '(target_area)\n', (881, 894), False, 'import math\n'), ((962, 1003), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img.size[0] - w + 1)'], {}), '(0, img.size[0] - w + 1)\n', (979, 1003), True, 'import numpy as np\n'), ((1012, 1053), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img.size[1] - h + 1)'], {}), '(0, img.size[1] - h + 1)\n', (1029, 1053), True, 'import numpy as np\n'), ((1291, 1317), 'numpy.random.randint', 'np.random.randint', (['(-14)', '(15)'], {}), '(-14, 15)\n', (1308, 1317), True, 'import numpy as np\n'), ((1461, 1484), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1478, 1484), True, 'import numpy as np\n'), ((1899, 1922), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1916, 1922), True, 'import numpy as np\n'), ((2327, 2350), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2344, 2350), True, 'import numpy as np\n'), ((2754, 2777), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2771, 2777), True, 'import numpy as np\n'), ((3255, 3278), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3272, 3278), True, 'import numpy as np\n'), ((427, 452), 'numpy.random.uniform', 'np.random.uniform', (['*ratio'], {}), '(*ratio)\n', (444, 452), True, 'import numpy as np\n'), ((750, 789), 'numpy.random.uniform', 'np.random.uniform', (['scale_min', 'scale_max'], {}), '(scale_min, scale_max)\n', (767, 789), True, 'import numpy as np\n'), ((2940, 2980), 'numpy.random.uniform', 'np.random.uniform', (['(-hue_delta)', 'hue_delta'], {}), '(-hue_delta, hue_delta)\n', (2957, 2980), True, 'import numpy as np\n'), ((3828, 3850), 'codecs.open', 'codecs.open', (['file_list'], {}), '(file_list)\n', (3839, 3850), False, 'import codecs\n'), ((3937, 3961), 'numpy.random.shuffle', 'np.random.shuffle', (['lines'], {}), '(lines)\n', (3954, 3961), True, 'import numpy as np\n'), ((1668, 1722), 'numpy.random.uniform', 'np.random.uniform', (['(-brightness_delta)', 'brightness_delta'], {}), '(-brightness_delta, brightness_delta)\n', (1685, 1722), True, 'import numpy as np\n'), ((2100, 2150), 'numpy.random.uniform', 'np.random.uniform', (['(-contrast_delta)', 'contrast_delta'], {}), '(-contrast_delta, contrast_delta)\n', (2117, 2150), True, 'import numpy as np\n'), ((2534, 2588), 'numpy.random.uniform', 'np.random.uniform', (['(-saturation_delta)', 'saturation_delta'], {}), '(-saturation_delta, saturation_delta)\n', (2551, 2588), True, 'import numpy as np\n'), ((1741, 1769), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['img'], {}), '(img)\n', (1764, 1769), False, 'from PIL import Image, ImageEnhance\n'), ((2169, 2195), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['img'], {}), '(img)\n', (2190, 2195), False, 'from PIL import Image, ImageEnhance\n'), ((2607, 2630), 'PIL.ImageEnhance.Color', 'ImageEnhance.Color', (['img'], {}), '(img)\n', (2625, 2630), False, 'from PIL import Image, ImageEnhance\n'), ((3094, 3130), 'PIL.Image.fromarray', 'Image.fromarray', (['img_hsv'], {'mode': '"""HSV"""'}), "(img_hsv, mode='HSV')\n", (3109, 3130), False, 'from PIL import Image, ImageEnhance\n'), ((4090, 4110), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (4100, 4110), False, 'from PIL import Image, ImageEnhance\n'), ((5437, 5457), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (5447, 5457), False, 'from PIL import Image, 
ImageEnhance\n'), ((6000, 6020), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (6010, 6020), False, 'from PIL import Image, ImageEnhance\n'), ((5677, 5690), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (5685, 5690), True, 'import numpy as np\n'), ((4761, 4784), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (4778, 4784), True, 'import numpy as np\n'), ((4970, 4983), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4978, 4983), True, 'import numpy as np\n'), ((6239, 6252), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (6247, 6252), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from ..abstract_base_classes.solver_abc import SolverABC
from scipy.optimize import Bounds, LinearConstraint, basinhopping, minimize
from ....models.strategy_optimal import StrategyOptimal
__all__ = ['StrategyOptimalSolver']
#DIVIDER = 10**6
DIVIDER = {
'SBER': 10**6,
'GAZP': 10**6,
'LKOH': 10**3,
'GMKN': 10**3,
'ROSN': 10**6,
'SBERP': 10**6,
'ALRS': 10**6,
'IRAO': 10**6,
'TATN': 10**6,
'VTBR': 10**9,
'MGNT': 10**3,
}
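# DIVIDER rescales volumes per ticker: the volume to liquidate is divided by
# DIVIDER[seccode] before the optimization (volatility estimates are rescaled
# by DIVIDER**2) and the resulting strategy is scaled back up afterwards, so
# the optimizer works with quantities of order one.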
class StrategyOptimalSolver(SolverABC):
def __init__(self, volaagent, transactioncostsagent, strategyoptimaltable):
self._volaagent = volaagent
self._transactioncostsagent = transactioncostsagent
self._strategyoptimaltable = strategyoptimaltable
@staticmethod
def _expected_costs(v, params):
#return params[0] * v + params[1] * v**2
return params[0] * v + params[1] * v**2 + params[2] * v**3 + params[3] * v**4
@staticmethod
def _expected_costs_d(v, params):
#return params[0] + 2 * params[1] * v
return params[0] + 2 * params[1] * v + 3 * params[2] * v**2 +\
4 * params[3] * v**3
@staticmethod
def _expected_costs_dd(v, params):
#return 2 * params[1]
return 2 * params[1] + 6 * params[2] * v + 12 * params[3] * v**2
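    # _var_costs and its derivatives propagate the covariance matrix of the
    # fitted coefficients (b1..b4) to the variance of the cost polynomial
    # b1*v + b2*v**2 + b3*v**3 + b4*v**4 evaluated at volume v.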
@staticmethod
def _var_costs(v, cov_params):
var_b1 = cov_params[0, 0]
var_b2 = cov_params[1, 1]
var_b3 = cov_params[2, 2] #
var_b4 = cov_params[3, 3] #
cov_b1b2 = cov_params[0, 1]
cov_b1b3 = cov_params[0, 2] #
cov_b1b4 = cov_params[0, 3] #
cov_b2b3 = cov_params[1, 2] #
cov_b2b4 = cov_params[1, 3] #
cov_b3b4 = cov_params[2, 3] #
#var = v**2 * var_b1 + v**4 * var_b2 + 2 * cov_b1b2 * v**3
var = v**2 * var_b1 + v**4 * var_b2 + v**6 * var_b3 + v**8 * var_b4 +\
2*v**3 * cov_b1b2 + 2*v**4 * cov_b1b3 + 2*v**5 * cov_b1b4 +\
2*v**5 * cov_b2b3 + 2*v**6 * cov_b2b4 + 2*v**7 * cov_b3b4
return var
@staticmethod
def _var_costs_d(v, cov_params):
var_b1 = cov_params[0, 0]
var_b2 = cov_params[1, 1]
var_b3 = cov_params[2, 2] #
var_b4 = cov_params[3, 3] #
cov_b1b2 = cov_params[0, 1]
cov_b1b3 = cov_params[0, 2] #
cov_b1b4 = cov_params[0, 3] #
cov_b2b3 = cov_params[1, 2] #
cov_b2b4 = cov_params[1, 3] #
cov_b3b4 = cov_params[2, 3] #
#var = 2 * v * var_b1 + 4 * v**3 * var_b2 + 6 * cov_b1b2 * v**2
var = 2*v * var_b1 + 4*v**3 * var_b2 + 6*v**5 * var_b3 + 8*v**7 * var_b4 +\
6*v**2 * cov_b1b2 + 8*v**3 * cov_b1b3 + 10*v**4 * cov_b1b4 +\
10*v**4 * cov_b2b3 + 12*v**5 * cov_b2b4 + 14*v**6 * cov_b3b4
return var
@staticmethod
def _var_costs_dd(v, cov_params):
var_b1 = cov_params[0, 0]
var_b2 = cov_params[1, 1]
var_b3 = cov_params[2, 2] #
var_b4 = cov_params[3, 3] #
cov_b1b2 = cov_params[0, 1]
cov_b1b3 = cov_params[0, 2] #
cov_b1b4 = cov_params[0, 3] #
cov_b2b3 = cov_params[1, 2] #
cov_b2b4 = cov_params[1, 3] #
cov_b3b4 = cov_params[2, 3] #
#var = 2 * var_b1 + 12 * v**2 * var_b2 + 12 * cov_b1b2 * v
var = 2 * var_b1 + 12*v**2 * var_b2 + 30*v**4 * var_b3 + 56*v**6 * var_b4 +\
12*v * cov_b1b2 + 24*v**2 * cov_b1b3 + 40*v**3 * cov_b1b4 +\
40*v**3 * cov_b2b3 + 60*v**4 * cov_b2b4 + 84*v**5 * cov_b3b4
return var
def calculate(self, key):
date = key[0]
seccode = key[1]
time = key[2]
step_vola_length = key[3]
backward_num_steps = key[4]
num_steps = key[5]
step_length = key[6]
try:
DIV = DIVIDER[seccode]
except KeyError:
DIV = 1
volume_to_liquidate = key[7] / DIV
lam = key[8]
key_tc = [date, seccode, time]
key_vola = [date, seccode, time, step_vola_length, backward_num_steps,
num_steps, step_length]
transactioncosts = self._transactioncostsagent.get(key_tc)
vola = self._volaagent.get(key_vola)
tc_params = np.array(transactioncosts.get_data().params[0][0])
tc_covparams = np.array(transactioncosts.get_data().cov_params[0])
vola_estimates = np.array(vola.get_data().vola_estimates[0]) *\
DIV**2
atol = 0.000000000001
x0 = np.array([volume_to_liquidate / num_steps] * num_steps)
bounds = Bounds(0, volume_to_liquidate, keep_feasible=True)
linear_constraint = LinearConstraint([[1] * num_steps],
[volume_to_liquidate - atol], [volume_to_liquidate + atol], keep_feasible=True)
print(np.sum(x0))
print(volume_to_liquidate - atol, volume_to_liquidate + atol)
V = volume_to_liquidate
s2 = vola_estimates
def functional(v):
v2_new = np.cumsum(v[::-1])[::-1] ** 2
square = np.sqrt(np.sum(s2 * v2_new) +
np.sum(self._var_costs(v, tc_covparams)))
first = lam * square
second = np.sum(self._expected_costs(v, tc_params))
func = first + second
return func
def functional_jac(v):
coeff = lam / 2
v_new = np.cumsum(v[::-1])[::-1]
v2_new = v_new ** 2
denumerator = np.sqrt(np.sum(s2 * v2_new) +
np.sum(self._var_costs(v, tc_covparams)))
jac = np.empty(shape=len(v))
for i in range(len(v)):
v_i = v[i]
sl = (s2 * v_new)[:i+1]
numerator_i = (self._var_costs_dd(v_i, tc_covparams) + 2 * np.sum(sl))
additional_i = self._expected_costs_d(v_i, tc_params)
jac_i = coeff * numerator_i / denumerator + additional_i
jac[i] = jac_i
return jac
def functional_hess(v):
coeff = lam / 2
v_new = np.cumsum(v[::-1])[::-1]
v2_new = v_new**2
g = (np.sqrt(np.sum(s2 * v2_new) +
np.sum(self._var_costs(v, tc_covparams))))
hess = np.zeros(shape=(len(v), len(v)))
for i in range(len(v)):
for j in range(len(v)):
f_i = (self._var_costs_d(v[i], tc_covparams) +
2 * np.sum((s2 * v_new)[:i]))
f_ij = (self._var_costs_dd(v[j], tc_covparams) * (i == j) +
2 * np.sum(s2[:np.min([i, j])]))
g_j = (0.5 * (self._var_costs_d(v[j], tc_covparams) +
2 * np.sum((s2 * v_new)[:j])) / np.sqrt(np.sum(s2 * v2_new) +
np.sum(self._var_costs(v, tc_covparams))))
brackets = f_ij * g - g_j * f_i
hess_ij = (coeff * brackets / g**2 +
self._expected_costs_dd(v[j], tc_params) * (i == j))
hess[i, j] = hess_ij
return hess
minimizer_kwargs = {
'method': 'trust-constr',
'jac': functional_jac,
'hess': functional_hess,
'constraints':[linear_constraint],
'bounds': bounds}
#result = basinhopping(functional, x0, minimizer_kwargs=minimizer_kwargs)
result = minimize(functional, x0, method='trust-constr', jac=functional_jac,
hess=functional_hess, constraints=minimizer_kwargs['constraints'],
bounds=minimizer_kwargs['bounds'])
print(result)
strategy = result.x * DIV
strategy_left = np.round(key[7] - np.cumsum(strategy), 0)
strategy = key[7] - strategy_left
for j in range(len(strategy) - 1, 0, -1):
strategy[j] = strategy[j] - strategy[j - 1]
strategy = np.array(list(map(int, strategy)))
row = pd.DataFrame(columns=self._strategyoptimaltable.get_column_names())
key_names = self._strategyoptimaltable.get_key()
row[key_names[0]] = [key[0]]
row[key_names[1]] = [key[1]]
row[key_names[2]] = [key[2]]
row[key_names[3]] = [key[3]]
row[key_names[4]] = [key[4]]
row[key_names[5]] = [key[5]]
row[key_names[6]] = [key[6]]
row[key_names[7]] = [key[7]]
row[key_names[8]] = [key[8]]
row['strategy'] = [strategy.tolist()]
optimal_strategy = StrategyOptimal()
optimal_strategy.set_key(key)
optimal_strategy.set_data(row)
return optimal_strategy
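# Note on the objective: the functional minimized above is a risk-adjusted
# execution objective,
#     J(v) = lam * sqrt(sum_k s2_k * V_k**2 + Var[costs(v)]) + sum_k E[costs(v_k)],
# where v_k is the volume traded at step k, V_k = sum_{j >= k} v_j is the
# volume still to be liquidated at step k (the reversed cumulative sum in the
# code), s2_k are the per-step variance estimates and the cost moments come
# from the fitted transaction-cost polynomial.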
|
[
"scipy.optimize.minimize",
"numpy.sum",
"scipy.optimize.LinearConstraint",
"numpy.cumsum",
"scipy.optimize.Bounds",
"numpy.min",
"numpy.array"
] |
[((4572, 4627), 'numpy.array', 'np.array', (['([volume_to_liquidate / num_steps] * num_steps)'], {}), '([volume_to_liquidate / num_steps] * num_steps)\n', (4580, 4627), True, 'import numpy as np\n'), ((4645, 4695), 'scipy.optimize.Bounds', 'Bounds', (['(0)', 'volume_to_liquidate'], {'keep_feasible': '(True)'}), '(0, volume_to_liquidate, keep_feasible=True)\n', (4651, 4695), False, 'from scipy.optimize import Bounds, LinearConstraint, basinhopping, minimize\n'), ((4724, 4844), 'scipy.optimize.LinearConstraint', 'LinearConstraint', (['[[1] * num_steps]', '[volume_to_liquidate - atol]', '[volume_to_liquidate + atol]'], {'keep_feasible': '(True)'}), '([[1] * num_steps], [volume_to_liquidate - atol], [\n volume_to_liquidate + atol], keep_feasible=True)\n', (4740, 4844), False, 'from scipy.optimize import Bounds, LinearConstraint, basinhopping, minimize\n'), ((7526, 7705), 'scipy.optimize.minimize', 'minimize', (['functional', 'x0'], {'method': '"""trust-constr"""', 'jac': 'functional_jac', 'hess': 'functional_hess', 'constraints': "minimizer_kwargs['constraints']", 'bounds': "minimizer_kwargs['bounds']"}), "(functional, x0, method='trust-constr', jac=functional_jac, hess=\n functional_hess, constraints=minimizer_kwargs['constraints'], bounds=\n minimizer_kwargs['bounds'])\n", (7534, 7705), False, 'from scipy.optimize import Bounds, LinearConstraint, basinhopping, minimize\n'), ((4872, 4882), 'numpy.sum', 'np.sum', (['x0'], {}), '(x0)\n', (4878, 4882), True, 'import numpy as np\n'), ((5457, 5475), 'numpy.cumsum', 'np.cumsum', (['v[::-1]'], {}), '(v[::-1])\n', (5466, 5475), True, 'import numpy as np\n'), ((6143, 6161), 'numpy.cumsum', 'np.cumsum', (['v[::-1]'], {}), '(v[::-1])\n', (6152, 6161), True, 'import numpy as np\n'), ((7830, 7849), 'numpy.cumsum', 'np.cumsum', (['strategy'], {}), '(strategy)\n', (7839, 7849), True, 'import numpy as np\n'), ((5064, 5082), 'numpy.cumsum', 'np.cumsum', (['v[::-1]'], {}), '(v[::-1])\n', (5073, 5082), True, 'import numpy as np\n'), ((5123, 5142), 'numpy.sum', 'np.sum', (['(s2 * v2_new)'], {}), '(s2 * v2_new)\n', (5129, 5142), True, 'import numpy as np\n'), ((5548, 5567), 'numpy.sum', 'np.sum', (['(s2 * v2_new)'], {}), '(s2 * v2_new)\n', (5554, 5567), True, 'import numpy as np\n'), ((6223, 6242), 'numpy.sum', 'np.sum', (['(s2 * v2_new)'], {}), '(s2 * v2_new)\n', (6229, 6242), True, 'import numpy as np\n'), ((5852, 5862), 'numpy.sum', 'np.sum', (['sl'], {}), '(sl)\n', (5858, 5862), True, 'import numpy as np\n'), ((6533, 6557), 'numpy.sum', 'np.sum', (['(s2 * v_new)[:i]'], {}), '((s2 * v_new)[:i])\n', (6539, 6557), True, 'import numpy as np\n'), ((6839, 6858), 'numpy.sum', 'np.sum', (['(s2 * v2_new)'], {}), '(s2 * v2_new)\n', (6845, 6858), True, 'import numpy as np\n'), ((6803, 6827), 'numpy.sum', 'np.sum', (['(s2 * v_new)[:j]'], {}), '((s2 * v_new)[:j])\n', (6809, 6827), True, 'import numpy as np\n'), ((6682, 6696), 'numpy.min', 'np.min', (['[i, j]'], {}), '([i, j])\n', (6688, 6696), True, 'import numpy as np\n')]
|
"""
Provides classes that represent quasar continuum objects.
"""
import abc
import scipy.interpolate
import numpy as np
import qusp
class Continuum(object):
"""
Abstract base class for quasar continuum objects.
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
raise NotImplementedError
@abc.abstractmethod
def get_continuum(self, target, combined):
"""
        Returns a SpectralFluxDensity object that represents the specified target's unabsorbed continuum.
Args:
target (:class:`qusp.target.Target`): the target
combined (:class:`qusp.spectrum.BOSSSpectrum`): the target's combined spectrum
"""
class LinearFitContinuum(Continuum):
"""
An interface to linearized continuum fit results file.
Args:
specfits (str): name of linearized continuum fit results file.
"""
def __init__(self, specfits):
import h5py
# read data sets from specified specfits file
self.specfits = h5py.File(specfits)
self.targets = self.specfits['targets'].value
self.redshifts = self.specfits['redshifts'].value
self.amp = self.specfits['amplitude'].value
self.nu = self.specfits['nu'].value
self.rest_wave_centers = self.specfits['restWaveCenters'].value
self.obs_wave_centers = self.specfits['obsWaveCenters'].value
self.continuum = self.specfits['continuum'].value
self.transmission = self.specfits['transmission'].value
self.tiltwave = self.specfits['nu'].attrs['tiltwave']
# create interpolated transmission and continuum functions
self.trans_interp = scipy.interpolate.UnivariateSpline(self.obs_wave_centers, self.transmission, s=0)
self.cont_interp = scipy.interpolate.UnivariateSpline(self.rest_wave_centers, self.continuum, s=0)
def get_continuum(self, target, combined):
"""
        Returns a SpectralFluxDensity object that represents the specified target's unabsorbed continuum.
Args:
target (:class:`qusp.target.Target`): the target
combined (:class:`qusp.spectrum.BOSSSpectrum`): the target's combined spectrum
Raises:
ValueError: if target is not found in fit results.
"""
        # make sure the requested target exists
# if not target.to_string() in self.targets:
# raise ValueError('Target not found in specified continuum results.')
target_index = np.argmax(target['target'] == self.targets)
#assert target['z'] == self.redshifts[target_index], 'target redshift does not match the redshift used in fit'
# save target's amplitude and spectral tilt
target['nu'] = self.nu[target_index]
target['amp'] = self.amp[target_index]
# build the observed continuum from fit results
redshifted_waves = combined.wavelength/(1+target['z'])
rest_continuum = target['amp']*(redshifted_waves/self.tiltwave)**target['nu']*self.cont_interp(redshifted_waves)
obs_continuum = rest_continuum/(1+target['z'])*self.trans_interp(combined.wavelength)
# return SpectralFluxDensity representation of the observed continuum
return qusp.SpectralFluxDensity(combined.wavelength, obs_continuum)
class MeanFluxContinuum(Continuum):
"""
A simple continuum estimate calculated using the mean flux for a quasar.
Args:
wave_min (float): Optional rest frame wavelength for lower bound of mean
flux calculation.
wave_max (float): Optional rest frame wavelength for upper bound of mean
flux calculation.
"""
def __init__(self, wave_min=None, wave_max=None):
try:
self.wave_min = qusp.wavelength.Wavelength(wave_min)
except TypeError:
self.wave_min = wave_min
try:
self.wave_max = qusp.wavelength.Wavelength(wave_max)
except TypeError:
self.wave_max = wave_max
def get_continuum(self, target, combined):
"""
        Returns a SpectralFluxDensity object that represents the specified target's unabsorbed continuum.
Args:
target (:class:`qusp.target.Target`): the target
combined (:class:`qusp.spectrum.BOSSSpectrum`): the target's combined spectrum
Raises:
ValueError: if mean flux <= 0
"""
# determine wavelength range
if self.wave_min is None:
wave_min = combined.wavelength[0]
else:
wave_min = self.wave_min.observed(target['z'])
if self.wave_max is None:
wave_max = combined.wavelength[-1]
else:
wave_max = self.wave_max.observed(target['z'])
# calculate mean flux in wavelength range
mean_flux = combined.mean_flux(wave_min, wave_max)
if mean_flux <= 0:
raise ValueError('mean_flux <= 0')
continuum = mean_flux*np.ones_like(combined.wavelength)
return qusp.SpectralFluxDensity(combined.wavelength, continuum)
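# Minimal usage sketch (wavelength bounds assumed for illustration): both
# estimators return a SpectralFluxDensity evaluated on the combined spectrum's
# wavelength grid, e.g.
#
#     continuum_model = MeanFluxContinuum(wave_min=1275, wave_max=1295)
#     continuum = continuum_model.get_continuum(target, combined)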
|
[
"h5py.File",
"numpy.ones_like",
"numpy.argmax",
"qusp.wavelength.Wavelength",
"qusp.SpectralFluxDensity"
] |
[((1023, 1042), 'h5py.File', 'h5py.File', (['specfits'], {}), '(specfits)\n', (1032, 1042), False, 'import h5py\n'), ((2499, 2542), 'numpy.argmax', 'np.argmax', (["(target['target'] == self.targets)"], {}), "(target['target'] == self.targets)\n", (2508, 2542), True, 'import numpy as np\n'), ((3236, 3296), 'qusp.SpectralFluxDensity', 'qusp.SpectralFluxDensity', (['combined.wavelength', 'obs_continuum'], {}), '(combined.wavelength, obs_continuum)\n', (3260, 3296), False, 'import qusp\n'), ((5008, 5064), 'qusp.SpectralFluxDensity', 'qusp.SpectralFluxDensity', (['combined.wavelength', 'continuum'], {}), '(combined.wavelength, continuum)\n', (5032, 5064), False, 'import qusp\n'), ((3755, 3791), 'qusp.wavelength.Wavelength', 'qusp.wavelength.Wavelength', (['wave_min'], {}), '(wave_min)\n', (3781, 3791), False, 'import qusp\n'), ((3897, 3933), 'qusp.wavelength.Wavelength', 'qusp.wavelength.Wavelength', (['wave_max'], {}), '(wave_max)\n', (3923, 3933), False, 'import qusp\n'), ((4959, 4992), 'numpy.ones_like', 'np.ones_like', (['combined.wavelength'], {}), '(combined.wavelength)\n', (4971, 4992), True, 'import numpy as np\n')]
|
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
from voronoi.events import CircleEvent
class Colors:
SWEEP_LINE = "#636e72"
CELL_POINTS = "black"
BEACH_LINE = "#636e72"
EDGE = "#636e72"
ARC = "#b2bec3"
INCIDENT_POINT_POINTER = "#dfe6e9"
INVALID_CIRCLE = "#d63031" # red
VALID_CIRCLE = "#0984e3" # blue
VERTICES = "#0984e3" # blue
    TRIANGLE = "#00cec9"  # teal
    BOUNDING_BOX = "black"
    TEXT = "#00cec9"  # teal
HELPER = "#ff0000"
HIGH_LIGHT = "#00ff00"
class Visualization(object):
def __init__(self):
self.init()
plt.show()
def init(self):
self.fig, self.ax = plt.subplots(figsize=(17, 17))
def visualize(self, y, current_event, bounding_poly, points, vertices, edges, arc_list, event_queue, calc_cell_sizes=True):
plt.close()
self.init()
        self.ax.set_title(str(current_event))
scale = (bounding_poly.max_y - bounding_poly.min_y)
border = (bounding_poly.max_y - bounding_poly.min_y) / 4
plt.ylim((bounding_poly.min_y - border, bounding_poly.max_y + border))
plt.xlim((bounding_poly.min_x - border, bounding_poly.max_x + border))
# Create 1000 equally spaced points between -10 and 10 and setup plot window
x = np.linspace(bounding_poly.min_x, bounding_poly.max_x, 1000)
x_full = np.linspace(bounding_poly.min_x - border, bounding_poly.max_x + border, 1000)
# Plot the sweep line
self.ax.plot([bounding_poly.min_x - border, bounding_poly.max_x + border], [y, y], color=Colors.SWEEP_LINE)
# Plot all arcs
plot_lines = []
for arc in arc_list:
plot_line = arc.get_plot(x_full, y)
if plot_line is None:
self.ax.axvline(x=arc.origin.x)
else:
self.ax.plot(x_full, plot_line, linestyle="--", color=Colors.ARC)
plot_lines.append(plot_line)
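        # The beach line is the pointwise minimum (lower envelope) of all arcs.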
if len(plot_lines) > 0:
self.ax.plot(x_full, np.min(plot_lines, axis=0), color=Colors.BEACH_LINE)
# Plot circle events
def plot_circle(evt):
x, y = evt.center.x, evt.center.y
radius = evt.radius
color = Colors.VALID_CIRCLE if evt.is_valid else Colors.INVALID_CIRCLE
# if evt.is_valid:
circle = plt.Circle((x, y), radius, fill=False, color=color, linewidth=1.2)
triangle = plt.Polygon(evt.get_triangle(), fill=False, color=Colors.TRIANGLE, linewidth=1.2)
self.ax.add_artist(circle)
self.ax.add_artist(triangle)
# Plot half-edges
for edge in edges:
# Get start and end of edges
start = edge.get_origin(y, bounding_poly.max_y)
end = edge.twin.get_origin(y, bounding_poly.max_y)
# Draw line
if start and end:
self.ax.plot([start.x, end.x], [start.y, end.y], Colors.EDGE)
# Add Name
plt.annotate(
text=str(edge),
xy=((end.x+start.x)/2, (end.y+start.y)/2)
)
# Add arrow
if start and end and start.y < float('inf'):
plt.annotate(s='', xy=(end.x, end.y), xytext=(start.x, start.y), arrowprops=dict(arrowstyle='->'))
# Point to incident point
incident_point = edge.incident_point
if start and end and incident_point:
self.ax.plot(
[(start.x + end.x) / 2, incident_point.x], [(start.y + end.y) / 2, incident_point.y],
color=Colors.INCIDENT_POINT_POINTER,
linestyle="--"
)
if isinstance(current_event, CircleEvent):
plot_circle(current_event)
for event in event_queue.queue:
if isinstance(event, CircleEvent):
plot_circle(event)
        if hasattr(bounding_poly, 'radius'):
# Draw bounding box
self.ax.add_patch(
patches.Circle((bounding_poly.x, bounding_poly.x), bounding_poly.radius, fill=False,
edgecolor=Colors.BOUNDING_BOX)
)
else:
# Draw bounding box
self.ax.add_patch(
patches.Polygon(bounding_poly.get_coordinates(), fill=False, edgecolor=Colors.BOUNDING_BOX)
)
# Plot vertices
for vertex in vertices:
x, y = vertex.position.x, vertex.position.y
self.ax.scatter(x=[x], y=[y], s=50, color=Colors.VERTICES)
# Plot points
for point in points:
x, y = point.x, point.y
self.ax.scatter(x=[x], y=[y], s=50, color=Colors.CELL_POINTS)
# if calc_cell_sizes:
# size = f"{point.cell_size(digits=2)}"
# # plt.annotate(s=size, xy=(x, y), xytext=(100, y), arrowprops=dict(arrowstyle='->', facecolor="white"))
# self.ax.text(s=size, x=x + scale / 100, y=y + scale / 100, color=Colors.TEXT)
self.fig.show()
def plot_helper_points(self, A, B, center, start_ray, a, b, c):
self.ax.scatter(x=[A.x, B.x, center.x, start_ray.x], y=[A.y, B.y, center.y, start_ray.y], s=50, color=Colors.HELPER)
self.ax.plot(
            [start_ray.x, center.x], [start_ray.y, (c - a * center.x) / b],
color=Colors.HELPER
)
self.fig.show()
def plot_points(self, A, B):
self.ax.scatter(x=[A.x, B.x], y=[A.y, B.y], s=50, color=Colors.HELPER)
self.fig.show()
def highlight_edge(self, y, bounding_poly, edge):
# Get start and end of edges
start = edge.get_origin(y, bounding_poly.max_y)
end = edge.twin.get_origin(y, bounding_poly.max_y)
# Draw line
if start and end:
self.ax.plot([start.x, end.x], [start.y, end.y], Colors.HIGH_LIGHT, linewidth=5)
# Add arrow
if start and end and start.y < float('inf'):
self.ax.annotate(
s='',
xy=(end.x, end.y),
xytext=(start.x, start.y),
arrowprops=dict(
arrowstyle='->',
linewidth=5,
color=Colors.HIGH_LIGHT
)
)
self.fig.show()
|
[
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.patches.Circle",
"numpy.min",
"matplotlib.pyplot.Circle",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] |
[((644, 654), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (652, 654), True, 'import matplotlib.pyplot as plt\n'), ((704, 734), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(17, 17)'}), '(figsize=(17, 17))\n', (716, 734), True, 'import matplotlib.pyplot as plt\n'), ((872, 883), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (881, 883), True, 'import matplotlib.pyplot as plt\n'), ((1085, 1155), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(bounding_poly.min_y - border, bounding_poly.max_y + border)'], {}), '((bounding_poly.min_y - border, bounding_poly.max_y + border))\n', (1093, 1155), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1234), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(bounding_poly.min_x - border, bounding_poly.max_x + border)'], {}), '((bounding_poly.min_x - border, bounding_poly.max_x + border))\n', (1172, 1234), True, 'import matplotlib.pyplot as plt\n'), ((1333, 1392), 'numpy.linspace', 'np.linspace', (['bounding_poly.min_x', 'bounding_poly.max_x', '(1000)'], {}), '(bounding_poly.min_x, bounding_poly.max_x, 1000)\n', (1344, 1392), True, 'import numpy as np\n'), ((1410, 1487), 'numpy.linspace', 'np.linspace', (['(bounding_poly.min_x - border)', '(bounding_poly.max_x + border)', '(1000)'], {}), '(bounding_poly.min_x - border, bounding_poly.max_x + border, 1000)\n', (1421, 1487), True, 'import numpy as np\n'), ((2380, 2446), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(x, y)', 'radius'], {'fill': '(False)', 'color': 'color', 'linewidth': '(1.2)'}), '((x, y), radius, fill=False, color=color, linewidth=1.2)\n', (2390, 2446), True, 'import matplotlib.pyplot as plt\n'), ((2053, 2079), 'numpy.min', 'np.min', (['plot_lines'], {'axis': '(0)'}), '(plot_lines, axis=0)\n', (2059, 2079), True, 'import numpy as np\n'), ((4090, 4209), 'matplotlib.patches.Circle', 'patches.Circle', (['(bounding_poly.x, bounding_poly.x)', 'bounding_poly.radius'], {'fill': '(False)', 'edgecolor': 'Colors.BOUNDING_BOX'}), '((bounding_poly.x, bounding_poly.x), bounding_poly.radius,\n fill=False, edgecolor=Colors.BOUNDING_BOX)\n', (4104, 4209), True, 'import matplotlib.patches as patches\n')]
|
import sys
import numpy as np
from collections import defaultdict
def DumpHistogram(h):
f = open("hist.txt", 'w')
for addr in sorted(h.keys()):
        print(hex(addr), h[addr], file=f)
f.close()
def CollectSamples(infile):
histogram = defaultdict(int)
checkpointctr = 0
while True:
buf = infile.read(32)
if len(buf) < 32:
break
addrs = np.frombuffer(buf, np.uint16)
# stackframe = []
for addr in addrs:
# for now, ignore stack frames and just add a single sample for each
# if addr & 0x8000:
# stackframe.append((addr & 0x7fff) * 2)
# else:
histogram[(addr & 0x7fff) * 2] += 1
checkpointctr += 1
if checkpointctr > 100:
DumpHistogram(histogram)
checkpointctr = 0
return histogram
if __name__ == '__main__':
CollectSamples(open(sys.argv[1], 'rb'))
|
[
"collections.defaultdict",
"numpy.frombuffer"
] |
[((252, 268), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (263, 268), False, 'from collections import defaultdict\n'), ((397, 426), 'numpy.frombuffer', 'np.frombuffer', (['buf', 'np.uint16'], {}), '(buf, np.uint16)\n', (410, 426), True, 'import numpy as np\n')]
|
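CollectSamples above consumes a binary capture of 16-bit program-counter samples, 16 samples (32 bytes) per record, with the top bit reserved as a stack-frame marker. A small sketch that writes a synthetic capture with numpy and feeds it back through the parser; the file name is invented and the snippet assumes the functions above are in scope:

import numpy as np

# write 16 fake samples, all pointing at word address 0x123 (byte address 0x246)
samples = np.full(16, 0x123, dtype=np.uint16)
with open('fake_capture.bin', 'wb') as f:
    f.write(samples.tobytes())

hist = CollectSamples(open('fake_capture.bin', 'rb'))
print(hist[0x246])  # expected: 16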
# USDA_CoA_Cropland.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Functions used to import and parse USDA Census of Ag Cropland data
in NAICS format
"""
import json
import numpy as np
import pandas as pd
from flowsa.location import US_FIPS, abbrev_us_state
from flowsa.common import WITHDRAWN_KEYWORD, \
fba_wsec_default_grouping_fields
from flowsa.flowbyfunctions import assign_fips_location_system, \
equally_allocate_suppressed_parent_to_child_naics
def CoA_Cropland_NAICS_URL_helper(*, build_url, config, **_):
"""
This helper function uses the "build_url" input from flowbyactivity.py,
which is a base url for data imports that requires parts of the url text
string to be replaced with info specific to the data year. This function
does not parse the data, only modifies the urls from which data is
obtained.
:param build_url: string, base url
:param config: dictionary, items in FBA method yaml
:return: list, urls to call, concat, parse, format into Flow-By-Activity
format
"""
# initiate url list for coa cropland data
urls = []
# call on state acronyms from common.py (and remove entry for DC)
state_abbrevs = abbrev_us_state
state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != "DC"}
# replace "__aggLevel__" in build_url to create three urls
for x in config['agg_levels']:
# at national level, remove the text string calling for state acronyms
if x == 'NATIONAL':
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("&state_alpha=__stateAlpha__", "")
urls.append(url)
else:
# substitute in state acronyms for state and county url calls
for z in state_abbrevs:
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__stateAlpha__", z)
urls.append(url)
return urls
def coa_cropland_NAICS_call(*, resp, **_):
"""
Convert response for calling url to pandas dataframe,
begin parsing df into FBA format
:param resp: df, response from url call
:return: pandas dataframe of original source data
"""
cropland_json = json.loads(resp.text)
df_cropland = pd.DataFrame(data=cropland_json["data"])
return df_cropland
def coa_cropland_NAICS_parse(*, df_list, year, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param year: year
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
df = pd.concat(df_list, sort=False)
# specify desired data based on domain_desc
df = df[df['domain_desc'] == 'NAICS CLASSIFICATION']
# only want ag land and farm operations
df = df[df['short_desc'].str.contains("AG LAND|FARM OPERATIONS")]
# drop unused columns
df = df.drop(columns=['agg_level_desc', 'location_desc', 'state_alpha',
'sector_desc', 'country_code', 'begin_code',
'watershed_code', 'reference_period_desc',
'asd_desc', 'county_name', 'source_desc',
'congr_district_code', 'asd_code', 'week_ending',
'freq_desc', 'load_time', 'zip_5',
'watershed_desc', 'region_desc', 'state_ansi',
'state_name', 'country_name', 'county_ansi',
'end_code', 'group_desc'])
# create FIPS column by combining existing columns
df.loc[df['county_code'] == '', 'county_code'] = '000'
df['Location'] = df['state_fips_code'] + df['county_code']
df.loc[df['Location'] == '99000', 'Location'] = US_FIPS
# NAICS classification data
# flowname
df.loc[:, 'FlowName'] = df['commodity_desc'] + ', ' + \
df['class_desc'] + ', ' + df['prodn_practice_desc']
df.loc[:, 'FlowName'] = df['FlowName'].str.replace(
", ALL PRODUCTION PRACTICES", "", regex=True)
df.loc[:, 'FlowName'] = df['FlowName'].str.replace(
", ALL CLASSES", "", regex=True)
# activity consumed/produced by
df.loc[:, 'Activity'] = df['domaincat_desc']
df.loc[:, 'Activity'] = df['Activity'].str.replace(
"NAICS CLASSIFICATION: ", "", regex=True)
df.loc[:, 'Activity'] = df['Activity'].str.replace('[()]+', '', regex=True)
df['ActivityProducedBy'] = np.where(
df["unit_desc"] == 'OPERATIONS', df["Activity"], '')
df['ActivityConsumedBy'] = np.where(
df["unit_desc"] == 'ACRES', df["Activity"], '')
# rename columns to match flowbyactivity format
df = df.rename(columns={"Value": "FlowAmount", "unit_desc": "Unit",
"year": "Year", "CV (%)": "Spread",
"short_desc": "Description"})
# drop remaining unused columns
df = df.drop(columns=['Activity', 'class_desc', 'commodity_desc',
'domain_desc', 'state_fips_code', 'county_code',
'statisticcat_desc', 'prodn_practice_desc',
'domaincat_desc', 'util_practice_desc'])
# modify contents of units column
df.loc[df['Unit'] == 'OPERATIONS', 'Unit'] = 'p'
    # modify contents of flowamount column, "D" is suppressed data,
    # "Z" means less than half the unit is shown
df['FlowAmount'] = df['FlowAmount'].str.strip() # trim whitespace
df.loc[df['FlowAmount'] == "(D)", 'FlowAmount'] = WITHDRAWN_KEYWORD
df.loc[df['FlowAmount'] == "(Z)", 'FlowAmount'] = WITHDRAWN_KEYWORD
df['FlowAmount'] = df['FlowAmount'].str.replace(",", "", regex=True)
# USDA CoA 2017 states that (H) means CV >= 99.95,
# therefore replacing with 99.95 so can convert column to int
# (L) is a CV of <= 0.05
df['Spread'] = df['Spread'].str.strip() # trim whitespace
df.loc[df['Spread'] == "(H)", 'Spread'] = 99.95
df.loc[df['Spread'] == "(L)", 'Spread'] = 0.05
df.loc[df['Spread'] == "", 'Spread'] = None
df.loc[df['Spread'] == "(D)", 'Spread'] = WITHDRAWN_KEYWORD
# drop Descriptions that contain certain phrases, as these
# data are included in other categories
df = df[~df['Description'].str.contains(
'FRESH MARKET|PROCESSING|ENTIRE CROP|NONE OF CROP|PART OF CROP')]
# drop Descriptions that contain certain phrases -
# only occur in AG LAND data
df = df[~df['Description'].str.contains(
'INSURANCE|OWNED|RENTED|FAILED|FALLOW|IDLE')].reset_index(drop=True)
# add location system based on year of data
df = assign_fips_location_system(df, year)
# Add hardcoded data
df['Class'] = np.where(df["Unit"] == 'ACRES', "Land", "Other")
df['SourceName'] = "USDA_CoA_Cropland_NAICS"
df['MeasureofSpread'] = "RSD"
df['DataReliability'] = 5 # tmp
df['DataCollection'] = 2
return df
def coa_cropland_naics_fba_wsec_cleanup(fba_w_sector, **kwargs):
"""
Clean up the land fba for use in allocation
:param fba_w_sector: df, coa cropland naics flowbyactivity
with sector columns
:param kwargs: dictionary, requires df sourcename
:return: df, flowbyactivity with modified values
"""
method = kwargs.get('method')
df = equally_allocate_suppressed_parent_to_child_naics(
fba_w_sector, method, 'SectorConsumedBy',
fba_wsec_default_grouping_fields)
return df
|
[
"pandas.DataFrame",
"flowsa.flowbyfunctions.assign_fips_location_system",
"json.loads",
"flowsa.flowbyfunctions.equally_allocate_suppressed_parent_to_child_naics",
"numpy.where",
"pandas.concat"
] |
[((2261, 2282), 'json.loads', 'json.loads', (['resp.text'], {}), '(resp.text)\n', (2271, 2282), False, 'import json\n'), ((2301, 2341), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "cropland_json['data']"}), "(data=cropland_json['data'])\n", (2313, 2341), True, 'import pandas as pd\n'), ((2671, 2701), 'pandas.concat', 'pd.concat', (['df_list'], {'sort': '(False)'}), '(df_list, sort=False)\n', (2680, 2701), True, 'import pandas as pd\n'), ((4498, 4559), 'numpy.where', 'np.where', (["(df['unit_desc'] == 'OPERATIONS')", "df['Activity']", '""""""'], {}), "(df['unit_desc'] == 'OPERATIONS', df['Activity'], '')\n", (4506, 4559), True, 'import numpy as np\n'), ((4600, 4656), 'numpy.where', 'np.where', (["(df['unit_desc'] == 'ACRES')", "df['Activity']", '""""""'], {}), "(df['unit_desc'] == 'ACRES', df['Activity'], '')\n", (4608, 4656), True, 'import numpy as np\n'), ((6647, 6684), 'flowsa.flowbyfunctions.assign_fips_location_system', 'assign_fips_location_system', (['df', 'year'], {}), '(df, year)\n', (6674, 6684), False, 'from flowsa.flowbyfunctions import assign_fips_location_system, equally_allocate_suppressed_parent_to_child_naics\n'), ((6728, 6776), 'numpy.where', 'np.where', (["(df['Unit'] == 'ACRES')", '"""Land"""', '"""Other"""'], {}), "(df['Unit'] == 'ACRES', 'Land', 'Other')\n", (6736, 6776), True, 'import numpy as np\n'), ((7313, 7442), 'flowsa.flowbyfunctions.equally_allocate_suppressed_parent_to_child_naics', 'equally_allocate_suppressed_parent_to_child_naics', (['fba_w_sector', 'method', '"""SectorConsumedBy"""', 'fba_wsec_default_grouping_fields'], {}), "(fba_w_sector, method,\n 'SectorConsumedBy', fba_wsec_default_grouping_fields)\n", (7362, 7442), False, 'from flowsa.flowbyfunctions import assign_fips_location_system, equally_allocate_suppressed_parent_to_child_naics\n')]
|
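CoA_Cropland_NAICS_URL_helper above is template substitution: the placeholders __aggLevel__ and __stateAlpha__ in the base URL are swapped out per aggregation level and per state, and the state parameter is dropped entirely at the national level. A stripped-down sketch of the same pattern using a hypothetical base URL rather than the real Quick Stats parameters:

build_url = ("https://example.org/api?agg_level_desc=__aggLevel__"
             "&state_alpha=__stateAlpha__")   # hypothetical base url
agg_levels = ['NATIONAL', 'STATE']
states = ['AL', 'AK']

urls = []
for level in agg_levels:
    if level == 'NATIONAL':
        # national queries drop the state parameter entirely
        urls.append(build_url.replace("__aggLevel__", level)
                             .replace("&state_alpha=__stateAlpha__", ""))
    else:
        for state in states:
            urls.append(build_url.replace("__aggLevel__", level)
                                 .replace("__stateAlpha__", state))
print(len(urls))  # 1 national url plus one url per state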
# This file implements the search methods for some parameters
from ascii import preprocess_ascii, image_to_ascii, post_process
import cv2 as cv
import numpy as np
import os
def draw_patch(image, x0, y0, Tw, Th, Rw, Rh, idx):
image = np.asarray(image)
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
for i in range(Rw):
for j in range(Rh):
start_point = (x0+i*Tw, y0+j*Th)
end_point = (x0+(i+1)*Tw, y0+(j+1)*Th)
image = cv.rectangle(image, start_point, end_point, (0, 0, 255))
save_path = "./patches/"
if not os.path.exists(save_path): os.mkdir(save_path)
cv.imwrite(save_path+"patch_"+str(idx)+".jpg", image)
# cv.imshow("Patches", image)
# cv.waitKey(0)
return None
def exhaustif_search(Tw=15, Th=28):
    # search all positions of the segmentation space and find the best one
# preparation
save_path = "./results/"
if not os.path.exists(save_path): os.mkdir(save_path)
letters = preprocess_ascii(Tw=Tw, Th=Th, more_char=True)
hed = cv.imread("../images/masked_hed.jpg")
hed = cv.cvtColor(hed, cv.COLOR_BGR2GRAY)
H, W = hed.shape
# exhaustif search
idx = 0
for x0 in range(0, Tw, Tw//7):
for y0 in range(0, Th, Th//7):
idx += 1
# ascii matching
whole, loss = image_to_ascii(hed, x0=x0, y0=y0, Tw=Tw, Th=Th, Rw=W//Tw-1, Rh=H//Th-1, letters=letters)
draw_patch(hed, x0=x0, y0=y0, Tw=Tw, Th=Th, Rw=W//Tw-1, Rh=H//Th-1, idx=idx)
# post process
text = ''
print("Processing the "+str(idx)+" th candidate, loss : %.2f" % loss)
post_process(whole, text, start=7*40+25, save_path="./results/whole"+str(idx)+".txt")
if __name__ == '__main__':
exhaustif_search(17, 37)
|
[
"os.mkdir",
"cv2.cvtColor",
"ascii.preprocess_ascii",
"numpy.asarray",
"os.path.exists",
"cv2.imread",
"ascii.image_to_ascii",
"cv2.rectangle"
] |
[((239, 256), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (249, 256), True, 'import numpy as np\n'), ((269, 305), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2RGB'], {}), '(image, cv.COLOR_BGR2RGB)\n', (280, 305), True, 'import cv2 as cv\n'), ((974, 1020), 'ascii.preprocess_ascii', 'preprocess_ascii', ([], {'Tw': 'Tw', 'Th': 'Th', 'more_char': '(True)'}), '(Tw=Tw, Th=Th, more_char=True)\n', (990, 1020), False, 'from ascii import preprocess_ascii, image_to_ascii, post_process\n'), ((1031, 1068), 'cv2.imread', 'cv.imread', (['"""../images/masked_hed.jpg"""'], {}), "('../images/masked_hed.jpg')\n", (1040, 1068), True, 'import cv2 as cv\n'), ((1079, 1114), 'cv2.cvtColor', 'cv.cvtColor', (['hed', 'cv.COLOR_BGR2GRAY'], {}), '(hed, cv.COLOR_BGR2GRAY)\n', (1090, 1114), True, 'import cv2 as cv\n'), ((571, 596), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (585, 596), False, 'import os\n'), ((598, 617), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (606, 617), False, 'import os\n'), ((913, 938), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (927, 938), False, 'import os\n'), ((940, 959), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (948, 959), False, 'import os\n'), ((474, 530), 'cv2.rectangle', 'cv.rectangle', (['image', 'start_point', 'end_point', '(0, 0, 255)'], {}), '(image, start_point, end_point, (0, 0, 255))\n', (486, 530), True, 'import cv2 as cv\n'), ((1321, 1421), 'ascii.image_to_ascii', 'image_to_ascii', (['hed'], {'x0': 'x0', 'y0': 'y0', 'Tw': 'Tw', 'Th': 'Th', 'Rw': '(W // Tw - 1)', 'Rh': '(H // Th - 1)', 'letters': 'letters'}), '(hed, x0=x0, y0=y0, Tw=Tw, Th=Th, Rw=W // Tw - 1, Rh=H // Th -\n 1, letters=letters)\n', (1335, 1421), False, 'from ascii import preprocess_ascii, image_to_ascii, post_process\n')]
|
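exhaustif_search above sweeps a grid of segmentation offsets (x0, y0) and writes every candidate to disk, but the lowest-loss offset is only visible in the printed log. A hedged sketch of how the best candidate could be tracked during such a sweep; the match callable is a stand-in for image_to_ascii and the toy objective is purely illustrative:

def grid_search(match, Tw=17, Th=37):
    """Return the (loss, x0, y0) triple with the smallest matching loss."""
    best = None
    for x0 in range(0, Tw, max(1, Tw // 7)):
        for y0 in range(0, Th, max(1, Th // 7)):
            loss = match(x0, y0)          # e.g. wraps image_to_ascii(...)
            if best is None or loss < best[0]:
                best = (loss, x0, y0)
    return best

# toy objective: prefers offsets close to (5, 10)
print(grid_search(lambda x0, y0: (x0 - 5) ** 2 + (y0 - 10) ** 2))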
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split, Subset
import json, time, pickle, csv, re, os, gc, logging, zlib, orjson, joblib
import numpy as np
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold
from tensorboardX import SummaryWriter
from reformer_pytorch import Reformer, ReformerLM
from reformer_pytorch.reformer_pytorch import FixedPositionalEmbedding
input_dir = '../input/'
model_dir = '../model/'
log_dir = '../logs/'
PAD_index, BOS_index, EOS_index, UNK_index = 0, 1, 2, 5
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vocab = json.load(open(input_dir + 'vocab.json', 'r'))
vocab_inverse = {vocab[k]:k for k in vocab}
class ReviewDataset(Dataset):
def __init__(self, file_path, encoder_max_len=1584, decoder_max_len=67):
super().__init__()
self.file_path = file_path
self.encoder_max_len = encoder_max_len
self.decoder_max_len = decoder_max_len
self.docs = []
self.load_json()
def __len__(self):
return len(self.docs)
def pad_image(self, arr):
arr = ((255 - np.asarray(arr)) / 255.0).astype(np.float32)
mosaic = np.zeros((self.encoder_max_len, 32), dtype=np.float32)
height, width = arr.shape
mosaic[:width, :min(32, height)] = arr.T[:, :min(32, height)]
return mosaic
def pad_txt(self, arr):
arr = [vocab.get('[BOS]')] + [vocab.get(i, 5) for i in arr] + [vocab.get('[EOS]')]
arr = arr + [0] * (self.decoder_max_len - len(arr))
return arr
def load_json(self):
with open(input_dir + self.file_path, 'r') as file_in:
for line in file_in:
line = orjson.loads(line.strip())
mosaic = np.asarray(line['mosaic'], dtype=np.uint8)
txt = line['txt']
self.docs.append([txt, mosaic])
def __getitem__(self, index):
txt, mosaic = self.docs[index]
mosaic = self.pad_image(mosaic)
txt = self.pad_txt(txt)
return torch.tensor(txt, dtype=torch.long), torch.tensor(mosaic, dtype=torch.float)
class CRNN(nn.Module):
def __init__(self, in_channels=1, out_channels=300):
super(CRNN, self).__init__()
self.in_channels = in_channels
hidden_size = 150
self.cnn_struct = ((32, ), (64, ), (128, 128), (256, 256), (256, ))
self.cnn_paras = ((3, 1, 1), (3, 1, 1),
(3, 1, 1), (3, 1, 1), (2, 1, 0))
self.pool_struct = ((2, 2), (2, 2), (2, 1), (2, 1), None)
self.batchnorm = (False, False, False, True, False)
self.cnn = self._get_cnn_layers()
self.rnn1 = nn.LSTM(self.cnn_struct[-1][-1], hidden_size, bidirectional=True)
self.rnn2 = nn.LSTM(hidden_size*2, hidden_size, bidirectional=True)
self.dropout = nn.Dropout(0.2)
self._initialize_weights()
def forward(self, x): # input: height=32, width>=100
x = self.cnn(x) # batch, channel=512, height=1, width>=24
x = x.squeeze(2) # batch, channel=512, width>=24
x = x.permute(2, 0, 1) # width>=24, batch, channel=512
x = self.rnn1(x)[0] # length=width>=24, batch, channel=256*2
x = self.dropout(x)
x = self.rnn2(x)[0] # length=width>=24, batch, channel=256*2
x = x.transpose(0, 1)
return x
def _get_cnn_layers(self):
cnn_layers = []
in_channels = self.in_channels
for i in range(len(self.cnn_struct)):
for out_channels in self.cnn_struct[i]:
cnn_layers.append(
nn.Conv2d(in_channels, out_channels, *(self.cnn_paras[i])))
if self.batchnorm[i]:
cnn_layers.append(nn.BatchNorm2d(out_channels))
cnn_layers.append(nn.ReLU(inplace=True))
in_channels = out_channels
if (self.pool_struct[i]):
cnn_layers.append(nn.MaxPool2d(self.pool_struct[i]))
return nn.Sequential(*cnn_layers)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, np.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class TransformerModel(nn.Module):
def __init__(self, model_args={}):
super().__init__()
self.model_dim = model_args.get('model_dim', 300)
self.max_encoder_len = model_args.get('max_encoder_len', 1604)
self.max_decoder_len = model_args.get('max_decoder_len', 66)
self.vocab_size = model_args.get('vocab_size', 4000)
self.encoder = CRNN()
self.decoder = ReformerLM(
num_tokens = self.vocab_size,
dim = self.model_dim,
depth = 2,
heads = 1,
bucket_size = 233,
ff_dropout=0.2,
causal = True,
max_seq_len = self.max_decoder_len
)
if model_args.get('decoder_embedding', None) is not None:
self.decoder.token_emb = nn.Embedding.from_pretrained(model_args['decoder_embedding'], freeze=False)
else:
self.decoder.token_emb = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=300, padding_idx=0)
def forward(self, x, yi):
x = x.unsqueeze(-1).transpose(1, 3)
enc_keys = self.encoder(x)
input_mask = yi.ne(0).bool()
yo = self.decoder(yi, keys=enc_keys, input_mask=input_mask)
return yo
logging.basicConfig(filename=log_dir+'train1116.log', filemode="a", format="%(asctime)s %(name)s:%(levelname)s:%(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_args = {'decoder_embedding': torch.load(input_dir + 'word2vec.torch')}
def cal_performance(pred, gold, trg_pad_idx, smoothing=False):
loss = cal_loss(pred, gold, trg_pad_idx, smoothing=smoothing)
pred = pred.max(1)[1]
gold = gold.contiguous().view(-1)
non_pad_mask = gold.ne(trg_pad_idx)
n_correct = pred.eq(gold).masked_select(non_pad_mask).sum().item()
n_word = non_pad_mask.sum().item()
return loss, n_correct, n_word
def cal_loss(pred, gold, trg_pad_idx, smoothing=False):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
gold = gold.contiguous().view(-1)
if smoothing:
eps = 0.1
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
non_pad_mask = gold.ne(trg_pad_idx)
loss = -(one_hot * log_prb).sum(dim=1)
loss = loss.masked_select(non_pad_mask).mean() # average later
else:
loss = F.cross_entropy(pred, gold, ignore_index=trg_pad_idx) #, reduction='mean'
return loss
def patch_trg(trg, pad_idx):
trg, gold = trg[:, :-1], trg[:, 1:].contiguous().view(-1)
return trg, gold
def load_train_dataset(dataset, part=0):
indices = list(range(len(dataset)))
fold = []
kf = KFold(n_splits=5, shuffle=True, random_state=2020)
for train_index, valid_index in kf.split(indices):
fold.append([train_index, valid_index])
train_set, valid_set = Subset(dataset, fold[part][0]), Subset(dataset, fold[part][1])
return train_set, valid_set
def train(dataset, part=0):
train_set, valid_set = load_train_dataset(dataset=dataset, part=part)
writer = SummaryWriter(log_dir + '/tensorbord1207/%d' % part)
model = TransformerModel(model_args).to(device)
optim = torch.optim.Adam(model.parameters(), lr=5e-3)
valid_loader = DataLoader(valid_set, batch_size=40, num_workers=4, shuffle=False, drop_last=True)
count = 0
PAD = torch.tensor(PAD_index).to(device)
for epoch in range(20):
torch.manual_seed(epoch)
train_loader = DataLoader(train_set, batch_size=40, num_workers=4, shuffle=True, drop_last=True, worker_init_fn=np.random.seed(epoch))
model.train()
for batch in tqdm(train_loader, total=len(train_loader), desc=' - (Training) '):
tgt, src = batch
src_seq = src.to(device)
trg_seq, gold = map(lambda x: x.to(device), patch_trg(tgt, PAD))
optim.zero_grad()
pred = model(src_seq, trg_seq)
pred = pred.view(-1, pred.shape[-1])
# backward and update parameters
loss, n_correct, n_word = cal_performance(
pred, gold, PAD, smoothing=False)
loss.backward()
optim.step()
acc = np.round(n_correct/n_word, 4)
writer.add_scalars('train', {'loss': loss.item()}, count)
writer.add_scalars('train', {'acc': acc}, count)
count +=1
# validate
model.eval()
total_loss, n_word_total, n_word_correct = 0, 0, 0
preds = []
with torch.no_grad():
for batch in tqdm(valid_loader, total=len(valid_loader), desc=' - (Validate) '):
tgt, src = batch
src_seq = src.to(device)
trg_seq, gold = map(lambda x: x.to(device), patch_trg(tgt, PAD))
optim.zero_grad()
pred = model(src_seq, trg_seq)
pred = pred.view(-1, pred.shape[-1])
for i in pred.max(1)[1]:
preds.append(i.item())
loss, n_correct, n_word = cal_performance(
pred, gold, PAD_index, smoothing=False)
# note keeping
n_word_total += n_word
n_word_correct += n_correct
accuracy = np.round(n_word_correct / n_word_total, 4)
msg = 'Part{}/{}, Validate: {}'.format(part, epoch, accuracy)
logging.info(msg)
print(msg)
writer.add_scalars('valid', {'acc': accuracy}, epoch)
checkpoint = {'part': part, 'epoch': epoch, 'score': accuracy, 'model': model.state_dict()}
model_name = model_dir + '%d_%d_%.4f.ckept' % (part, epoch, accuracy)
torch.save(checkpoint, model_name)
writer.close()
def load_model(model_name='0_3_0.9657.ckept'):
model = TransformerModel(model_args).to(device)
model.load_state_dict(torch.load(model_dir + model_name)['model'])
return model
def get_pad_mask(seq, pad_idx):
return (seq != pad_idx).unsqueeze(-2)
try:
subsequent_mask_type = torch.bool
except:
subsequent_mask_type = torch.uint8
def get_subsequent_mask(seq):
batch_size, seq_len = seq.size()
mask = torch.triu(torch.ones((seq_len, seq_len), dtype=subsequent_mask_type),
diagonal=1)
mask = mask.unsqueeze(0).expand(batch_size, -1, -1)
return mask
def decode_sentence(pred_seq):
pred = []
if isinstance(pred_seq, torch.Tensor):
pred_seq = pred_seq.detach().cpu().numpy()
for i in pred_seq:
if i == BOS_index:
continue
elif i != EOS_index and i != PAD_index:
pred.append(vocab_inverse.get(i, '[UNK]'))
else:
break
pred_line = ''.join(pred)
return pred_line
def pad_seq(hypotheses):
pad_tensor = torch.zeros((hypotheses.shape[0], 66 - hypotheses.shape[1])).to(hypotheses)
return torch.cat([hypotheses, pad_tensor], dim=1)
def test(model_name='0_1_0.9416.ckept'):
model = load_model(model_name=model_name)
valid_set = joblib.load(open(input_dir + "valid_set.0.torch", 'rb'))
model.eval()
PAD = torch.tensor(PAD_index).to(device)
BOS = torch.tensor(BOS_index).to(device)
EOS = torch.tensor(EOS_index).to(device)
result = []
beam_size = 16
vocab_size = 4000
length_norm_coefficient = 0.5
with torch.no_grad():
for batch in tqdm(valid_set, mininterval=2, desc=' - (Test)', leave=False):
tgt_seq, src_seq = batch
src_seq = src_seq.unsqueeze(dim=0).to(device)
src_seq = src_seq.unsqueeze(-1).transpose(1, 3)
enc_keys = model.encoder(src_seq) # torch.Size([1, 400, 300])
hypotheses = torch.LongTensor([[BOS]]).to(device) # (1, 1)
hypotheses_lengths = torch.LongTensor([hypotheses.size(1)]).to(device)
hypotheses_scores = torch.zeros(1).to(device)
completed_hypotheses = list()
completed_hypotheses_scores = list()
n_completed_hypotheses = beam_size
step = 1
while True:
s = hypotheses.size(0)
trg_mask = get_pad_mask(hypotheses, PAD_index) & get_subsequent_mask(hypotheses).to(hypotheses.device)
padded_hypotheses = pad_seq(hypotheses)
input_mask = padded_hypotheses.ne(PAD_index)
dec_output = model.decoder(padded_hypotheses, keys=enc_keys.repeat(s, 1, 1), input_mask=input_mask) # # (s, max_len, vocab_size)
dec_output = dec_output[:, :hypotheses.size(1), :] # (s, step, vocab_size)
scores = dec_output[:, -1, :] # (s, vocab_size)
scores = F.log_softmax(scores, dim=-1) # (s, vocab_size)
scores = hypotheses_scores.unsqueeze(1) + scores # (s, vocab_size)
top_k_hypotheses_scores, unrolled_indices = scores.view(-1).topk(beam_size, 0, True, True) # (k)
# Convert unrolled indices to actual indices of the scores tensor which yielded the best scores
prev_word_indices = unrolled_indices // vocab_size # (k)
next_word_indices = unrolled_indices % vocab_size # (k)
                # Construct the new top k hypotheses from these indices
top_k_hypotheses = torch.cat([hypotheses[prev_word_indices], next_word_indices.unsqueeze(1)], dim=1) # (k, step + 1)
# Which of these new hypotheses are complete (reached <EOS>)?
complete = next_word_indices == EOS # (k), bool
# Set aside completed hypotheses and their scores normalized by their lengths
# For the length normalization formula, see
# "Google’s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation"
completed_hypotheses.extend(top_k_hypotheses[complete].tolist())
norm = np.power(((5 + step) / (5 + 1)), length_norm_coefficient)
completed_hypotheses_scores.extend((top_k_hypotheses_scores[complete] / norm).tolist())
# Stop if we have completed enough hypotheses
if len(completed_hypotheses) >= n_completed_hypotheses:
break
# Else, continue with incomplete hypotheses
hypotheses = top_k_hypotheses[~complete] # (s, step + 1)
hypotheses_scores = top_k_hypotheses_scores[~complete] # (s)
hypotheses_lengths = torch.LongTensor(hypotheses.size(0) * [hypotheses.size(1)]).to(device) # (s)
# Stop if things have been going on for too long
if step > 66:
break
step += 1
if len(completed_hypotheses) == 0:
completed_hypotheses = hypotheses.tolist()
completed_hypotheses_scores = hypotheses_scores.tolist()
# Decode the hypotheses
all_hypotheses = list()
for i, h in enumerate(completed_hypotheses):
all_hypotheses.append({"hypothesis": decode_sentence(h), "score": completed_hypotheses_scores[i]})
# Find the best scoring completed hypothesis
i = completed_hypotheses_scores.index(max(completed_hypotheses_scores))
best_hypothesis = all_hypotheses[i]["hypothesis"]
result.append([decode_sentence(tgt_seq), best_hypothesis])
return result
def show():
batches = torch.load(input_dir + 'demo.torch')
batch = batches[0]
models = ['0_0_0.2645.ckept', '0_1_0.2932.ckept', '0_2_0.3239.ckept', '0_3_0.3594.ckept', '0_4_0.4878.ckept', '0_5_0.8006.ckept', '0_6_0.8941.ckept', '0_7_0.9254.ckept', '0_8_0.9354.ckept', '0_9_0.9360.ckept', '0_10_0.9464.ckept', '0_11_0.9447.ckept', '0_15_0.9529.ckept', '0_19_0.9564.ckept', '0_39_0.9745.ckept', '0_59_0.9778.ckept', '0_99_0.9814.ckept']
PAD = torch.tensor(PAD_index).to(device)
BOS = torch.tensor(BOS_index).to(device)
EOS = torch.tensor(EOS_index).to(device)
beam_size = 16
vocab_size = 4000
length_norm_coefficient = 0.5
tgt_seq, src_seq = batch
src_seq = src_seq.unsqueeze(dim=0).to(device)
src_seq = src_seq.unsqueeze(-1).transpose(1, 3)
for model_name in models:
model = load_model(model_name=model_name)
model.eval()
with torch.no_grad():
enc_keys = model.encoder(src_seq) # torch.Size([1, 400, 300])
hypotheses = torch.LongTensor([[BOS]]).to(device) # (1, 1)
hypotheses_lengths = torch.LongTensor([hypotheses.size(1)]).to(device)
hypotheses_scores = torch.zeros(1).to(device)
completed_hypotheses = list()
completed_hypotheses_scores = list()
n_completed_hypotheses = beam_size
step = 1
while True:
s = hypotheses.size(0)
trg_mask = get_pad_mask(hypotheses, PAD_index) & get_subsequent_mask(hypotheses).to(hypotheses.device)
padded_hypotheses = pad_seq(hypotheses)
input_mask = padded_hypotheses.ne(PAD_index)
dec_output = model.decoder(padded_hypotheses, keys=enc_keys.repeat(s, 1, 1), input_mask=input_mask) # # (s, max_len, vocab_size)
dec_output = dec_output[:, :hypotheses.size(1), :] # (s, step, vocab_size)
scores = dec_output[:, -1, :] # (s, vocab_size)
scores = F.log_softmax(scores, dim=-1) # (s, vocab_size)
scores = hypotheses_scores.unsqueeze(1) + scores # (s, vocab_size)
top_k_hypotheses_scores, unrolled_indices = scores.view(-1).topk(beam_size, 0, True, True) # (k)
# Convert unrolled indices to actual indices of the scores tensor which yielded the best scores
prev_word_indices = unrolled_indices // vocab_size # (k)
next_word_indices = unrolled_indices % vocab_size # (k)
                # Construct the new top k hypotheses from these indices
top_k_hypotheses = torch.cat([hypotheses[prev_word_indices], next_word_indices.unsqueeze(1)], dim=1) # (k, step + 1)
# Which of these new hypotheses are complete (reached <EOS>)?
complete = next_word_indices == EOS # (k), bool
completed_hypotheses.extend(top_k_hypotheses[complete].tolist())
norm = np.power(((5 + step) / (5 + 1)), length_norm_coefficient)
completed_hypotheses_scores.extend((top_k_hypotheses_scores[complete] / norm).tolist())
# Stop if we have completed enough hypotheses
if len(completed_hypotheses) >= n_completed_hypotheses:
break
# Else, continue with incomplete hypotheses
hypotheses = top_k_hypotheses[~complete] # (s, step + 1)
hypotheses_scores = top_k_hypotheses_scores[~complete] # (s)
hypotheses_lengths = torch.LongTensor(hypotheses.size(0) * [hypotheses.size(1)]).to(device) # (s)
hypotheses_hypotheses_list = hypotheses.tolist()
hypotheses_hypotheses_scores_list = hypotheses_scores.tolist()
i = hypotheses_hypotheses_scores_list.index(max(hypotheses_hypotheses_scores_list))
best = hypotheses_hypotheses_list[i]
print(step, decode_sentence(best))
# Stop if things have been going on for too long
if step > 66:
break
step += 1
if len(completed_hypotheses) == 0:
completed_hypotheses = hypotheses.tolist()
completed_hypotheses_scores = hypotheses_scores.tolist()
# Decode the hypotheses
all_hypotheses = list()
for i, h in enumerate(completed_hypotheses):
all_hypotheses.append({"hypothesis": decode_sentence(h), "score": completed_hypotheses_scores[i]})
# Find the best scoring completed hypothesis
i = completed_hypotheses_scores.index(max(completed_hypotheses_scores))
best_hypothesis = all_hypotheses[i]["hypothesis"]
print("model_name: ", model_name, ", decode: ", best_hypothesis)
if __name__ == '__main__':
if not os.path.exists(input_dir + 'dataset.torch'):
dataset = ReviewDataset('dataset.jsonl', encoder_max_len=1604)
joblib.dump(dataset, open(input_dir + 'dataset.torch', 'wb'))
else:
dataset = joblib.load(open(input_dir + 'dataset.torch', 'rb'))
train(dataset, part=0)
show()
|
[
"torch.nn.Dropout",
"numpy.random.seed",
"torch.nn.Embedding",
"torch.cat",
"torch.no_grad",
"numpy.round",
"torch.ones",
"torch.utils.data.DataLoader",
"numpy.power",
"torch.load",
"os.path.exists",
"torch.nn.Embedding.from_pretrained",
"reformer_pytorch.ReformerLM",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.nn.LSTM",
"torch.optim.step",
"tqdm.tqdm",
"torch.zeros_like",
"torch.manual_seed",
"numpy.asarray",
"torch.nn.Conv2d",
"torch.nn.functional.cross_entropy",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.nn.MaxPool2d",
"torch.utils.data.Subset",
"tensorboardX.SummaryWriter",
"torch.optim.zero_grad",
"torch.nn.ReLU",
"logging.basicConfig",
"torch.nn.Sequential",
"torch.LongTensor",
"numpy.zeros",
"sklearn.model_selection.KFold",
"torch.save",
"logging.info",
"torch.tensor",
"numpy.sqrt"
] |
[((5907, 6091), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "(log_dir + 'train1116.log')", 'filemode': '"""a"""', 'format': '"""%(asctime)s %(name)s:%(levelname)s:%(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""', 'level': 'logging.INFO'}), "(filename=log_dir + 'train1116.log', filemode='a',\n format='%(asctime)s %(name)s:%(levelname)s:%(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S', level=logging.INFO)\n", (5926, 6091), False, 'import json, time, pickle, csv, re, os, gc, logging, zlib, orjson, joblib\n'), ((6116, 6156), 'torch.load', 'torch.load', (["(input_dir + 'word2vec.torch')"], {}), "(input_dir + 'word2vec.torch')\n", (6126, 6156), False, 'import torch\n'), ((7473, 7523), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(2020)'}), '(n_splits=5, shuffle=True, random_state=2020)\n', (7478, 7523), False, 'from sklearn.model_selection import KFold\n'), ((7872, 7924), 'tensorboardX.SummaryWriter', 'SummaryWriter', (["(log_dir + '/tensorbord1207/%d' % part)"], {}), "(log_dir + '/tensorbord1207/%d' % part)\n", (7885, 7924), False, 'from tensorboardX import SummaryWriter\n'), ((8058, 8144), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_set'], {'batch_size': '(40)', 'num_workers': '(4)', 'shuffle': '(False)', 'drop_last': '(True)'}), '(valid_set, batch_size=40, num_workers=4, shuffle=False,\n drop_last=True)\n', (8068, 8144), False, 'from torch.utils.data import Dataset, DataLoader, random_split, Subset\n'), ((11723, 11765), 'torch.cat', 'torch.cat', (['[hypotheses, pad_tensor]'], {'dim': '(1)'}), '([hypotheses, pad_tensor], dim=1)\n', (11732, 11765), False, 'import torch\n'), ((16324, 16360), 'torch.load', 'torch.load', (["(input_dir + 'demo.torch')"], {}), "(input_dir + 'demo.torch')\n", (16334, 16360), False, 'import torch\n'), ((710, 735), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (733, 735), False, 'import torch\n'), ((1328, 1382), 'numpy.zeros', 'np.zeros', (['(self.encoder_max_len, 32)'], {'dtype': 'np.float32'}), '((self.encoder_max_len, 32), dtype=np.float32)\n', (1336, 1382), True, 'import numpy as np\n'), ((2826, 2891), 'torch.nn.LSTM', 'nn.LSTM', (['self.cnn_struct[-1][-1]', 'hidden_size'], {'bidirectional': '(True)'}), '(self.cnn_struct[-1][-1], hidden_size, bidirectional=True)\n', (2833, 2891), True, 'import torch.nn as nn\n'), ((2912, 2969), 'torch.nn.LSTM', 'nn.LSTM', (['(hidden_size * 2)', 'hidden_size'], {'bidirectional': '(True)'}), '(hidden_size * 2, hidden_size, bidirectional=True)\n', (2919, 2969), True, 'import torch.nn as nn\n'), ((2991, 3006), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (3001, 3006), True, 'import torch.nn as nn\n'), ((4193, 4219), 'torch.nn.Sequential', 'nn.Sequential', (['*cnn_layers'], {}), '(*cnn_layers)\n', (4206, 4219), True, 'import torch.nn as nn\n'), ((5084, 5249), 'reformer_pytorch.ReformerLM', 'ReformerLM', ([], {'num_tokens': 'self.vocab_size', 'dim': 'self.model_dim', 'depth': '(2)', 'heads': '(1)', 'bucket_size': '(233)', 'ff_dropout': '(0.2)', 'causal': '(True)', 'max_seq_len': 'self.max_decoder_len'}), '(num_tokens=self.vocab_size, dim=self.model_dim, depth=2, heads=1,\n bucket_size=233, ff_dropout=0.2, causal=True, max_seq_len=self.\n max_decoder_len)\n', (5094, 5249), False, 'from reformer_pytorch import Reformer, ReformerLM\n'), ((6949, 6975), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (6962, 6975), True, 'import torch.nn.functional as F\n'), 
((7165, 7218), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['pred', 'gold'], {'ignore_index': 'trg_pad_idx'}), '(pred, gold, ignore_index=trg_pad_idx)\n', (7180, 7218), True, 'import torch.nn.functional as F\n'), ((7659, 7689), 'torch.utils.data.Subset', 'Subset', (['dataset', 'fold[part][0]'], {}), '(dataset, fold[part][0])\n', (7665, 7689), False, 'from torch.utils.data import Dataset, DataLoader, random_split, Subset\n'), ((7691, 7721), 'torch.utils.data.Subset', 'Subset', (['dataset', 'fold[part][1]'], {}), '(dataset, fold[part][1])\n', (7697, 7721), False, 'from torch.utils.data import Dataset, DataLoader, random_split, Subset\n'), ((8243, 8267), 'torch.manual_seed', 'torch.manual_seed', (['epoch'], {}), '(epoch)\n', (8260, 8267), False, 'import torch\n'), ((10103, 10145), 'numpy.round', 'np.round', (['(n_word_correct / n_word_total)', '(4)'], {}), '(n_word_correct / n_word_total, 4)\n', (10111, 10145), True, 'import numpy as np\n'), ((10224, 10241), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (10236, 10241), False, 'import json, time, pickle, csv, re, os, gc, logging, zlib, orjson, joblib\n'), ((10511, 10545), 'torch.save', 'torch.save', (['checkpoint', 'model_name'], {}), '(checkpoint, model_name)\n', (10521, 10545), False, 'import torch\n'), ((11027, 11085), 'torch.ones', 'torch.ones', (['(seq_len, seq_len)'], {'dtype': 'subsequent_mask_type'}), '((seq_len, seq_len), dtype=subsequent_mask_type)\n', (11037, 11085), False, 'import torch\n'), ((12182, 12197), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12195, 12197), False, 'import torch\n'), ((12220, 12282), 'tqdm.tqdm', 'tqdm', (['valid_set'], {'mininterval': '(2)', 'desc': '""" - (Test)"""', 'leave': '(False)'}), "(valid_set, mininterval=2, desc=' - (Test)', leave=False)\n", (12224, 12282), False, 'from tqdm import tqdm\n'), ((21192, 21235), 'os.path.exists', 'os.path.exists', (["(input_dir + 'dataset.torch')"], {}), "(input_dir + 'dataset.torch')\n", (21206, 21235), False, 'import json, time, pickle, csv, re, os, gc, logging, zlib, orjson, joblib\n'), ((2188, 2223), 'torch.tensor', 'torch.tensor', (['txt'], {'dtype': 'torch.long'}), '(txt, dtype=torch.long)\n', (2200, 2223), False, 'import torch\n'), ((2225, 2264), 'torch.tensor', 'torch.tensor', (['mosaic'], {'dtype': 'torch.float'}), '(mosaic, dtype=torch.float)\n', (2237, 2264), False, 'import torch\n'), ((5464, 5539), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (["model_args['decoder_embedding']"], {'freeze': '(False)'}), "(model_args['decoder_embedding'], freeze=False)\n", (5492, 5539), True, 'import torch.nn as nn\n'), ((5591, 5669), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': 'self.vocab_size', 'embedding_dim': '(300)', 'padding_idx': '(0)'}), '(num_embeddings=self.vocab_size, embedding_dim=300, padding_idx=0)\n', (5603, 5669), True, 'import torch.nn as nn\n'), ((8167, 8190), 'torch.tensor', 'torch.tensor', (['PAD_index'], {}), '(PAD_index)\n', (8179, 8190), False, 'import torch\n'), ((8693, 8710), 'torch.optim.zero_grad', 'optim.zero_grad', ([], {}), '()\n', (8708, 8710), True, 'import torch.optim as optim\n'), ((8995, 9007), 'torch.optim.step', 'optim.step', ([], {}), '()\n', (9005, 9007), True, 'import torch.optim as optim\n'), ((9027, 9058), 'numpy.round', 'np.round', (['(n_correct / n_word)', '(4)'], {}), '(n_correct / n_word, 4)\n', (9035, 9058), True, 'import numpy as np\n'), ((9345, 9360), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9358, 9360), False, 'import torch\n'), 
((10709, 10743), 'torch.load', 'torch.load', (['(model_dir + model_name)'], {}), '(model_dir + model_name)\n', (10719, 10743), False, 'import torch\n'), ((11636, 11696), 'torch.zeros', 'torch.zeros', (['(hypotheses.shape[0], 66 - hypotheses.shape[1])'], {}), '((hypotheses.shape[0], 66 - hypotheses.shape[1]))\n', (11647, 11696), False, 'import torch\n'), ((11955, 11978), 'torch.tensor', 'torch.tensor', (['PAD_index'], {}), '(PAD_index)\n', (11967, 11978), False, 'import torch\n'), ((12000, 12023), 'torch.tensor', 'torch.tensor', (['BOS_index'], {}), '(BOS_index)\n', (12012, 12023), False, 'import torch\n'), ((12045, 12068), 'torch.tensor', 'torch.tensor', (['EOS_index'], {}), '(EOS_index)\n', (12057, 12068), False, 'import torch\n'), ((16756, 16779), 'torch.tensor', 'torch.tensor', (['PAD_index'], {}), '(PAD_index)\n', (16768, 16779), False, 'import torch\n'), ((16801, 16824), 'torch.tensor', 'torch.tensor', (['BOS_index'], {}), '(BOS_index)\n', (16813, 16824), False, 'import torch\n'), ((16846, 16869), 'torch.tensor', 'torch.tensor', (['EOS_index'], {}), '(EOS_index)\n', (16858, 16869), False, 'import torch\n'), ((17204, 17219), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17217, 17219), False, 'import torch\n'), ((1903, 1945), 'numpy.asarray', 'np.asarray', (["line['mosaic']"], {'dtype': 'np.uint8'}), "(line['mosaic'], dtype=np.uint8)\n", (1913, 1945), True, 'import numpy as np\n'), ((6800, 6822), 'torch.zeros_like', 'torch.zeros_like', (['pred'], {}), '(pred)\n', (6816, 6822), False, 'import torch\n'), ((8390, 8411), 'numpy.random.seed', 'np.random.seed', (['epoch'], {}), '(epoch)\n', (8404, 8411), True, 'import numpy as np\n'), ((9643, 9660), 'torch.optim.zero_grad', 'optim.zero_grad', ([], {}), '()\n', (9658, 9660), True, 'import torch.optim as optim\n'), ((13526, 13555), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (13539, 13555), True, 'import torch.nn.functional as F\n'), ((14779, 14834), 'numpy.power', 'np.power', (['((5 + step) / (5 + 1))', 'length_norm_coefficient'], {}), '((5 + step) / (5 + 1), length_norm_coefficient)\n', (14787, 14834), True, 'import numpy as np\n'), ((18307, 18336), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (18320, 18336), True, 'import torch.nn.functional as F\n'), ((19286, 19341), 'numpy.power', 'np.power', (['((5 + step) / (5 + 1))', 'length_norm_coefficient'], {}), '((5 + step) / (5 + 1), length_norm_coefficient)\n', (19294, 19341), True, 'import numpy as np\n'), ((3805, 3861), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '*self.cnn_paras[i]'], {}), '(in_channels, out_channels, *self.cnn_paras[i])\n', (3814, 3861), True, 'import torch.nn as nn\n'), ((4005, 4026), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4012, 4026), True, 'import torch.nn as nn\n'), ((4143, 4176), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['self.pool_struct[i]'], {}), '(self.pool_struct[i])\n', (4155, 4176), True, 'import torch.nn as nn\n'), ((4448, 4464), 'numpy.sqrt', 'np.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (4455, 4464), True, 'import numpy as np\n'), ((12540, 12565), 'torch.LongTensor', 'torch.LongTensor', (['[[BOS]]'], {}), '([[BOS]])\n', (12556, 12565), False, 'import torch\n'), ((12702, 12716), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (12713, 12716), False, 'import torch\n'), ((17321, 17346), 'torch.LongTensor', 'torch.LongTensor', (['[[BOS]]'], {}), '([[BOS]])\n', (17337, 17346), 
False, 'import torch\n'), ((17483, 17497), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (17494, 17497), False, 'import torch\n'), ((1266, 1281), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (1276, 1281), True, 'import numpy as np\n'), ((3941, 3969), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (3955, 3969), True, 'import torch.nn as nn\n')]
|
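Both test() and show() above rescale completed-hypothesis scores with the GNMT length penalty, lp(Y) = ((5 + |Y|) / 6) ** alpha, so hypotheses of different lengths can be compared fairly. A small standalone sketch of that comparison with invented scores and lengths:

import numpy as np

def length_normalized(score, length, alpha=0.5):
    # GNMT-style length penalty: ((5 + |Y|) / 6) ** alpha
    return score / np.power((5 + length) / (5 + 1), alpha)

# a longer hypothesis with a lower raw log-prob can still win after normalization
short = length_normalized(-4.0, length=5)
long = length_normalized(-5.5, length=20)
print(short, long, 'long wins' if long > short else 'short wins')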
"""
Interactron Random Training Loop
The interactron model is trained on random sequences of data.
"""
import math
from tqdm import tqdm
import numpy as np
import os
from datetime import datetime
import torch
from torch.utils.data.dataloader import DataLoader
from datasets.sequence_dataset import SequenceDataset
from utils.transform_utis import transform, train_transform
from utils.logging_utils import TBLogger
from utils.storage_utils import collate_fn
class InteractronRandomTrainer:
def __init__(self, model, config, evaluator=None):
self.model = model
self.config = config
self.evaluator = evaluator
# set up logging and saving
self.out_dir = os.path.join(self.config.TRAINER.OUTPUT_DIRECTORY, datetime.now().strftime("%m-%d-%Y:%H:%M:%S"))
# os.makedirs(self.out_dir, exist_ok=True)
self.logger = TBLogger(os.path.join(self.out_dir, "logs"))
self.model.set_logger(self.logger)
self.checkpoint_path = os.path.join(self.out_dir, "detector.pt")
self.train_dataset = SequenceDataset(config.DATASET.TRAIN.IMAGE_ROOT, config.DATASET.TRAIN.ANNOTATION_ROOT,
config.DATASET.TRAIN.MODE, transform=train_transform)
self.test_dataset = SequenceDataset(config.DATASET.TEST.IMAGE_ROOT, config.DATASET.TEST.ANNOTATION_ROOT,
config.DATASET.TEST.MODE, transform=transform)
# take over whatever gpus are on the system
self.device = 'cpu'
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.model = torch.nn.DataParallel(self.model).to(self.device)
def save_checkpoint(self):
# DataParallel wrappers keep raw model object in .module attribute
raw_model = self.model.module if hasattr(self.model, "module") else self.model
torch.save({"model": raw_model.state_dict()}, self.checkpoint_path)
def train(self):
model, config = self.model, self.config.TRAINER
raw_model = self.model.module if hasattr(self.model, "module") else self.model
detector_optimizer = torch.optim.SGD(raw_model.decoder.parameters(), lr=1e-3, momentum=0.9)
supervisor_optimizer = torch.optim.AdamW(raw_model.fusion.parameters(), lr=3e-4)
def run_epoch(split):
is_train = split == 'train'
loader = DataLoader(self.train_dataset if is_train else self.test_dataset, shuffle=is_train,
pin_memory=True, batch_size=config.BATCH_SIZE, num_workers=config.NUM_WORKERS,
collate_fn=collate_fn)
loss_list = []
pbar = tqdm(enumerate(loader), total=len(loader))
for it, data in pbar:
# place data on the correct device
data["frames"] = data["frames"].to(self.device)
data["masks"] = data["masks"].to(self.device)
data["category_ids"] = [[j.to(self.device)+1 for j in i] for i in data["category_ids"]]
data["boxes"] = [[j.to(self.device) for j in i] for i in data["boxes"]]
# forward the model
predictions, losses = model(data)
detector_loss = losses["loss_detector_ce"]
supervisor_loss = losses["loss_supervisor_ce"]
# log the losses
for name, loss_comp in losses.items():
self.logger.add_value("{}/{}".format("Train" if is_train else "Test", name), loss_comp.mean())
total_loss = detector_loss + supervisor_loss
self.logger.add_value("{}/Total Loss".format("Train" if is_train else "Test"), total_loss.mean())
loss_list.append(total_loss.item())
                if is_train:
                    # backpropagate the combined loss before stepping the
                    # optimizers; without a backward pass the steps are no-ops
                    total_loss.mean().backward()
                    supervisor_optimizer.step()
                    detector_optimizer.step()
                    raw_model.zero_grad()
# decay the learning rate based on our progress
if config.LR_DECAY:
self.tokens += data["frames"].shape[0] * data["frames"].shape[1]
if self.tokens < config.WARMUP_TOKENS:
# linear warmup
lr_mult = float(self.tokens) / float(max(1, config.WARMUP_TOKENS))
else:
# cosine learning rate decay
progress = float(self.tokens - config.WARMUP_TOKENS) / \
float(max(1, config.FINAL_TOKENS - config.WARMUP_TOKENS))
lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
lr = config.LEARNING_RATE * lr_mult
for param_group in supervisor_optimizer.param_groups:
param_group['lr'] = lr
for param_group in detector_optimizer.param_groups:
param_group['lr'] = lr
else:
lr = config.LEARNING_RATE
# report progress
pbar.set_description(
f"epoch {epoch} iter {it}: train loss {float(np.mean(loss_list)):.5f}. lr {lr:e}"
)
if not is_train:
test_loss = float(np.mean(loss_list))
return test_loss
def run_evaluation():
test_loss = run_epoch('test')
mAP, tps, fps, fns = self.evaluator.evaluate(save_results=False)
return mAP
best_ap = 0.0
self.tokens = 0 # counter used for learning rate decay
mAP = run_evaluation()
self.logger.log_values()
for epoch in range(1, config.MAX_EPOCHS):
run_epoch('train')
if epoch % 1 == 0 and self.test_dataset is not None and self.evaluator is not None:
mAP = run_evaluation()
self.logger.log_values()
# supports early stopping based on the test loss, or just save always if no test set is provided
if self.test_dataset is not None and self.evaluator is not None and mAP > best_ap:
best_ap = mAP
self.save_checkpoint()
|
[
"datasets.sequence_dataset.SequenceDataset",
"datetime.datetime.now",
"numpy.mean",
"torch.cuda.is_available",
"math.cos",
"torch.utils.data.dataloader.DataLoader",
"torch.cuda.current_device",
"torch.nn.DataParallel",
"os.path.join"
] |
[((992, 1033), 'os.path.join', 'os.path.join', (['self.out_dir', '"""detector.pt"""'], {}), "(self.out_dir, 'detector.pt')\n", (1004, 1033), False, 'import os\n'), ((1064, 1209), 'datasets.sequence_dataset.SequenceDataset', 'SequenceDataset', (['config.DATASET.TRAIN.IMAGE_ROOT', 'config.DATASET.TRAIN.ANNOTATION_ROOT', 'config.DATASET.TRAIN.MODE'], {'transform': 'train_transform'}), '(config.DATASET.TRAIN.IMAGE_ROOT, config.DATASET.TRAIN.\n ANNOTATION_ROOT, config.DATASET.TRAIN.MODE, transform=train_transform)\n', (1079, 1209), False, 'from datasets.sequence_dataset import SequenceDataset\n'), ((1273, 1409), 'datasets.sequence_dataset.SequenceDataset', 'SequenceDataset', (['config.DATASET.TEST.IMAGE_ROOT', 'config.DATASET.TEST.ANNOTATION_ROOT', 'config.DATASET.TEST.MODE'], {'transform': 'transform'}), '(config.DATASET.TEST.IMAGE_ROOT, config.DATASET.TEST.\n ANNOTATION_ROOT, config.DATASET.TEST.MODE, transform=transform)\n', (1288, 1409), False, 'from datasets.sequence_dataset import SequenceDataset\n'), ((1537, 1562), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1560, 1562), False, 'import torch\n'), ((882, 916), 'os.path.join', 'os.path.join', (['self.out_dir', '"""logs"""'], {}), "(self.out_dir, 'logs')\n", (894, 916), False, 'import os\n'), ((1590, 1617), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (1615, 1617), False, 'import torch\n'), ((2409, 2604), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['(self.train_dataset if is_train else self.test_dataset)'], {'shuffle': 'is_train', 'pin_memory': '(True)', 'batch_size': 'config.BATCH_SIZE', 'num_workers': 'config.NUM_WORKERS', 'collate_fn': 'collate_fn'}), '(self.train_dataset if is_train else self.test_dataset, shuffle=\n is_train, pin_memory=True, batch_size=config.BATCH_SIZE, num_workers=\n config.NUM_WORKERS, collate_fn=collate_fn)\n', (2419, 2604), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((754, 768), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (766, 768), False, 'from datetime import datetime\n'), ((1643, 1676), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.model'], {}), '(self.model)\n', (1664, 1676), False, 'import torch\n'), ((5384, 5402), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (5391, 5402), True, 'import numpy as np\n'), ((5261, 5279), 'numpy.mean', 'np.mean', (['loss_list'], {}), '(loss_list)\n', (5268, 5279), True, 'import numpy as np\n'), ((4688, 4716), 'math.cos', 'math.cos', (['(math.pi * progress)'], {}), '(math.pi * progress)\n', (4696, 4716), False, 'import math\n')]
|
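The learning-rate schedule inside run_epoch above is a linear warmup over the first WARMUP_TOKENS processed tokens followed by a cosine decay floored at 10% of the base rate. The same multiplier factored into a standalone function for illustration (the argument names are mine, not the trainer's config keys):

import math

def lr_multiplier(tokens, warmup_tokens, final_tokens):
    if tokens < warmup_tokens:
        # linear warmup
        return tokens / max(1, warmup_tokens)
    # cosine decay towards 10% of the base learning rate
    progress = (tokens - warmup_tokens) / max(1, final_tokens - warmup_tokens)
    return max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))

for t in (0, 500, 1000, 5000, 10000):
    print(t, round(lr_multiplier(t, warmup_tokens=1000, final_tokens=10000), 3))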
#!/usr/bin/env python3
from typing import List
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from info import EEG_SHAPE, participants
band_names = ['delta', 'theta', 'alpha', 'beta', 'gamma']
if __name__ == '__main__':
T, H, W, R = EEG_SHAPE
data = np.load('data/data-processed-bands.npz')
# plot configuration
step = 5
C = 6
for p in participants:
fig, axs = plt.subplots(R, C, sharex='all', sharey='all', figsize=(12, 4)) # type: Figure, List[List[Axes]]
fig.suptitle(f'Normalized Power Spectrum - Participant {p}')
# row
for r in range(R):
axs[r][0].set_ylabel(band_names[-r - 1])
# column
for c in range(C):
lo = c * step
hi = (c + 1) * step
axs[-1][c].set_xlabel(f'({lo}-{hi})')
# select data - shape: (N, C, H, W, R)
d = data[f'{p}_x']
# get exponent of differential entropy, for true power
d = np.exp(d)
# select the chunk with highest dispersion
n = (np.var(d, axis=(1, 2, 3, 4)) / np.mean(d, axis=(1, 2, 3, 4))).argmax()
d = d[n]
# normalize by mean of each frequency bin
d = d / np.mean(d, axis=(0, 1, 2), keepdims=True)
# rescale data
d_min = np.min(d)
d_max = np.max(d)
d = (d - d_min) / (d_max - d_min)
# obtain data within time range
d = np.amax(d[lo:hi], axis=0)
# power spectrum
ax = axs[r][c]
im = ax.imshow(d[:, :, -r - 1], cmap='inferno', vmin=0, vmax=1, interpolation='spline36')
ax.set_xticks([])
ax.set_yticks([])
fig.subplots_adjust(wspace=0.05, hspace=0.05)
fig.colorbar(im, ax=np.ravel(axs).tolist())
fig.savefig(f'figures/{p}.png', bbox_inches='tight')
print(f'{p} - OK')
print('DONE')
|
[
"numpy.load",
"numpy.ravel",
"numpy.var",
"numpy.amax",
"numpy.max",
"numpy.min",
"numpy.mean",
"numpy.exp",
"matplotlib.pyplot.subplots"
] |
[((342, 382), 'numpy.load', 'np.load', (['"""data/data-processed-bands.npz"""'], {}), "('data/data-processed-bands.npz')\n", (349, 382), True, 'import numpy as np\n'), ((478, 541), 'matplotlib.pyplot.subplots', 'plt.subplots', (['R', 'C'], {'sharex': '"""all"""', 'sharey': '"""all"""', 'figsize': '(12, 4)'}), "(R, C, sharex='all', sharey='all', figsize=(12, 4))\n", (490, 541), True, 'from matplotlib import pyplot as plt\n'), ((1092, 1101), 'numpy.exp', 'np.exp', (['d'], {}), '(d)\n', (1098, 1101), True, 'import numpy as np\n'), ((1457, 1466), 'numpy.min', 'np.min', (['d'], {}), '(d)\n', (1463, 1466), True, 'import numpy as np\n'), ((1491, 1500), 'numpy.max', 'np.max', (['d'], {}), '(d)\n', (1497, 1500), True, 'import numpy as np\n'), ((1619, 1644), 'numpy.amax', 'np.amax', (['d[lo:hi]'], {'axis': '(0)'}), '(d[lo:hi], axis=0)\n', (1626, 1644), True, 'import numpy as np\n'), ((1360, 1401), 'numpy.mean', 'np.mean', (['d'], {'axis': '(0, 1, 2)', 'keepdims': '(True)'}), '(d, axis=(0, 1, 2), keepdims=True)\n', (1367, 1401), True, 'import numpy as np\n'), ((1965, 1978), 'numpy.ravel', 'np.ravel', (['axs'], {}), '(axs)\n', (1973, 1978), True, 'import numpy as np\n'), ((1182, 1210), 'numpy.var', 'np.var', (['d'], {'axis': '(1, 2, 3, 4)'}), '(d, axis=(1, 2, 3, 4))\n', (1188, 1210), True, 'import numpy as np\n'), ((1213, 1242), 'numpy.mean', 'np.mean', (['d'], {'axis': '(1, 2, 3, 4)'}), '(d, axis=(1, 2, 3, 4))\n', (1220, 1242), True, 'import numpy as np\n')]
|
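The chunk selection above picks the time window with the largest index of dispersion (variance divided by mean across channels, pixels and bands) before plotting. Isolated as a sketch on random data with a made-up shape:

import numpy as np

rng = np.random.default_rng(0)
d = rng.exponential(scale=1.0, size=(8, 5, 9, 9, 5))  # (chunks, C, H, W, bands)

# variance-to-mean ratio per chunk; argmax picks the most dispersed chunk
dispersion = np.var(d, axis=(1, 2, 3, 4)) / np.mean(d, axis=(1, 2, 3, 4))
print(dispersion.argmax(), dispersion.round(3))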
import numpy as np
import scipy.linalg
def register_points(P, Q, allowReflection = False):
'''
Find the best-fit rigid transformation aligning points in Q to points in P:
min_(R, t) sum_i ||P_i - (R Q_i + t)||^2
Parameters
----------
P : (N, D) array_like
Collection of N points to align with (one D-dimensional point per row)
Q : (N, D) array_like
Collection of N points to align (one D-dimensional point per row)
Returns
-------
(R, t)
The rigid transformation best aligning Q to P
'''
Pcm = np.mean(P, axis=0)
Pcentered = P - Pcm
Qcm = np.mean(Q, axis=0)
Qcentered = Q - Qcm
A = Pcentered.T @ Qcentered
U, s, Vh = scipy.linalg.svd(A)
R = U @ Vh
if (not allowReflection and (np.linalg.det(R) < 0)):
U[:, -1] = -U[:, -1]
R = U @ Vh
return R, Pcm - R @ Qcm
def align_points_with_axes_xform(V, diagonal = False):
'''
Get the rigid transformation (R, t) that positions the point cloud `V` at
the origin and orients its longest axis along X, medium along y and
shortest along Z.
If `diagonal` is True, the longest axis is oriented along the diagonal of
the XY plane (2D)
Returns
-------
(R, t)
The rigid transformation V ==> R^T * (V + t) reorienting V
'''
c = np.mean(V, axis=0)
Vcentered = V - c
R = np.linalg.eig(Vcentered.T @ Vcentered)[1]
if (np.linalg.det(R) < 0): R[:, 2] *= -1
if diagonal:
if (V.shape[1] != 2): raise Exception('Only 2D is implemented')
        # 45-degree rotation; entries are cos = sin = sqrt(2)/2 so the matrix
        # stays orthonormal (sqrt(2) alone would also scale the cloud by 2)
        R = [[np.sqrt(2) / 2, np.sqrt(2) / 2], [-np.sqrt(2) / 2, np.sqrt(2) / 2]] @ R
return R, -c
def align_points_with_axes(V, alignmentSubset = None, diagonal = False):
'''
    Center the point cloud `V` at the origin and orient its longest axis along X, its medium axis along Y, and its shortest axis along Z.
Parameters
----------
V
Points to align
alignmentSubset
Subset of the points used to compute alignment transformation
diagonal
If true, the longest axis is instead oriented along the diagonal of the
XY plane (2D)
Returns
-------
The rigidly transformed point cloud.
'''
if (alignmentSubset is None):
R, t = align_points_with_axes_xform(V, diagonal)
else:
R, t = align_points_with_axes_xform(V[alignmentSubset], diagonal)
return (V + t) @ R
|
[
"numpy.linalg.det",
"numpy.mean",
"numpy.linalg.eig",
"numpy.sqrt"
] |
[((582, 600), 'numpy.mean', 'np.mean', (['P'], {'axis': '(0)'}), '(P, axis=0)\n', (589, 600), True, 'import numpy as np\n'), ((635, 653), 'numpy.mean', 'np.mean', (['Q'], {'axis': '(0)'}), '(Q, axis=0)\n', (642, 653), True, 'import numpy as np\n'), ((1350, 1368), 'numpy.mean', 'np.mean', (['V'], {'axis': '(0)'}), '(V, axis=0)\n', (1357, 1368), True, 'import numpy as np\n'), ((1399, 1437), 'numpy.linalg.eig', 'np.linalg.eig', (['(Vcentered.T @ Vcentered)'], {}), '(Vcentered.T @ Vcentered)\n', (1412, 1437), True, 'import numpy as np\n'), ((1449, 1465), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (1462, 1465), True, 'import numpy as np\n'), ((794, 810), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (807, 810), True, 'import numpy as np\n'), ((1589, 1599), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1596, 1599), True, 'import numpy as np\n'), ((1601, 1611), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1608, 1611), True, 'import numpy as np\n'), ((1628, 1638), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1635, 1638), True, 'import numpy as np\n'), ((1616, 1626), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1623, 1626), True, 'import numpy as np\n')]
|
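A quick sanity check for register_points above (assuming the function is importable from that module): build a known 2D rotation and translation, apply them to a random cloud, and confirm the recovered transform reproduces the targets.

import numpy as np

rng = np.random.default_rng(1)
Q = rng.normal(size=(50, 2))                      # points to align
theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]])
t_true = np.array([2.0, -1.0])
P = Q @ R_true.T + t_true                         # targets: P_i = R Q_i + t

R, t = register_points(P, Q)
print(np.allclose(R, R_true), np.allclose(t, t_true))
print(np.allclose(Q @ R.T + t, P))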
from anndata import read_h5ad
import sys
from time import time
from scipy import stats, sparse
import numpy as np
import collections
import pickle
from sklearn.preprocessing import normalize
import os
from collections import Counter
from scipy import spatial
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score,accuracy_score,precision_recall_fscore_support, cohen_kappa_score, auc, average_precision_score,f1_score,precision_recall_curve
import time
import copy
from sklearn import preprocessing
from sklearn.metrics.pairwise import cosine_similarity
#from libs import *
from sklearn.utils.graph_shortest_path import graph_shortest_path
from scipy.sparse.linalg import svds, eigs
def get_ontology_parents(GO_net, g, dfs_depth):
term_valid = set()
ngh_GO = set()
ngh_GO.add(g)
depth = {}
depth[g] = 0
while len(ngh_GO) > 0:
for GO in list(ngh_GO):
for GO1 in GO_net[GO]:
ngh_GO.add(GO1)
depth[GO1] = depth[GO] + 1
ngh_GO.remove(GO)
if depth[GO] < dfs_depth:
term_valid.add(GO)
return term_valid
def creat_cell_ontology_matrix(train_Y, co2co_graph, cell_ontology_ids, dfs_depth):
lset = set(cell_ontology_ids)
seen_l = sorted(np.unique(train_Y))
unseen_l = sorted(lset - set(train_Y))
ys = np.concatenate((seen_l, unseen_l))
i2l = {}
l2i = {}
for l in ys:
nl = len(i2l)
l2i[l] = nl
i2l[nl] = l
nco = len(i2l)
net_dict = collections.defaultdict(dict)
net_mat = np.zeros((nco,nco))
for co1 in co2co_graph:
l1 = l2i[co1]
for co2 in co2co_graph[co1]:
l2 = l2i[co2]
net_dict[l1][l2] = 1
net_mat[l1][l2] = 1
for n in range(nco):
ngh = get_ontology_parents(net_dict, n, dfs_depth)
net_dict[n][n] = 1
for n1 in ngh:
net_dict[n][n1] = 1
return unseen_l, l2i, i2l, net_dict, net_mat
def create_propagate_networks_using_nlp(l2i, onto_net, cls2cls, co2vec_nlp, rsts = [0.5,0.6,0.7,0.8], diss=[2,3], thress=[1,0.8]):
ncls = np.shape(cls2cls)[0]
#onto_net_nlp, onto_net_bin, stack_net_nlp, stack_net_bin, onto_net_nlp_all_pairs = create_nlp_networks(l2i, onto_net, cls2cls, ontology_nlp_file)
#ncls = np.shape(cls2cls)[0]
onto_net_nlp_all_pairs = (cosine_similarity(co2vec_nlp) + 1 ) /2#1 - spatial.distance.cosine(onto_nlp_emb, onto_nlp_emb)
onto_net_nlp = np.zeros((ncls, ncls))
stack_net_nlp = np.zeros((ncls, ncls))
for n1 in onto_net:
for n2 in onto_net[n1]:
if n1==n2:
continue
stack_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]
stack_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]
for n1 in range(ncls):
for n2 in range(ncls):
if cls2cls[n1,n2] == 1 or cls2cls[n2,n1] == 1:
onto_net_nlp[n1,n2] = onto_net_nlp_all_pairs[n1, n2]
onto_net_nlp[n2,n1] = onto_net_nlp_all_pairs[n2, n1]
#network = create_consensus_networks(rsts, stack_net_nlp, onto_net_nlp_all_pairs, cls2cls)
cls2cls_sp = graph_shortest_path(cls2cls,method='FW',directed =False)
networks = []
for rst in rsts:
for dis in diss:
for thres in thress:
#use_net = np.copy(stack_net_nlp)
use_net = np.copy(onto_net_nlp)
use_net[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] = onto_net_nlp_all_pairs[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)]
onto_net_rwr = RandomWalkRestart(use_net, rst)
networks.append(onto_net_rwr)
return networks
def map_genes(test_X, test_genes, train_genes):
ntest_cell = np.shape(test_X)[0]
ntrain_gene = len(train_genes)
new_test_x = np.zeros((ntest_cell, ntrain_gene))
genes = set(test_genes) & set(train_genes)
train_genes = list(train_genes)
test_genes = list(test_genes)
ind1 = []
ind2 = []
for i,g in enumerate(genes):
ind1.append(train_genes.index(g))
ind2.append(test_genes.index(g))
ind1 = np.array(ind1)
ind2 = np.array(ind2)
new_test_x[:,ind1] = test_X[:,ind2]
return new_test_x
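# extend_prediction_2unseen (below) propagates classifier scores from the seen
# classes to unseen classes: the row-normalised seen-class probabilities are
# multiplied by the random-walk matrix over the ontology, the seen and unseen
# blocks of the result are L1-normalised per cell, the unseen block is scaled
# by `ratio`, and the scores are summed over all supplied networks.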
def extend_prediction_2unseen(pred_Y_seen, networks, nseen, ratio=200, use_normalize=False):
if not isinstance(networks, list):
networks = [networks]
pred_Y_all_totoal = 0.
for onto_net_rwr in networks:
if use_normalize:
onto_net_rwr = onto_net_rwr - np.tile(np.mean(onto_net_rwr, axis = 1), (np.shape(onto_net_rwr)[0], 1))
pred_Y_seen_norm = pred_Y_seen / pred_Y_seen.sum(axis=1)[:, np.newaxis]
pred_Y_all = np.dot(pred_Y_seen_norm, onto_net_rwr[:nseen,:])
pred_Y_all[:,:nseen] = normalize(pred_Y_all[:,:nseen],norm='l1',axis=1)
pred_Y_all[:,nseen:] = normalize(pred_Y_all[:,nseen:],norm='l1',axis=1) * ratio
pred_Y_all_totoal += pred_Y_all
return pred_Y_all_totoal
def create_consensus_networks(rsts, onto_net_mat, onto_net_nlp_all_pairs, cls2cls, diss=[2,3], thress=[1,0.8]):
cls2cls_sp = graph_shortest_path(cls2cls,method='FW',directed =False)
ncls = np.shape(onto_net_mat)[0]
networks = []
for rst in rsts:
for dis in diss:
for thres in thress:
use_net = np.copy(onto_net_mat)
use_net[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)] = onto_net_nlp_all_pairs[(cls2cls_sp<=dis)&(onto_net_nlp_all_pairs > thres)]
onto_net_rwr = RandomWalkRestart(use_net, rst)
networks.append(onto_net_rwr)
return networks
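# fine_nearest_co_using_nlp (below) encodes free-text cell-type descriptions
# with a SentenceTransformer model and assigns each one to the ontology term
# whose embedding has the highest cosine similarity, keeping only matches whose
# similarity exceeds `cutoff`.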
def fine_nearest_co_using_nlp(sentences,co2emb,cutoff=0.8):
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('bert-base-nli-mean-tokens')
sentence_embeddings = model.encode(sentences)
co_embeddings = []
cos = []
for co in co2emb:
co_embeddings.append(co2emb[co])
cos.append(co)
co_embeddings = np.array(co_embeddings)
sent2co = {}
for sentence, embedding, ind in zip(sentences, sentence_embeddings, range(len(sentences))):
scs = cosine_similarity(co_embeddings, embedding.reshape(1,-1))
co_id = np.argmax(scs)
sc = scs[co_id]
if sc>cutoff:
sent2co[sentence] = cos[co_id]
return sent2co
def read_cell_type_nlp_network(nlp_emb_file, cell_type_network_file):
cell_ontology_ids = set()
fin = open(cell_type_network_file)
co2co_graph = {}
for line in fin:
w = line.strip().split('\t')
if w[0] not in co2co_graph:
co2co_graph[w[0]] = set()
co2co_graph[w[0]].add(w[1])
cell_ontology_ids.add(w[0])
cell_ontology_ids.add(w[1])
fin.close()
if nlp_emb_file is not None:
fin = open(nlp_emb_file)
co2vec_nlp = {}
for line in fin:
w = line.strip().split('\t')
vec = []
for i in range(1,len(w)):
vec.append(float(w[i]))
co2vec_nlp[w[0]] = np.array(vec)
fin.close()
co2co_nlp = {}
for id1 in co2co_graph:
co2co_nlp[id1] = {}
for id2 in co2co_graph[id1]:
sc = 1 - spatial.distance.cosine(co2vec_nlp[id1], co2vec_nlp[id2])
co2co_nlp[id1][id2] = sc
else:
co2co_nlp = {}
for id1 in co2co_graph:
co2co_nlp[id1] = {}
for id2 in co2co_graph[id1]:
co2co_nlp[id1][id2] = 1.
co2vec_nlp = {}
for c in cell_ontology_ids:
co2vec_nlp[c] = np.ones((10))
return co2co_graph, co2co_nlp, co2vec_nlp, cell_ontology_ids
def create_unseen_candidates(cell_type_network_file, co2i, i2co, nseen, use_unseen_distance, test_Y_pred_all):
nct = len(co2i)
A = np.zeros((nct, nct))
fin = open(cell_type_network_file)
for line in fin:
w = line.strip().split('\t')
A[co2i[w[0]], co2i[w[1]]] = 1.
A[co2i[w[1]], co2i[w[0]]] = 1.
fin.close()
A_dis = graph_shortest_path(A,method='FW',directed =False)
min_d = np.min(A_dis[:nseen, nseen:], axis = 0)
assert(len(min_d) == nct - nseen)
unseen_cand = np.where(min_d > use_unseen_distance)[0] + nseen
test_Y_pred_all[:, unseen_cand] = 0
assert(np.shape(test_Y_pred_all)[1] == nct)
return test_Y_pred_all
def graph_embedding_dca(A, i2l, mi=0, dim=20,unseen_l=None):
nl = np.shape(A)[0]
seen_ind = []
unseen_ind = []
for i in range(nl):
if i2l[i] in unseen_l:
unseen_ind.append(i)
else:
seen_ind.append(i)
seen_ind = np.array(seen_ind)
unseen_ind = np.array(unseen_ind)
#if len(seen_ind) * 0.8 < dim:
# dim = int(len(seen_ind) * 0.8)
if mi==0 or mi == 1:
sp = graph_shortest_path(A,method='FW',directed =False)
else:
sp = RandomWalkRestart(A, 0.8)
sp = sp[seen_ind, :]
sp = sp[:,seen_ind]
X = np.zeros((np.shape(sp)[0],dim))
svd_dim = min(dim, np.shape(sp)[0]-1)
if mi==0 or mi == 2:
X[:,:svd_dim] = svd_emb(sp, dim=svd_dim)
else:
X[:,:svd_dim] = DCA_vector(sp, dim=svd_dim)[0]
X_ret = np.zeros((nl, dim))
X_ret[seen_ind,:] = X
if mi==2 or mi == 3:
sp *= -1
return sp, X_ret
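# DCA_vector (below) embeds a propagation matrix Q by taking an element-wise
# log (with a small pseudo-count to avoid log(0)), computing a truncated SVD,
# and scaling the singular vectors by the square roots of the singular values.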
def DCA_vector(Q, dim):
nnode = Q.shape[0]
alpha = 1. / (nnode **2)
Q = np.log(Q + alpha) - np.log(alpha);
#Q = Q * Q';
[U, S, V] = svds(Q, dim);
S = np.diag(S)
X = np.dot(U, np.sqrt(S))
Y = np.dot(np.sqrt(S), V)
Y = np.transpose(Y)
return X,U,S,V,Y
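# RandomWalkRestart (below) iterates Q <- rst_prob * Q0 + (1 - rst_prob) * Q @ P,
# where P is the column-renormalised adjacency (transposed) and Q0 the restart
# distribution, until the change between iterations falls below 1e-4 or
# max_iter is reached; the torch branch runs the same update on the GPU.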
def RandomWalkRestart(A, rst_prob, delta = 1e-4, reset=None, max_iter=50,use_torch=False,return_torch=False):
if use_torch:
		import torch  # deferred import: torch is only needed when use_torch=True
		device = torch.device("cuda:0")
nnode = A.shape[0]
#print nnode
if reset is None:
reset = np.eye(nnode)
nsample,nnode = reset.shape
#print nsample,nnode
P = renorm(A)
P = P.T
norm_reset = renorm(reset.T)
norm_reset = norm_reset.T
if use_torch:
norm_reset = torch.from_numpy(norm_reset).float().to(device)
P = torch.from_numpy(P).float().to(device)
Q = norm_reset
for i in range(1,max_iter):
#Q = gnp.garray(Q)
#P = gnp.garray(P)
if use_torch:
Q_new = rst_prob*norm_reset + (1-rst_prob) * torch.mm(Q, P)#.as_numpy_array()
delta = torch.norm(Q-Q_new, 2)
else:
Q_new = rst_prob*norm_reset + (1-rst_prob) * np.dot(Q, P)#.as_numpy_array()
delta = np.linalg.norm(Q-Q_new, 'fro')
Q = Q_new
#print (i,Q)
sys.stdout.flush()
if delta < 1e-4:
break
if use_torch and not return_torch:
Q = Q.cpu().numpy()
return Q
def renorm(X):
Y = X.copy()
Y = Y.astype(float)
ngene,nsample = Y.shape
s = np.sum(Y, axis=0)
#print s.shape()
for i in range(nsample):
if s[i]==0:
s[i] = 1
if i < ngene:
Y[i,i] = 1
else:
for j in range(ngene):
Y[j,i] = 1. / ngene
Y[:,i] = Y[:,i]/s[i]
return Y
def mean_normalization(train_X_mean, test_X, train_X_std = 0):
test_X = np.log1p(test_X)
test_X_mean = np.mean(test_X, axis = 0)
ncell, ngene = np.shape(test_X)
for i in range(ncell):
for j in range(ngene):
test_X[i,j] = (test_X[i,j] - test_X_mean[j] + train_X_mean[j]) / train_X_std[j]
return test_X
def process_expression(train_X, test_X, train_genes, test_genes):
#this data process function is adapted from ACTINN, please check ACTINN for more information.
test_X = map_genes(test_X, test_genes, train_genes)
c2g = np.vstack([train_X, test_X])
c2g = np.array(c2g, dtype=np.float64)
c2g = c2g.T
index = np.sum(c2g, axis=1)>0
c2g = c2g[index, :]
train_genes = train_genes[index]
c2g = np.divide(c2g, np.sum(c2g, axis=0, keepdims=True)) * 10000
c2g = np.log2(c2g+1)
expr = np.sum(c2g, axis=1)
#total_set = total_set[np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)),]
index = np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99))
c2g = c2g[index,]
train_genes = train_genes[index]
#print (np.logical_and(expr >= np.percentile(expr, 1), expr <= np.percentile(expr, 99)))
cv = np.std(c2g, axis=1) / np.mean(c2g, axis=1)
index = np.logical_and(cv >= np.percentile(cv, 1), cv <= np.percentile(cv, 99))
c2g = c2g[index,]
train_genes = train_genes[index]
c2g = c2g.T
c2g_list_new = []
index = 0
for c in [train_X, test_X]:
ncell = np.shape(c)[0]
c2g_list_new.append(c2g[index:index+ncell,:])
index = ncell
assert (len(train_genes) == np.shape(c2g)[1])
return c2g_list_new[0], c2g_list_new[1], train_genes
def emb_ontology(i2l, ontology_mat, co2co_nlp, dim=5, mi=0, unseen_l = None):
nco = len(i2l)
network = np.zeros((nco, nco))
for i in range(nco):
c1 = i2l[i]
for j in range(nco):
if ontology_mat[i,j] == 1:
network[i,j] = co2co_nlp[c1][i2l[j]]
network[j,i] = co2co_nlp[c1][i2l[j]]
idd = 0
sp, i2emb = graph_embedding_dca(network, i2l, mi=mi, dim=dim, unseen_l=unseen_l)
return i2emb
|
[
"numpy.sum",
"numpy.argmax",
"numpy.ones",
"collections.defaultdict",
"numpy.shape",
"numpy.mean",
"sys.stdout.flush",
"numpy.linalg.norm",
"sklearn.utils.graph_shortest_path.graph_shortest_path",
"numpy.diag",
"numpy.unique",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.copy",
"scipy.sparse.linalg.svds",
"numpy.std",
"numpy.transpose",
"numpy.log1p",
"numpy.log2",
"numpy.percentile",
"numpy.min",
"sklearn.preprocessing.normalize",
"numpy.dot",
"sentence_transformers.SentenceTransformer",
"numpy.vstack",
"numpy.concatenate",
"scipy.spatial.distance.cosine",
"numpy.log",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.eye",
"numpy.sqrt"
] |
[((1270, 1304), 'numpy.concatenate', 'np.concatenate', (['(seen_l, unseen_l)'], {}), '((seen_l, unseen_l))\n', (1284, 1304), True, 'import numpy as np\n'), ((1412, 1441), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (1435, 1441), False, 'import collections\n'), ((1453, 1473), 'numpy.zeros', 'np.zeros', (['(nco, nco)'], {}), '((nco, nco))\n', (1461, 1473), True, 'import numpy as np\n'), ((2269, 2291), 'numpy.zeros', 'np.zeros', (['(ncls, ncls)'], {}), '((ncls, ncls))\n', (2277, 2291), True, 'import numpy as np\n'), ((2309, 2331), 'numpy.zeros', 'np.zeros', (['(ncls, ncls)'], {}), '((ncls, ncls))\n', (2317, 2331), True, 'import numpy as np\n'), ((2840, 2897), 'sklearn.utils.graph_shortest_path.graph_shortest_path', 'graph_shortest_path', (['cls2cls'], {'method': '"""FW"""', 'directed': '(False)'}), "(cls2cls, method='FW', directed=False)\n", (2859, 2897), False, 'from sklearn.utils.graph_shortest_path import graph_shortest_path\n'), ((3420, 3455), 'numpy.zeros', 'np.zeros', (['(ntest_cell, ntrain_gene)'], {}), '((ntest_cell, ntrain_gene))\n', (3428, 3455), True, 'import numpy as np\n'), ((3695, 3709), 'numpy.array', 'np.array', (['ind1'], {}), '(ind1)\n', (3703, 3709), True, 'import numpy as np\n'), ((3718, 3732), 'numpy.array', 'np.array', (['ind2'], {}), '(ind2)\n', (3726, 3732), True, 'import numpy as np\n'), ((4606, 4663), 'sklearn.utils.graph_shortest_path.graph_shortest_path', 'graph_shortest_path', (['cls2cls'], {'method': '"""FW"""', 'directed': '(False)'}), "(cls2cls, method='FW', directed=False)\n", (4625, 4663), False, 'from sklearn.utils.graph_shortest_path import graph_shortest_path\n'), ((5178, 5226), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""bert-base-nli-mean-tokens"""'], {}), "('bert-base-nli-mean-tokens')\n", (5197, 5226), False, 'from sentence_transformers import SentenceTransformer\n'), ((5392, 5415), 'numpy.array', 'np.array', (['co_embeddings'], {}), '(co_embeddings)\n', (5400, 5415), True, 'import numpy as np\n'), ((6917, 6937), 'numpy.zeros', 'np.zeros', (['(nct, nct)'], {}), '((nct, nct))\n', (6925, 6937), True, 'import numpy as np\n'), ((7111, 7162), 'sklearn.utils.graph_shortest_path.graph_shortest_path', 'graph_shortest_path', (['A'], {'method': '"""FW"""', 'directed': '(False)'}), "(A, method='FW', directed=False)\n", (7130, 7162), False, 'from sklearn.utils.graph_shortest_path import graph_shortest_path\n'), ((7171, 7208), 'numpy.min', 'np.min', (['A_dis[:nseen, nseen:]'], {'axis': '(0)'}), '(A_dis[:nseen, nseen:], axis=0)\n', (7177, 7208), True, 'import numpy as np\n'), ((7645, 7663), 'numpy.array', 'np.array', (['seen_ind'], {}), '(seen_ind)\n', (7653, 7663), True, 'import numpy as np\n'), ((7678, 7698), 'numpy.array', 'np.array', (['unseen_ind'], {}), '(unseen_ind)\n', (7686, 7698), True, 'import numpy as np\n'), ((8136, 8155), 'numpy.zeros', 'np.zeros', (['(nl, dim)'], {}), '((nl, dim))\n', (8144, 8155), True, 'import numpy as np\n'), ((8369, 8381), 'scipy.sparse.linalg.svds', 'svds', (['Q', 'dim'], {}), '(Q, dim)\n', (8373, 8381), False, 'from scipy.sparse.linalg import svds, eigs\n'), ((8388, 8398), 'numpy.diag', 'np.diag', (['S'], {}), '(S)\n', (8395, 8398), True, 'import numpy as np\n'), ((8458, 8473), 'numpy.transpose', 'np.transpose', (['Y'], {}), '(Y)\n', (8470, 8473), True, 'import numpy as np\n'), ((9558, 9575), 'numpy.sum', 'np.sum', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (9564, 9575), True, 'import numpy as np\n'), ((9846, 9862), 'numpy.log1p', 'np.log1p', (['test_X'], {}), 
'(test_X)\n', (9854, 9862), True, 'import numpy as np\n'), ((9878, 9901), 'numpy.mean', 'np.mean', (['test_X'], {'axis': '(0)'}), '(test_X, axis=0)\n', (9885, 9901), True, 'import numpy as np\n'), ((9920, 9936), 'numpy.shape', 'np.shape', (['test_X'], {}), '(test_X)\n', (9928, 9936), True, 'import numpy as np\n'), ((10306, 10334), 'numpy.vstack', 'np.vstack', (['[train_X, test_X]'], {}), '([train_X, test_X])\n', (10315, 10334), True, 'import numpy as np\n'), ((10342, 10373), 'numpy.array', 'np.array', (['c2g'], {'dtype': 'np.float64'}), '(c2g, dtype=np.float64)\n', (10350, 10373), True, 'import numpy as np\n'), ((10547, 10563), 'numpy.log2', 'np.log2', (['(c2g + 1)'], {}), '(c2g + 1)\n', (10554, 10563), True, 'import numpy as np\n'), ((10570, 10589), 'numpy.sum', 'np.sum', (['c2g'], {'axis': '(1)'}), '(c2g, axis=1)\n', (10576, 10589), True, 'import numpy as np\n'), ((11481, 11501), 'numpy.zeros', 'np.zeros', (['(nco, nco)'], {}), '((nco, nco))\n', (11489, 11501), True, 'import numpy as np\n'), ((1203, 1221), 'numpy.unique', 'np.unique', (['train_Y'], {}), '(train_Y)\n', (1212, 1221), True, 'import numpy as np\n'), ((1932, 1949), 'numpy.shape', 'np.shape', (['cls2cls'], {}), '(cls2cls)\n', (1940, 1949), True, 'import numpy as np\n'), ((3354, 3370), 'numpy.shape', 'np.shape', (['test_X'], {}), '(test_X)\n', (3362, 3370), True, 'import numpy as np\n'), ((4214, 4263), 'numpy.dot', 'np.dot', (['pred_Y_seen_norm', 'onto_net_rwr[:nseen, :]'], {}), '(pred_Y_seen_norm, onto_net_rwr[:nseen, :])\n', (4220, 4263), True, 'import numpy as np\n'), ((4288, 4339), 'sklearn.preprocessing.normalize', 'normalize', (['pred_Y_all[:, :nseen]'], {'norm': '"""l1"""', 'axis': '(1)'}), "(pred_Y_all[:, :nseen], norm='l1', axis=1)\n", (4297, 4339), False, 'from sklearn.preprocessing import normalize\n'), ((4671, 4693), 'numpy.shape', 'np.shape', (['onto_net_mat'], {}), '(onto_net_mat)\n', (4679, 4693), True, 'import numpy as np\n'), ((5600, 5614), 'numpy.argmax', 'np.argmax', (['scs'], {}), '(scs)\n', (5609, 5614), True, 'import numpy as np\n'), ((7486, 7497), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (7494, 7497), True, 'import numpy as np\n'), ((7795, 7846), 'sklearn.utils.graph_shortest_path.graph_shortest_path', 'graph_shortest_path', (['A'], {'method': '"""FW"""', 'directed': '(False)'}), "(A, method='FW', directed=False)\n", (7814, 7846), False, 'from sklearn.utils.graph_shortest_path import graph_shortest_path\n'), ((8306, 8323), 'numpy.log', 'np.log', (['(Q + alpha)'], {}), '(Q + alpha)\n', (8312, 8323), True, 'import numpy as np\n'), ((8326, 8339), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (8332, 8339), True, 'import numpy as np\n'), ((8414, 8424), 'numpy.sqrt', 'np.sqrt', (['S'], {}), '(S)\n', (8421, 8424), True, 'import numpy as np\n'), ((8438, 8448), 'numpy.sqrt', 'np.sqrt', (['S'], {}), '(S)\n', (8445, 8448), True, 'import numpy as np\n'), ((8715, 8728), 'numpy.eye', 'np.eye', (['nnode'], {}), '(nnode)\n', (8721, 8728), True, 'import numpy as np\n'), ((9361, 9379), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9377, 9379), False, 'import sys\n'), ((10397, 10416), 'numpy.sum', 'np.sum', (['c2g'], {'axis': '(1)'}), '(c2g, axis=1)\n', (10403, 10416), True, 'import numpy as np\n'), ((10935, 10954), 'numpy.std', 'np.std', (['c2g'], {'axis': '(1)'}), '(c2g, axis=1)\n', (10941, 10954), True, 'import numpy as np\n'), ((10957, 10977), 'numpy.mean', 'np.mean', (['c2g'], {'axis': '(1)'}), '(c2g, axis=1)\n', (10964, 10977), True, 'import numpy as np\n'), ((2158, 2187), 
'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['co2vec_nlp'], {}), '(co2vec_nlp)\n', (2175, 2187), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((4362, 4413), 'sklearn.preprocessing.normalize', 'normalize', (['pred_Y_all[:, nseen:]'], {'norm': '"""l1"""', 'axis': '(1)'}), "(pred_Y_all[:, nseen:], norm='l1', axis=1)\n", (4371, 4413), False, 'from sklearn.preprocessing import normalize\n'), ((6280, 6293), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (6288, 6293), True, 'import numpy as np\n'), ((6707, 6718), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (6714, 6718), True, 'import numpy as np\n'), ((7261, 7298), 'numpy.where', 'np.where', (['(min_d > use_unseen_distance)'], {}), '(min_d > use_unseen_distance)\n', (7269, 7298), True, 'import numpy as np\n'), ((7355, 7380), 'numpy.shape', 'np.shape', (['test_Y_pred_all'], {}), '(test_Y_pred_all)\n', (7363, 7380), True, 'import numpy as np\n'), ((9301, 9333), 'numpy.linalg.norm', 'np.linalg.norm', (['(Q - Q_new)', '"""fro"""'], {}), "(Q - Q_new, 'fro')\n", (9315, 9333), True, 'import numpy as np\n'), ((10496, 10530), 'numpy.sum', 'np.sum', (['c2g'], {'axis': '(0)', 'keepdims': '(True)'}), '(c2g, axis=0, keepdims=True)\n', (10502, 10530), True, 'import numpy as np\n'), ((10728, 10750), 'numpy.percentile', 'np.percentile', (['expr', '(1)'], {}), '(expr, 1)\n', (10741, 10750), True, 'import numpy as np\n'), ((10760, 10783), 'numpy.percentile', 'np.percentile', (['expr', '(99)'], {}), '(expr, 99)\n', (10773, 10783), True, 'import numpy as np\n'), ((11008, 11028), 'numpy.percentile', 'np.percentile', (['cv', '(1)'], {}), '(cv, 1)\n', (11021, 11028), True, 'import numpy as np\n'), ((11036, 11057), 'numpy.percentile', 'np.percentile', (['cv', '(99)'], {}), '(cv, 99)\n', (11049, 11057), True, 'import numpy as np\n'), ((11194, 11205), 'numpy.shape', 'np.shape', (['c'], {}), '(c)\n', (11202, 11205), True, 'import numpy as np\n'), ((3025, 3046), 'numpy.copy', 'np.copy', (['onto_net_nlp'], {}), '(onto_net_nlp)\n', (3032, 3046), True, 'import numpy as np\n'), ((4787, 4808), 'numpy.copy', 'np.copy', (['onto_net_mat'], {}), '(onto_net_mat)\n', (4794, 4808), True, 'import numpy as np\n'), ((7945, 7957), 'numpy.shape', 'np.shape', (['sp'], {}), '(sp)\n', (7953, 7957), True, 'import numpy as np\n'), ((7987, 7999), 'numpy.shape', 'np.shape', (['sp'], {}), '(sp)\n', (7995, 7999), True, 'import numpy as np\n'), ((11303, 11316), 'numpy.shape', 'np.shape', (['c2g'], {}), '(c2g)\n', (11311, 11316), True, 'import numpy as np\n'), ((4060, 4089), 'numpy.mean', 'np.mean', (['onto_net_rwr'], {'axis': '(1)'}), '(onto_net_rwr, axis=1)\n', (4067, 4089), True, 'import numpy as np\n'), ((6419, 6476), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['co2vec_nlp[id1]', 'co2vec_nlp[id2]'], {}), '(co2vec_nlp[id1], co2vec_nlp[id2])\n', (6442, 6476), False, 'from scipy import spatial\n'), ((9259, 9271), 'numpy.dot', 'np.dot', (['Q', 'P'], {}), '(Q, P)\n', (9265, 9271), True, 'import numpy as np\n'), ((4094, 4116), 'numpy.shape', 'np.shape', (['onto_net_rwr'], {}), '(onto_net_rwr)\n', (4102, 4116), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import tqdm
def fitness(length):
return 1 / length
def route_length(route, distance_matrix):
n = route.size
idx = np.concatenate((route, [route[0]]))
length = np.sum(distance_matrix[idx[:n], idx[1:n+1]])
# length = 0
# for i in range(n - 1):
# length += distance_matrix[route[i], route[i+1]]
# length += distance_matrix[route[n-1], route[0]]
return length
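# pmx_cross implements partially mapped crossover (PMX): a random slice is
# swapped between the two parent routes, and cities outside the slice that were
# duplicated by the swap are repaired through the mapping induced by the
# exchanged segment, so both children remain valid permutations.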
def pmx_cross(routes):
n = routes[0].size
i, j = np.random.choice(n, 2)
if i > j:
i, j = j, i
j += 1
new_routes = routes.copy()
new_routes[0, i:j] = routes[1, i:j]
new_routes[1, i:j] = routes[0, i:j]
mapping = np.array(range(n), dtype=int)
mapping[new_routes[0, i:j]] = routes[0, i:j]
for k in range(i):
while new_routes[0, k] != mapping[new_routes[0, k]]:
new_routes[0, k] = mapping[new_routes[0, k]]
for k in range(j, n):
while new_routes[0, k] != mapping[new_routes[0, k]]:
new_routes[0, k] = mapping[new_routes[0, k]]
mapping = np.array(range(n), dtype=int)
mapping[new_routes[1, i:j]] = routes[1, i:j]
for k in range(i):
while new_routes[1, k] != mapping[new_routes[1, k]]:
new_routes[1, k] = mapping[new_routes[1, k]]
for k in range(j, n):
while new_routes[1, k] != mapping[new_routes[1, k]]:
new_routes[1, k] = mapping[new_routes[1, k]]
return new_routes
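# cx_cross implements a cycle-crossover (CX) style operator: starting from a
# random position, cities are copied from the first parent by following the
# cycle defined through the second parent, and the remaining slots are filled
# with the second parent's unused cities in order.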
def cx_cross(routes):
def _cx_impl(routes):
n = routes[0].size
new_routes = np.zeros(n, dtype=int) - 1
mapped = np.zeros(n, dtype=bool)
i = np.random.randint(n)
while not mapped[routes[0][i]]:
new_routes[i] = routes[0][i]
mapped[routes[0][i]] = True
i = routes[1][i]
j = 0
for i in range(n):
if new_routes[i] == -1:
while mapped[routes[1][j]]:
j += 1
new_routes[i] = routes[1][j]
mapped[routes[1][j]] = True
return new_routes
return [_cx_impl(routes), _cx_impl(np.flip(routes))]
def cross(routes):
return pmx_cross(routes)
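# mutate applies inversion mutation: two distinct positions are drawn and the
# segment between them is reversed (the commented-out line is the simpler
# two-city swap alternative).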
def mutate(route):
n = route.size
i, j = np.random.choice(n, 2, replace=False)
if i > j:
i, j = j, i
# route[i], route[j] = route[j], route[i]
route[i:j+1] = np.flip(route[i:j+1])
def ga(distance_matrix, population_size, cross_rate, mutation_rate, max_iters, thres=-1):
n = distance_matrix.shape[0]
ncross = np.ceil(population_size * cross_rate * 0.5).astype(int) * 2
nreserve = population_size - ncross
nmutation = np.ceil(population_size * mutation_rate).astype(int)
best_iter = 0
best_route = []
best_fit = 0
iterator = tqdm.trange(max_iters)
for it in iterator:
if it == 0:
population = np.array([np.random.permutation(n) for i in range(population_size)], dtype=int)
else:
idx = np.argsort(fits)
new_population = np.zeros(population.shape, dtype=int)
new_population[:nreserve, :] = population[idx[-nreserve:], :]
prob = fits / np.linalg.norm(fits, ord=1)
for i in range(ncross // 2):
parents_idx = np.random.choice(population_size, 2, replace=False, p=prob)
new_population[nreserve+i+i:nreserve+i+i+2, :] = cross(population[parents_idx])
mutation_idx = np.random.choice(population_size, nmutation, replace=False)
for i in mutation_idx:
mutate(new_population[i])
population = new_population
fits = np.array([fitness(route_length(route, distance_matrix)) for route in population])
for route, fit in zip(population, fits):
if fit > best_fit:
best_iter = it
best_route = route
best_fit = fit
if thres > 0 and it - best_iter > thres:
iterator.close()
break
return route_length(best_route, distance_matrix), best_route, it + 1
def load_tsplib(filename):
coords = []
with open(filename, 'r') as f:
node_coord_section = False
for line in f.readlines():
if line.strip() == 'EOF':
break
if node_coord_section:
x, y = line.strip().split()[1:3]
coords.append(np.array([float(x), float(y)]))
if line.strip() == 'NODE_COORD_SECTION':
node_coord_section = True
n = len(coords)
distance_matrix = np.zeros([n, n])
for i in range(n):
for j in range(n):
distance_matrix[i, j] = np.linalg.norm(coords[i] - coords[j])
return np.array(coords), distance_matrix
if __name__ == '__main__':
coords, distance_matrix = load_tsplib('./xqf131.tsp')
length, route, it = ga(distance_matrix, population_size=64, cross_rate=0.9, mutation_rate = 0.05, max_iters=20000, thres=1000)
print("iteration:", it)
print("length:", length)
print("route:", list(route))
pts = coords[np.concatenate((route, [route[0]])), :]
plt.plot(pts[:, 0], pts[:, 1], c='k', lw=1, marker='o', mec='r', mfc='r', ms=3)
plt.title('iter: %d, length: %f' % (it, length))
plt.savefig('route.pdf')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"numpy.flip",
"numpy.sum",
"matplotlib.pyplot.plot",
"tqdm.trange",
"matplotlib.pyplot.show",
"numpy.ceil",
"numpy.zeros",
"numpy.argsort",
"numpy.random.randint",
"numpy.array",
"numpy.linalg.norm",
"numpy.random.choice",
"numpy.random.permutation",
"numpy.concatenate"
] |
[((229, 264), 'numpy.concatenate', 'np.concatenate', (['(route, [route[0]])'], {}), '((route, [route[0]]))\n', (243, 264), True, 'import numpy as np\n'), ((278, 324), 'numpy.sum', 'np.sum', (['distance_matrix[idx[:n], idx[1:n + 1]]'], {}), '(distance_matrix[idx[:n], idx[1:n + 1]])\n', (284, 324), True, 'import numpy as np\n'), ((558, 580), 'numpy.random.choice', 'np.random.choice', (['n', '(2)'], {}), '(n, 2)\n', (574, 580), True, 'import numpy as np\n'), ((2289, 2326), 'numpy.random.choice', 'np.random.choice', (['n', '(2)'], {'replace': '(False)'}), '(n, 2, replace=False)\n', (2305, 2326), True, 'import numpy as np\n'), ((2426, 2449), 'numpy.flip', 'np.flip', (['route[i:j + 1]'], {}), '(route[i:j + 1])\n', (2433, 2449), True, 'import numpy as np\n'), ((2825, 2847), 'tqdm.trange', 'tqdm.trange', (['max_iters'], {}), '(max_iters)\n', (2836, 2847), False, 'import tqdm\n'), ((4608, 4624), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (4616, 4624), True, 'import numpy as np\n'), ((5163, 5242), 'matplotlib.pyplot.plot', 'plt.plot', (['pts[:, 0]', 'pts[:, 1]'], {'c': '"""k"""', 'lw': '(1)', 'marker': '"""o"""', 'mec': '"""r"""', 'mfc': '"""r"""', 'ms': '(3)'}), "(pts[:, 0], pts[:, 1], c='k', lw=1, marker='o', mec='r', mfc='r', ms=3)\n", (5171, 5242), True, 'import matplotlib.pyplot as plt\n'), ((5247, 5295), 'matplotlib.pyplot.title', 'plt.title', (["('iter: %d, length: %f' % (it, length))"], {}), "('iter: %d, length: %f' % (it, length))\n", (5256, 5295), True, 'import matplotlib.pyplot as plt\n'), ((5300, 5324), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""route.pdf"""'], {}), "('route.pdf')\n", (5311, 5324), True, 'import matplotlib.pyplot as plt\n'), ((5329, 5339), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5337, 5339), True, 'import matplotlib.pyplot as plt\n'), ((1661, 1684), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'bool'}), '(n, dtype=bool)\n', (1669, 1684), True, 'import numpy as np\n'), ((1697, 1717), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (1714, 1717), True, 'import numpy as np\n'), ((4760, 4776), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (4768, 4776), True, 'import numpy as np\n'), ((1617, 1639), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'int'}), '(n, dtype=int)\n', (1625, 1639), True, 'import numpy as np\n'), ((2170, 2185), 'numpy.flip', 'np.flip', (['routes'], {}), '(routes)\n', (2177, 2185), True, 'import numpy as np\n'), ((2702, 2742), 'numpy.ceil', 'np.ceil', (['(population_size * mutation_rate)'], {}), '(population_size * mutation_rate)\n', (2709, 2742), True, 'import numpy as np\n'), ((3029, 3045), 'numpy.argsort', 'np.argsort', (['fits'], {}), '(fits)\n', (3039, 3045), True, 'import numpy as np\n'), ((3075, 3112), 'numpy.zeros', 'np.zeros', (['population.shape'], {'dtype': 'int'}), '(population.shape, dtype=int)\n', (3083, 3112), True, 'import numpy as np\n'), ((3495, 3554), 'numpy.random.choice', 'np.random.choice', (['population_size', 'nmutation'], {'replace': '(False)'}), '(population_size, nmutation, replace=False)\n', (3511, 3554), True, 'import numpy as np\n'), ((4711, 4748), 'numpy.linalg.norm', 'np.linalg.norm', (['(coords[i] - coords[j])'], {}), '(coords[i] - coords[j])\n', (4725, 4748), True, 'import numpy as np\n'), ((5119, 5154), 'numpy.concatenate', 'np.concatenate', (['(route, [route[0]])'], {}), '((route, [route[0]]))\n', (5133, 5154), True, 'import numpy as np\n'), ((2586, 2629), 'numpy.ceil', 'np.ceil', (['(population_size * cross_rate * 0.5)'], {}), '(population_size * 
cross_rate * 0.5)\n', (2593, 2629), True, 'import numpy as np\n'), ((3213, 3240), 'numpy.linalg.norm', 'np.linalg.norm', (['fits'], {'ord': '(1)'}), '(fits, ord=1)\n', (3227, 3240), True, 'import numpy as np\n'), ((3312, 3371), 'numpy.random.choice', 'np.random.choice', (['population_size', '(2)'], {'replace': '(False)', 'p': 'prob'}), '(population_size, 2, replace=False, p=prob)\n', (3328, 3371), True, 'import numpy as np\n'), ((2927, 2951), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2948, 2951), True, 'import numpy as np\n')]
|
#!/usr/bin/env pypy3 python3
import os
import time
import glob
import pandas as pd
import sys
import matplotlib.pyplot as plt
import seaborn as sns
from collections import OrderedDict
from decimal import Decimal
from scipy.stats import hypergeom
import math
import mechanize
from urllib.error import HTTPError
import numpy as np
import shutil
start_time = time.time()
wd = os.getcwd()
def gene_file(genefile):
cons = glob.glob(wd+'/'+"Functional-datafiles"+"/"+"conversation/*")
global p2
func = []
funcC = []
with open(genefile, 'r') as ge:
for g in ge:
g = g.rstrip().split("\t")
p = ge.name
p2 = p.split('.')
with open(os.path.join(wd+'/'+"Functional-datafiles"+"/"+"Functional-data.txt")) as rr:
for r in rr:
r = r.rstrip().split("\t")
if g[0] == r[-1] and int(g[1]) >= int(r[2]) and int(r[3]) >= int(g[1]):
func.append('\t'.join((g[0], r[1], g[1], r[4])))
for i in func:
i = i.rstrip().split("\t")
for c in cons:
c1 = c.split('/')
c2 = c1[-1].split('.')
if i[0] == c2[0]:
with open(c) as cc:
for ci in cc:
con = ci.rstrip().split('\t')
if int(i[2]) == int(con[0]):
funcC.append('\t'.join((i[0], i[1], i[2], i[-1], con[-1])))
    # PFF = Protein Functional Region, MGL = Mutated Genomic Region, MAA = Mutated Amino Acid, D-PFF = Description of PFF, Cons = Conservation
header = ['Gene', 'PFF', 'MAA', 'D-PFF', "Cons"]
with open(p2[0]+"_functional-accessment.txt", 'a') as ii:
ii.write('\t'.join(header) + '\n')
for fc in OrderedDict.fromkeys(funcC):
ii.write(fc+'\n')
if p2[0]+"_functional-accessment.txt":
rat1 = pd.read_csv(p2[0]+"_functional-accessment.txt", sep='\t')
rat = rat1.sort_values(by=['Cons'])
e_count = []
for index, gene in enumerate(rat['Gene']):
subset = rat.iloc[:index + 1]
count = len(subset[subset['Gene'] == gene])
e_count.append(count)
x = rat['Gene']
y = e_count
hue = rat['Cons']
plt.figure(figsize=(6.5, 4.5))
ax = sns.scatterplot(x, y, hue=hue, s=15, legend="full", palette="RdYlGn")
ax.grid(False) # Remove grid
ax.get_legend().remove() # Delete default legend
scale_legend = plt.Normalize(hue.min() - 1, hue.max()) # Create a scale for the colormap.
color_map = plt.cm.ScalarMappable(cmap="RdYlGn", norm=scale_legend) # Colormap used in legend.
color_map.set_array([]) # Dummy variable needed to create a colormap.
cb = ax.figure.colorbar(color_map, shrink=0.40) # Add colormap as a legend.
cb.ax.tick_params(labelsize=3)
plt.xticks(rotation='vertical', fontsize=3)
plt.ylabel(" SNP count", size=7)
plt.xlabel('Protein', fontsize=7)
plt.yticks(fontsize=5)
plt.gcf().text(0.85, 0.45, "Conservation*", fontsize=4, rotation=90) # Label used for colormap.
plt.savefig(p2[0]+'_Mutated-Proteins-at-Conserved-Regions.png', format='png', dpi=900, bbox_inches='tight')
print(p2[0]+" conservation graph is done")
sc = glob.glob(wd+'/'+"Functional-datafiles"+"/"+"numbers/*")
df = rat1.drop_duplicates(subset = ["Gene"], keep='last')
for c in sc:
f1 = c.split('/')
f2 = f1[-1].split('.')
for d in df['Gene']:
if d == f2[0]:
with open(c) as ij:
for i in ij:
if not 'protein-coding gene' in i and not 'Unknown' in i:
with open(p2[0]+"_scExpression-data.txt", 'a') as k:
k.write(d +'\t'+ str(i))
print(p2[0]+" sc-Expression analysis is done")
if p2[0]+"_scExpression-data.txt":
plt.figure(figsize=(6.5, 4.5))
palette = np.repeat(np.array(sns.color_palette("deep")),1, axis=0)
plt.yticks(fontsize=3)
plt.xticks(fontsize=4)
scx = pd.read_csv(p2[0]+"_scExpression-data.txt", sep='\t', index_col=None)
scx.columns = ["Genes", "Cell", "Number"]
sns.barplot(x='Number', y='Cell', data=scx, hue='Genes', orient='h',palette=palette)
plt.ylabel(" cell-type", size=7)
plt.xlabel("number of clusters", fontsize=7)
sns.set_color_codes("pastel")
plt.legend(loc='upper left', fontsize="x-small", title='Genes', title_fontsize=3, bbox_to_anchor=(1.05, 1))
plt.savefig(p2[0]+'_scExp.png', format='png', dpi=900, bbox_inches='tight')
print(p2[0]+" sc-Expression analysis visualisation is done")
def downstream(func_file):
global p3
phen = []
with open(func_file, 'r') as gg:
for g in gg:
g = g.rstrip().split()
p = gg.name
p1 = p.split('.')
p3 = p1[0].split("_")
with open(os.path.join(wd+'/'+"Functional-datafiles"+"/"+"Phenotype.txt")) as rr:
for r in rr:
d = r.rstrip().split("\t")
if d[0] == g[0]:
phen.append('\t'.join(d))
with open(p3[0]+"_phenotype_accessment.txt", 'a') as ii:
for i in phen:
ii.write(i+'\n')
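# testScore (below) counts how many input SNPs fall in each protein functional
# region and scores the enrichment with scipy.stats.hypergeom(M, n, N).pmf(k):
# M is the total number of annotated functional plus regulatory SNPs, n the
# number of sites of that region type, N the number of input SNPs and k the
# observed overlap. For example (N and k here are hypothetical),
# hypergeom(340091, 9114, 50).pmf(12) is the probability of exactly 12 'Domain'
# hits among 50 SNPs.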
def testScore(f):
global p4
l = []
with open(f, 'r') as ij:
p = ij.name
p1 = p.split('.')
p4 = p1[0].split("_")
for i in ij.readlines()[1:]:
i = i.rstrip().split('\t')
l.append('\t'.join((i[1], i[2])))
de = {}
lines = (line.rstrip('\t') for line in l)
unique = OrderedDict.fromkeys( (line for line in lines if line) )
k = [(line.split('\t'))[0] for line in unique]
for word in k:
de[word] = de.get(word, 0) + 1
word_freq = []
for key, value in de.items():
word_freq.append(list((value, key)))
pathway = []
word_freq.sort(reverse=True)
for i in word_freq:
pv = [( 'Binding site' , '14' ),
( 'Coiled coil' , '1337' ),
( 'Cross-link' , '12' ),
( 'DNA binding' , '52' ),
( 'Disulfide bond' , '4117' ),
( 'Domain' , '9114' ),
( 'Glycosylation' , '84' ),
( 'Helix' , '592' ),
( 'Initiator methionine' , '2' ),
( 'Metal binding' , '11' ),
( 'Modified residue' , '164' ),
( 'Motif' , '74' ),
( 'Nucleotide binding' , '68' ),
( 'Region' , '5472' ),
( 'Repeat' , '2049' ),
( 'Signal peptide' , '752' ),
( 'Avtive Site' , '6' ),
( 'Transit peptide ' , '208' ),
( 'Transmembrane' , '1596' ),
( 'Turn' , '82' ),
( 'Zinc finger' , '273' )]
for v in pv:
if v[0] == i[1]:
# Number of SNPs
N = len(k)
# functional plus regulatory SNPs
M = 340091
p = f"{Decimal(hypergeom(M, int(v[1]), N).pmf(i[0])):.2E}"
pathway.append((v[0], p))
pathway.sort(key = lambda x: float(x[1]), reverse = False)
for pa in pathway:
with open(p4[0]+"_Enriched-functional-regions.txt",'a') as eg:
eg.write('\t'.join(pa)+'\n')
if p4[0]+'_Enriched-functional-regions.txt':
fig, ax = plt.subplots(figsize=(7, 5.5))
x = []
y = []
with open(p4[0]+'_Enriched-functional-regions.txt','r') as ij:
for i in ij.readlines():
i = i.rstrip().split('\t')
x.append(i[0])
try:
y.append(abs(math.log10(float(i[1]))))
except ValueError:
pass
xt = tuple(x)
ys, xs = zip(*sorted(zip(y, xt)))
my_range=list(range(1,len(y)+1))
plt.hlines(y=my_range, xmin=ys, xmax=ys[0], color='indigo', alpha=0.2, linewidth=3)
plt.plot(ys, my_range, "o", markersize=3, color='indigo', alpha=0.6)
plt.yticks(my_range, xs)
ax.tick_params(axis='both', which='major', labelsize=5)
plt.xlabel ('-log10(p-value)', size=7)
plt.ylabel(" regions", size=7)
fig.savefig(p4[0]+'_Enriched-functional-regions.png', format='png', dpi=900, bbox_inches='tight')
def network(functional):
global p5
import json
import urllib.request
from urllib.request import urlopen
with open(functional, 'r') as func:
p = func.name
p1 = p.split('.')
p5 = p1[0].split("_")
enrich = []
k = [(line.split())[0] for line in func]
for i in k:
enrich.append(i)
string_api_url = "https://string-db.org/api"
output_format = "json"
method_enrich = "enrichment"
species = "10090"
my_app = "www.aweseome_app.org"
caller_identity = 'aarslan'
request_url = string_api_url + "/" + output_format + "/" + method_enrich + "?"
request_url += "identifiers=" + "%0d".join(enrich[1:200])
request_url += "&" + "species=" + species
request_url += "&" + "caller_identity=" + my_app
response = urllib.request.urlopen(request_url)
result = response.read()
if result:
data = json.loads(result.decode('utf-8'))
for row in data:
term = row["term"]
preferred_names = ",".join(row["preferredNames"])
fdr = row["fdr"]
category = row["category"]
description = row["description"]
if fdr <= 0.01 and category == 'Process':
with open(p5[0]+'_BiologicalProcess.txt', 'a') as biop, open(p5[0]+'_BiologicalProcess.txt', 'a') as biolp:
biop.write("\t".join([term, str(fdr), description, preferred_names])+'\n')
biolp.write("\t".join([term, str(fdr)])+'\n')
if fdr <= 0.01 and category == 'RCTM' or category == 'KEGG':
with open(p5[0]+'_Pathways.txt', 'a') as pathw:
pathw.write("\t".join([term, str(fdr), description, preferred_names])+'\n')
else:
with open(p5[0]+'_enrichment.txt', 'a') as en:
en.write("\t".join([term, str(fdr), description, preferred_names])+'\n')
else:
with open(p5[0]+'_enrichment.txt', 'a+') as en:
en.write("No significant enrichment detected")
output_format_figure = "image"
method = "network"
request_urlf = string_api_url + "/" + output_format_figure + "/" + method + "?"
request_urlf += "identifiers=%s"
request_urlf += "&" + "species=" + species
request_urlf += "&" + "add_white_nodes=0"
request_urlf += "&" + "network_flavor=actions"
request_urlf += "&" + "caller_identity=aarslan" + my_app
c = "%0d".join(enrich[1:200])
urllib.request.urlretrieve(request_urlf % c, "%s.png" % "".join(p2[0]+"_Protein_Network"))
url = "http://revigo.irb.hr/"
mec = mechanize.Browser()
mec.set_handle_robots(False)
mec.open(url)
mec.select_form(name="submitToRevigo")
try:
x = []
y = []
with open(p5[0]+'_BiologicalProcess.txt', 'r') as ij:
data = ij.read().replace('\t', ' ')
mec["goList"] = data
res = mec.submit()
content = res.read()
f = mec.retrieve('http://revigo.irb.hr/export.jsp?table=1','REVIGO.csv')[0]
g = open(f, 'r')
g1 = g.readlines()[1:]
for g2 in g1:
g2 = g2.rstrip().split(',')
x.append(g2[1])
y.append(abs(float(g2[6])))
with open(p5[0]+'_BiologicalProcess_Revigo.txt', 'a') as revi:
revi.write('\t'.join(g2)+'\n')
except HTTPError as q:
print(q)
if not y:
with open(p5[0]+"_BiologicalProcess.txt", 'r') as ij:
for i in ij.readlines()[:70]:
ii = i.rstrip().split('\t')
if float(ii[1]) < 0.005 and len(ii) >= 3:
y.append(abs(math.log10(float(ii[1]))))
x.append(ii[2])
os.remove('REVIGO.csv')
fig, ax = plt.subplots(figsize=(7, 5.5)) #very good size
xt = tuple(x)
ys, xs = zip(*sorted(zip(y, xt)))
my_range=list(range(1,len(y)+1))
plt.hlines(y=my_range, xmin=0, xmax=ys, color='#007acc', alpha=0.2, linewidth=3)
plt.plot(ys, my_range, "o", markersize=3, color='#007acc', alpha=0.6)
plt.yticks(my_range, xs)
plt.yticks(fontsize=5, rotation=0)
plt.xlabel ('-log10(p-value)', fontsize=6)
fig.savefig(p5[0]+'_Significantly-Altered-Biological-Process.png', format='png', dpi=900, bbox_inches='tight')
if p5[0]+'_Pathways.txt':
fig, ax = plt.subplots(figsize=(7, 5.5))
x = []
y = []
with open(p5[0]+'_Pathways.txt','r') as ij:
for i in ij.readlines()[:30]:
i = i.rstrip().split('\t')
x.append(i[2])
y.append(abs(math.log10(float(i[1]))))
xt = tuple(x)
ys, xs = zip(*sorted(zip(y, xt)))
my_range=list(range(1,len(y)+1))
plt.hlines(y=my_range, xmin=ys, xmax=ys[0], color='#197213', alpha=0.2, linewidth=3)
plt.plot(ys, my_range, "o", markersize=3, color='#197213', alpha=0.6)
plt.yticks(my_range, xs)
ax.tick_params(axis='both', which='major', labelsize=6)
plt.xlabel ('-log10(p-value)', fontsize=7)
fig.savefig(p5[0]+'_Significantly-Altered-Biological-Pathways.png', format='png', dpi=900, bbox_inches='tight')
def go():
try:
if p2[0]+'_enrichment.txt':
fig, ax = plt.subplots(figsize=(9, 5.5))
plt.tick_params(labelsize=6)
ij = pd.read_csv(p2[0]+'_enrichment.txt', sep='\t')
ij.columns = ['GOt', 'FDR', 'Desc', "Genes"]
ij = ij[ij.iloc[:,0].str.contains("GO.")]
ij = ij[(ij.iloc[:,1] <= 0.01 )]
ij = ij.sort_values(by='FDR', ascending=True)
ij = ij.head(50)
df1 = ij.iloc[:,-1].str.split(',')
fdr = round(abs(np.log10(ij['FDR'])),1)
scatter = ax.scatter(fdr, ij['Desc'], c=ij['FDR'], s=[i for i in range(len(ij['FDR']))])
handles, labels = scatter.legend_elements(prop="sizes", alpha=0.3)
legend2 = ax.legend(handles, labels, title='number of genes', fontsize=4, bbox_to_anchor=(1.15, 1))
legend2.get_title().set_fontsize('5')
plt.xlabel(" -log10 FDR", fontsize=7)
fig.savefig(p2[0]+'_GO-term-Enrichment.png', format='png', dpi=900, bbox_inches='tight')
except ValueError as q:
print(q)
def noncoding(genefile):
global p1
c = []
with open(genefile, 'r') as mut:
for a in mut:
a = a.rstrip().split('\t')
p = mut.name
p1 = p.split('.')
c.append('%s %s' % (a[0], a[1]))
for m in c:
m = m.rstrip().split()
with open(os.path.join(wd+'/'+'Regulatory datafiles'+'/'+'Vista_enhancers_flankinGenes.txt')) as vi:
for v in vi:
v = v.rstrip().split('\t')
if m[0] == v[0] and int(m[1]) >= int(v[1]) and int(v[2]) >= int(m[1]):
with open(p1[0]+"_regulatory-analysis.txt", 'a') as k:
k.write('\t'.join((v[0], m[1], v[-2], v[-1], 'Enhancer'))+'\n')
with open(os.path.join(wd+'/'+'Regulatory datafiles'+'/'+'EPDnew_promoter.txt')) as vi:
for v in vi:
v = v.rstrip().split('\t')
if m[0] == v[0] and int(m[1]) >= int(v[1]) and int(v[2]) >= int(m[1]):
with open(p1[0]+"_regulatory-analysis.txt", 'a+') as k:
k.write('\t'.join((v[0], m[1], v[3], 'Promoter'))+'\n')
with open(os.path.join(wd+'/'+'Regulatory datafiles'+'/'+'TSS_promoter.txt')) as vi:
for v in vi:
v = v.rstrip().split('\t')
if m[0] == v[0] and int(m[1]) == int(v[2]):
# tss.append('%s %s %s %s %s' % (v[0], m[1], v[1], v[-1], 'TSS'))
with open(p1[0]+"_regulatory-analysis.txt", 'a+') as k:
k.write('\t'.join((v[0], m[1], v[1], v[-1], 'TSS'))+'\n')
with open(os.path.join(wd+'/'+'Regulatory datafiles'+'/'+'cpgIsland.txt')) as vi:
for v in vi:
v = v.rstrip().split('\t')
if m[0] == v[0] and int(m[1]) >= int(v[1]) and int(v[2]) >= int(m[1]):
with open(p1[0]+"_regulatory-analysis.txt", 'a+') as k:
k.write('\t'.join((v[0], m[1], v[-1], 'CpG'))+'\n')
with open(os.path.join(wd+'/'+'Regulatory datafiles'+'/'+'insulator-CTCF-binding-sites.txt')) as vi:
for v in vi:
v = v.rstrip().split('\t')
if m[0] == v[0] and int(m[1]) >= int(v[1]) and int(v[2]) >= int(m[1]):
with open(p1[0]+"_regulatory-analysis.txt", 'a+') as k:
k.write('\t'.join((v[0], m[1], v[-2], v[-1], 'Insulator'))+'\n')
with open(os.path.join(wd+'/'+'Regulatory datafiles'+'/'+'miRNA.txt')) as vi:
for v in vi:
v = v.rstrip().split('\t')
if m[0] == v[0] and int(m[1]) == int(v[1]):
with open(p1[0]+"_regulatory-analysis.txt", 'a+') as k:
k.write('\t'.join((v))+'\n')
with open(os.path.join(wd+'/'+'Regulatory datafiles'+'/'+'AltCodon.txt')) as vi:
for v in vi:
v = v.rstrip().split('\t')
if m[0] == v[0] and int(m[1]) == int(v[2]):
with open(p1[0]+"_regulatory-analysis.txt", 'a+') as k:
k.write('\t'.join((v[0], v[2], v[1], 'AltCodon'))+'\n')
with open(os.path.join(wd+'/'+'Regulatory datafiles'+'/'+'SPLICE_sires-snps.txt')) as vi:
for v in vi:
v = v.rstrip().split('\t')
if m[0] == v[0] and int(m[1]) == int(v[1]):
with open(p1[0]+"_regulatory-analysis.txt", 'a+') as k:
k.write('\t'.join((v[0], m[1], v[-1], 'Splice-site'))+'\n')
def ncnetwork(functional):
global p1
import json
import urllib.request
from urllib.request import urlopen
with open(functional, 'r') as nc:
p = nc.name
p1 = p.split('.')
enrich = []
for n in nc:
n = n.rstrip().split('\t')
if n[-1] == 'Enhancer':
enrich.append(n[-3] +'\t'+n[-2])
if n[-1] == 'Promoter' or n[-1] == 'TSS':
take1 = n[-2].split('_')
enrich.append(take1[0])
if n[-1] == 'miRNA':
enrich.append(n[-3] +'\t'+n[-2])
if n[-1] == 'Alternative-Translation-site':
enrich.append(n[-3] +'\t'+n[-2])
if n[-1] == 'Splice-site':
enrich.append(n[-3] +'\t'+n[-2])
de = {}
lines = (line.rstrip('\t') for line in enrich)
unique = OrderedDict.fromkeys( (line for line in lines if line) )
k = [(line.split())[0] for line in unique]
for word in k:
de[word] = de.get(word, 0) + 1
word_freq = []
for key, value in de.items():
word_freq.append(list((value, key)))
word_freq.sort(reverse=True)
for i in word_freq:
with open(p1[0]+"_Most-Altered-Mouse-Genes.txt", 'a') as fil:
fil.write(i[1] +'\t'+ str(i[0]) +'\t'+ str(round(i[0] / len(k) * 100, 2))+'%'+'\n')
y = [j[0] for j in word_freq]
x = [z[1] for z in word_freq]
fig, ax = plt.subplots(figsize=(6, 3.5))
xt = tuple(x)
my_range=list(range(1,len(y)+1))
plt.plot(my_range, y, "o", markersize=2, color='#55342d', alpha=0.6)
plt.xticks(my_range, xt , rotation=90)
ax.tick_params(axis='both', which='major', labelsize=2)
plt.ylabel ('Number of SNPs', size=7)
plt.xlabel ('Protein', size=7)
fig.savefig(p1[0]+'_Most-Altered-Mouse-Genes.png', format='png', dpi=900, bbox_inches='tight')
sc = glob.glob(wd+'/'+"Functional-datafiles"+"/"+"numbers/*")
for c in sc:
f1 = c.split('/')
f2 = f1[-1].split('.')
k = [(line.split())[0] for line in unique]
for di in k:
if di == f2[0]:
with open(c) as ij:
for i in ij:
if not 'protein-coding gene' in i and not 'Unknown' in i:
with open(p1[0]+'_scExpression-data.txt', 'a') as k:
k.write(di +'\t'+ str(i))
print(p1[0]+" sc-Expression analysis is done")
if p1[0]+'_scExpression-data.txt':
plt.figure(figsize=(6, 4))
#plt.yscale('symlog')
palette = np.repeat(np.array(sns.color_palette("deep")),1, axis=0)
plt.yticks(fontsize=3)
plt.xticks(fontsize=4)
scx = pd.read_csv(p1[0]+"_scExpression-data.txt", sep='\t', index_col=None)
scx.columns = ["Genes", "Cell", "Number"]
sns.barplot(x='Number', y='Cell', data=scx, hue='Genes', orient='h',palette=palette)
plt.ylabel(" cell-type", size=7)
plt.xlabel("number of clusters", fontsize=7)
sns.set_color_codes("pastel")
plt.legend(loc='upper left', fontsize=4, title='Genes', title_fontsize=4, bbox_to_anchor=(1.05, 1))
plt.savefig(p1[0]+'_scExp.png', format='png', dpi=350, bbox_inches='tight')
print(p1[0]+" sc-Expression analysis visualisation is done")
phen = []
with open(os.path.join(wd+'/'+"Functional-datafiles"+"/"+"Phenotype.txt")) as rr:
for r in rr:
d = r.rstrip().split("\t")
for g in enrich:
g = g.rstrip().split('\t')
if d[0] == g[0]:
phen.append('\t'.join(d))
with open(p1[0]+"_phenotype_accessment.txt", 'a') as pi:
for pe in OrderedDict.fromkeys(phen):
pi.write(pe+'\n')
ei = [(line.split())[0] for line in enrich]
string_api_url = "https://string-db.org/api"
output_format = "json"
method_enrich = "enrichment"
species = "10090"
my_app = "www.aweseome_app.org"
caller_identity = 'aarslan'
request_url = string_api_url + "/" + output_format + "/" + method_enrich + "?"
request_url += "identifiers=" + "%0d".join(ei[:200])
request_url += "&" + "species=" + species
request_url += "&" + "caller_identity=" + my_app
response = urllib.request.urlopen(request_url)
result = response.read()
if result:
data = json.loads(result.decode('utf-8'))
for row in data:
term = row["term"]
preferred_names = ",".join(row["preferredNames"])
fdr = row["fdr"]
description = row["description"]
category = row["category"]
fig, ax = plt.subplots(figsize=(9, 5.5))
if fdr <= 0.05 and category == 'Process':
with open(p1[0]+'_BiologicalProcess.txt', 'a') as biop:
biop.write("\t".join([term, str(fdr), description, preferred_names])+'\n')
else:
with open(p1[0]+'_enrichment.txt', 'a+') as en:
en.write("No significant enrichment detected")
if fdr <= 0.05 and category == 'RCTM' or category == 'KEGG':
with open(p1[0]+'_Pathways.txt', 'a') as pathw:
pathw.write("\t".join([term, str(fdr), description, preferred_names])+'\n')
else:
with open(p1[0]+'_enrichment.txt', 'a+') as en:
en.write("No significant enrichment detected")
else:
with open(p1[0]+'_enrichment.txt', 'a+') as en:
en.write("No significant enrichment detected")
output_format_figure = "image"
method = "network"
request_urlf = string_api_url + "/" + output_format_figure + "/" + method + "?"
request_urlf += "identifiers=%s"
request_urlf += "&" + "species=" + species
request_urlf += "&" + "add_white_nodes=0"
request_urlf += "&" + "network_flavor=actions"
request_urlf += "&" + "caller_identity=aarslan" + my_app
c = "%0d".join(ei[:200])
urllib.request.urlretrieve(request_urlf % c, "%s.png" % "".join(p1[0]+"_Protein_Network"))
try:
if p1[0]+'_Pathways.txt':
fig, ax = plt.subplots(figsize=(7.5, 5.5))
x = []
y = []
with open(p1[0]+'_Pathways.txt') as ij:
for i in ij.readlines()[:30]:
i = i.rstrip().split('\t')
x.append(i[2])
y.append(abs(math.log10(float(i[1]))))
xt = tuple(x)
ys, xs = zip(*sorted(zip(y, xt)))
my_range=list(range(1,len(y)+1))
plt.hlines(y=my_range, xmin=ys, xmax=ys[0], color='#197213', alpha=0.2, linewidth=3)
plt.plot(ys, my_range, "o", markersize=3, color='#197213', alpha=0.6)
plt.yticks(my_range, xs)
ax.set_xticks(ax.get_xticks()[::2])
ax.tick_params(axis='both', which='major', labelsize=6)
plt.xlabel ('-log10(p-value)', size=7)
fig.savefig(p1[0]+'_Significantly-Altered-Biological-Pathways.png', format='png', dpi=900, bbox_inches='tight')
except (StopIteration, FileNotFoundError) as q:
print(q)
try:
if p1[0]+'_BiologicalProcess.txt':
fig, ax = plt.subplots(figsize=(7.5, 5.5))
x = []
y = []
with open(p1[0]+'_BiologicalProcess.txt','r') as ij:
for i in ij.readlines()[:30]:
i = i.rstrip().split('\t')
x.append(i[2])
y.append(abs(math.log10(float(i[1]))))
xt = tuple(x)
ys, xs = zip(*sorted(zip(y, xt)))
my_range=list(range(1,len(y)+1))
plt.hlines(y=my_range, xmin=ys, xmax=ys[0], color='#007acc', alpha=0.2, linewidth=3)
plt.plot(ys, my_range, "o", markersize=3, color='#007acc', alpha=0.6)
plt.yticks(my_range, xs)
ax.set_xticks(ax.get_xticks()[::2])
ax.tick_params(axis='both', which='major', labelsize=6)
plt.xlabel ('-log10(p-value)', size=7)
fig.savefig(p1[0]+'_Significantly-Altered_BiologicalProcess.png', format='png', dpi=900, bbox_inches='tight')
except (StopIteration,FileNotFoundError) as q:
print(q)
def testScorer(functional):
global p1
l = []
with open(functional) as ij:
p = ij.name
p1 = p.split('.')
for i in ij:
i = i.rstrip().split('\t')
l.append('\t'.join((i[-1], i[1])))
de = {}
lines = (line.rstrip('\t') for line in l)
unique = OrderedDict.fromkeys( (line for line in lines if line) )
k = [(line.split('\t'))[0] for line in unique]
for word in k:
de[word] = de.get(word, 0) + 1
word_freq = []
for key, value in de.items():
word_freq.append(list((value, key)))
pathw = []
word_freq.sort(reverse=True)
for i in word_freq:
pv = [( 'Enhancer' , '28947' ),
( 'Splice-site' , '486' ),
( 'CpG' , '8938' ),
( 'Insulator' , '12640' ),
( 'Promoter' , '60136' ),
( 'TSS' , '22' ),
( 'AltCodon' , '15' ),
( 'miRNA' , '425' )]
for v in pv:
if v[0] == i[1]:
# Number of SNPs
N = len(k)
# functional plus regulatory SNPs
M = 340091
p = f"{Decimal(hypergeom(M, int(v[1]), N).pmf(i[0])):.2E}"
pathw.append((v[0], p))
pathw.sort(key = lambda x: float(x[1]), reverse = False)
for pa in pathw:
with open(p1[0]+"_Enriched.txt",'a') as eg:
eg.write('\t'.join(pa)+'\n')
if p1[0]+'_Enriched.txt':
fig, ax = plt.subplots(figsize=(7.5, 5.5))
x = []
y = []
with open(p1[0]+'_Enriched.txt','r') as ij:
for i in ij.readlines():
i = i.rstrip().split('\t')
x.append(i[0])
try:
y.append(abs(math.log10(float(i[1]))))
except ValueError:
pass
xt = tuple(x)
ys, xs = zip(*sorted(zip(y, xt)))
my_range=list(range(1,len(y)+1))
plt.hlines(y=my_range, xmin=ys, xmax=ys[0], color='indigo', alpha=0.2, linewidth=3)
plt.plot(ys, my_range, "o", markersize=3, color='indigo', alpha=0.6)
plt.yticks(my_range, xs)
ax.tick_params(axis='both', which='major', labelsize=5)
plt.xlabel ('-log10(p-value)', size=7)
fig.savefig(p1[0]+'_Enriched-regulatory-regions.png', format='png', dpi=900, bbox_inches='tight')
print(p1[0]+'_Enriched-regulatory-regions is ready')
def mainResultsR():
try:
pathway = p1[0]+"_Significantly-Altered-Biological-Pathways.png"
except FileNotFoundError as e:
print(e)
try:
process = p1[0]+"_Significantly-Altered_BiologicalProcess.png"
except FileNotFoundError as e:
print(e)
try:
genes = p1[0]+'_Most-Altered-Mouse-Genes.png'
except FileNotFoundError as e:
print(e)
try:
domain = p1[0]+'_Enriched-regulatory-regions.png'
except FileNotFoundError as e:
print(e)
try:
network = p1[0]+'_Protein_Network.png'
except FileNotFoundError as e:
print(e)
try:
singlecell = p1[0]+'_scExp.png'
except FileNotFoundError as e:
print(e)
try:
GO = p1[0]+"_GO-term-Enrichment.png"
except FileNotFoundError as e:
print(e)
with open(p1[0]+'.html', 'a') as ht:
html0 = f"""<html>
<br> <body> <link rel="stylesheet" href="style.css">
<body><img src="mmap.png" width="20%" alt="logo" />
<br>
</br>
<div class="wrapper"> <center>
<h1> mMap results for {p1[0]} </div>
<br>
</br>
<br><center>
<div class="container">
<input type="checkbox" id="zoomCheck1">
<label for="zoomCheck1">
<h2> Proteins with Alternative Alleles and Allele Conversation</h2>
<img src={genes} onerror="WARNGING: no mutated genes found!" alt="WARNING: no mutated genes found!">
</label>
<br> *conservation scale = more nagative means more conserved site and vice-versa </br>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck2">
<label for="zoomCheck2">
<h2> Altered Protein Functional Regions Enrichment</h2>
<img src={domain} onerror="WARNGING: no mutated domain found!" alt="WARNING: no mutated genes found!">
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck3">
<label for="zoomCheck3">
<h2> Altered GO terms Enrichment</h2>
<img src={GO} onerror="WARNGING: no mutated GO term found!" alt="WARNING: no mutated genes found!" >
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck4">
<label for="zoomCheck4">
<h2> Altered Biological Pathways Enrichment</h2>
<br>
<img src={pathway} onerror="WARNGING: no enrichment of pathways found!" alt="WARNING: no enrichment of pathways found!" >
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck5">
<label for="zoomCheck5">
<h2> Altered Biological processes Enrichment</h2>
<img src={process} onerror="WARNGING: no enrichment of pathways found!" alt="WARNING: no enrichment of pathways found!" >
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck6">
<label for="zoomCheck6">
<h2> Alternative Allele containing Protein(s) Interaction Network</h2>
<img src={network} onerror="WARNGING: no protein network available for in the input data!" alt="WARNING: no mutated genes found!" >
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck7">
<label for="zoomCheck7">
<h2> Alternative Allele containing Protein(s) Single Cell RNA Expression</h2>
<img src={singlecell} onerror="WARNGING: no sc data found!" alt="WARNING: no mutated genes found!" >
</label>
<br>
</br>
<br>
</br>
<div id="footer_container">
<font color="white" text-align= "left">
<div id="footer"> © 2020 Developed by <NAME> <<EMAIL>> @ Peltz Lab Stanford University School of Medicine
</div>
</div>
</body> </html>"""
ht.write(html0)
print(p1[0]+" mMap results are ready")
def nocode_mutation(file1):
try:
noncoding(os.path.join(file1))
print('Regulatory analysis is done...')
except IOError:
pass
try:
ncnetwork(os.path.join(p1[0]+"_regulatory-analysis.txt"))
print('Network analysis is done...')
except IOError:
pass
try:
testScorer(os.path.join(p1[0]+".txt"))
print('Enriched Regions analysis is done...')
except IOError:
pass
try:
mainResultsR()
  print('Final HTML page of mMap results is ready ...')
except IOError:
pass
try:
mg = (time.time() - start_time)
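  # The elapsed run time (in seconds) doubles as a unique suffix for this run's output directory.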
os.system("mkdir "+wd+"/"+p1[0]+"_mutations_"+ str(mg))
try:
shutil.move(wd+"/"+p1[0]+".txt", wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+"_Enriched.txt", wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+'_Enriched-regulatory-regions.png', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+'_Most-Altered-Mouse-Genes.txt', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+'_Most-Altered-Mouse-Genes.png', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+"_phenotype_accessment.txt", wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+'_scExp.png', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+'_scExpression-data.txt', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+'_Significantly-Altered-Biological-Pathways.png', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+'_Significantly-Altered_BiologicalProcess.png', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+'_BiologicalProcess.txt', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+'_enrichment.txt', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+"_Protein_Network.png", wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+"_Pathways.txt", wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.move(wd+"/"+p1[0]+".html", wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
try:
shutil.copy(wd+"/"+'style.css', wd+"/"+p1[0]+"_mutations_"+ str(mg))
shutil.copy(wd+"/"+'mmap.png', wd+"/"+p1[0]+"_mutations_"+ str(mg))
except FileNotFoundError as e:
print(e)
except IOError as e:
print(e)
def mainResults():
try:
GO = p2[0]+"_GO-term-Enrichment.png"
except FileNotFoundError as e:
print(e)
try:
pathway = p2[0]+"_Significantly-Altered-Biological-Pathways.png"
except FileNotFoundError as e:
print(e)
try:
process = p2[0]+"_Significantly-Altered-Biological-Process.png"
except FileNotFoundError as e:
print(e)
try:
genes = p2[0]+'_Mutated-Proteins-at-Conserved-Regions.png'
except FileNotFoundError as e:
print(e)
try:
domain = p2[0]+"_Enriched-functional-regions.png"
except FileNotFoundError as e:
print(e)
try:
network = p2[0]+'_Protein_Network.png'
except FileNotFoundError as e:
print(e)
try:
singlecell = p2[0]+'_scExp.png'
except FileNotFoundError as e:
print(e)
with open(p2[0]+'.html', 'a') as ht:
html0 = f"""<html>
<br> <body> <link rel="stylesheet" href="style.css">
<body><img src="mmap.png" width="20%" alt="logo" />
<br>
</br>
<div class="wrapper"> <center>
<h1> mMap results for {p2[0]} </h1> </div>
<br>
</br>
<br><center>
<div class="container">
<input type="checkbox" id="zoomCheck1">
<label for="zoomCheck1">
<h2> Proteins with Alternative Alleles and Allele Conservation</h2>
<img src={genes} onerror="WARNING: no mutated genes found!" alt="WARNING: no mutated genes found!">
</label>
<br> *conservation scale = more negative means a more conserved site, and vice versa </br>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck2">
<label for="zoomCheck2">
<h2> Altered Protein Functional Regions Enrichment</h2>
<img src={domain} onerror="WARNING: no mutated domain found!" alt="WARNING: no mutated domain found!">
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck3">
<label for="zoomCheck3">
<h2> Altered GO terms Enrichment</h2>
<img src={GO} onerror="WARNING: no mutated GO term found!" alt="WARNING: no mutated GO term found!" >
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck4">
<label for="zoomCheck4">
<h2> Altered Biological Pathways Enrichment</h2>
<br>
<img src={pathway} onerror="WARNING: no enrichment of pathways found!" alt="WARNING: no enrichment of pathways found!" >
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck5">
<label for="zoomCheck5">
<h2> Altered Biological processes Enrichment</h2>
<img src={process} onerror="WARNING: no enrichment of biological processes found!" alt="WARNING: no enrichment of biological processes found!" >
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck6">
<label for="zoomCheck6">
<h2> Alternative Allele containing Protein(s) Interaction Network</h2>
<img src={network} onerror="WARNING: no protein network available for the input data!" alt="WARNING: no protein network available for the input data!" >
</label>
</div>
<br>
</br>
<br>
</br>
<div><center>
<div class="container">
<input type="checkbox" id="zoomCheck7">
<label for="zoomCheck7">
<h2> Alternative Allele containing Protein(s) Single Cell RNA Expression</h2>
<img src={singlecell} onerror="WARNING: no sc data found!" alt="WARNING: no sc data found!" >
</label>
<br>
</br>
<br>
</br>
<div id="footer_container">
<font color="white" text-align= "left">
<div id="footer"> © 2020 Developed by <NAME> <<EMAIL>> @ Peltz Lab Stanford University School of Medicine
</div>
</div>
</body> </html>"""
ht.write(html0)
print(p2[0]+" mMap results are ready")
def gene_mutation(genefile):
try:
gene_file(os.path.join(genefile))
print(p2[0]+' conservation analysis of functional SNPs is finished')
except Exception as e:
print(e)
try:
testScore(os.path.join(p2[0]+"_functional-accessment.txt"))
print('testScore analysis is finished')
except Exception as e:
print(e)
try:
network(os.path.join(p2[0]+"_functional-accessment.txt"))
print('Network is generated')
except Exception as e:
print(e)
try:
downstream(os.path.join(p2[0]+"_functional-accessment.txt"))
print('Disease-Gene NLP analysis is done...')
except Exception as e:
print(e)
try:
go()
print('GO plot is generated')
except Exception as e:
print(e)
try:
mainResults()
  print('mMap analysis is done and Final HTML page of mMap results is ready ...')
except Exception as e:
print(e)
try:
mg = (time.time() - start_time)
os.system("mkdir "+wd+"/"+p2[0]+"_mutations_"+ str(mg))
try:
shutil.move(wd+"/"+p2[0]+"_functional-accessment.txt", wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+"_Protein_Network.png", wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+"_enrichment.txt", wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+'_Mutated-Proteins-at-Conserved-Regions.png', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+'_Significantly-Altered-Biological-Pathways.png', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+'_Pathways.txt', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+'_BiologicalProcess.txt', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+'_Significantly-Altered-Biological-Process.png', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+ p2[0]+'_Enriched-functional-regions.txt', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+'_BiologicalProcess_Revigo.txt', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+ p2[0]+'_Enriched-functional-regions.png', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+ p2[0]+'_GO-term-Enrichment.png', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+"_phenotype_accessment.txt", wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+"_scExpression-data.txt", wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+'_scExp.png', wd+"/"+p2[0]+"_mutations_" + str(mg))
except IOError:
pass
try:
shutil.move(wd+"/"+p2[0]+'.html', wd+"/"+p2[0]+"_mutations_" + str(mg))
shutil.copy(wd+"/"+'style.css', wd+"/"+p2[0]+"_mutations_"+ str(mg))
shutil.copy(wd+"/"+'mmap.png', wd+"/"+p2[0]+"_mutations_"+ str(mg))
except IOError:
pass
print('All analyses for ' +p2[0]+' are completed!' +'\n'+ '<<< mMap curated and maintained by <NAME> <<EMAIL>> >>>')
except IOError:
pass
return "Done"
if __name__=='__main__':
import argparse
from multiprocessing import Pool
p = Pool()
sys.setrecursionlimit(2000)
parser = argparse.ArgumentParser()
parser.add_argument('-g','--genes', action='append', help='performs the msMap on genes level mutation positions')
parser.add_argument('-nc', '--noncoding', action='append', help='performs the mMap on proteins level mutation positions')
args = parser.parse_args()
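 # Each matching *.txt input file is dispatched to a worker process via Pool.map
 # (gene_mutation for -g/--genes, nocode_mutation for -nc/--noncoding).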
if args.genes:
try:
genefile = [f for f in glob.glob(os.path.join("*.txt")) if f.endswith(sys.argv[-1])]
p.map(gene_mutation, genefile)
p.close()
p.join()
except (IOError, IndexError):
pass
elif args.noncoding:
try:
gene1 = [f for f in glob.glob(os.path.join("*.txt")) if f.endswith(sys.argv[-1])]
p.map(nocode_mutation, gene1)
p.close()
p.join()
except (IOError, IndexError):
pass
else:
print ("to run a mMap function seek help")
|
[
"os.remove",
"argparse.ArgumentParser",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"glob.glob",
"matplotlib.pyplot.tick_params",
"sys.setrecursionlimit",
"matplotlib.pyplot.hlines",
"os.path.join",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.cm.ScalarMappable",
"collections.OrderedDict.fromkeys",
"mechanize.Browser",
"numpy.log10",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"seaborn.scatterplot",
"seaborn.set_color_codes",
"matplotlib.pyplot.legend",
"seaborn.barplot",
"multiprocessing.Pool",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"os.getcwd",
"time.time",
"seaborn.color_palette",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((357, 368), 'time.time', 'time.time', ([], {}), '()\n', (366, 368), False, 'import time\n'), ((374, 385), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (383, 385), False, 'import os\n'), ((421, 490), 'glob.glob', 'glob.glob', (["(wd + '/' + 'Functional-datafiles' + '/' + 'conversation/*')"], {}), "(wd + '/' + 'Functional-datafiles' + '/' + 'conversation/*')\n", (430, 490), False, 'import glob\n'), ((2922, 2986), 'glob.glob', 'glob.glob', (["(wd + '/' + 'Functional-datafiles' + '/' + 'numbers/*')"], {}), "(wd + '/' + 'Functional-datafiles' + '/' + 'numbers/*')\n", (2931, 2986), False, 'import glob\n'), ((4907, 4959), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['(line for line in lines if line)'], {}), '(line for line in lines if line)\n', (4927, 4959), False, 'from collections import OrderedDict\n'), ((6665, 6752), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': 'my_range', 'xmin': 'ys', 'xmax': 'ys[0]', 'color': '"""indigo"""', 'alpha': '(0.2)', 'linewidth': '(3)'}), "(y=my_range, xmin=ys, xmax=ys[0], color='indigo', alpha=0.2,\n linewidth=3)\n", (6675, 6752), True, 'import matplotlib.pyplot as plt\n'), ((6750, 6818), 'matplotlib.pyplot.plot', 'plt.plot', (['ys', 'my_range', '"""o"""'], {'markersize': '(3)', 'color': '"""indigo"""', 'alpha': '(0.6)'}), "(ys, my_range, 'o', markersize=3, color='indigo', alpha=0.6)\n", (6758, 6818), True, 'import matplotlib.pyplot as plt\n'), ((6820, 6844), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_range', 'xs'], {}), '(my_range, xs)\n', (6830, 6844), True, 'import matplotlib.pyplot as plt\n'), ((6903, 6940), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""-log10(p-value)"""'], {'size': '(7)'}), "('-log10(p-value)', size=7)\n", (6913, 6940), True, 'import matplotlib.pyplot as plt\n'), ((6943, 6973), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""" regions"""'], {'size': '(7)'}), "(' regions', size=7)\n", (6953, 6973), True, 'import matplotlib.pyplot as plt\n'), ((22868, 22920), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['(line for line in lines if line)'], {}), '(line for line in lines if line)\n', (22888, 22920), False, 'from collections import OrderedDict\n'), ((24159, 24246), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': 'my_range', 'xmin': 'ys', 'xmax': 'ys[0]', 'color': '"""indigo"""', 'alpha': '(0.2)', 'linewidth': '(3)'}), "(y=my_range, xmin=ys, xmax=ys[0], color='indigo', alpha=0.2,\n linewidth=3)\n", (24169, 24246), True, 'import matplotlib.pyplot as plt\n'), ((24244, 24312), 'matplotlib.pyplot.plot', 'plt.plot', (['ys', 'my_range', '"""o"""'], {'markersize': '(3)', 'color': '"""indigo"""', 'alpha': '(0.6)'}), "(ys, my_range, 'o', markersize=3, color='indigo', alpha=0.6)\n", (24252, 24312), True, 'import matplotlib.pyplot as plt\n'), ((24314, 24338), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_range', 'xs'], {}), '(my_range, xs)\n', (24324, 24338), True, 'import matplotlib.pyplot as plt\n'), ((24397, 24434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""-log10(p-value)"""'], {'size': '(7)'}), "('-log10(p-value)', size=7)\n", (24407, 24434), True, 'import matplotlib.pyplot as plt\n'), ((38806, 38812), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (38810, 38812), False, 'from multiprocessing import Pool\n'), ((38814, 38841), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(2000)'], {}), '(2000)\n', (38835, 38841), False, 'import sys\n'), ((38853, 38878), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (38876, 38878), False, 'import argparse\n'), ((1633, 
1692), 'pandas.read_csv', 'pd.read_csv', (["(p2[0] + '_functional-accessment.txt')"], {'sep': '"""\t"""'}), "(p2[0] + '_functional-accessment.txt', sep='\\t')\n", (1644, 1692), True, 'import pandas as pd\n'), ((1952, 1982), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.5, 4.5)'}), '(figsize=(6.5, 4.5))\n', (1962, 1982), True, 'import matplotlib.pyplot as plt\n'), ((1990, 2059), 'seaborn.scatterplot', 'sns.scatterplot', (['x', 'y'], {'hue': 'hue', 's': '(15)', 'legend': '"""full"""', 'palette': '"""RdYlGn"""'}), "(x, y, hue=hue, s=15, legend='full', palette='RdYlGn')\n", (2005, 2059), True, 'import seaborn as sns\n'), ((2251, 2306), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': '"""RdYlGn"""', 'norm': 'scale_legend'}), "(cmap='RdYlGn', norm=scale_legend)\n", (2272, 2306), True, 'import matplotlib.pyplot as plt\n'), ((2522, 2565), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""', 'fontsize': '(3)'}), "(rotation='vertical', fontsize=3)\n", (2532, 2565), True, 'import matplotlib.pyplot as plt\n'), ((2568, 2600), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""" SNP count"""'], {'size': '(7)'}), "(' SNP count', size=7)\n", (2578, 2600), True, 'import matplotlib.pyplot as plt\n'), ((2603, 2636), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Protein"""'], {'fontsize': '(7)'}), "('Protein', fontsize=7)\n", (2613, 2636), True, 'import matplotlib.pyplot as plt\n'), ((2639, 2661), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(5)'}), '(fontsize=5)\n', (2649, 2661), True, 'import matplotlib.pyplot as plt\n'), ((2763, 2877), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(p2[0] + '_Mutated-Proteins-at-Conserved-Regions.png')"], {'format': '"""png"""', 'dpi': '(900)', 'bbox_inches': '"""tight"""'}), "(p2[0] + '_Mutated-Proteins-at-Conserved-Regions.png', format=\n 'png', dpi=900, bbox_inches='tight')\n", (2774, 2877), True, 'import matplotlib.pyplot as plt\n'), ((3424, 3454), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.5, 4.5)'}), '(figsize=(6.5, 4.5))\n', (3434, 3454), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3548), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(3)'}), '(fontsize=3)\n', (3536, 3548), True, 'import matplotlib.pyplot as plt\n'), ((3551, 3573), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(4)'}), '(fontsize=4)\n', (3561, 3573), True, 'import matplotlib.pyplot as plt\n'), ((3582, 3653), 'pandas.read_csv', 'pd.read_csv', (["(p2[0] + '_scExpression-data.txt')"], {'sep': '"""\t"""', 'index_col': 'None'}), "(p2[0] + '_scExpression-data.txt', sep='\\t', index_col=None)\n", (3593, 3653), True, 'import pandas as pd\n'), ((3698, 3787), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Number"""', 'y': '"""Cell"""', 'data': 'scx', 'hue': '"""Genes"""', 'orient': '"""h"""', 'palette': 'palette'}), "(x='Number', y='Cell', data=scx, hue='Genes', orient='h',\n palette=palette)\n", (3709, 3787), True, 'import seaborn as sns\n'), ((3785, 3817), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""" cell-type"""'], {'size': '(7)'}), "(' cell-type', size=7)\n", (3795, 3817), True, 'import matplotlib.pyplot as plt\n'), ((3820, 3864), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of clusters"""'], {'fontsize': '(7)'}), "('number of clusters', fontsize=7)\n", (3830, 3864), True, 'import matplotlib.pyplot as plt\n'), ((3867, 3896), 'seaborn.set_color_codes', 'sns.set_color_codes', (['"""pastel"""'], {}), "('pastel')\n", (3886, 3896), True, 'import seaborn 
as sns\n'), ((3899, 4010), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '"""x-small"""', 'title': '"""Genes"""', 'title_fontsize': '(3)', 'bbox_to_anchor': '(1.05, 1)'}), "(loc='upper left', fontsize='x-small', title='Genes',\n title_fontsize=3, bbox_to_anchor=(1.05, 1))\n", (3909, 4010), True, 'import matplotlib.pyplot as plt\n'), ((4009, 4086), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(p2[0] + '_scExp.png')"], {'format': '"""png"""', 'dpi': '(900)', 'bbox_inches': '"""tight"""'}), "(p2[0] + '_scExp.png', format='png', dpi=900, bbox_inches='tight')\n", (4020, 4086), True, 'import matplotlib.pyplot as plt\n'), ((6291, 6321), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 5.5)'}), '(figsize=(7, 5.5))\n', (6303, 6321), True, 'import matplotlib.pyplot as plt\n'), ((9421, 9440), 'mechanize.Browser', 'mechanize.Browser', ([], {}), '()\n', (9438, 9440), False, 'import mechanize\n'), ((10368, 10391), 'os.remove', 'os.remove', (['"""REVIGO.csv"""'], {}), "('REVIGO.csv')\n", (10377, 10391), False, 'import os\n'), ((10404, 10434), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 5.5)'}), '(figsize=(7, 5.5))\n', (10416, 10434), True, 'import matplotlib.pyplot as plt\n'), ((10540, 10625), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': 'my_range', 'xmin': '(0)', 'xmax': 'ys', 'color': '"""#007acc"""', 'alpha': '(0.2)', 'linewidth': '(3)'}), "(y=my_range, xmin=0, xmax=ys, color='#007acc', alpha=0.2, linewidth=3\n )\n", (10550, 10625), True, 'import matplotlib.pyplot as plt\n'), ((10623, 10692), 'matplotlib.pyplot.plot', 'plt.plot', (['ys', 'my_range', '"""o"""'], {'markersize': '(3)', 'color': '"""#007acc"""', 'alpha': '(0.6)'}), "(ys, my_range, 'o', markersize=3, color='#007acc', alpha=0.6)\n", (10631, 10692), True, 'import matplotlib.pyplot as plt\n'), ((10695, 10719), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_range', 'xs'], {}), '(my_range, xs)\n', (10705, 10719), True, 'import matplotlib.pyplot as plt\n'), ((10722, 10756), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(5)', 'rotation': '(0)'}), '(fontsize=5, rotation=0)\n', (10732, 10756), True, 'import matplotlib.pyplot as plt\n'), ((10759, 10800), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""-log10(p-value)"""'], {'fontsize': '(6)'}), "('-log10(p-value)', fontsize=6)\n", (10769, 10800), True, 'import matplotlib.pyplot as plt\n'), ((11278, 11366), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': 'my_range', 'xmin': 'ys', 'xmax': 'ys[0]', 'color': '"""#197213"""', 'alpha': '(0.2)', 'linewidth': '(3)'}), "(y=my_range, xmin=ys, xmax=ys[0], color='#197213', alpha=0.2,\n linewidth=3)\n", (11288, 11366), True, 'import matplotlib.pyplot as plt\n'), ((11365, 11434), 'matplotlib.pyplot.plot', 'plt.plot', (['ys', 'my_range', '"""o"""'], {'markersize': '(3)', 'color': '"""#197213"""', 'alpha': '(0.6)'}), "(ys, my_range, 'o', markersize=3, color='#197213', alpha=0.6)\n", (11373, 11434), True, 'import matplotlib.pyplot as plt\n'), ((11437, 11461), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_range', 'xs'], {}), '(my_range, xs)\n', (11447, 11461), True, 'import matplotlib.pyplot as plt\n'), ((11522, 11563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""-log10(p-value)"""'], {'fontsize': '(7)'}), "('-log10(p-value)', fontsize=7)\n", (11532, 11563), True, 'import matplotlib.pyplot as plt\n'), ((16207, 16259), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['(line for line in lines if line)'], {}), '(line for line in lines if 
line)\n', (16227, 16259), False, 'from collections import OrderedDict\n'), ((16739, 16769), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 3.5)'}), '(figsize=(6, 3.5))\n', (16751, 16769), True, 'import matplotlib.pyplot as plt\n'), ((16824, 16892), 'matplotlib.pyplot.plot', 'plt.plot', (['my_range', 'y', '"""o"""'], {'markersize': '(2)', 'color': '"""#55342d"""', 'alpha': '(0.6)'}), "(my_range, y, 'o', markersize=2, color='#55342d', alpha=0.6)\n", (16832, 16892), True, 'import matplotlib.pyplot as plt\n'), ((16895, 16932), 'matplotlib.pyplot.xticks', 'plt.xticks', (['my_range', 'xt'], {'rotation': '(90)'}), '(my_range, xt, rotation=90)\n', (16905, 16932), True, 'import matplotlib.pyplot as plt\n'), ((16994, 17030), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of SNPs"""'], {'size': '(7)'}), "('Number of SNPs', size=7)\n", (17004, 17030), True, 'import matplotlib.pyplot as plt\n'), ((17034, 17063), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Protein"""'], {'size': '(7)'}), "('Protein', size=7)\n", (17044, 17063), True, 'import matplotlib.pyplot as plt\n'), ((17170, 17234), 'glob.glob', 'glob.glob', (["(wd + '/' + 'Functional-datafiles' + '/' + 'numbers/*')"], {}), "(wd + '/' + 'Functional-datafiles' + '/' + 'numbers/*')\n", (17179, 17234), False, 'import glob\n'), ((18735, 18761), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['phen'], {}), '(phen)\n', (18755, 18761), False, 'from collections import OrderedDict\n'), ((21231, 21319), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': 'my_range', 'xmin': 'ys', 'xmax': 'ys[0]', 'color': '"""#197213"""', 'alpha': '(0.2)', 'linewidth': '(3)'}), "(y=my_range, xmin=ys, xmax=ys[0], color='#197213', alpha=0.2,\n linewidth=3)\n", (21241, 21319), True, 'import matplotlib.pyplot as plt\n'), ((21318, 21387), 'matplotlib.pyplot.plot', 'plt.plot', (['ys', 'my_range', '"""o"""'], {'markersize': '(3)', 'color': '"""#197213"""', 'alpha': '(0.6)'}), "(ys, my_range, 'o', markersize=3, color='#197213', alpha=0.6)\n", (21326, 21387), True, 'import matplotlib.pyplot as plt\n'), ((21390, 21414), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_range', 'xs'], {}), '(my_range, xs)\n', (21400, 21414), True, 'import matplotlib.pyplot as plt\n'), ((21513, 21550), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""-log10(p-value)"""'], {'size': '(7)'}), "('-log10(p-value)', size=7)\n", (21523, 21550), True, 'import matplotlib.pyplot as plt\n'), ((22116, 22204), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': 'my_range', 'xmin': 'ys', 'xmax': 'ys[0]', 'color': '"""#007acc"""', 'alpha': '(0.2)', 'linewidth': '(3)'}), "(y=my_range, xmin=ys, xmax=ys[0], color='#007acc', alpha=0.2,\n linewidth=3)\n", (22126, 22204), True, 'import matplotlib.pyplot as plt\n'), ((22203, 22272), 'matplotlib.pyplot.plot', 'plt.plot', (['ys', 'my_range', '"""o"""'], {'markersize': '(3)', 'color': '"""#007acc"""', 'alpha': '(0.6)'}), "(ys, my_range, 'o', markersize=3, color='#007acc', alpha=0.6)\n", (22211, 22272), True, 'import matplotlib.pyplot as plt\n'), ((22275, 22299), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_range', 'xs'], {}), '(my_range, xs)\n', (22285, 22299), True, 'import matplotlib.pyplot as plt\n'), ((22398, 22435), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""-log10(p-value)"""'], {'size': '(7)'}), "('-log10(p-value)', size=7)\n", (22408, 22435), True, 'import matplotlib.pyplot as plt\n'), ((23802, 23834), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7.5, 5.5)'}), '(figsize=(7.5, 5.5))\n', (23814, 
23834), True, 'import matplotlib.pyplot as plt\n'), ((1530, 1557), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['funcC'], {}), '(funcC)\n', (1550, 1557), False, 'from collections import OrderedDict\n'), ((10960, 10990), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7, 5.5)'}), '(figsize=(7, 5.5))\n', (10972, 10990), True, 'import matplotlib.pyplot as plt\n'), ((11742, 11772), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 5.5)'}), '(figsize=(9, 5.5))\n', (11754, 11772), True, 'import matplotlib.pyplot as plt\n'), ((11777, 11805), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(6)'}), '(labelsize=6)\n', (11792, 11805), True, 'import matplotlib.pyplot as plt\n'), ((11815, 11863), 'pandas.read_csv', 'pd.read_csv', (["(p2[0] + '_enrichment.txt')"], {'sep': '"""\t"""'}), "(p2[0] + '_enrichment.txt', sep='\\t')\n", (11826, 11863), True, 'import pandas as pd\n'), ((12463, 12500), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""" -log10 FDR"""'], {'fontsize': '(7)'}), "(' -log10 FDR', fontsize=7)\n", (12473, 12500), True, 'import matplotlib.pyplot as plt\n'), ((17666, 17692), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (17676, 17692), True, 'import matplotlib.pyplot as plt\n'), ((17791, 17813), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(3)'}), '(fontsize=3)\n', (17801, 17813), True, 'import matplotlib.pyplot as plt\n'), ((17817, 17839), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(4)'}), '(fontsize=4)\n', (17827, 17839), True, 'import matplotlib.pyplot as plt\n'), ((17849, 17920), 'pandas.read_csv', 'pd.read_csv', (["(p1[0] + '_scExpression-data.txt')"], {'sep': '"""\t"""', 'index_col': 'None'}), "(p1[0] + '_scExpression-data.txt', sep='\\t', index_col=None)\n", (17860, 17920), True, 'import pandas as pd\n'), ((17967, 18056), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""Number"""', 'y': '"""Cell"""', 'data': 'scx', 'hue': '"""Genes"""', 'orient': '"""h"""', 'palette': 'palette'}), "(x='Number', y='Cell', data=scx, hue='Genes', orient='h',\n palette=palette)\n", (17978, 18056), True, 'import seaborn as sns\n'), ((18055, 18087), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""" cell-type"""'], {'size': '(7)'}), "(' cell-type', size=7)\n", (18065, 18087), True, 'import matplotlib.pyplot as plt\n'), ((18091, 18135), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of clusters"""'], {'fontsize': '(7)'}), "('number of clusters', fontsize=7)\n", (18101, 18135), True, 'import matplotlib.pyplot as plt\n'), ((18139, 18168), 'seaborn.set_color_codes', 'sns.set_color_codes', (['"""pastel"""'], {}), "('pastel')\n", (18158, 18168), True, 'import seaborn as sns\n'), ((18172, 18275), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(4)', 'title': '"""Genes"""', 'title_fontsize': '(4)', 'bbox_to_anchor': '(1.05, 1)'}), "(loc='upper left', fontsize=4, title='Genes', title_fontsize=4,\n bbox_to_anchor=(1.05, 1))\n", (18182, 18275), True, 'import matplotlib.pyplot as plt\n'), ((18275, 18352), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(p1[0] + '_scExp.png')"], {'format': '"""png"""', 'dpi': '(350)', 'bbox_inches': '"""tight"""'}), "(p1[0] + '_scExp.png', format='png', dpi=350, bbox_inches='tight')\n", (18286, 18352), True, 'import matplotlib.pyplot as plt\n'), ((18440, 18511), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Functional-datafiles' + '/' + 'Phenotype.txt')"], {}), "(wd + '/' + 
'Functional-datafiles' + '/' + 'Phenotype.txt')\n", (18452, 18511), False, 'import os\n'), ((19587, 19617), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 5.5)'}), '(figsize=(9, 5.5))\n', (19599, 19617), True, 'import matplotlib.pyplot as plt\n'), ((20908, 20940), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7.5, 5.5)'}), '(figsize=(7.5, 5.5))\n', (20920, 20940), True, 'import matplotlib.pyplot as plt\n'), ((21787, 21819), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(7.5, 5.5)'}), '(figsize=(7.5, 5.5))\n', (21799, 21819), True, 'import matplotlib.pyplot as plt\n'), ((28550, 28569), 'os.path.join', 'os.path.join', (['file1'], {}), '(file1)\n', (28562, 28569), False, 'import os\n'), ((28655, 28703), 'os.path.join', 'os.path.join', (["(p1[0] + '_regulatory-analysis.txt')"], {}), "(p1[0] + '_regulatory-analysis.txt')\n", (28667, 28703), False, 'import os\n'), ((28785, 28813), 'os.path.join', 'os.path.join', (["(p1[0] + '.txt')"], {}), "(p1[0] + '.txt')\n", (28797, 28813), False, 'import os\n'), ((29005, 29016), 'time.time', 'time.time', ([], {}), '()\n', (29014, 29016), False, 'import time\n'), ((35463, 35485), 'os.path.join', 'os.path.join', (['genefile'], {}), '(genefile)\n', (35475, 35485), False, 'import os\n'), ((35614, 35664), 'os.path.join', 'os.path.join', (["(p2[0] + '_functional-accessment.txt')"], {}), "(p2[0] + '_functional-accessment.txt')\n", (35626, 35664), False, 'import os\n'), ((35760, 35810), 'os.path.join', 'os.path.join', (["(p2[0] + '_functional-accessment.txt')"], {}), "(p2[0] + '_functional-accessment.txt')\n", (35772, 35810), False, 'import os\n'), ((35898, 35948), 'os.path.join', 'os.path.join', (["(p2[0] + '_functional-accessment.txt')"], {}), "(p2[0] + '_functional-accessment.txt')\n", (35910, 35948), False, 'import os\n'), ((36272, 36283), 'time.time', 'time.time', ([], {}), '()\n', (36281, 36283), False, 'import time\n'), ((2664, 2673), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2671, 2673), True, 'import matplotlib.pyplot as plt\n'), ((3486, 3511), 'seaborn.color_palette', 'sns.color_palette', (['"""deep"""'], {}), "('deep')\n", (3503, 3511), True, 'import seaborn as sns\n'), ((12882, 12976), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Regulatory datafiles' + '/' + 'Vista_enhancers_flankinGenes.txt')"], {}), "(wd + '/' + 'Regulatory datafiles' + '/' +\n 'Vista_enhancers_flankinGenes.txt')\n", (12894, 12976), False, 'import os\n'), ((13238, 13315), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Regulatory datafiles' + '/' + 'EPDnew_promoter.txt')"], {}), "(wd + '/' + 'Regulatory datafiles' + '/' + 'EPDnew_promoter.txt')\n", (13250, 13315), False, 'import os\n'), ((13574, 13648), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Regulatory datafiles' + '/' + 'TSS_promoter.txt')"], {}), "(wd + '/' + 'Regulatory datafiles' + '/' + 'TSS_promoter.txt')\n", (13586, 13648), False, 'import os\n'), ((13953, 14024), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Regulatory datafiles' + '/' + 'cpgIsland.txt')"], {}), "(wd + '/' + 'Regulatory datafiles' + '/' + 'cpgIsland.txt')\n", (13965, 14024), False, 'import os\n'), ((14279, 14373), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Regulatory datafiles' + '/' + 'insulator-CTCF-binding-sites.txt')"], {}), "(wd + '/' + 'Regulatory datafiles' + '/' +\n 'insulator-CTCF-binding-sites.txt')\n", (14291, 14373), False, 'import os\n'), ((14638, 14705), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Regulatory datafiles' + '/' + 'miRNA.txt')"], {}), "(wd + 
'/' + 'Regulatory datafiles' + '/' + 'miRNA.txt')\n", (14650, 14705), False, 'import os\n'), ((14910, 14980), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Regulatory datafiles' + '/' + 'AltCodon.txt')"], {}), "(wd + '/' + 'Regulatory datafiles' + '/' + 'AltCodon.txt')\n", (14922, 14980), False, 'import os\n'), ((15212, 15291), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Regulatory datafiles' + '/' + 'SPLICE_sires-snps.txt')"], {}), "(wd + '/' + 'Regulatory datafiles' + '/' + 'SPLICE_sires-snps.txt')\n", (15224, 15291), False, 'import os\n'), ((644, 721), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Functional-datafiles' + '/' + 'Functional-data.txt')"], {}), "(wd + '/' + 'Functional-datafiles' + '/' + 'Functional-data.txt')\n", (656, 721), False, 'import os\n'), ((4350, 4421), 'os.path.join', 'os.path.join', (["(wd + '/' + 'Functional-datafiles' + '/' + 'Phenotype.txt')"], {}), "(wd + '/' + 'Functional-datafiles' + '/' + 'Phenotype.txt')\n", (4362, 4421), False, 'import os\n'), ((12124, 12143), 'numpy.log10', 'np.log10', (["ij['FDR']"], {}), "(ij['FDR'])\n", (12132, 12143), True, 'import numpy as np\n'), ((17750, 17775), 'seaborn.color_palette', 'sns.color_palette', (['"""deep"""'], {}), "('deep')\n", (17767, 17775), True, 'import seaborn as sns\n'), ((39207, 39228), 'os.path.join', 'os.path.join', (['"""*.txt"""'], {}), "('*.txt')\n", (39219, 39228), False, 'import os\n'), ((39421, 39442), 'os.path.join', 'os.path.join', (['"""*.txt"""'], {}), "('*.txt')\n", (39433, 39442), False, 'import os\n')]
|
import errno
import os
import pickle
import numpy
from utilities_nn.ResourceManager import ResourceManager
class WordVectorsManager(ResourceManager):
def __init__(self, corpus=None, dim=None, omit_non_english=False):
super().__init__()
self.omit_non_english = omit_non_english
self.wv_filename = "{}.{}d".format(corpus, str(dim))
self.parsed_filename = "{}.{}d.pickle".format(corpus, str(dim))
def is_ascii(self, text):
try:
text.encode('ascii')
return True
except:
return False
def write(self):
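        # Parse the raw word-vector text file (one token followed by its float
        # coefficients per line) into a dict and cache it as a pickle for faster reloads.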
        _word_vector_file = os.path.join(os.path.dirname(__file__), self.wv_filename)  # path to the raw word-vector file next to this module
if os.path.exists(_word_vector_file):
print('Indexing file {} ...'.format(self.wv_filename))
embeddings_dict = {}
with open(_word_vector_file, "r", encoding="utf-8") as file:
for i, line in enumerate(file):
                    if line.strip() != "":  # skip blank/whitespace-only lines (they would otherwise crash on values[0])
values = line.split()
word = values[0]
coefs = numpy.asarray(values[1:], dtype='float32')
if word.lower() in {'<unk>', "<unknown>"}:
print(word)
print("UNKNOWN")
print()
if self.omit_non_english and not self.is_ascii(word):
continue
if word not in embeddings_dict or word.strip() == "":
embeddings_dict[word] = coefs
# 'House': array([0.174788, 0.091168, -0.317676,...])
print('Found %s word vectors.' % len(embeddings_dict))
# save Embeddings into a pickle-File
with open(os.path.join(os.path.dirname(__file__), self.parsed_filename), 'wb') as pickle_file:
pickle.dump(embeddings_dict, pickle_file)
else:
print("{} not found!".format(_word_vector_file))
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), _word_vector_file)
# load pickle file
def read(self):
_parsed_file = os.path.join(os.path.dirname(__file__), self.parsed_filename)
if os.path.exists(_parsed_file): # pickle file for Embeddings available
with open(_parsed_file, 'rb') as f:
return pickle.load(f)
else: # no pickle file for Embeddings available
self.write()
return self.read()
|
[
"pickle.dump",
"os.path.dirname",
"numpy.asarray",
"os.path.exists",
"pickle.load",
"os.strerror"
] |
[((721, 754), 'os.path.exists', 'os.path.exists', (['_word_vector_file'], {}), '(_word_vector_file)\n', (735, 754), False, 'import os\n'), ((2352, 2380), 'os.path.exists', 'os.path.exists', (['_parsed_file'], {}), '(_parsed_file)\n', (2366, 2380), False, 'import os\n'), ((640, 665), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (655, 665), False, 'import os\n'), ((2292, 2317), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2307, 2317), False, 'import os\n'), ((1981, 2022), 'pickle.dump', 'pickle.dump', (['embeddings_dict', 'pickle_file'], {}), '(embeddings_dict, pickle_file)\n', (1992, 2022), False, 'import pickle\n'), ((2166, 2191), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (2177, 2191), False, 'import os\n'), ((2493, 2507), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2504, 2507), False, 'import pickle\n'), ((1175, 1217), 'numpy.asarray', 'numpy.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (1188, 1217), False, 'import numpy\n'), ((1893, 1918), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1908, 1918), False, 'import os\n')]
|
#!/usr/bin/env python
# <NAME>
# Plot the "region plot" of BGC candidates in bacterial genomes (horizontal colored lines for each model).
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
def candidate_regions(cands, safety_limit=50, xlim=0, xstep=100000, colors=None):
"""
    Plot the "region plot" of BGC candidates in bacterial genomes (horizontal colored lines for each model).
:param cands: DataFrame of candidates with 'contig_id' column defining the contig and 'model' defining the model used to predict the candidate
    :param safety_limit: Maximum number of contigs to plot
:param xlim: Starting genomic coordinate
:param xstep: Step for X-axis ticks
:param colors: Dict of colors by 'model'
:return: Figure for the "region plot" of BGC candidates
"""
cands_by_contig = cands.groupby('contig_id')
if len(cands_by_contig) > safety_limit:
raise AttributeError('You probably did not want to plot more than {} contigs! '
'Otherwise increase the safety_limit parameter.'.format(safety_limit))
models = cands['model'].unique()
lengths = cands_by_contig['nucl_end'].max()
max_end = lengths.max()
with plt.style.context(('ggplot')):
rows = len(cands_by_contig)
num_models = len(models)
width = 15 + (max_end-xlim) / 230000
fig, axes = plt.subplots(rows, 1, figsize=(width, 2 + 0.20 * rows * num_models))
if rows == 1:
axes = [axes]
for i, (contig_id, contig_cands) in enumerate(cands_by_contig):
print('{} of length {}'.format(contig_id, lengths[contig_id]))
ax = axes[i]
ax.set_title(contig_id, loc='left')
ax.set_facecolor('white')
ax.set_yticks([])
ax.set_xlim([xlim, max_end])
ax.set_ylim([-0.1, num_models + 0.5])
ax.set_xticks(range(xlim, int(lengths[contig_id]), xstep))
ax.set_xticklabels(
['{:.0f}kb'.format(x / 1e3) if x < 1e6 else '{:.1f}Mb'.format(x / 1e6) for x in ax.get_xticks()])
cands_by_model = contig_cands.groupby('model')
for m, model in enumerate(models):
if model not in cands_by_model.groups:
continue
model_cands = cands_by_model.get_group(model)
if model == 'true_output':
color = 'grey'
for c, cand in model_cands.iterrows():
ax.axvspan(cand['nucl_start'], cand['nucl_end'], color='black', alpha=0.13)
else:
color = colors[model] if colors else plt.cm.tab10(m)
x = model_cands[['nucl_start', 'nucl_end']].values.reshape(1, -1)[0]
y = np.ones(x.shape) * (num_models - m)
y[1::2] = np.nan
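                    # y is NaN at every other point, so the step plot breaks between
                    # candidates and each (nucl_start, nucl_end) pair becomes its own
                    # horizontal segment; the small d offsets stack thin lines into a band.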
for d in np.arange(-0.08, 0.08, 0.01):
ax.step(x, y + d, color=color, where='post', lw=0.25, label=None)
# hidden plot for legend
ax.step([-100, -120], [0, 0], color=color, where='post', lw=4, label=model)
axes[0].legend(loc='upper right', bbox_to_anchor=(-0.01, 1.0))
fig.patch.set_facecolor('white')
fig.tight_layout()
fig.subplots_adjust(left=0.03 + 3 / width)
return fig
def create_and_save(cands, path, **kwargs):
fig = candidate_regions(cands, **kwargs)
fig.savefig(path, dpi=200)
plt.close(fig)
print('Saved regions plot to: ', path)
if __name__ == "__main__":
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--true", dest="true", required=False,
help="True BGCs candidate csv file path.", metavar="FILE")
parser.add_argument("--candidates", dest="candidates", required=True, action='append',
help="Model candidates CSV file path.", metavar="FILE")
parser.add_argument("--color", dest="colors", action='append', required=True,
help="Model color.", metavar="STRING")
parser.add_argument("--label", dest="labels", action='append', required=True,
help="Model label.", metavar="STRING")
parser.add_argument("-s", "--separate", dest="separate", action='store_true', default=False,
help="Create one separate file for each contig.")
parser.add_argument("-o", "--output", dest="output", required=True,
help="Output file path.", metavar="FILE")
options = parser.parse_args()
cands = []
if options.true:
true_output = pd.read_csv(options.true)
true_output['model'] = 'true_output'
cands.append(true_output)
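        # Ground-truth BGCs are appended as a pseudo-model named 'true_output',
        # which candidate_regions() renders as shaded background spans.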
colors = {}
for i, path in enumerate(options.candidates):
name = options.labels[i]
c = pd.read_csv(path)
c['model'] = name
if options.colors and len(options.colors) > i:
colors[name] = options.colors[i]
cands.append(c)
cands: pd.DataFrame = pd.concat(cands)
if options.separate:
print('Creating output directory {}'.format(options.output))
os.mkdir(options.output)
grouped = cands.groupby('contig_id')
print('Drawing {} contigs...'.format(len(grouped)))
for contig_id, contig_cands in grouped:
path = os.path.join(options.output, '{}.png'.format(contig_id))
create_and_save(contig_cands, path, colors=colors)
else:
print('Drawing region plot...')
create_and_save(cands, options.output, colors=colors)
|
[
"os.mkdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.cm.tab10",
"pandas.read_csv",
"matplotlib.pyplot.close",
"matplotlib.pyplot.style.context",
"numpy.ones",
"numpy.arange",
"matplotlib.pyplot.subplots",
"pandas.concat"
] |
[((3497, 3511), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3506, 3511), True, 'import matplotlib.pyplot as plt\n'), ((3622, 3647), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3645, 3647), False, 'import argparse\n'), ((5066, 5082), 'pandas.concat', 'pd.concat', (['cands'], {}), '(cands)\n', (5075, 5082), True, 'import pandas as pd\n'), ((1252, 1279), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""ggplot"""'], {}), "('ggplot')\n", (1269, 1279), True, 'import matplotlib.pyplot as plt\n'), ((1417, 1484), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', '(1)'], {'figsize': '(width, 2 + 0.2 * rows * num_models)'}), '(rows, 1, figsize=(width, 2 + 0.2 * rows * num_models))\n', (1429, 1484), True, 'import matplotlib.pyplot as plt\n'), ((4655, 4680), 'pandas.read_csv', 'pd.read_csv', (['options.true'], {}), '(options.true)\n', (4666, 4680), True, 'import pandas as pd\n'), ((4872, 4889), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (4883, 4889), True, 'import pandas as pd\n'), ((5186, 5210), 'os.mkdir', 'os.mkdir', (['options.output'], {}), '(options.output)\n', (5194, 5210), False, 'import os\n'), ((2915, 2943), 'numpy.arange', 'np.arange', (['(-0.08)', '(0.08)', '(0.01)'], {}), '(-0.08, 0.08, 0.01)\n', (2924, 2943), True, 'import numpy as np\n'), ((2821, 2837), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (2828, 2837), True, 'import numpy as np\n'), ((2699, 2714), 'matplotlib.pyplot.cm.tab10', 'plt.cm.tab10', (['m'], {}), '(m)\n', (2711, 2714), True, 'import matplotlib.pyplot as plt\n')]
|
import random
import json
import argparse
import numpy as np
import cv2
import tensorflow as tf
from colormath.color_diff import delta_e_cie1976
from colormath.color_objects import LabColor
from utils.helpers import load_module
from vehicle_attributes.trainer import create_session, resnet_v1_10_1
from vehicle_attributes.readers.vehicle_attributes_json import BarrierAttributesJson
def parse_args():
parser = argparse.ArgumentParser(description='Perform inference of vehicle attributes model')
parser.add_argument('path_to_config', help='Path to a config.py')
return parser.parse_args()
def normalized_to_absolute(prediction):
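  # Clamp the normalized Lab prediction to [0, 1] and rescale it to 8-bit
  # (0-255) values so OpenCV can handle it as a Lab pixel.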
colorcar = np.zeros((1, 1, 3), dtype=np.uint8)
for i in range(3):
if prediction[i] < 0:
colorcar[0, 0, i] = 0
elif prediction[i] > 1:
colorcar[0, 0, i] = 255
else:
colorcar[0, 0, i] = prediction[i]*255
return colorcar
# pylint: disable=too-many-locals, too-many-statements, invalid-name, too-many-boolean-expressions, len-as-condition
def infer(config):
session_config = create_session(config, 'infer')
run_config = tf.estimator.RunConfig(session_config=session_config)
va_estimator = tf.estimator.Estimator(
model_fn=resnet_v1_10_1,
params=config.resnet_params,
model_dir=config.model_dir,
config=run_config)
with open(config.infer.annotation_path) as f:
data = json.load(f)
pic = 0
summ = 0
random.seed(666)
for _ in range(len(data)):
pic = random.randint(0, len(data)-1)
images, annotations = BarrierAttributesJson.get_annotations(data[pic])
if len(images) == 0:
pic += 1
continue
print("pic = ", pic)
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
x=np.array([images], dtype=np.float32).reshape([-1] + list(config.input_shape)),
num_epochs=1,
shuffle=False)
predict = va_estimator.predict(input_fn=predict_input_fn)
img = cv2.imread(data[pic]['image'], -1)
cv2.namedWindow("example")
cars = []
for y in range(len(data[pic]['objects'])):
if data[pic]['objects'][y]['label'] == 'vehicle' and 'bbox' in data[pic]['objects'][y] and \
len(data[pic]['objects'][y]['bbox']) != 0 and 'color_bbox' in data[pic]['objects'][y]['attributes'] and \
len(data[pic]['objects'][y]['attributes']['color_bbox']) != 0 and \
'type' in data[pic]['objects'][y]['attributes']:
cars.append(y)
it = 0
summ_temp = 0
for i in predict:
colorcar = normalized_to_absolute(i['color_lab'])
n = cars[it]
bbox_car = data[pic]['objects'][n]['bbox']
color_detected = LabColor(colorcar[0][0][0], colorcar[0][0][1], colorcar[0][0][2])
colorcar_rgb = cv2.cvtColor(colorcar, cv2.COLOR_LAB2BGR)[0, 0].tolist()
cv2.rectangle(img, (int(bbox_car[0]), int(bbox_car[1])), (int(bbox_car[2]), int(bbox_car[3])),
colorcar_rgb,
thickness=5)
l2diss = 10000
tempy = 0
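      # Match the predicted colour against every annotated vehicle and keep the
      # closest one by CIE76 delta-E distance in Lab space.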
for j, item in enumerate(annotations):
colorcar_given = normalized_to_absolute(item[4:7])
color_given = LabColor(colorcar_given[0][0][0], colorcar_given[0][0][1], colorcar_given[0][0][2])
l2diss_temp = delta_e_cie1976(color_given, color_detected)
if l2diss_temp <= l2diss:
l2diss = l2diss_temp
tempy = j
colorcar_given = normalized_to_absolute(annotations[tempy][4:7])
colorcar_given_rgb = cv2.cvtColor(colorcar_given, cv2.COLOR_LAB2BGR)[0, 0].tolist()
color_given = LabColor(colorcar_given[0][0][0], colorcar_given[0][0][1], colorcar_given[0][0][2])
l2diss = delta_e_cie1976(color_given, color_detected)
vtype = BarrierAttributesJson.one_hot_annotation_to_type(i['types_class'])
gttype = data[pic]['objects'][n]['attributes']['type']
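      # Collapse fine-grained ground-truth types (suv/mpv/other -> car, pickup -> truck)
      # into the coarse classes the model predicts.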
if gttype in ('suv', 'mpv', 'other'):
gttype = 'car'
if gttype == 'pickup':
gttype = 'truck'
overlay = img.copy()
cv2.rectangle(img, (0, 0 + 60 * it), (120, 60 + 60 * it), (255, 255, 255), -1)
cv2.addWeighted(overlay, 0.3, img, 1.0 - 0.3, 0.0, img)
cv2.rectangle(img, (5, 5 + 60 * it), (15, 15 + 60 * it), colorcar_rgb, -1)
cv2.rectangle(img, (5, 25 + 60 * it), (15, 35 + 60 * it), colorcar_given_rgb, -1)
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(img, vtype, (35, 15 + 60 * it), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(img, gttype + " gt", (35, 35 + 60 * it), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
it += 1
summ_temp += l2diss
summ_temp /= it
summ += summ_temp
pic += 1
cv2.imshow("example", img)
press = cv2.waitKey(0)
if press == 27:
break
summ /= len(data)
print(summ)
def main(_):
args = parse_args()
cfg = load_module(args.path_to_config)
infer(cfg)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main)
|
[
"argparse.ArgumentParser",
"tensorflow.logging.set_verbosity",
"colormath.color_objects.LabColor",
"vehicle_attributes.readers.vehicle_attributes_json.BarrierAttributesJson.one_hot_annotation_to_type",
"tensorflow.estimator.Estimator",
"cv2.rectangle",
"cv2.imshow",
"cv2.cvtColor",
"vehicle_attributes.trainer.create_session",
"random.seed",
"tensorflow.app.run",
"cv2.waitKey",
"vehicle_attributes.readers.vehicle_attributes_json.BarrierAttributesJson.get_annotations",
"cv2.addWeighted",
"utils.helpers.load_module",
"colormath.color_diff.delta_e_cie1976",
"json.load",
"cv2.putText",
"tensorflow.estimator.RunConfig",
"numpy.zeros",
"cv2.imread",
"numpy.array",
"cv2.namedWindow"
] |
[((416, 505), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform inference of vehicle attributes model"""'}), "(description=\n 'Perform inference of vehicle attributes model')\n", (439, 505), False, 'import argparse\n'), ((652, 687), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.uint8'}), '((1, 1, 3), dtype=np.uint8)\n', (660, 687), True, 'import numpy as np\n'), ((1049, 1080), 'vehicle_attributes.trainer.create_session', 'create_session', (['config', '"""infer"""'], {}), "(config, 'infer')\n", (1063, 1080), False, 'from vehicle_attributes.trainer import create_session, resnet_v1_10_1\n'), ((1097, 1150), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'session_config': 'session_config'}), '(session_config=session_config)\n', (1119, 1150), True, 'import tensorflow as tf\n'), ((1169, 1296), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'resnet_v1_10_1', 'params': 'config.resnet_params', 'model_dir': 'config.model_dir', 'config': 'run_config'}), '(model_fn=resnet_v1_10_1, params=config.resnet_params,\n model_dir=config.model_dir, config=run_config)\n', (1191, 1296), True, 'import tensorflow as tf\n'), ((4879, 4911), 'utils.helpers.load_module', 'load_module', (['args.path_to_config'], {}), '(args.path_to_config)\n', (4890, 4911), False, 'from utils.helpers import load_module\n'), ((4955, 4996), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (4979, 4996), True, 'import tensorflow as tf\n'), ((4999, 5015), 'tensorflow.app.run', 'tf.app.run', (['main'], {}), '(main)\n', (5009, 5015), True, 'import tensorflow as tf\n'), ((1370, 1382), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1379, 1382), False, 'import json\n'), ((1412, 1428), 'random.seed', 'random.seed', (['(666)'], {}), '(666)\n', (1423, 1428), False, 'import random\n'), ((1531, 1579), 'vehicle_attributes.readers.vehicle_attributes_json.BarrierAttributesJson.get_annotations', 'BarrierAttributesJson.get_annotations', (['data[pic]'], {}), '(data[pic])\n', (1568, 1579), False, 'from vehicle_attributes.readers.vehicle_attributes_json import BarrierAttributesJson\n'), ((1940, 1974), 'cv2.imread', 'cv2.imread', (["data[pic]['image']", '(-1)'], {}), "(data[pic]['image'], -1)\n", (1950, 1974), False, 'import cv2\n'), ((1981, 2007), 'cv2.namedWindow', 'cv2.namedWindow', (['"""example"""'], {}), "('example')\n", (1996, 2007), False, 'import cv2\n'), ((4709, 4735), 'cv2.imshow', 'cv2.imshow', (['"""example"""', 'img'], {}), "('example', img)\n", (4719, 4735), False, 'import cv2\n'), ((4750, 4764), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4761, 4764), False, 'import cv2\n'), ((2673, 2738), 'colormath.color_objects.LabColor', 'LabColor', (['colorcar[0][0][0]', 'colorcar[0][0][1]', 'colorcar[0][0][2]'], {}), '(colorcar[0][0][0], colorcar[0][0][1], colorcar[0][0][2])\n', (2681, 2738), False, 'from colormath.color_objects import LabColor\n'), ((3599, 3687), 'colormath.color_objects.LabColor', 'LabColor', (['colorcar_given[0][0][0]', 'colorcar_given[0][0][1]', 'colorcar_given[0][0][2]'], {}), '(colorcar_given[0][0][0], colorcar_given[0][0][1], colorcar_given[0\n ][0][2])\n', (3607, 3687), False, 'from colormath.color_objects import LabColor\n'), ((3700, 3744), 'colormath.color_diff.delta_e_cie1976', 'delta_e_cie1976', (['color_given', 'color_detected'], {}), '(color_given, color_detected)\n', (3715, 3744), False, 'from colormath.color_diff import delta_e_cie1976\n'), ((3762, 
3828), 'vehicle_attributes.readers.vehicle_attributes_json.BarrierAttributesJson.one_hot_annotation_to_type', 'BarrierAttributesJson.one_hot_annotation_to_type', (["i['types_class']"], {}), "(i['types_class'])\n", (3810, 3828), False, 'from vehicle_attributes.readers.vehicle_attributes_json import BarrierAttributesJson\n'), ((4059, 4137), 'cv2.rectangle', 'cv2.rectangle', (['img', '(0, 0 + 60 * it)', '(120, 60 + 60 * it)', '(255, 255, 255)', '(-1)'], {}), '(img, (0, 0 + 60 * it), (120, 60 + 60 * it), (255, 255, 255), -1)\n', (4072, 4137), False, 'import cv2\n'), ((4146, 4201), 'cv2.addWeighted', 'cv2.addWeighted', (['overlay', '(0.3)', 'img', '(1.0 - 0.3)', '(0.0)', 'img'], {}), '(overlay, 0.3, img, 1.0 - 0.3, 0.0, img)\n', (4161, 4201), False, 'import cv2\n'), ((4210, 4284), 'cv2.rectangle', 'cv2.rectangle', (['img', '(5, 5 + 60 * it)', '(15, 15 + 60 * it)', 'colorcar_rgb', '(-1)'], {}), '(img, (5, 5 + 60 * it), (15, 15 + 60 * it), colorcar_rgb, -1)\n', (4223, 4284), False, 'import cv2\n'), ((4293, 4378), 'cv2.rectangle', 'cv2.rectangle', (['img', '(5, 25 + 60 * it)', '(15, 35 + 60 * it)', 'colorcar_given_rgb', '(-1)'], {}), '(img, (5, 25 + 60 * it), (15, 35 + 60 * it),\n colorcar_given_rgb, -1)\n', (4306, 4378), False, 'import cv2\n'), ((4421, 4500), 'cv2.putText', 'cv2.putText', (['img', 'vtype', '(35, 15 + 60 * it)', 'font', '(1)', '(0, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), '(img, vtype, (35, 15 + 60 * it), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n', (4432, 4500), False, 'import cv2\n'), ((4509, 4601), 'cv2.putText', 'cv2.putText', (['img', "(gttype + ' gt')", '(35, 35 + 60 * it)', 'font', '(1)', '(0, 0, 0)', '(2)', 'cv2.LINE_AA'], {}), "(img, gttype + ' gt', (35, 35 + 60 * it), font, 1, (0, 0, 0), 2,\n cv2.LINE_AA)\n", (4520, 4601), False, 'import cv2\n'), ((3168, 3256), 'colormath.color_objects.LabColor', 'LabColor', (['colorcar_given[0][0][0]', 'colorcar_given[0][0][1]', 'colorcar_given[0][0][2]'], {}), '(colorcar_given[0][0][0], colorcar_given[0][0][1], colorcar_given[0\n ][0][2])\n', (3176, 3256), False, 'from colormath.color_objects import LabColor\n'), ((3276, 3320), 'colormath.color_diff.delta_e_cie1976', 'delta_e_cie1976', (['color_given', 'color_detected'], {}), '(color_given, color_detected)\n', (3291, 3320), False, 'from colormath.color_diff import delta_e_cie1976\n'), ((1739, 1775), 'numpy.array', 'np.array', (['[images]'], {'dtype': 'np.float32'}), '([images], dtype=np.float32)\n', (1747, 1775), True, 'import numpy as np\n'), ((2762, 2803), 'cv2.cvtColor', 'cv2.cvtColor', (['colorcar', 'cv2.COLOR_LAB2BGR'], {}), '(colorcar, cv2.COLOR_LAB2BGR)\n', (2774, 2803), False, 'import cv2\n'), ((3514, 3561), 'cv2.cvtColor', 'cv2.cvtColor', (['colorcar_given', 'cv2.COLOR_LAB2BGR'], {}), '(colorcar_given, cv2.COLOR_LAB2BGR)\n', (3526, 3561), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
"""
Classes in this module enhance several stationary covariance functions with the
Stochastic Differential Equation (SDE) functionality.
"""
from .rbf import RBF
from .stationary import Exponential
from .stationary import RatQuad
import numpy as np
import scipy as sp
try:
from scipy.linalg import solve_continuous_lyapunov as lyap
except ImportError:
from scipy.linalg import solve_lyapunov as lyap
class sde_RBF(RBF):
"""
    Class provides extra functionality to transfer this covariance function into
SDE form.
Radial Basis Function kernel:
.. math::
k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) \\ \\ \\ \\ \text{ where } r = \sqrt{\sum_{i=1}^{input dim} \frac{(x_i-y_i)^2}{\ell_i^2} }
"""
def sde_update_gradient_full(self, gradients):
"""
Update gradient in the order in which parameters are represented in the
kernel
"""
self.variance.gradient = gradients[0]
self.lengthscale.gradient = gradients[1]
def sde(self):
"""
Return the state space representation of the covariance.
"""
        N = 10  # approximation order (number of terms in the exponent series expansion)
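        # The RBF kernel has no exact finite-dimensional SDE form; its spectral
        # density is approximated with an N-th order series expansion, and the stable
        # roots of the resulting polynomial define the companion-form state matrix F below.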
roots_rounding_decimals = 6
fn = np.math.factorial(N)
kappa = 1.0/2.0/self.lengthscale**2
Qc = np.array((self.variance*np.sqrt(np.pi/kappa)*fn*(4*kappa)**N,),)
pp = np.zeros((2*N+1,)) # array of polynomial coefficients from higher power to lower
for n in range(0, N+1): # (2N+1) - number of polynomial coefficients
pp[2*(N-n)] = fn*(4.0*kappa)**(N-n)/np.math.factorial(n)*(-1)**n
pp = sp.poly1d(pp)
roots = sp.roots(pp)
neg_real_part_roots = roots[np.round(np.real(roots) ,roots_rounding_decimals) < 0]
aa = sp.poly1d(neg_real_part_roots, r=True).coeffs
F = np.diag(np.ones((N-1,)),1)
F[-1,:] = -aa[-1:0:-1]
L= np.zeros((N,1))
L[N-1,0] = 1
H = np.zeros((1,N))
H[0,0] = 1
# Infinite covariance:
Pinf = lyap(F, -np.dot(L,np.dot( Qc[0,0],L.T)))
Pinf = 0.5*(Pinf + Pinf.T)
# Allocating space for derivatives
dF = np.empty([F.shape[0],F.shape[1],2])
dQc = np.empty([Qc.shape[0],Qc.shape[1],2])
dPinf = np.empty([Pinf.shape[0],Pinf.shape[1],2])
# Derivatives:
dFvariance = np.zeros(F.shape)
dFlengthscale = np.zeros(F.shape)
dFlengthscale[-1,:] = -aa[-1:0:-1]/self.lengthscale * np.arange(-N,0,1)
dQcvariance = Qc/self.variance
dQclengthscale = np.array(((self.variance*np.sqrt(2*np.pi)*fn*2**N*self.lengthscale**(-2*N)*(1-2*N,),)))
dPinf_variance = Pinf/self.variance
lp = Pinf.shape[0]
coeff = np.arange(1,lp+1).reshape(lp,1) + np.arange(1,lp+1).reshape(1,lp) - 2
coeff[np.mod(coeff,2) != 0] = 0
dPinf_lengthscale = -1/self.lengthscale*Pinf*coeff
dF[:,:,0] = dFvariance
dF[:,:,1] = dFlengthscale
dQc[:,:,0] = dQcvariance
dQc[:,:,1] = dQclengthscale
dPinf[:,:,0] = dPinf_variance
dPinf[:,:,1] = dPinf_lengthscale
P0 = Pinf.copy()
dP0 = dPinf.copy()
        # The benefit of this balancing is not well established; it helps only in one case:
# SVD Kalman + RBF kernel
import GPy.models.state_space_main as ssm
(F, L, Qc, H, Pinf, P0, dF, dQc, dPinf,dP0, T) = ssm.balance_ss_model(F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0 )
return (F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0)
class sde_Exponential(Exponential):
"""
    Class providing extra functionality to transfer this covariance function into
    SDE form.
    Exponential kernel:
    .. math::
        k(r) = \\sigma^2 \\exp\\bigg(-\\frac{1}{2} r\\bigg) \\qquad \\text{where } r = \\sqrt{\\sum_{i=1}^{\\text{input\\_dim}} \\frac{(x_i - y_i)^2}{\\ell_i^2}}
"""
def sde_update_gradient_full(self, gradients):
"""
Update gradient in the order in which parameters are represented in the
kernel
"""
self.variance.gradient = gradients[0]
self.lengthscale.gradient = gradients[1]
def sde(self):
"""
Return the state space representation of the covariance.
"""
variance = float(self.variance.values)
lengthscale = float(self.lengthscale)
F = np.array(((-1.0/lengthscale,),))
L = np.array(((1.0,),))
Qc = np.array( ((2.0*variance/lengthscale,),) )
H = np.array(((1.0,),))
Pinf = np.array(((variance,),))
P0 = Pinf.copy()
dF = np.zeros((1,1,2));
dQc = np.zeros((1,1,2));
dPinf = np.zeros((1,1,2));
dF[:,:,0] = 0.0
dF[:,:,1] = 1.0/lengthscale**2
dQc[:,:,0] = 2.0/lengthscale
dQc[:,:,1] = -2.0*variance/lengthscale**2
dPinf[:,:,0] = 1.0
dPinf[:,:,1] = 0.0
dP0 = dPinf.copy()
return (F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0)
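# Quick sanity check of the Exponential (Ornstein-Uhlenbeck) representation above.
# This is an illustrative sketch, not part of GPy itself; it only uses numpy/scipy,
# which are already imported in this module, and arbitrary example numbers:
#
#     variance, lengthscale = 1.5, 2.0
#     F_ = np.array([[-1.0/lengthscale]])
#     L_ = np.array([[1.0]])
#     Qc_ = np.array([[2.0*variance/lengthscale]])
#     Pinf_ = lyap(F_, -L_.dot(Qc_).dot(L_.T))   # solves F P + P F^T = -L Qc L^T
#     # Pinf_ comes out as [[1.5]], i.e. equal to the kernel variance, matching the
#     # closed form Pinf = np.array(((variance,),)) returned by sde() above.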
class sde_RatQuad(RatQuad):
"""
    Class providing extra functionality to transfer this covariance function into
    SDE form.
    Rational Quadratic kernel:
    .. math::
        k(r) = \\sigma^2 \\bigg(1 + \\frac{r^2}{2}\\bigg)^{-\\alpha} \\qquad \\text{where } r = \\sqrt{\\sum_{i=1}^{\\text{input\\_dim}} \\frac{(x_i - y_i)^2}{\\ell_i^2}}
"""
def sde(self):
"""
Return the state space representation of the covariance.
"""
assert False, 'Not Implemented'
# Params to use:
# self.lengthscale
# self.variance
#self.power
#return (F, L, Qc, H, Pinf, dF, dQc, dPinf)
|
[
"scipy.poly1d",
"scipy.roots",
"numpy.empty",
"numpy.zeros",
"numpy.ones",
"numpy.mod",
"numpy.math.factorial",
"numpy.array",
"numpy.arange",
"numpy.real",
"numpy.dot",
"GPy.models.state_space_main.balance_ss_model",
"numpy.sqrt"
] |
[((1316, 1336), 'numpy.math.factorial', 'np.math.factorial', (['N'], {}), '(N)\n', (1333, 1336), True, 'import numpy as np\n'), ((1505, 1527), 'numpy.zeros', 'np.zeros', (['(2 * N + 1,)'], {}), '((2 * N + 1,))\n', (1513, 1527), True, 'import numpy as np\n'), ((1772, 1785), 'scipy.poly1d', 'sp.poly1d', (['pp'], {}), '(pp)\n', (1781, 1785), True, 'import scipy as sp\n'), ((1802, 1814), 'scipy.roots', 'sp.roots', (['pp'], {}), '(pp)\n', (1810, 1814), True, 'import scipy as sp\n'), ((2089, 2105), 'numpy.zeros', 'np.zeros', (['(N, 1)'], {}), '((N, 1))\n', (2097, 2105), True, 'import numpy as np\n'), ((2147, 2163), 'numpy.zeros', 'np.zeros', (['(1, N)'], {}), '((1, N))\n', (2155, 2163), True, 'import numpy as np\n'), ((2380, 2417), 'numpy.empty', 'np.empty', (['[F.shape[0], F.shape[1], 2]'], {}), '([F.shape[0], F.shape[1], 2])\n', (2388, 2417), True, 'import numpy as np\n'), ((2432, 2471), 'numpy.empty', 'np.empty', (['[Qc.shape[0], Qc.shape[1], 2]'], {}), '([Qc.shape[0], Qc.shape[1], 2])\n', (2440, 2471), True, 'import numpy as np\n'), ((2487, 2530), 'numpy.empty', 'np.empty', (['[Pinf.shape[0], Pinf.shape[1], 2]'], {}), '([Pinf.shape[0], Pinf.shape[1], 2])\n', (2495, 2530), True, 'import numpy as np\n'), ((2583, 2600), 'numpy.zeros', 'np.zeros', (['F.shape'], {}), '(F.shape)\n', (2591, 2600), True, 'import numpy as np\n'), ((2625, 2642), 'numpy.zeros', 'np.zeros', (['F.shape'], {}), '(F.shape)\n', (2633, 2642), True, 'import numpy as np\n'), ((3678, 3742), 'GPy.models.state_space_main.balance_ss_model', 'ssm.balance_ss_model', (['F', 'L', 'Qc', 'H', 'Pinf', 'P0', 'dF', 'dQc', 'dPinf', 'dP0'], {}), '(F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0)\n', (3698, 3742), True, 'import GPy.models.state_space_main as ssm\n'), ((4683, 4717), 'numpy.array', 'np.array', (['((-1.0 / lengthscale,),)'], {}), '(((-1.0 / lengthscale,),))\n', (4691, 4717), True, 'import numpy as np\n'), ((4729, 4748), 'numpy.array', 'np.array', (['((1.0,),)'], {}), '(((1.0,),))\n', (4737, 4748), True, 'import numpy as np\n'), ((4763, 4807), 'numpy.array', 'np.array', (['((2.0 * variance / lengthscale,),)'], {}), '(((2.0 * variance / lengthscale,),))\n', (4771, 4807), True, 'import numpy as np\n'), ((4819, 4838), 'numpy.array', 'np.array', (['((1.0,),)'], {}), '(((1.0,),))\n', (4827, 4838), True, 'import numpy as np\n'), ((4855, 4879), 'numpy.array', 'np.array', (['((variance,),)'], {}), '(((variance,),))\n', (4863, 4879), True, 'import numpy as np\n'), ((4936, 4955), 'numpy.zeros', 'np.zeros', (['(1, 1, 2)'], {}), '((1, 1, 2))\n', (4944, 4955), True, 'import numpy as np\n'), ((4971, 4990), 'numpy.zeros', 'np.zeros', (['(1, 1, 2)'], {}), '((1, 1, 2))\n', (4979, 4990), True, 'import numpy as np\n'), ((5007, 5026), 'numpy.zeros', 'np.zeros', (['(1, 1, 2)'], {}), '((1, 1, 2))\n', (5015, 5026), True, 'import numpy as np\n'), ((1936, 1974), 'scipy.poly1d', 'sp.poly1d', (['neg_real_part_roots'], {'r': '(True)'}), '(neg_real_part_roots, r=True)\n', (1945, 1974), True, 'import scipy as sp\n'), ((2019, 2036), 'numpy.ones', 'np.ones', (['(N - 1,)'], {}), '((N - 1,))\n', (2026, 2036), True, 'import numpy as np\n'), ((2705, 2724), 'numpy.arange', 'np.arange', (['(-N)', '(0)', '(1)'], {}), '(-N, 0, 1)\n', (2714, 2724), True, 'import numpy as np\n'), ((3074, 3090), 'numpy.mod', 'np.mod', (['coeff', '(2)'], {}), '(coeff, 2)\n', (3080, 3090), True, 'import numpy as np\n'), ((1720, 1740), 'numpy.math.factorial', 'np.math.factorial', (['n'], {}), '(n)\n', (1737, 1740), True, 'import numpy as np\n'), ((1877, 1891), 'numpy.real', 'np.real', (['roots'], 
{}), '(roots)\n', (1884, 1891), True, 'import numpy as np\n'), ((2255, 2276), 'numpy.dot', 'np.dot', (['Qc[0, 0]', 'L.T'], {}), '(Qc[0, 0], L.T)\n', (2261, 2276), True, 'import numpy as np\n'), ((2990, 3010), 'numpy.arange', 'np.arange', (['(1)', '(lp + 1)'], {}), '(1, lp + 1)\n', (2999, 3010), True, 'import numpy as np\n'), ((3024, 3044), 'numpy.arange', 'np.arange', (['(1)', '(lp + 1)'], {}), '(1, lp + 1)\n', (3033, 3044), True, 'import numpy as np\n'), ((1443, 1465), 'numpy.sqrt', 'np.sqrt', (['(np.pi / kappa)'], {}), '(np.pi / kappa)\n', (1450, 1465), True, 'import numpy as np\n'), ((2813, 2831), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2820, 2831), True, 'import numpy as np\n')]
|
"""
Script for translating the KITTI 3D bounding box annotation format into the BB3TXT data format.
A BB3TXT file is formatted like this:
filename label confidence xmin ymin xmax ymax fblx fbly fbrx fbry rblx rbly ftly
filename label confidence xmin ymin xmax ymax fblx fbly fbrx fbry rblx rbly ftly
filename label confidence xmin ymin xmax ymax fblx fbly fbrx fbry rblx rbly ftly
...
----------------------------------------------------------------------------------------------------
python kitti2bb3txt.py path_labels path_images outfile.bb3txt
----------------------------------------------------------------------------------------------------
"""
__date__ = '03/17/2017'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
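# For illustration only (the numbers below are made up, not taken from KITTI): one
# line of the resulting BB3TXT file could look like
#   image_000123.png 1 1 100.0 150.0 300.0 280.0 110.0 275.0 295.0 278.0 130.0 270.0 190.0
# i.e. filename, class label, confidence, the 2D box (xmin ymin xmax ymax), and the
# seven projected 3D-corner coordinates listed in the module docstring above.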
import argparse
import os
import numpy as np
import cv2
from mappings.utils import LabelMappingManager
from mappings.utils import available_categories
from shared.geometry import R3x3_y, t3x1, Rt4x4
####################################################################################################
# DEFINITIONS #
####################################################################################################
# IMPORTANT !!
# The labels must translate precisely into the numbers in the kitti.yaml mapping file!
LABELS = {
'Car': 1,
'Van': 2,
'Truck': 3,
'Pedestrian': 4,
'Person_sitting': 5,
'Cyclist': 6,
'Tram': 7,
# Throw away 'Misc' and 'DontCare'
}
# Initialize the LabelMappingManager
LMM = LabelMappingManager()
MAPPING = LMM.get_mapping('kitti')
####################################################################################################
# FUNCTIONS #
####################################################################################################
def read_camera_matrix(line):
"""
Reads a camera matrix P (3x4) stored in the row-major scheme.
Input:
line: Row-major stored matrix separated by spaces, first element is the matrix name
Returns:
		camera matrix P (3x4)
"""
data = line.split(' ')
if data[0] != 'P2:':
print('ERROR: We need left camera matrix (P2)!')
exit(1)
P = np.asmatrix([[float(data[1]), float(data[2]), float(data[3]), float(data[4])],
[float(data[5]), float(data[6]), float(data[7]), float(data[8])],
[float(data[9]), float(data[10]), float(data[11]), float(data[12])]])
return P
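# Illustrative example (values made up, not a real KITTI calibration): a line such as
#   "P2: 700.0 0.0 600.0 40.0 0.0 700.0 180.0 0.0 0.0 0.0 1.0 0.0"
# is parsed row-major into the 3x4 matrix
#   [[700.0,   0.0, 600.0, 40.0],
#    [  0.0, 700.0, 180.0,  0.0],
#    [  0.0,   0.0,   1.0,  0.0]]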
def extract_3D_bb(data, P):
"""
Extract 3D bounding box coordinates in the image from the KITTI labels.
Input:
data: One split line of the label file (line.split(' '))
P: 3x4 camera projection matrix
Returns:
matrix of corners: fbr, rbr, fbl, rbl, ftr, rtr, ftl, rtl
"""
# Object dimensions
h = float(data[8])
w = float(data[9])
l = float(data[10])
# Position of the center point on the ground plane (xz plane)
cx = float(data[11])
cy = float(data[12])
cz = float(data[13])
# Rotation of the object around y
ry = float(data[14])
# 3D box corners - careful, the coordinate system of the car is that x points
# forward, not z! (It is rotated by 90deg with respect to the camera one)
# fbr, rbr, fbl, rbl, ftr, rtr, ftl, rtl
X = np.asmatrix([[l/2, -l/2, l/2, -l/2, l/2, -l/2, l/2, -l/2],
[0, 0, 0, 0, -h, -h, -h, -h],
[-w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2, w/2],
[1, 1, 1, 1, 1, 1, 1, 1]])
# Rotate the 3D box around y axis and translate it to the correct position in the camera frame
X = Rt4x4(R3x3_y(ry), t3x1(cx, cy, cz)) * X
x = P * X
# x is in homogeneous coordinates -> get u, v
x = x / x[2,:]
x = x[0:2,:]
# image = cv2.imread(path_image)
# # Front
# cv2.line(image, (int(x[0,0]), int(x[1,0])), (int(x[0,2]), int(x[1,2])), (0,255,0), 3)
# cv2.line(image, (int(x[0,4]), int(x[1,4])), (int(x[0,6]), int(x[1,6])), (0,255,0))
# cv2.line(image, (int(x[0,0]), int(x[1,0])), (int(x[0,4]), int(x[1,4])), (0,255,0))
# cv2.line(image, (int(x[0,2]), int(x[1,2])), (int(x[0,6]), int(x[1,6])), (0,255,0), 3)
# # Rear
# cv2.line(image, (int(x[0,1]), int(x[1,1])), (int(x[0,3]), int(x[1,3])), (0,0,255))
# cv2.line(image, (int(x[0,5]), int(x[1,5])), (int(x[0,7]), int(x[1,7])), (0,0,255))
# cv2.line(image, (int(x[0,1]), int(x[1,1])), (int(x[0,5]), int(x[1,5])), (0,0,255))
# cv2.line(image, (int(x[0,3]), int(x[1,3])), (int(x[0,7]), int(x[1,7])), (0,0,255))
# # Connections
# cv2.line(image, (int(x[0,0]), int(x[1,0])), (int(x[0,1]), int(x[1,1])), (255,0,0))
# cv2.line(image, (int(x[0,2]), int(x[1,2])), (int(x[0,3]), int(x[1,3])), (255,0,0), 3)
# cv2.line(image, (int(x[0,4]), int(x[1,4])), (int(x[0,5]), int(x[1,5])), (255,0,0))
# cv2.line(image, (int(x[0,6]), int(x[1,6])), (int(x[0,7]), int(x[1,7])), (255,0,0))
# # Show image
# cv2.imshow('img', image)
# cv2.waitKey()
return x
def flip_3D_bb(x, image_width):
"""
Flips the annotation of the image around y axis.
Input:
x: coordinates of points fbr, rbr, fbl, rbl, ftr, rtr, ftl, rtl
image_width: width of the flipped image
Return:
x - flipped coordinates
"""
# First flip the x coordinates of the points
x[0,:] = image_width - x[0,:]
# Now switch left and right points
x_out = np.matrix(np.copy(x))
x_out[:,0] = x[:,2]
x_out[:,1] = x[:,3]
x_out[:,2] = x[:,0]
x_out[:,3] = x[:,1]
x_out[:,4] = x[:,6]
x_out[:,5] = x[:,7]
x_out[:,6] = x[:,4]
x_out[:,7] = x[:,5]
return x_out
def process_image(path_image, path_label_file, path_calib_file, label, flip, filter, outfile):
"""
Processes one image from the dataset and writes it out to the outfile.
Input:
path_image: Path to the image file
path_label_file: Path to the label file with KITTI labels
path_calib_file: Path to the calibration file for this image
label: Which class label should be extracted from the dataset (default None)
flip: True/False whether the images should also be flipped by this script
filter: True/False whether we should filter out very occluded and truncated boxes
outfile: File handle of the open output BBTXT file
"""
if flip:
# We have to flip the image and save it
image = cv2.imread(path_image)
image_width = image.shape[1]
filename = os.path.basename(path_image)
directory = os.path.dirname(path_image).rstrip('/') + '_flip'
path_image = os.path.join(directory, filename)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.exists(path_image):
image = cv2.flip(image, 1)
cv2.imwrite(path_image, image)
with open(path_label_file, 'r') as infile_label, open(path_calib_file, 'r') as infile_calib:
# Read camera calibration matrices
for line in infile_calib:
if line[:2] == 'P2':
P = read_camera_matrix(line.rstrip('\n'))
# Read the objects
for line in infile_label:
line = line.rstrip('\n')
data = line.split(' ')
# First element of the data is the label. We don't want to process 'Misc' and
# 'DontCare' labels
if data[0] == 'Misc' or data[0] == 'DontCare': continue
# Check label, if required
if label is not None and MAPPING[LABELS[data[0]]] != label: continue
# We do not want to include objects, which are occluded or truncated too much
if filter and (int(data[2]) >= 2 or float(data[1]) > 0.75): continue
# Extract image coordinates (positions) of 3D bounding box corners, the corners are
# in the following order: fbr, rbr, fbl, rbl, ftr, rtr, ftl, rtl
x = extract_3D_bb(data, P)
if flip:
x = flip_3D_bb(x, image_width)
min_uv = np.min(x, axis=1) # xmin, ymin
max_uv = np.max(x, axis=1) # xmax, ymax
# The size of an image in KITTI is 1250x375. If the bounding box is significantly
# larger, discard it - probably just some large distortion from camera
if max_uv[1,0]-min_uv[1,0] > 700 or max_uv[0,0]-min_uv[0,0] > 1500:
continue
line_out = path_image + ' '
line_out += str(LABELS[data[0]]) + ' '
# For confidence we put one - just to have something
line_out += '1 '
# 3D bounding box is specified by the image coordinates of the front bottom left and
# right corners, rear bottom left corner and y coordinate of the front top left
# corner
line_out += str(min_uv[0,0]) + ' ' + str(min_uv[1,0]) + ' ' \
+ str(max_uv[0,0]) + ' ' + str(max_uv[1,0]) + ' ' \
+ str(x[0,2]) + ' ' + str(x[1,2]) + ' ' + str(x[0,0]) + ' ' \
+ str(x[1,0]) + ' ' + str(x[0,3]) + ' ' + str(x[1,3]) + ' ' \
+ str(x[1,6]) + '\n'
outfile.write(line_out)
def translate_file(path_labels, path_images, outfile, label, flip, filter):
"""
Runs the translation of the KITTI 3d bounding box label format into the BB3TXT format.
Input:
path_labels: Path to the "label_2" folder of the KITTI dataset
path_images: Path to the "image_2" folder with images from the KITTI dataset
outfile: File handle of the open output BBTXT file
label: Which class label should be extracted from the dataset (default None)
flip: True/False whether the images should also be flipped by this script
filter: True/False whether we should filter out very occluded and truncated boxes
"""
print('-- TRANSLATING KITTI TO BB3TXT')
# Get the list of all label files in the directory
filenames = [f for f in os.listdir(path_labels) if os.path.isfile(os.path.join(path_labels, f))]
if len(filenames) != 7481:
print('Wrong number (%d) of files in the KITTI dataset! Should be 7481.'%(len(filenames)))
return
# Read each file and write the labels from it
for f in filenames:
path_label_file = os.path.join(path_labels, f)
path_calib_file = os.path.join(path_labels.rstrip('/').rstrip('label_2'), 'calib', f)
if not os.path.exists(path_calib_file):
print('ERROR: We need camera calibration matrices "%s"'%(path_calib_file))
exit(1)
path_image = os.path.join(path_images, os.path.splitext(f)[0]) + '.png'
if not os.path.isfile(path_image):
print('WARNING: Image "%s" does not exist!'%(path_image))
process_image(path_image, path_label_file, path_calib_file, label, False, filter, outfile)
if flip:
# Add also the flipped image
process_image(path_image, path_label_file, path_calib_file, label, True, filter, outfile)
print('-- TRANSLATION DONE')
####################################################################################################
# MAIN #
####################################################################################################
def parse_arguments():
"""
Parse input options of the script.
"""
parser = argparse.ArgumentParser(description='Convert KITTI label files into BBTXT.')
parser.add_argument('path_labels', metavar='path_labels', type=str,
help='Path to the "label_2" folder of the KITTI dataset')
parser.add_argument('path_images', metavar='path_images', type=str,
help='Path to the "image_2" folder of the KITTI dataset')
parser.add_argument('outfile', metavar='path_outfile', type=argparse.FileType('w'),
help='Path to the output BBTXT file (including the extension)')
parser.add_argument('--label', metavar='label', type=str, default=None,
help='Single class of objects that should be separated from the dataset. ' \
'One from ' + str(available_categories(MAPPING)))
parser.add_argument('--flip', dest='flip', action='store_true', default=False,
help='If provided, the images will also be flipped')
parser.add_argument('--filter', dest='filter', action='store_true', default=False,
help='If provided, very occluded and truncated bounding boxes will be ' \
'filtered out')
args = parser.parse_args()
if not os.path.exists(args.path_labels):
print('Input path "%s" does not exist!'%(args.path_labels))
parser.print_help()
exit(1)
if not os.path.exists(args.path_images):
print('Input path "%s" does not exist!'%(args.path_images))
parser.print_help()
exit(1)
if args.label is not None and args.label not in available_categories(MAPPING):
print('Unknown class label "%s"!'%(args.label))
exit(1)
return args
def main():
args = parse_arguments()
translate_file(args.path_labels, args.path_images, args.outfile, args.label, args.flip,
args.filter)
args.outfile.close()
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"os.path.isfile",
"mappings.utils.LabelMappingManager",
"os.path.join",
"numpy.copy",
"cv2.imwrite",
"os.path.dirname",
"os.path.exists",
"numpy.max",
"argparse.FileType",
"mappings.utils.available_categories",
"os.path.basename",
"numpy.min",
"cv2.flip",
"os.listdir",
"shared.geometry.t3x1",
"os.makedirs",
"cv2.imread",
"numpy.asmatrix",
"os.path.splitext",
"shared.geometry.R3x3_y"
] |
[((1537, 1558), 'mappings.utils.LabelMappingManager', 'LabelMappingManager', ([], {}), '()\n', (1556, 1558), False, 'from mappings.utils import LabelMappingManager\n'), ((3286, 3487), 'numpy.asmatrix', 'np.asmatrix', (['[[l / 2, -l / 2, l / 2, -l / 2, l / 2, -l / 2, l / 2, -l / 2], [0, 0, 0, 0,\n -h, -h, -h, -h], [-w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2, \n w / 2], [1, 1, 1, 1, 1, 1, 1, 1]]'], {}), '([[l / 2, -l / 2, l / 2, -l / 2, l / 2, -l / 2, l / 2, -l / 2],\n [0, 0, 0, 0, -h, -h, -h, -h], [-w / 2, -w / 2, w / 2, w / 2, -w / 2, -w /\n 2, w / 2, w / 2], [1, 1, 1, 1, 1, 1, 1, 1]])\n', (3297, 3487), True, 'import numpy as np\n'), ((10767, 10843), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert KITTI label files into BBTXT."""'}), "(description='Convert KITTI label files into BBTXT.')\n", (10790, 10843), False, 'import argparse\n'), ((5335, 5345), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (5342, 5345), True, 'import numpy as np\n'), ((6276, 6298), 'cv2.imread', 'cv2.imread', (['path_image'], {}), '(path_image)\n', (6286, 6298), False, 'import cv2\n'), ((6347, 6375), 'os.path.basename', 'os.path.basename', (['path_image'], {}), '(path_image)\n', (6363, 6375), False, 'import os\n'), ((6456, 6489), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (6468, 6489), False, 'import os\n'), ((9697, 9725), 'os.path.join', 'os.path.join', (['path_labels', 'f'], {}), '(path_labels, f)\n', (9709, 9725), False, 'import os\n'), ((11883, 11915), 'os.path.exists', 'os.path.exists', (['args.path_labels'], {}), '(args.path_labels)\n', (11897, 11915), False, 'import os\n'), ((12019, 12051), 'os.path.exists', 'os.path.exists', (['args.path_images'], {}), '(args.path_images)\n', (12033, 12051), False, 'import os\n'), ((3638, 3648), 'shared.geometry.R3x3_y', 'R3x3_y', (['ry'], {}), '(ry)\n', (3644, 3648), False, 'from shared.geometry import R3x3_y, t3x1, Rt4x4\n'), ((3650, 3666), 'shared.geometry.t3x1', 't3x1', (['cx', 'cy', 'cz'], {}), '(cx, cy, cz)\n', (3654, 3666), False, 'from shared.geometry import R3x3_y, t3x1, Rt4x4\n'), ((6500, 6525), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (6514, 6525), False, 'import os\n'), ((6530, 6552), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (6541, 6552), False, 'import os\n'), ((6562, 6588), 'os.path.exists', 'os.path.exists', (['path_image'], {}), '(path_image)\n', (6576, 6588), False, 'import os\n'), ((6601, 6619), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (6609, 6619), False, 'import cv2\n'), ((6623, 6653), 'cv2.imwrite', 'cv2.imwrite', (['path_image', 'image'], {}), '(path_image, image)\n', (6634, 6653), False, 'import cv2\n'), ((7661, 7678), 'numpy.min', 'np.min', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (7667, 7678), True, 'import numpy as np\n'), ((7705, 7722), 'numpy.max', 'np.max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (7711, 7722), True, 'import numpy as np\n'), ((9404, 9427), 'os.listdir', 'os.listdir', (['path_labels'], {}), '(path_labels)\n', (9414, 9427), False, 'import os\n'), ((9824, 9855), 'os.path.exists', 'os.path.exists', (['path_calib_file'], {}), '(path_calib_file)\n', (9838, 9855), False, 'import os\n'), ((10030, 10056), 'os.path.isfile', 'os.path.isfile', (['path_image'], {}), '(path_image)\n', (10044, 10056), False, 'import os\n'), ((11171, 11193), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (11188, 11193), False, 'import argparse\n'), ((12196, 
12225), 'mappings.utils.available_categories', 'available_categories', (['MAPPING'], {}), '(MAPPING)\n', (12216, 12225), False, 'from mappings.utils import available_categories\n'), ((9446, 9474), 'os.path.join', 'os.path.join', (['path_labels', 'f'], {}), '(path_labels, f)\n', (9458, 9474), False, 'import os\n'), ((6391, 6418), 'os.path.dirname', 'os.path.dirname', (['path_image'], {}), '(path_image)\n', (6406, 6418), False, 'import os\n'), ((9988, 10007), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (10004, 10007), False, 'import os\n'), ((11447, 11476), 'mappings.utils.available_categories', 'available_categories', (['MAPPING'], {}), '(MAPPING)\n', (11467, 11476), False, 'from mappings.utils import available_categories\n')]
|
import pytest
import zarr
from numpy import zeros
from ome_zarr.data import create_zarr
from ome_zarr.format import FormatV01, FormatV02, FormatV03
from ome_zarr.io import parse_url
from ome_zarr.reader import Label, Labels, Multiscales, Node, Plate, Well
from ome_zarr.writer import write_image, write_plate_metadata, write_well_metadata
class TestNode:
@pytest.fixture(autouse=True)
def initdir(self, tmpdir):
self.path = tmpdir.mkdir("data")
create_zarr(str(self.path))
def test_image(self):
node = Node(parse_url(str(self.path)), list())
assert node.data
assert node.metadata
assert len(node.specs) == 2
assert isinstance(node.specs[0], Multiscales)
def test_labels(self):
filename = str(self.path.join("labels"))
node = Node(parse_url(filename), list())
assert not node.data
assert not node.metadata
assert len(node.specs) == 1
assert isinstance(node.specs[0], Labels)
def test_label(self):
filename = str(self.path.join("labels", "coins"))
node = Node(parse_url(filename), list())
assert node.data
assert node.metadata
assert len(node.specs) == 2
assert isinstance(node.specs[0], Label)
assert isinstance(node.specs[1], Multiscales)
class TestHCSNode:
@pytest.fixture(autouse=True)
def initdir(self, tmpdir):
self.path = tmpdir.mkdir("data")
self.store = parse_url(str(self.path), mode="w").store
self.root = zarr.group(store=self.store)
def test_minimal_plate(self):
write_plate_metadata(self.root, ["A"], ["1"], ["A/1"])
row_group = self.root.require_group("A")
well = row_group.require_group("1")
write_well_metadata(well, ["0"])
image = well.require_group("0")
write_image(zeros((1, 1, 1, 256, 256)), image)
node = Node(parse_url(str(self.path)), list())
assert node.data
assert node.metadata
assert len(node.specs) == 1
assert isinstance(node.specs[0], Plate)
assert node.specs[0].row_names == ["A"]
assert node.specs[0].col_names == ["1"]
assert node.specs[0].well_paths == ["A/1"]
assert node.specs[0].row_count == 1
assert node.specs[0].column_count == 1
node = Node(parse_url(str(self.path / "A" / "1")), list())
assert node.data
assert node.metadata
assert len(node.specs) == 1
assert isinstance(node.specs[0], Well)
@pytest.mark.parametrize("fmt", (FormatV01(), FormatV02(), FormatV03()))
def test_multiwells_plate(self, fmt):
row_names = ["A", "B", "C"]
col_names = ["1", "2", "3", "4"]
well_paths = ["A/1", "A/2", "A/4", "B/2", "B/3", "C/1", "C/3", "C/4"]
write_plate_metadata(self.root, row_names, col_names, well_paths, fmt=fmt)
for wp in well_paths:
row, col = wp.split("/")
row_group = self.root.require_group(row)
well = row_group.require_group(col)
write_well_metadata(well, ["0", "1", "2"], fmt=fmt)
for field in range(3):
image = well.require_group(str(field))
write_image(zeros((1, 1, 1, 256, 256)), image)
node = Node(parse_url(str(self.path)), list())
assert node.data
assert node.metadata
assert len(node.specs) == 1
assert isinstance(node.specs[0], Plate)
assert node.specs[0].row_names == row_names
assert node.specs[0].col_names == col_names
assert node.specs[0].well_paths == well_paths
assert node.specs[0].row_count == 3
assert node.specs[0].column_count == 4
for wp in well_paths:
node = Node(parse_url(str(self.path / wp)), list())
assert node.data
assert node.metadata
assert len(node.specs) == 1
assert isinstance(node.specs[0], Well)
empty_wells = ["A/3", "B/1", "B/4", "C/2"]
for wp in empty_wells:
assert parse_url(str(self.path / wp)) is None
@pytest.mark.xfail(reason="https://github.com/ome/ome-zarr-py/issues/145")
@pytest.mark.parametrize(
"axes, dims",
(
(["y", "x"], (256, 256)),
(["t", "y", "x"], (1, 256, 256)),
(["z", "y", "x"], (1, 256, 256)),
(["c", "y", "x"], (1, 256, 256)),
(["c", "z", "y", "x"], (1, 1, 256, 256)),
(["t", "z", "y", "x"], (1, 1, 256, 256)),
(["t", "c", "y", "x"], (1, 1, 256, 256)),
),
)
def test_plate_2D5D(self, axes, dims):
write_plate_metadata(self.root, ["A"], ["1"], ["A/1"], fmt=FormatV03())
row_group = self.root.require_group("A")
well = row_group.require_group("1")
write_well_metadata(well, ["0"], fmt=FormatV03())
image = well.require_group("0")
write_image(zeros(dims), image, fmt=FormatV03(), axes=axes)
node = Node(parse_url(str(self.path)), list())
assert node.data
assert node.metadata
assert len(node.specs) == 1
assert isinstance(node.specs[0], Plate)
node = Node(parse_url(str(self.path / "A" / "1")), list())
assert node.data
assert node.metadata
assert len(node.specs) == 1
assert isinstance(node.specs[0], Well)
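# These tests can be run selectively with pytest's keyword filter, e.g.
#   pytest -k "plate" <path to this test module>
# (the module path is whatever this file is saved as; it is not fixed here).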
|
[
"ome_zarr.format.FormatV01",
"ome_zarr.writer.write_plate_metadata",
"ome_zarr.writer.write_well_metadata",
"pytest.fixture",
"numpy.zeros",
"ome_zarr.format.FormatV02",
"zarr.group",
"ome_zarr.format.FormatV03",
"pytest.mark.parametrize",
"ome_zarr.io.parse_url",
"pytest.mark.xfail"
] |
[((363, 391), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (377, 391), False, 'import pytest\n'), ((1351, 1379), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1365, 1379), False, 'import pytest\n'), ((4109, 4182), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""https://github.com/ome/ome-zarr-py/issues/145"""'}), "(reason='https://github.com/ome/ome-zarr-py/issues/145')\n", (4126, 4182), False, 'import pytest\n'), ((4188, 4494), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""axes, dims"""', "((['y', 'x'], (256, 256)), (['t', 'y', 'x'], (1, 256, 256)), (['z', 'y',\n 'x'], (1, 256, 256)), (['c', 'y', 'x'], (1, 256, 256)), (['c', 'z', 'y',\n 'x'], (1, 1, 256, 256)), (['t', 'z', 'y', 'x'], (1, 1, 256, 256)), ([\n 't', 'c', 'y', 'x'], (1, 1, 256, 256)))"], {}), "('axes, dims', ((['y', 'x'], (256, 256)), (['t', 'y',\n 'x'], (1, 256, 256)), (['z', 'y', 'x'], (1, 256, 256)), (['c', 'y', 'x'\n ], (1, 256, 256)), (['c', 'z', 'y', 'x'], (1, 1, 256, 256)), (['t', 'z',\n 'y', 'x'], (1, 1, 256, 256)), (['t', 'c', 'y', 'x'], (1, 1, 256, 256))))\n", (4211, 4494), False, 'import pytest\n'), ((1535, 1563), 'zarr.group', 'zarr.group', ([], {'store': 'self.store'}), '(store=self.store)\n', (1545, 1563), False, 'import zarr\n'), ((1607, 1661), 'ome_zarr.writer.write_plate_metadata', 'write_plate_metadata', (['self.root', "['A']", "['1']", "['A/1']"], {}), "(self.root, ['A'], ['1'], ['A/1'])\n", (1627, 1661), False, 'from ome_zarr.writer import write_image, write_plate_metadata, write_well_metadata\n'), ((1763, 1795), 'ome_zarr.writer.write_well_metadata', 'write_well_metadata', (['well', "['0']"], {}), "(well, ['0'])\n", (1782, 1795), False, 'from ome_zarr.writer import write_image, write_plate_metadata, write_well_metadata\n'), ((2811, 2885), 'ome_zarr.writer.write_plate_metadata', 'write_plate_metadata', (['self.root', 'row_names', 'col_names', 'well_paths'], {'fmt': 'fmt'}), '(self.root, row_names, col_names, well_paths, fmt=fmt)\n', (2831, 2885), False, 'from ome_zarr.writer import write_image, write_plate_metadata, write_well_metadata\n'), ((823, 842), 'ome_zarr.io.parse_url', 'parse_url', (['filename'], {}), '(filename)\n', (832, 842), False, 'from ome_zarr.io import parse_url\n'), ((1104, 1123), 'ome_zarr.io.parse_url', 'parse_url', (['filename'], {}), '(filename)\n', (1113, 1123), False, 'from ome_zarr.io import parse_url\n'), ((1856, 1882), 'numpy.zeros', 'zeros', (['(1, 1, 1, 256, 256)'], {}), '((1, 1, 1, 256, 256))\n', (1861, 1882), False, 'from numpy import zeros\n'), ((3066, 3117), 'ome_zarr.writer.write_well_metadata', 'write_well_metadata', (['well', "['0', '1', '2']"], {'fmt': 'fmt'}), "(well, ['0', '1', '2'], fmt=fmt)\n", (3085, 3117), False, 'from ome_zarr.writer import write_image, write_plate_metadata, write_well_metadata\n'), ((2566, 2577), 'ome_zarr.format.FormatV01', 'FormatV01', ([], {}), '()\n', (2575, 2577), False, 'from ome_zarr.format import FormatV01, FormatV02, FormatV03\n'), ((2579, 2590), 'ome_zarr.format.FormatV02', 'FormatV02', ([], {}), '()\n', (2588, 2590), False, 'from ome_zarr.format import FormatV01, FormatV02, FormatV03\n'), ((2592, 2603), 'ome_zarr.format.FormatV03', 'FormatV03', ([], {}), '()\n', (2601, 2603), False, 'from ome_zarr.format import FormatV01, FormatV02, FormatV03\n'), ((4934, 4945), 'numpy.zeros', 'zeros', (['dims'], {}), '(dims)\n', (4939, 4945), False, 'from numpy import zeros\n'), ((4710, 4721), 'ome_zarr.format.FormatV03', 'FormatV03', ([], {}), 
'()\n', (4719, 4721), False, 'from ome_zarr.format import FormatV01, FormatV02, FormatV03\n'), ((4861, 4872), 'ome_zarr.format.FormatV03', 'FormatV03', ([], {}), '()\n', (4870, 4872), False, 'from ome_zarr.format import FormatV01, FormatV02, FormatV03\n'), ((4958, 4969), 'ome_zarr.format.FormatV03', 'FormatV03', ([], {}), '()\n', (4967, 4969), False, 'from ome_zarr.format import FormatV01, FormatV02, FormatV03\n'), ((3236, 3262), 'numpy.zeros', 'zeros', (['(1, 1, 1, 256, 256)'], {}), '((1, 1, 1, 256, 256))\n', (3241, 3262), False, 'from numpy import zeros\n')]
|
import typing
import time
import numpy as np
import pyautogui as pg
import vboard as vb
class MouseClicker:
def __init__(self):
scr = vb.make_screenshot(bw=False)
self.screenshot_wh = scr.shape[::-1]
self.screen_wh = tuple(pg.size())
def click(self, ploc: typing.Tuple[int, int], leftbutton: bool):
sloc = (int(ploc[0] * self.screen_wh[0] / self.screenshot_wh[0]),
int(ploc[1] * self.screen_wh[1] / self.screenshot_wh[1]))
pg.moveTo(sloc[0], sloc[1])
button = 'left' if leftbutton else 'right'
pg.click(button=button)
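# Example of the coordinate scaling done in MouseClicker.click (illustrative numbers
# only, e.g. a HiDPI setup where screenshots come out at 3840x2160 while pyautogui
# reports a logical screen of 1920x1080): a screenshot pixel (1000, 500) maps to
#   sloc = (int(1000 * 1920 / 3840), int(500 * 1080 / 2160)) = (500, 250)
# so the click lands on the same on-screen point that was detected in the screenshot.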
class ActionPlanner:
def __init__(self, delay_after: float, bdetector: vb.BoardLocator,
sdetector: vb.SmilyDetector):
self.mc = MouseClicker()
self.delay_after = delay_after
self.bd = bdetector
self.sd = sdetector
def click_smily_and_check_stage(self):
sloc = self.sd.get_smily_pixel_location()
self.mc.click(sloc, True)
time.sleep(0.5)
scr = vb.make_screenshot()
stage = self.sd.get_game_stage(scr)
return stage
def click_mines(self, board, qidx_mine):
raise NotImplementedError
class PlainActionPlanner(ActionPlanner):
def __init__(self, delay_after: float, bdetector: vb.BoardLocator,
sdetector: vb.SmilyDetector):
super().__init__(delay_after, bdetector, sdetector)
def click_mines(self, board, qidx_mine):
blocs = np.ravel_multi_index(qidx_mine[:,:2].T,
(self.bd.height, self.bd.width))
for bloc, mine_under in zip(blocs, qidx_mine[:,2]):
ploc = vb.cellid_as_pixelloc(self.bd, bloc)
self.mc.click(ploc, not bool(mine_under))
time.sleep(self.delay_after)
class NoFlagActionPlanner(ActionPlanner):
def __init__(self, delay_after: float, bdetector: vb.BoardLocator,
sdetector: vb.SmilyDetector):
super().__init__(delay_after, bdetector, sdetector)
def click_mines(self, board, qidx_mine):
blocs = np.ravel_multi_index(qidx_mine[:,:2].T,
(self.bd.height, self.bd.width))
for bloc, mine_under in zip(blocs, qidx_mine[:,2]):
if not mine_under:
ploc = vb.cellid_as_pixelloc(self.bd, bloc)
self.mc.click(ploc, True)
time.sleep(self.delay_after)
class AreaOpenActionPlanner(ActionPlanner):
"""
    Note on "AreaOpen": clicking on a numbered square whose mines are all
    accounted for (a "satisfied" square) opens all of its remaining neighbors.
"""
def __init__(self, delay_after: float, bdetector: vb.BoardLocator,
sdetector: vb.SmilyDetector):
super().__init__(delay_after, bdetector, sdetector)
def click_mines(self, board, qidx_mine):
# TODO
raise NotImplementedError
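# One possible direction for the TODO above -- purely a sketch, not part of the
# original solver: open safe cells as in NoFlagActionPlanner, but when a numbered
# cell on `board` already has all of its mines identified (e.g. via qidx_mine),
# a single click on that cell would open every remaining neighbor at once,
# reducing the number of clicks per move. Roughly:
#
#     for bloc in satisfied_cells:   # hypothetical list of satisfied numbered cells
#         self.mc.click(vb.cellid_as_pixelloc(self.bd, bloc), True)
#
# Building `satisfied_cells` requires comparing each cell's number against the known
# mines among its neighbors, which is left open here just as in the TODO.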
|
[
"time.sleep",
"vboard.cellid_as_pixelloc",
"numpy.ravel_multi_index",
"vboard.make_screenshot",
"pyautogui.click",
"pyautogui.size",
"pyautogui.moveTo"
] |
[((150, 178), 'vboard.make_screenshot', 'vb.make_screenshot', ([], {'bw': '(False)'}), '(bw=False)\n', (168, 178), True, 'import vboard as vb\n'), ((492, 519), 'pyautogui.moveTo', 'pg.moveTo', (['sloc[0]', 'sloc[1]'], {}), '(sloc[0], sloc[1])\n', (501, 519), True, 'import pyautogui as pg\n'), ((579, 602), 'pyautogui.click', 'pg.click', ([], {'button': 'button'}), '(button=button)\n', (587, 602), True, 'import pyautogui as pg\n'), ((1008, 1023), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1018, 1023), False, 'import time\n'), ((1038, 1058), 'vboard.make_screenshot', 'vb.make_screenshot', ([], {}), '()\n', (1056, 1058), True, 'import vboard as vb\n'), ((1487, 1560), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['qidx_mine[:, :2].T', '(self.bd.height, self.bd.width)'], {}), '(qidx_mine[:, :2].T, (self.bd.height, self.bd.width))\n', (1507, 1560), True, 'import numpy as np\n'), ((1775, 1803), 'time.sleep', 'time.sleep', (['self.delay_after'], {}), '(self.delay_after)\n', (1785, 1803), False, 'import time\n'), ((2088, 2161), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['qidx_mine[:, :2].T', '(self.bd.height, self.bd.width)'], {}), '(qidx_mine[:, :2].T, (self.bd.height, self.bd.width))\n', (2108, 2161), True, 'import numpy as np\n'), ((2399, 2427), 'time.sleep', 'time.sleep', (['self.delay_after'], {}), '(self.delay_after)\n', (2409, 2427), False, 'import time\n'), ((255, 264), 'pyautogui.size', 'pg.size', ([], {}), '()\n', (262, 264), True, 'import pyautogui as pg\n'), ((1676, 1712), 'vboard.cellid_as_pixelloc', 'vb.cellid_as_pixelloc', (['self.bd', 'bloc'], {}), '(self.bd, bloc)\n', (1697, 1712), True, 'import vboard as vb\n'), ((2312, 2348), 'vboard.cellid_as_pixelloc', 'vb.cellid_as_pixelloc', (['self.bd', 'bloc'], {}), '(self.bd, bloc)\n', (2333, 2348), True, 'import vboard as vb\n')]
|
"""
Data readers for remote sensing devices (e.g., 3D data)
Based on https://github.com/NWTC/datatools/blob/master/remote_sensing.py
"""
import numpy as np
import pandas as pd
expected_profiler_datatypes=['wind','winds','rass']
def profiler(fname,scans=None,
data_type=None,
datetime_format=None,
num_info_lines=5,
check_na=['SPD','DIR'],na_values=999999,
height_name='HT',
read_scan_properties=False,
verbose=False):
"""Wind Profiler radar with RASS
Users:
- Earth Sciences Research Laboratory (ESRL)
- Texas Tech University (TTU)
Assumed data format for consensus data format rev 5.1 based on
provided reference for rev 4.1 from:
https://a2e.energy.gov/data/wfip2/attach/915mhz-cns-winds-data-format.txt
- Winds variables of interest: SPD, DIR(, SNR)
- RASS variables of interest: T, Tc, W
Other data may be readable by specifying the `datetime_format` and
`num_info_lines` kwargs.
Additional data format reference:
https://www.esrl.noaa.gov/psd/data/obs/formats/
Usage
=====
scans : int, list, or None
Number of data blocks to read from file; a list of zero-indexed
scans to read from file; or set to None to read all data
data_type : str or None
Data-type identifier in second line of each data block, used to
override the expected types; if None then check against
`expected_profiler_datatypes` list
datetime_format : str or None
Datetime format to parse fourth line of each data block; if
None, then assume the TTU radar format:
YY MM DD HH MM SS
num_info_lines : int
Number of header lines in between the fourth datetime line and
the 'HT SPD DIR ...' header line, presumably containing scan
information.
check_na : list
Column names from file to check for n/a or nan values
na_values : values or list of values
Values to be considered n/a and set to nan
height_name : str or None
Name of height column to return a multi-indexed dataframe, or
None to return with datetime index only
read_scan_properties : bool, list, optional
Read scan properties for each data block if True or an existing
scan information list is provided (to be updated). Note that
this has only be implemented for the TTU radar format at the
moment.
"""
dataframes = []
if read_scan_properties is True:
scantypes = []
print_scan_properties = True
elif read_scan_properties is not False:
# scantypes provided as a list of dicts
assert isinstance(read_scan_properties, list)
scantypes = read_scan_properties
read_scan_properties = True
print_scan_properties = False
def match_scan_type(newscan):
assert (newscan is not None)
match = False
for itype, scaninfo in enumerate(scantypes):
if newscan==scaninfo:
match = True
break
if match:
scantypeid = itype
else:
# new scan type
scantypes.append(newscan)
scantypeid = len(scantypes)-1
return scantypeid
with open(fname,'r') as f:
if scans is not None:
if hasattr(scans,'__iter__'):
# specified scans to read
scans_to_read = np.arange(np.max(scans)+1)
else:
# specified number of scans
scans_to_read = np.arange(scans)
scans = scans_to_read
for iscan in scans_to_read:
try:
df,scaninfo = _read_profiler_data_block(
f, expected_data_type=data_type,
datetime_format=datetime_format,
num_info_lines=num_info_lines,
read_scan_properties=read_scan_properties)
except (IOError,IndexError):
break
if iscan in scans:
if verbose:
print('Adding scan',iscan,
'at',df['datetime'].unique(),df.index)
if read_scan_properties:
df['scan_type'] = match_scan_type(scaninfo)
dataframes.append(df)
else:
if verbose:
print('Skipping scan',iscan,
'at',df['datetime'].unique(),df.index)
else:
# read all scans
iscan = 0
while True:
try:
df,scaninfo = _read_profiler_data_block(
f, expected_data_type=data_type,
datetime_format=datetime_format,
num_info_lines=num_info_lines,
read_scan_properties=read_scan_properties)
except (IOError,IndexError):
break
else:
if verbose:
print('Read scan',iscan,
'at',df['datetime'].unique(),df.index)
if read_scan_properties:
df['scan_type'] = match_scan_type(scaninfo)
dataframes.append(df)
iscan += 1
df = pd.concat(dataframes)
if na_values is not None:
nalist = []
for col in check_na:
if col in df.columns:
matches = [col]
else:
matches = [ c for c in df.columns if c.startswith(col+'.') ]
if len(matches) > 0:
nalist += matches
else:
if verbose:
print('Note: column '+col+'* not found')
check_na = nalist
if not hasattr(na_values,'__iter__'):
na_values = [na_values]
for val in na_values:
for col in check_na:
if verbose:
print('Checking',col,'for',val)
df.loc[df[col]==val,col] = np.nan # flag bad values
if read_scan_properties and print_scan_properties:
for itype,scantype in enumerate(scantypes):
print('scan type',itype,scantype)
if height_name is not None and height_name in df.columns:
df.rename(columns={height_name:'height'},inplace=True)
df = df.set_index(['datetime','height'])
else:
df = df.set_index('datetime')
return df
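# Typical call (a sketch -- the file name and keyword choices below are illustrative,
# not tied to a specific instrument or dataset):
#
#     df = profiler('915mhz_cns_winds.txt', scans=None,
#                   check_na=['SPD', 'DIR'], na_values=999999,
#                   read_scan_properties=True, verbose=False)
#
# The returned frame is indexed by (datetime, height); with height_name=None the
# index is datetime only and the raw height column is kept as data.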
def _read_profiler_data_block(f,
expected_data_type=None,
datetime_format=None,
num_info_lines=5,
read_scan_properties=False):
"""Used by radar profiler. This was originally developed to process
the TTU radar profiler output (WINDS/RASS).
General expected data block format, line by line, terminated by the
'$' character:
1: description
2: profiler_datatype version_information
3: location_information
4: scan_info (line 1)
5: scan_info (line 2)
...
3+num_info_lines: scan_info (line num_info_lines)
header: HT SPD DIR ...
height0 spd0 dir0 ...
height1 spd1 dir1 ...
...
$
"""
    # Line 1 (may not be present for subsequent blocks within the same file)
name = f.readline().strip()
if name == '':
# Line 2: station name
name = f.readline().strip()
# Line 3: WINDS, version
data_format = f.readline().strip()
datatype = data_format.split()[0]
if expected_data_type is None:
assert datatype.lower() in expected_profiler_datatypes
else:
assert datatype == expected_data_type
# Line 4: lat (N), long (W), elevation (m)
#lat,lon,elev = [float(val) for val in f.readline().split()] # TTU
location_info = [float(val) for val in f.readline().split()]
# Line 5: date
#Y,m,d,H,M,S,_ = f.readline().split() # TTU
datetime_info = f.readline().split()
if datetime_format is None:
try:
# TTU data, e.g.: " 13 11 08 00 00 01 0"
Y,m,d,H,M,S,_ = datetime_info
except ValueError:
# not enough values to unpack (expected 7, got ...)
raise ValueError('Unexpected header line 4--need to specify datetime_format')
else:
datetime = pd.to_datetime('20{}{}{} {}{}{}'.format(Y,m,d,H,M,S))
else:
# more general data, e.g., "2015-08-24 12:00:00 00:00"
# - figure out expected string length by evaluating strftime
# with the specified format
testdate_str = pd.datetime.strftime(pd.datetime.today(),
format=datetime_format)
# - recombine the split string with spaces (this gets rid of
# repeated spaces)
datetime_str = ' '.join(datetime_info)[:len(testdate_str)]
datetime = pd.to_datetime(datetime_str,format=datetime_format)
if read_scan_properties:
# Line 6: consensus averaging time [min], # beams, # range gates
cns_avg_time, num_beams, num_ranges = [int(val) for val in f.readline().split()]
# Line 7: for each beam: num_records:tot_records (consensus_window_size)
lineitems = f.readline().split()
assert len(lineitems) == 2*num_beams
num_records = [int(item.split(':')[0]) for item in lineitems[::2]]
tot_records = [int(item.split(':')[1]) for item in lineitems[::2]]
cns_window_size = [float(item.strip('()')) for item in lineitems[1::2]]
if datatype=='WINDS':
# Line 8: processing info (oblique/vertical pairs)
lineitems = [int(val) for val in f.readline().split()]
num_coherent_integrations = lineitems[:2]
num_spectral_averages = lineitems[2:4]
pulse_width = lineitems[4:6] # [ns]
inner_pulse_period = lineitems[6:8] # [ms]
# Line 9: processing info (oblique/vertical pairs)
lineitems = f.readline().split()
doppler_value = [float(val) for val in lineitems[:2]] # [m/s]
vertical_correction = bool(lineitems[2])
delay = [int(val) for val in lineitems[3:5]] # [ns]
num_gates = [int(val) for val in lineitems[5:7]]
gate_spacing = [int(val) for val in lineitems[7:9]] # [ns]
# Line 10: for each beam: azimuth, elevation
lineitems = [float(val) for val in f.readline().split()]
assert len(lineitems) == 2*num_beams
beam_azimuth = lineitems[::2] # [deg]
beam_elevation = lineitems[1::2] # [deg]
elif datatype=='RASS':
# Line 8: processing info
lineitems = [int(val) for val in f.readline().split()]
num_coherent_integrations = lineitems[0]
num_spectral_averages = lineitems[1]
pulse_width = lineitems[2] # [ns]
inner_pulse_period = lineitems[3] # [ms]
# Line 9: processing info (oblique/vertical pairs)
lineitems = f.readline().split()
doppler_value = float(lineitems[0]) # [m/s]
vertical_correction = 'n/a'
delay = int(lineitems[1]) # [ns]
num_gates = int(lineitems[2])
gate_spacing = int(lineitems[3]) # [ns]
# Line 10: for each beam: azimuth, elevation
lineitems = [float(val) for val in f.readline().split()]
assert len(lineitems) == 2*num_beams
beam_azimuth = lineitems[::2] # [deg]
beam_elevation = lineitems[1::2] # [deg]
else:
for _ in range(num_info_lines):
f.readline()
# Line 11: Column labels
header = f.readline().split()
header = [ col + '.' + str(header[:i].count(col))
if header.count(col) > 1
else col
for i,col in enumerate(header) ]
# Line 12: Start of data
block = []
line = f.readline()
while not line.strip()=='$' and not line=='':
block.append(line.split())
line = f.readline()
df = pd.DataFrame(data=block,columns=header,dtype=float)
df['datetime'] = datetime
# return data and header info if requested
if read_scan_properties:
scaninfo = {
'station':name,
'data_format':data_format,
# Line 6
'consensus_avg_time_min':cns_avg_time,
'num_beams':num_beams,
'num_range_gates':num_ranges,
# Line 7
'beam:reqd_records_for_consensus': num_records,
'beam:tot_num_records': tot_records,
'beam:consensus_window_size_m/s': cns_window_size,
# Line 8
'num_coherent_integrations': num_coherent_integrations,
'num_spectral_averages': num_spectral_averages,
'pulse_width_ns': pulse_width,
'inner_pulse_period_ms': inner_pulse_period,
# Line 9
'fullscale_doppler_value_m/s': doppler_value,
'vertical_correction_to_obliques': vertical_correction,
'delay_to_first_gate_ns': delay,
'num_gates': num_gates,
'gate_spacing_ns': gate_spacing,
# Line 10
'beam:azimuth_deg': beam_azimuth,
'beam:elevation_deg': beam_elevation,
}
return df, scaninfo
else:
return df, None
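# The header handling above de-duplicates repeated column labels by appending an
# occurrence index. A standalone illustration of the same idiom (example labels
# only, not from any particular radar file):
#
#     header = ['HT', 'SPD', 'DIR', 'SNR', 'SNR', 'SNR']
#     header = [col + '.' + str(header[:i].count(col))
#               if header.count(col) > 1 else col
#               for i, col in enumerate(header)]
#     # -> ['HT', 'SPD', 'DIR', 'SNR.0', 'SNR.1', 'SNR.2']
#
# which is why profiler() also matches columns with c.startswith(col + '.') when a
# requested check_na column is not found verbatim.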
|
[
"pandas.DataFrame",
"pandas.datetime.today",
"numpy.max",
"numpy.arange",
"pandas.to_datetime",
"pandas.concat"
] |
[((5468, 5489), 'pandas.concat', 'pd.concat', (['dataframes'], {}), '(dataframes)\n', (5477, 5489), True, 'import pandas as pd\n'), ((12293, 12346), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'block', 'columns': 'header', 'dtype': 'float'}), '(data=block, columns=header, dtype=float)\n', (12305, 12346), True, 'import pandas as pd\n'), ((9124, 9176), 'pandas.to_datetime', 'pd.to_datetime', (['datetime_str'], {'format': 'datetime_format'}), '(datetime_str, format=datetime_format)\n', (9138, 9176), True, 'import pandas as pd\n'), ((8851, 8870), 'pandas.datetime.today', 'pd.datetime.today', ([], {}), '()\n', (8868, 8870), True, 'import pandas as pd\n'), ((3589, 3605), 'numpy.arange', 'np.arange', (['scans'], {}), '(scans)\n', (3598, 3605), True, 'import numpy as np\n'), ((3478, 3491), 'numpy.max', 'np.max', (['scans'], {}), '(scans)\n', (3484, 3491), True, 'import numpy as np\n')]
|
import datetime
from .functions import read_json, aggregate_surveys_no_config
import glob
import json
import logging
import math
import numpy as np
import os
import pandas as pd
import pytz
from typing import List
def convert_time_to_date(submit_time, day, time):
"""
Takes a single array of timings and a single day
Args:
submit_time(datetime):
date in week for which we want to extract another date and time
day(int):
desired day of week
time(list):
            List of delivery times (in seconds after midnight) from the survey configuration information
"""
# Convert inputted desired day into an integer between 0 and 6
day = day % 7
# Get the days of the given week using the dow of the given submit day
dow = submit_time.weekday()
days = [submit_time + datetime.timedelta(days=i) for i in range(0 - dow, 7 - dow)]
time = [str(datetime.timedelta(seconds=t)) for t in time]
time = [t.split(':') for t in time]
time = [[int(p) for p in t] for t in time]
# Get rid of timing
# https://stackoverflow.com/questions/26882499/reset-time-part-of-a-pandas-timestamp
# print(time)
days = [d - pd.offsets.Micro(0) for d in days]
days = [[d.replace(hour=t[0], minute=t[1], second=t[2], microsecond=0) for t in time] for d in days]
return days[day]
def generate_survey_times(time_start, time_end, timings=[], survey_type='weekly'):
"""
Takes a start time and end time and generates a schedule of all sent surveys in time frame for the given survey type
Args:
time_start(str):
The first date for which we want to generate survey times
time_end(str):
The last date for which we want to generate survey times
timings(list):
list of survey timings, directly from the configuration file survey information
survey_type(str):
What type of survey schedule to generate times for
NOTE: As of now this only works for weekly surveys
Returns:
surveys(list):
A list of all survey times that occur between the time_start and time_end per the given survey timings schedule
"""
if survey_type not in ['weekly', 'absolute', 'relative']:
raise ValueError('Incorrect type of survey. Ensure this is weekly, absolute, or relative.')
# Get the number of weeks between start and end time
t_start = pd.Timestamp(time_start)
t_end = pd.Timestamp(time_end)
weeks = pd.Timedelta(t_end - t_start).days
# Get ceiling number of weeks
weeks = math.ceil(weeks / 7.0)
# Roll dates
t_lag = list(np.roll(np.array(timings, dtype="object"), -1))
# for each week, generate the survey times and append to a list
start_dates = [time_start + datetime.timedelta(days=7 * (i)) for i in range(weeks)]
surveys = []
for s in start_dates:
# Get the starting day of week
# dow_s = s.weekday()
for i, t in enumerate(t_lag):
if len(t) > 0:
surveys.extend(convert_time_to_date(s, day=i, time=t))
return surveys
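# Example (illustrative timings only): for a survey delivered twice a week at 10:30
# (37800 seconds after midnight), the configuration-style timings list holds one
# sub-list of delivery times per weekday, e.g.
#
#     times = generate_survey_times(pd.Timestamp('2021-01-01'),
#                                   pd.Timestamp('2021-01-15'),
#                                   timings=[[], [37800], [], [], [37800], [], []])
#
# `times` then contains every scheduled delivery datetime between the two dates;
# which weekday each sub-list corresponds to follows the Beiwe configuration
# convention handled by the np.roll above.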
def gen_survey_schedule(config_path, time_start, time_end, beiwe_ids):
"""
Args:
config_path(str):
File path to study configuration file
time_start(str):
The first date of the survey data
time_end(str):
The last date of the survey data
beiwe_ids(list):
List of users in study for which we are generating a survey schedule
Returns:
times_sur(DataFrame):
DataFrame with a line for every survey deployed to every user in the study for the given time range
"""
# List of surveys
surveys = read_json(config_path)['surveys']
# For each survey create a list of survey times
times_sur = []
for u_id in beiwe_ids:
for i, s in enumerate(surveys):
if s['timings']:
s_times = generate_survey_times(time_start, time_end, timings=s['timings'])
# Add in relative and absolute survey timings here
###
tbl = pd.DataFrame(s_times, columns=['delivery_time'])
# Create the "next" time column too, which indicates the next time the survey will be deployed
tbl['next_delivery_time'] = tbl.delivery_time.shift(-1)
tbl['id'] = i
tbl['beiwe_id'] = u_id
# Get all question IDs for the survey
qs = [q['question_id'] for q in s['content'] if 'question_id' in q.keys()]
if len(qs) > 0:
q_ids = pd.DataFrame({'question_id': qs})
tbl = pd.merge(tbl, q_ids, how='cross')
times_sur.append(tbl)
times_sur = pd.concat(times_sur).reset_index(drop=True)
return times_sur
def survey_submits(study_dir, config_path, time_start, time_end, beiwe_ids, agg, study_tz=None):
"""
Args:
study_dir(str):
File path to study data
config_path(str):
File path to study configuration file
time_start(str):
The first date of the survey data
time_end(str):
The last date of the survey data
beiwe_ids(list):
            List of users in study for which we are generating a survey schedule
        agg(DataFrame):
            Aggregated survey data for the study (e.g. the output of aggregate_surveys_config)
study_tz(str):
Timezone of study. This defaults to 'America/New_York'
Returns:
A DataFrame with all surveys deployed in the given timeframe on the study to the users with completion times
"""
time_start = pd.Timestamp(time_start)
time_end = pd.Timestamp(time_end)
# Generate aggregated survey data
# agg = functions.aggregate_surveys_config(study_dir, config_path, study_tz)
# Generate scheduled surveys data
sched = gen_survey_schedule(config_path, time_start, time_end, beiwe_ids)
# Merge survey submit lines onto the schedule data and identify submitted lines
submit_lines = pd.merge(
sched[['delivery_time', 'next_delivery_time', 'id', 'beiwe_id']].drop_duplicates(),
agg[['Local time', 'config_id', 'survey id', 'beiwe_id']].loc[agg.submit_line == 1].drop_duplicates(),
how='left',
left_on=['id', 'beiwe_id'],
right_on=['config_id', 'beiwe_id'])
    # Flag the survey lines that were submitted within their delivery window
submit_lines['submit_flg'] = np.where(
(submit_lines['Local time'] >= submit_lines['delivery_time']) &
(submit_lines['Local time'] < submit_lines['next_delivery_time']),
1, 0
)
# Take the maximum survey submit line
submit_lines2 = submit_lines.groupby(['delivery_time', 'next_delivery_time', 'survey id', 'beiwe_id', 'config_id'])[
'submit_flg'].max().reset_index()
# Merge on the times of the survey submission
merge_cols = ['delivery_time', 'next_delivery_time', 'survey id', 'beiwe_id', 'config_id', 'submit_flg']
submit_lines3 = pd.merge(submit_lines2, submit_lines[merge_cols + ['Local time']], how='left', left_on=merge_cols,
right_on=merge_cols)
submit_lines3['submit_time'] = np.where(submit_lines3.submit_flg == 1, submit_lines3['Local time'],
np.array(0, dtype='datetime64[ns]'))
# # Select appropriate columns
submit_lines3 = submit_lines3[['survey id', 'delivery_time', 'beiwe_id', 'submit_flg', 'submit_time']]
# submit_lines3['time_to_submit'] = np.where(submit_lines3['submit_flg'] == 1, submit_lines3['submit_time'] - submit_lines3['delivery_time'], np.array(0, dtype='datetime64[ns]'))
submit_lines3['time_to_submit'] = submit_lines3['submit_time'] - submit_lines3['delivery_time']
# Create a summary that has survey_id, beiwe_id, num_surveys, num submitted surveys, average time to submit
summary_cols = ['survey id', 'beiwe_id']
num_surveys = submit_lines3.groupby(summary_cols)['submit_flg'].count()
num_complete_surveys = submit_lines3.groupby(summary_cols)['submit_flg'].sum()
avg_time_to_submit = submit_lines3.loc[submit_lines3.submit_flg == 1].groupby(summary_cols)['time_to_submit'].apply(
lambda x: sum(x, datetime.timedelta()) / len(x))
# avg_time_to_submit = submit_lines3.groupby(summary_cols)['time_to_submit'].apply(lambda x: sum(x, datetime.timedelta())/len(x))
submit_lines_summary = pd.concat([num_surveys, num_complete_surveys, avg_time_to_submit], axis=1).reset_index()
submit_lines_summary.columns = ['survey id', 'beiwe_id', 'num_surveys', 'num_complete_surveys',
'avg_time_to_submit']
return submit_lines3.sort_values(['survey id', 'beiwe_id']).drop_duplicates(), submit_lines_summary
def survey_submits_no_config(study_dir, study_tz=None):
"""
    Alternative function for getting the survey completions when the expected
    delivery times from a study configuration file are not available
    Args:
        study_dir(str):
            File path to study data (passed to aggregate_surveys_no_config)
        study_tz(str):
            Timezone of study. This defaults to 'America/New_York'
"""
tmp = aggregate_surveys_no_config(study_dir, study_tz)
def summarize_submits(df):
tmp = {
'min_time': df.min(),
'max_time': df.max()
}
return pd.Series(tmp, index=['min_time', 'max_time'])
tmp = tmp.groupby(['survey id', 'beiwe_id', 'surv_inst_flg'])['Local time'].apply(summarize_submits).reset_index()
tmp = tmp.pivot(index=['survey id', 'beiwe_id', 'surv_inst_flg'], columns='level_3',
values='Local time').reset_index()
tmp['time_to_complete'] = tmp['max_time'] - tmp['min_time']
tmp['time_to_complete'] = [t.seconds for t in tmp['time_to_complete']]
return tmp.sort_values(['beiwe_id', 'survey id'])
|
[
"pandas.DataFrame",
"pandas.Timestamp",
"math.ceil",
"pandas.merge",
"pandas.offsets.Micro",
"numpy.where",
"numpy.array",
"pandas.Series",
"datetime.timedelta",
"pandas.Timedelta",
"pandas.concat"
] |
[((2432, 2456), 'pandas.Timestamp', 'pd.Timestamp', (['time_start'], {}), '(time_start)\n', (2444, 2456), True, 'import pandas as pd\n'), ((2469, 2491), 'pandas.Timestamp', 'pd.Timestamp', (['time_end'], {}), '(time_end)\n', (2481, 2491), True, 'import pandas as pd\n'), ((2586, 2608), 'math.ceil', 'math.ceil', (['(weeks / 7.0)'], {}), '(weeks / 7.0)\n', (2595, 2608), False, 'import math\n'), ((5606, 5630), 'pandas.Timestamp', 'pd.Timestamp', (['time_start'], {}), '(time_start)\n', (5618, 5630), True, 'import pandas as pd\n'), ((5646, 5668), 'pandas.Timestamp', 'pd.Timestamp', (['time_end'], {}), '(time_end)\n', (5658, 5668), True, 'import pandas as pd\n'), ((6396, 6546), 'numpy.where', 'np.where', (["((submit_lines['Local time'] >= submit_lines['delivery_time']) & (\n submit_lines['Local time'] < submit_lines['next_delivery_time']))", '(1)', '(0)'], {}), "((submit_lines['Local time'] >= submit_lines['delivery_time']) & (\n submit_lines['Local time'] < submit_lines['next_delivery_time']), 1, 0)\n", (6404, 6546), True, 'import numpy as np\n'), ((6958, 7082), 'pandas.merge', 'pd.merge', (['submit_lines2', "submit_lines[merge_cols + ['Local time']]"], {'how': '"""left"""', 'left_on': 'merge_cols', 'right_on': 'merge_cols'}), "(submit_lines2, submit_lines[merge_cols + ['Local time']], how=\n 'left', left_on=merge_cols, right_on=merge_cols)\n", (6966, 7082), True, 'import pandas as pd\n'), ((2505, 2534), 'pandas.Timedelta', 'pd.Timedelta', (['(t_end - t_start)'], {}), '(t_end - t_start)\n', (2517, 2534), True, 'import pandas as pd\n'), ((7256, 7291), 'numpy.array', 'np.array', (['(0)'], {'dtype': '"""datetime64[ns]"""'}), "(0, dtype='datetime64[ns]')\n", (7264, 7291), True, 'import numpy as np\n'), ((9200, 9246), 'pandas.Series', 'pd.Series', (['tmp'], {'index': "['min_time', 'max_time']"}), "(tmp, index=['min_time', 'max_time'])\n", (9209, 9246), True, 'import pandas as pd\n'), ((818, 844), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (836, 844), False, 'import datetime\n'), ((896, 925), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 't'}), '(seconds=t)\n', (914, 925), False, 'import datetime\n'), ((1185, 1204), 'pandas.offsets.Micro', 'pd.offsets.Micro', (['(0)'], {}), '(0)\n', (1201, 1204), True, 'import pandas as pd\n'), ((2652, 2685), 'numpy.array', 'np.array', (['timings'], {'dtype': '"""object"""'}), "(timings, dtype='object')\n", (2660, 2685), True, 'import numpy as np\n'), ((2793, 2823), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7 * i)'}), '(days=7 * i)\n', (2811, 2823), False, 'import datetime\n'), ((4797, 4817), 'pandas.concat', 'pd.concat', (['times_sur'], {}), '(times_sur)\n', (4806, 4817), True, 'import pandas as pd\n'), ((8389, 8463), 'pandas.concat', 'pd.concat', (['[num_surveys, num_complete_surveys, avg_time_to_submit]'], {'axis': '(1)'}), '([num_surveys, num_complete_surveys, avg_time_to_submit], axis=1)\n', (8398, 8463), True, 'import pandas as pd\n'), ((4142, 4190), 'pandas.DataFrame', 'pd.DataFrame', (['s_times'], {'columns': "['delivery_time']"}), "(s_times, columns=['delivery_time'])\n", (4154, 4190), True, 'import pandas as pd\n'), ((4648, 4681), 'pandas.DataFrame', 'pd.DataFrame', (["{'question_id': qs}"], {}), "({'question_id': qs})\n", (4660, 4681), True, 'import pandas as pd\n'), ((4708, 4741), 'pandas.merge', 'pd.merge', (['tbl', 'q_ids'], {'how': '"""cross"""'}), "(tbl, q_ids, how='cross')\n", (4716, 4741), True, 'import pandas as pd\n'), ((8191, 8211), 'datetime.timedelta', 'datetime.timedelta', 
([], {}), '()\n', (8209, 8211), False, 'import datetime\n')]
|
import numpy as np
def linear_y(t0, t_step, slope, y0):
"""
    Take one linear step: given the current point (t0, y0), a step size and a slope,
    return the next point (t0 + t_step, y0 + t_step * slope).
    Parameters:
    -----------
    t0: current value of the independent variable
    t_step: step size of t
    slope: slope of the linear relationship
    y0: current value of the dependent variable
    Return:
    -------
    (t1, y1): the next point on the line
    Example:
    --------
    >>> t1, y1 = linear_y(20, 1, 3, 0)  # -> (21, 3)
"""
return t_step+t0, y0+t_step*slope
def varied_rb(t0, t_step, rdn, rb0, tau, y0):
"""
"""
if rb0 != rdn:
slope = 1.0*(rdn - rb0)/tau
else:
slope = 0
t, m = linear_y(t0, t_step, slope, y0)
return t, m
def microsaccades(stop_command_time, deltat_mean = 95, deltat_std = 40, deltas_mean = 30, deltas_std = 7, rb_mean = 8, rb_std = 2, efferent_delay = 20, thr_M = 1000, decay = 7, rdn = -8, tau = 50, t_step = 0.1, last_success_canceled = False):
"""
A microsaccade model
---------------------
Parameters:
stop_command_time: stop command time
deltat_mean: Mean of afferent delay for ongoing microsaccades, by default is 95ms
deltat_std: STD of afferent delay for ongoing microsaccades, by default is 40ms
deltas_mean: Mean of afferent delay for stimulus onset processing, by default is 30ms
    deltas_std: STD of afferent delay for stimulus onset processing, by default is 7ms
rb_mean: Mean of Buildup rate, by default is 8
rb_std: STD of Buildup rate, by default is 2
    efferent_delay: After M reaches a threshold of 1000, a microsaccade is triggered one efferent delay later, by default is 20ms
thr_M: a threshold of M, by default is 1000
    decay: time constant of the exponential decay of M after the threshold is reached, by default is 7ms
    rdn: the dynamics of the build-down of the activity after peripheral stimulus onset, by default is -1*rb_mean = -8
    tau: time over which the build-up rate ramps linearly from rb0 down to rdn, by default is 50ms
    t_step: interval used for counting time, by default is 0.1ms
    last_success_canceled: whether the previous microsaccade was successfully canceled
Returns:
--------
    t: list of time points
    M: list of the corresponding activity values
    is_success_canceled: whether the microsaccade was successfully canceled (M never reached the threshold)
"""
deltat = np.random.normal(deltat_mean, deltat_std)
deltas = np.random.normal(deltas_mean, deltas_std)
rb0 = np.random.normal(rb_mean, rb_std)
if last_success_canceled is True:
deltat = 0.5*deltat
rb0 = 2*rb0
m_point = 0
M = []
t = []
M.append(m_point)
t.append(deltat)
t1 = t[0]
is_success_canceled = False
# linear increment
while np.max(M)<1000:
if not (0 < np.abs(t1-stop_command_time) < 1):
t1, m_point = linear_y(t1, t_step, rb0, m_point)
M.append(m_point)
t.append(t1)
else:
# print('afferent start time: {}, m: {}'.format(t1, m_point))
# Afferent delay for stimulus onset processing: deltas
for i in range(int(deltas/t_step)):
t1, m_point = linear_y(t1, t_step, rb0, m_point)
M.append(m_point)
t.append(t1)
break
# print('linear increment t: {}, m: {}, deltas: {}'.format(t1, m_point, deltas))
rb = 1.0*rb0
i = 0
expo_flag = False
while m_point > 1:
if m_point > 1000:
expo_flag = True
if (1<m_point<1000) & (expo_flag is False):
_, rb = varied_rb(t1, t_step, rdn, rb0, tau, rb)
t1, m_point = linear_y(t1, t_step, rb, m_point)
M.append(m_point)
t.append(t1)
else:
# After arriving at 1000, after an efferent delay to trigger a microsaccade
# if i<= int(efferent_delay/t_step):
# t1, m_point = linear_y(t1, t_step, rb0, m_point)
# M.append(m_point)
# t.append(t1)
# else:
# exponential decrease
slope = -1.0*m_point/decay
t1, m_point = linear_y(t1, t_step, slope, m_point)
M.append(m_point)
t.append(t1)
if np.max(M)<1000:
is_success_canceled = True
return t, M, is_success_canceled
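# --- Illustrative usage sketch (added; not part of the original model code). ---
# The stop-command time of 150 ms is an arbitrary example value; all other parameters
# keep the defaults documented above. Defining (not calling) the helper keeps module
# import behaviour unchanged.
def _example_microsaccade_run():
    t, M, canceled = microsaccades(stop_command_time=150)
    print('canceled:', canceled, 'last time point:', t[-1], 'peak M:', max(M))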
|
[
"numpy.max",
"numpy.abs",
"numpy.random.normal"
] |
[((2231, 2272), 'numpy.random.normal', 'np.random.normal', (['deltat_mean', 'deltat_std'], {}), '(deltat_mean, deltat_std)\n', (2247, 2272), True, 'import numpy as np\n'), ((2286, 2327), 'numpy.random.normal', 'np.random.normal', (['deltas_mean', 'deltas_std'], {}), '(deltas_mean, deltas_std)\n', (2302, 2327), True, 'import numpy as np\n'), ((2338, 2371), 'numpy.random.normal', 'np.random.normal', (['rb_mean', 'rb_std'], {}), '(rb_mean, rb_std)\n', (2354, 2371), True, 'import numpy as np\n'), ((2618, 2627), 'numpy.max', 'np.max', (['M'], {}), '(M)\n', (2624, 2627), True, 'import numpy as np\n'), ((4109, 4118), 'numpy.max', 'np.max', (['M'], {}), '(M)\n', (4115, 4118), True, 'import numpy as np\n'), ((2654, 2684), 'numpy.abs', 'np.abs', (['(t1 - stop_command_time)'], {}), '(t1 - stop_command_time)\n', (2660, 2684), True, 'import numpy as np\n')]
|
import numpy as np
from typing import Optional, Union, Sequence, List, Callable, Tuple
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage import map_coordinates
import itertools
import collections
from collections import OrderedDict
import torch
import os
from scipy import ndimage as ndi
from batchgenerators.augmentations.utils import resize_segmentation
from skimage.transform import resize
def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, order_z=0):
"""
    do_separate_z=True will resample the anisotropic (z) axis separately, using interpolation order order_z
    :param data:
    :param new_shape:
    :param is_seg:
    :param axis:
    :param order:
    :param do_separate_z:
:param order_z: only applies if do_separate_z is True
:return:
"""
assert len(data.shape) == 4, "data must be (c, x, y, z)"
if is_seg:
resize_fn = resize_segmentation
kwargs = OrderedDict()
else:
resize_fn = resize
kwargs = {'mode': 'edge', 'anti_aliasing': False}
dtype_data = data.dtype
shape = np.array(data[0].shape)
new_shape = np.array(new_shape)
if np.any(shape != new_shape):
data = data.astype(float)
if do_separate_z:
print("separate z, order in z is", order_z, "order inplane is", order)
assert len(axis) == 1, "only one anisotropic axis supported"
axis = axis[0]
if axis == 0:
new_shape_2d = new_shape[1:]
elif axis == 1:
new_shape_2d = new_shape[[0, 2]]
else:
new_shape_2d = new_shape[:-1]
reshaped_final_data = []
for c in range(data.shape[0]):
reshaped_data = []
for slice_id in range(shape[axis]):
if axis == 0:
reshaped_data.append(resize_fn(data[c, slice_id], new_shape_2d, order, **kwargs))
elif axis == 1:
reshaped_data.append(resize_fn(data[c, :, slice_id], new_shape_2d, order, **kwargs))
else:
reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order,
**kwargs))
reshaped_data = np.stack(reshaped_data, axis)
if shape[axis] != new_shape[axis]:
# The following few lines are blatantly copied and modified from sklearn's resize()
rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
orig_rows, orig_cols, orig_dim = reshaped_data.shape
row_scale = float(orig_rows) / rows
col_scale = float(orig_cols) / cols
dim_scale = float(orig_dim) / dim
map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]
map_rows = row_scale * (map_rows + 0.5) - 0.5
map_cols = col_scale * (map_cols + 0.5) - 0.5
map_dims = dim_scale * (map_dims + 0.5) - 0.5
coord_map = np.array([map_rows, map_cols, map_dims])
if not is_seg or order_z == 0:
reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z,
mode='nearest')[None])
else:
unique_labels = np.unique(reshaped_data)
reshaped = np.zeros(new_shape, dtype=dtype_data)
for i, cl in enumerate(unique_labels):
reshaped_multihot = np.round(
map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z,
mode='nearest'))
reshaped[reshaped_multihot > 0.5] = cl
reshaped_final_data.append(reshaped[None])
else:
reshaped_final_data.append(reshaped_data[None])
reshaped_final_data = np.vstack(reshaped_final_data)
else:
print("no separate z, order", order)
reshaped = []
for c in range(data.shape[0]):
reshaped.append(resize_fn(data[c], new_shape, order, **kwargs)[None])
reshaped_final_data = np.vstack(reshaped)
return reshaped_final_data.astype(dtype_data)
else:
print("no resampling necessary")
return data
def issequenceiterable(obj) -> bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if isinstance(obj, torch.Tensor):
return int(obj.dim()) > 0 # a 0-d tensor is not iterable
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj, (str, bytes))
def ensure_tuple_rep(tup, dim: int):
"""
Returns a copy of `tup` with `dim` values by either shortened or duplicated input.
Raises:
ValueError: When ``tup`` is a sequence and ``tup`` length is not ``dim``.
Examples::
>>> ensure_tuple_rep(1, 3)
(1, 1, 1)
>>> ensure_tuple_rep(None, 3)
(None, None, None)
>>> ensure_tuple_rep('test', 3)
('test', 'test', 'test')
>>> ensure_tuple_rep([1, 2, 3], 3)
(1, 2, 3)
>>> ensure_tuple_rep(range(3), 3)
(0, 1, 2)
>>> ensure_tuple_rep([1, 2], 3)
ValueError: Sequence must have length 3, got length 2.
"""
if isinstance(tup, torch.Tensor):
tup = tup.detach().cpu().numpy()
if isinstance(tup, np.ndarray):
tup = tup.tolist()
if not issequenceiterable(tup):
return (tup,) * dim
if len(tup) == dim:
return tuple(tup)
raise ValueError(f"Sequence must have length {dim}, got {len(tup)}.")
# def compute_divisible_spatial_size(spatial_shape: Sequence[int], k: Union[Sequence[int], int]):
# """
# Compute the target spatial size which should be divisible by `k`.
# Args:
# spatial_shape: original spatial shape.
# k: the target k for each spatial dimension.
# if `k` is negative or 0, the original size is preserved.
# if `k` is an int, the same `k` be applied to all the input spatial dimensions.
# """
# k = fall_back_tuple(k, (1,) * len(spatial_shape))
# new_size = []
# for k_d, dim in zip(k, spatial_shape):
# new_dim = int(np.ceil(dim / k_d) * k_d) if k_d > 0 else dim
# new_size.append(new_dim)
# return new_size
class Pad:
"""
Perform padding for a given an amount of padding in each dimension.
If input is `torch.Tensor`, `torch.nn.functional.pad` will be used, otherwise, `np.pad` will be used.
Args:
to_pad: the amount to be padded in each dimension [(low_H, high_H), (low_W, high_W), ...].
mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
kwargs: other arguments for the `np.pad` or `torch.pad` function.
note that `np.pad` treats channel dimension as the first dimension.
"""
def __init__(
self,
to_pad: List[Tuple[int, int]],
mode = "constant",
**kwargs,
) -> None:
self.to_pad = to_pad
self.mode = mode
self.kwargs = kwargs
@staticmethod
def _np_pad(img: np.ndarray, all_pad_width, mode, **kwargs) -> np.ndarray:
return np.pad(img, all_pad_width, mode=mode, **kwargs) # type: ignore
def __call__(
self, img, mode = None
):
"""
Args:
img: data to be transformed, assuming `img` is channel-first and
padding doesn't apply to the channel dim.
mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"`` or ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to `self.mode`.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
"""
if not np.asarray(self.to_pad).any():
# all zeros, skip padding
return img
        mode = convert_pad_mode(dst=img, mode=mode or self.mode)
pad = self._np_pad
return pad(img, self.to_pad, mode, **self.kwargs) # type: ignore
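# --- Illustrative sketch (added): padding a channel-first array with the Pad class above.
# to_pad is ordered (channel, H, W): no channel padding, 2 rows top/bottom, 3 columns left/right.
def _example_pad():
    img = np.zeros((1, 4, 4), dtype=np.float32)
    padder = Pad(to_pad=[(0, 0), (2, 2), (3, 3)], mode="constant")
    return padder(img)  # expected shape: (1, 8, 10)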
def convert_pad_mode(dst, mode):
"""
Utility to convert padding mode between numpy array and PyTorch Tensor.
Args:
dst: target data to convert padding mode for, should be numpy array or PyTorch Tensor.
mode: current padding mode.
"""
if isinstance(dst, np.ndarray):
if mode == "circular":
mode = "wrap"
if mode == "replicate":
mode = "edge"
return mode
raise ValueError(f"unsupported data type: {type(dst)}.")
def is_positive(img):
"""
Returns a boolean version of `img` where the positive values are converted into True, the other values are False.
"""
return img > 0
def where(condition, x=None, y=None):
"""
Note that `torch.where` may convert y.dtype to x.dtype.
"""
if isinstance(condition, np.ndarray):
if x is not None:
result = np.where(condition, x, y)
else:
result = np.where(condition)
else:
if x is not None:
x = torch.as_tensor(x, device=condition.device)
y = torch.as_tensor(y, device=condition.device, dtype=x.dtype)
result = torch.where(condition, x, y)
else:
result = torch.where(condition) # type: ignore
return result
def generate_spatial_bounding_box(
img,
select_fn: Callable = is_positive,
channel_indices = None,
margin: Union[Sequence[int], int] = 0,
) -> Tuple[List[int], List[int]]:
"""
generate the spatial bounding box of foreground in the image with start-end positions.
Users can define arbitrary function to select expected foreground from the whole image or specified channels.
And it can also add margin to every dim of the bounding box.
The output format of the coordinates is:
[1st_spatial_dim_start, 2nd_spatial_dim_start, ..., Nth_spatial_dim_start],
[1st_spatial_dim_end, 2nd_spatial_dim_end, ..., Nth_spatial_dim_end]
The bounding boxes edges are aligned with the input image edges.
This function returns [-1, -1, ...], [-1, -1, ...] if there's no positive intensity.
Args:
img: source image to generate bounding box from.
select_fn: function to select expected foreground, default is to select values > 0.
channel_indices: if defined, select foreground only on the specified channels
of image. if None, select foreground on the whole image.
margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
"""
data = img[list(channel_indices)] if channel_indices is not None else img
data = select_fn(data).any(0)
ndim = len(data.shape)
margin = ensure_tuple_rep(margin, ndim)
for m in margin:
if m < 0:
raise ValueError("margin value should not be negative number.")
box_start = [0] * ndim
box_end = [0] * ndim
for di, ax in enumerate(itertools.combinations(reversed(range(ndim)), ndim - 1)):
dt = data
if len(ax) != 0:
dt = np.any(dt, ax)
if not dt.any():
# if no foreground, return all zero bounding box coords
return [0] * ndim, [0] * ndim
arg_max = where(dt == dt.max())[0]
min_d = max(arg_max[0] - margin[di], 0)
max_d = arg_max[-1] + margin[di] + 1
box_start[di] = min_d.detach().cpu().item() if isinstance(min_d, torch.Tensor) else min_d # type: ignore
box_end[di] = max_d.detach().cpu().item() if isinstance(max_d, torch.Tensor) else max_d # type: ignore
return box_start, box_end
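# --- Illustrative sketch (added, not in the original module): a 1-channel 5x5 image with
# foreground in rows 1-2 and columns 2-3 should give box_start == [1, 2] and
# box_end == [3, 4] (end indices are exclusive).
def _example_bounding_box():
    img = np.zeros((1, 5, 5))
    img[0, 1:3, 2:4] = 1.0
    return generate_spatial_bounding_box(img)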
def create_zero_centered_coordinate_mesh(shape):
tmp = tuple([np.arange(i) for i in shape])
coords = np.array(np.meshgrid(*tmp, indexing='ij')).astype(float)
for d in range(len(shape)):
coords[d] -= ((np.array(shape).astype(float) - 1) / 2.)[d]
return coords
def elastic_deform_coordinates(coordinates, alpha, sigma, random_state):
n_dim = len(coordinates)
offsets = []
for _ in range(n_dim):
offsets.append(
gaussian_filter((random_state.random(coordinates.shape[1:]) * 2 - 1), sigma, mode="constant", cval=0) * alpha)
offsets = np.array(offsets)
indices = offsets + coordinates
return indices
def interpolate_img(img, coords, order=3, mode='nearest', cval=0.0, is_seg=False):
if is_seg and order != 0:
unique_labels = np.unique(img)
result = np.zeros(coords.shape[1:], img.dtype)
for i, c in enumerate(unique_labels):
res_new = map_coordinates((img == c).astype(float), coords, order=order, mode=mode, cval=cval)
result[res_new >= 0.5] = c
return result
else:
return map_coordinates(img.astype(float), coords, order=order, mode=mode, cval=cval).astype(img.dtype)
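# --- Illustrative sketch (added) of the usual pipeline built from the three helpers above:
# zero-centered mesh -> elastic offsets -> shift back to image coordinates -> resample.
# alpha=10 and sigma=3 are arbitrary example values; np.random is passed as random_state
# because elastic_deform_coordinates only needs its .random() method.
def _example_elastic_deform():
    img = np.random.rand(32, 32)
    coords = create_zero_centered_coordinate_mesh(img.shape)
    coords = elastic_deform_coordinates(coords, alpha=10., sigma=3., random_state=np.random)
    for d in range(len(img.shape)):
        coords[d] += (img.shape[d] - 1) / 2.
    return interpolate_img(img, coords, order=1)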
def scale_coords(coords, scale):
if isinstance(scale, (tuple, list, np.ndarray)):
assert len(scale) == len(coords)
for i in range(len(scale)):
coords[i] *= scale[i]
else:
coords *= scale
return coords
def correct_crop_centers(
centers: List[np.ndarray], spatial_size: Union[Sequence[int], int], label_spatial_shape: Sequence[int]
) -> List[np.ndarray]:
"""
Utility to correct the crop center if the crop size is bigger than the image size.
Args:
        centers: pre-computed crop centers, will correct based on the valid region.
spatial_size: spatial size of the ROIs to be sampled.
label_spatial_shape: spatial shape of the original label data to compare with ROI.
"""
spatial_size = spatial_size
default=label_spatial_shape
if not (np.subtract(label_spatial_shape, spatial_size) >= 0).all():
raise ValueError("The size of the proposed random crop ROI is larger than the image size.")
# Select subregion to assure valid roi
valid_start = np.floor_divide(spatial_size, 2)
# add 1 for random
valid_end = np.subtract(label_spatial_shape + np.array(1), spatial_size / np.array(2)).astype(np.uint16)
# int generation to have full range on upper side, but subtract unfloored size/2 to prevent rounded range
# from being too high
for i, valid_s in enumerate(valid_start):
# need this because np.random.randint does not work with same start and end
if valid_s == valid_end[i]:
valid_end[i] += 1
for i, c in enumerate(centers):
center_i = c
if c < valid_start[i]:
center_i = valid_start[i]
if c >= valid_end[i]:
center_i = valid_end[i] - 1
centers[i] = center_i
return centers
def generate_pos_neg_label_crop_centers(
spatial_size: Union[Sequence[int], int],
num_samples: int,
pos_ratio: float,
label_spatial_shape: Sequence[int],
fg_indices: np.ndarray,
bg_indices: np.ndarray,
rand_state: Optional[np.random.RandomState] = None,
) -> List[List[np.ndarray]]:
"""
Generate valid sample locations based on the label with option for specifying foreground ratio
Valid: samples sitting entirely within image, expected input shape: [C, H, W, D] or [C, H, W]
Args:
spatial_size: spatial size of the ROIs to be sampled.
num_samples: total sample centers to be generated.
pos_ratio: ratio of total locations generated that have center being foreground.
label_spatial_shape: spatial shape of the original label data to unravel selected centers.
fg_indices: pre-computed foreground indices in 1 dimension.
bg_indices: pre-computed background indices in 1 dimension.
rand_state: numpy randomState object to align with other modules.
Raises:
ValueError: When the proposed roi is larger than the image.
ValueError: When the foreground and background indices lengths are 0.
"""
if rand_state is None:
rand_state = np.random.random.__self__ # type: ignore
centers = []
fg_indices, bg_indices = np.asarray(fg_indices), np.asarray(bg_indices)
if fg_indices.size == 0 and bg_indices.size == 0:
raise ValueError("No sampling location available.")
if fg_indices.size == 0 or bg_indices.size == 0:
print(
f"N foreground {len(fg_indices)}, N background {len(bg_indices)},"
"unable to generate class balanced samples."
)
pos_ratio = 0 if fg_indices.size == 0 else 1
for _ in range(num_samples):
indices_to_use = fg_indices if rand_state.rand() < pos_ratio else bg_indices
random_int = rand_state.randint(len(indices_to_use))
center = np.unravel_index(indices_to_use[random_int], label_spatial_shape)
# shift center to range of valid centers
center_ori = list(center)
centers.append(correct_crop_centers(center_ori, spatial_size, label_spatial_shape))
return centers
def augment_gamma(data_sample, gamma_range=(0.5, 2), epsilon=1e-7, per_channel=False,
retain_stats: Union[bool, Callable[[], bool]] = False):
if not per_channel:
retain_stats_here = retain_stats() if callable(retain_stats) else retain_stats
if retain_stats_here:
mn = data_sample.mean()
sd = data_sample.std()
if gamma_range[0] < 1:
gamma = np.random.uniform(gamma_range[0], 1)
else:
gamma = np.random.uniform(max(gamma_range[0], 1), gamma_range[1])
minm = data_sample.min()
rnge = data_sample.max() - minm
data_sample = np.power(((data_sample - minm) / float(rnge + epsilon)), gamma) * rnge + minm
if retain_stats_here:
data_sample = data_sample - data_sample.mean()
data_sample = data_sample / (data_sample.std() + 1e-8) * sd
data_sample = data_sample + mn
else:
for c in range(data_sample.shape[0]):
retain_stats_here = retain_stats() if callable(retain_stats) else retain_stats
if retain_stats_here:
mn = data_sample[c].mean()
sd = data_sample[c].std()
if np.random.random() < 0.5 and gamma_range[0] < 1:
gamma = np.random.uniform(gamma_range[0], 1)
else:
gamma = np.random.uniform(max(gamma_range[0], 1), gamma_range[1])
minm = data_sample[c].min()
rnge = data_sample[c].max() - minm
data_sample[c] = np.power(((data_sample[c] - minm) / float(rnge + epsilon)), gamma) * float(rnge + epsilon) + minm
if retain_stats_here:
data_sample[c] = data_sample[c] - data_sample[c].mean()
data_sample[c] = data_sample[c] / (data_sample[c].std() + 1e-8) * sd
data_sample[c] = data_sample[c] + mn
return data_sample
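# --- Illustrative sketch (added): per-channel gamma augmentation of a random sample.
# The gamma range (0.7, 1.5) is an arbitrary example value.
def _example_gamma():
    sample = np.random.rand(2, 16, 16).astype(np.float32)  # (channels, x, y)
    return augment_gamma(sample, gamma_range=(0.7, 1.5), per_channel=True, retain_stats=True)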
def augment_mirroring(sample_data, random_state, sample_seg=None, axes=(0, 1, 2)):
    if sample_seg is not None and len(sample_seg.shape) == 3:
# add a dimension
sample_seg = np.expand_dims(sample_seg, axis=0)
if (len(sample_data.shape) != 3) and (len(sample_data.shape) != 4):
raise Exception(
"Invalid dimension for sample_data and sample_seg. sample_data and sample_seg should be either "
"[channels, x, y] or [channels, x, y, z]")
if 0 in axes and random_state.uniform() < 0.5:
sample_data[:, :] = sample_data[:, ::-1]
if sample_seg is not None:
sample_seg[:, :] = sample_seg[:, ::-1]
if 1 in axes and random_state.uniform() < 0.5:
sample_data[:, :, :] = sample_data[:, :, ::-1]
if sample_seg is not None:
sample_seg[:, :, :] = sample_seg[:, :, ::-1]
if 2 in axes and len(sample_data.shape) == 4:
if random_state.uniform() < 0.5:
sample_data[:, :, :, :] = sample_data[:, :, :, ::-1]
if sample_seg is not None:
sample_seg[:, :, :, :] = sample_seg[:, :, :, ::-1]
    if sample_seg is not None and len(sample_seg.shape) == 4:
sample_seg = np.squeeze(sample_seg, axis=0)
return sample_data, sample_seg
|
[
"numpy.arange",
"numpy.unique",
"numpy.pad",
"numpy.meshgrid",
"numpy.stack",
"torch.where",
"numpy.floor_divide",
"numpy.asarray",
"numpy.squeeze",
"numpy.vstack",
"numpy.random.uniform",
"numpy.subtract",
"numpy.zeros",
"numpy.unravel_index",
"numpy.expand_dims",
"numpy.any",
"numpy.where",
"numpy.array",
"numpy.random.random",
"collections.OrderedDict",
"torch.as_tensor",
"scipy.ndimage.map_coordinates"
] |
[((1082, 1105), 'numpy.array', 'np.array', (['data[0].shape'], {}), '(data[0].shape)\n', (1090, 1105), True, 'import numpy as np\n'), ((1122, 1141), 'numpy.array', 'np.array', (['new_shape'], {}), '(new_shape)\n', (1130, 1141), True, 'import numpy as np\n'), ((1149, 1175), 'numpy.any', 'np.any', (['(shape != new_shape)'], {}), '(shape != new_shape)\n', (1155, 1175), True, 'import numpy as np\n'), ((13399, 13416), 'numpy.array', 'np.array', (['offsets'], {}), '(offsets)\n', (13407, 13416), True, 'import numpy as np\n'), ((15069, 15101), 'numpy.floor_divide', 'np.floor_divide', (['spatial_size', '(2)'], {}), '(spatial_size, 2)\n', (15084, 15101), True, 'import numpy as np\n'), ((933, 946), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (944, 946), False, 'from collections import OrderedDict\n'), ((8006, 8053), 'numpy.pad', 'np.pad', (['img', 'all_pad_width'], {'mode': 'mode'}), '(img, all_pad_width, mode=mode, **kwargs)\n', (8012, 8053), True, 'import numpy as np\n'), ((13610, 13624), 'numpy.unique', 'np.unique', (['img'], {}), '(img)\n', (13619, 13624), True, 'import numpy as np\n'), ((13642, 13679), 'numpy.zeros', 'np.zeros', (['coords.shape[1:]', 'img.dtype'], {}), '(coords.shape[1:], img.dtype)\n', (13650, 13679), True, 'import numpy as np\n'), ((17162, 17184), 'numpy.asarray', 'np.asarray', (['fg_indices'], {}), '(fg_indices)\n', (17172, 17184), True, 'import numpy as np\n'), ((17186, 17208), 'numpy.asarray', 'np.asarray', (['bg_indices'], {}), '(bg_indices)\n', (17196, 17208), True, 'import numpy as np\n'), ((17789, 17854), 'numpy.unravel_index', 'np.unravel_index', (['indices_to_use[random_int]', 'label_spatial_shape'], {}), '(indices_to_use[random_int], label_spatial_shape)\n', (17805, 17854), True, 'import numpy as np\n'), ((20131, 20165), 'numpy.expand_dims', 'np.expand_dims', (['sample_seg'], {'axis': '(0)'}), '(sample_seg, axis=0)\n', (20145, 20165), True, 'import numpy as np\n'), ((21130, 21160), 'numpy.squeeze', 'np.squeeze', (['sample_seg'], {'axis': '(0)'}), '(sample_seg, axis=0)\n', (21140, 21160), True, 'import numpy as np\n'), ((4137, 4167), 'numpy.vstack', 'np.vstack', (['reshaped_final_data'], {}), '(reshaped_final_data)\n', (4146, 4167), True, 'import numpy as np\n'), ((4420, 4439), 'numpy.vstack', 'np.vstack', (['reshaped'], {}), '(reshaped)\n', (4429, 4439), True, 'import numpy as np\n'), ((10067, 10092), 'numpy.where', 'np.where', (['condition', 'x', 'y'], {}), '(condition, x, y)\n', (10075, 10092), True, 'import numpy as np\n'), ((10128, 10147), 'numpy.where', 'np.where', (['condition'], {}), '(condition)\n', (10136, 10147), True, 'import numpy as np\n'), ((10200, 10243), 'torch.as_tensor', 'torch.as_tensor', (['x'], {'device': 'condition.device'}), '(x, device=condition.device)\n', (10215, 10243), False, 'import torch\n'), ((10260, 10318), 'torch.as_tensor', 'torch.as_tensor', (['y'], {'device': 'condition.device', 'dtype': 'x.dtype'}), '(y, device=condition.device, dtype=x.dtype)\n', (10275, 10318), False, 'import torch\n'), ((10340, 10368), 'torch.where', 'torch.where', (['condition', 'x', 'y'], {}), '(condition, x, y)\n', (10351, 10368), False, 'import torch\n'), ((10404, 10426), 'torch.where', 'torch.where', (['condition'], {}), '(condition)\n', (10415, 10426), False, 'import torch\n'), ((12232, 12246), 'numpy.any', 'np.any', (['dt', 'ax'], {}), '(dt, ax)\n', (12238, 12246), True, 'import numpy as np\n'), ((12849, 12861), 'numpy.arange', 'np.arange', (['i'], {}), '(i)\n', (12858, 12861), True, 'import numpy as np\n'), ((18481, 18517), 
'numpy.random.uniform', 'np.random.uniform', (['gamma_range[0]', '(1)'], {}), '(gamma_range[0], 1)\n', (18498, 18517), True, 'import numpy as np\n'), ((2310, 2339), 'numpy.stack', 'np.stack', (['reshaped_data', 'axis'], {}), '(reshaped_data, axis)\n', (2318, 2339), True, 'import numpy as np\n'), ((12905, 12937), 'numpy.meshgrid', 'np.meshgrid', (['*tmp'], {'indexing': '"""ij"""'}), "(*tmp, indexing='ij')\n", (12916, 12937), True, 'import numpy as np\n'), ((19341, 19377), 'numpy.random.uniform', 'np.random.uniform', (['gamma_range[0]', '(1)'], {}), '(gamma_range[0], 1)\n', (19358, 19377), True, 'import numpy as np\n'), ((3127, 3167), 'numpy.array', 'np.array', (['[map_rows, map_cols, map_dims]'], {}), '([map_rows, map_cols, map_dims])\n', (3135, 3167), True, 'import numpy as np\n'), ((8919, 8942), 'numpy.asarray', 'np.asarray', (['self.to_pad'], {}), '(self.to_pad)\n', (8929, 8942), True, 'import numpy as np\n'), ((14847, 14893), 'numpy.subtract', 'np.subtract', (['label_spatial_shape', 'spatial_size'], {}), '(label_spatial_shape, spatial_size)\n', (14858, 14893), True, 'import numpy as np\n'), ((15175, 15186), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (15183, 15186), True, 'import numpy as np\n'), ((15203, 15214), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (15211, 15214), True, 'import numpy as np\n'), ((19268, 19286), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (19284, 19286), True, 'import numpy as np\n'), ((3483, 3507), 'numpy.unique', 'np.unique', (['reshaped_data'], {}), '(reshaped_data)\n', (3492, 3507), True, 'import numpy as np\n'), ((3543, 3580), 'numpy.zeros', 'np.zeros', (['new_shape'], {'dtype': 'dtype_data'}), '(new_shape, dtype=dtype_data)\n', (3551, 3580), True, 'import numpy as np\n'), ((13016, 13031), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (13024, 13031), True, 'import numpy as np\n'), ((3270, 3342), 'scipy.ndimage.map_coordinates', 'map_coordinates', (['reshaped_data', 'coord_map'], {'order': 'order_z', 'mode': '"""nearest"""'}), "(reshaped_data, coord_map, order=order_z, mode='nearest')\n", (3285, 3342), False, 'from scipy.ndimage import map_coordinates\n')]
|
import numpy as np
# print('numpy:', np.__version__)
# print(dir(np))
# Regular Python lists
python_list = [1, 2, 3, 4, 5]
two_dimensional_list = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
# Creating a numpy (numerical python) array from a Python list
numpy_array_from_list_with_int = np.array(python_list)
# Creating a numpy float array
numpy_array_from_list_with_float = np.array(python_list, dtype=float)
# Creating a numpy boolean array
numpy_array_from_list_with_bool = np.array([0, 1, -1, 0,0 ], dtype=bool)
# Creating a multidimensional numpy array
numpy_two_dimensional_list = np.array(two_dimensional_list)
# Converting the numpy array back to a list
numpy_to_list = numpy_array_from_list_with_int.tolist()
# Creating a numpy array from a tuple
python_tuple = (1, 2, 3, 4, 5)
numpy_array_from_tuple = np.array(python_tuple)
# Shape of numpy array
nums = np.array([1, 2, 3, 4, 5])
# print(nums)
# print(nums.shape) # (5,): a 1-D array with 5 elements (no row/column structure)
# print(numpy_two_dimensional_list)
# print(numpy_two_dimensional_list.shape) # (3, 3) 3 rows, 3 columns
three_by_four_array = np.array([[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]) # 3 rows, 4 columns,
# print(three_by_four_array.shape)
# Data type of numpy array
# Type of data types: str, int, float, complex, bool, list, None
int_lists = [-3, -2, -1, 0, 1, 2, 3]
int_array = np.array(int_lists)
float_array = np.array(int_lists, dtype=float)
# print(int_array.dtype)
# print(float_array.dtype)
# Size of a numpy array
num_array_from_list = np.array([1, 2, 3, 4, 5])
two_dimensional_list = np.array([[0,1,2],
[3,4,5],
[6,7,8]])
# print(num_array_from_list.size)
# print(two_dimensional_list.size)
|
[
"numpy.array"
] |
[((280, 301), 'numpy.array', 'np.array', (['python_list'], {}), '(python_list)\n', (288, 301), True, 'import numpy as np\n'), ((370, 404), 'numpy.array', 'np.array', (['python_list'], {'dtype': 'float'}), '(python_list, dtype=float)\n', (378, 404), True, 'import numpy as np\n'), ((474, 512), 'numpy.array', 'np.array', (['[0, 1, -1, 0, 0]'], {'dtype': 'bool'}), '([0, 1, -1, 0, 0], dtype=bool)\n', (482, 512), True, 'import numpy as np\n'), ((584, 614), 'numpy.array', 'np.array', (['two_dimensional_list'], {}), '(two_dimensional_list)\n', (592, 614), True, 'import numpy as np\n'), ((798, 820), 'numpy.array', 'np.array', (['python_tuple'], {}), '(python_tuple)\n', (806, 820), True, 'import numpy as np\n'), ((852, 877), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (860, 877), True, 'import numpy as np\n'), ((1076, 1130), 'numpy.array', 'np.array', (['[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]'], {}), '([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])\n', (1084, 1130), True, 'import numpy as np\n'), ((1391, 1410), 'numpy.array', 'np.array', (['int_lists'], {}), '(int_lists)\n', (1399, 1410), True, 'import numpy as np\n'), ((1425, 1457), 'numpy.array', 'np.array', (['int_lists'], {'dtype': 'float'}), '(int_lists, dtype=float)\n', (1433, 1457), True, 'import numpy as np\n'), ((1557, 1582), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1565, 1582), True, 'import numpy as np\n'), ((1606, 1649), 'numpy.array', 'np.array', (['[[0, 1, 2], [3, 4, 5], [6, 7, 8]]'], {}), '([[0, 1, 2], [3, 4, 5], [6, 7, 8]])\n', (1614, 1649), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import anomaly_detection
dat = np.random.random(24*4*30) + 10
dts = pd.date_range(start='2018-08-01', freq='15min', periods=24*4*30)
df = pd.DataFrame(dat, index=dts, columns=['y'])
outliers = np.random.randint(low=0, high=24*4*30, size=20)
df.y.iloc[outliers] = df.y.iloc[outliers]*2
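# The anomaly_detection module's API is not shown in this snippet, so purely as an
# illustration, a plain rolling statistic would flag most of the injected spikes:
# resid = df.y - df.y.rolling(96, center=True, min_periods=1).median()
# flagged = df.index[(resid.abs() / resid.std()) > 4]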
|
[
"pandas.DataFrame",
"numpy.random.randint",
"pandas.date_range",
"numpy.random.random"
] |
[((108, 176), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2018-08-01"""', 'freq': '"""15min"""', 'periods': '(24 * 4 * 30)'}), "(start='2018-08-01', freq='15min', periods=24 * 4 * 30)\n", (121, 176), True, 'import pandas as pd\n'), ((178, 221), 'pandas.DataFrame', 'pd.DataFrame', (['dat'], {'index': 'dts', 'columns': "['y']"}), "(dat, index=dts, columns=['y'])\n", (190, 221), True, 'import pandas as pd\n'), ((234, 285), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(24 * 4 * 30)', 'size': '(20)'}), '(low=0, high=24 * 4 * 30, size=20)\n', (251, 285), True, 'import numpy as np\n'), ((71, 100), 'numpy.random.random', 'np.random.random', (['(24 * 4 * 30)'], {}), '(24 * 4 * 30)\n', (87, 100), True, 'import numpy as np\n')]
|
# Copyright 2019, Imperial College London
#
# CO416 - Machine Learning for Imaging
#
# This file: Functions to visualise medical imaging data.
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed
from IPython.display import display
# Calculate parameters low and high from window and level
def wl_to_lh(window, level):
low = level - window/2
high = level + window/2
return low,high
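# Example (added): a typical CT soft-tissue window of width 350 HU at level 40 HU maps to
# the display range wl_to_lh(350, 40) == (-135.0, 215.0).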
def display_image(img, x=None, y=None, z=None, window=None, level=None, colormap='gray', crosshair=False):
# Convert SimpleITK image to NumPy array
img_array = sitk.GetArrayFromImage(img)
# Get image dimensions in millimetres
size = img.GetSize()
spacing = img.GetSpacing()
width = size[0] * spacing[0]
height = size[1] * spacing[1]
depth = size[2] * spacing[2]
if x is None:
x = np.floor(size[0]/2).astype(int)
if y is None:
y = np.floor(size[1]/2).astype(int)
if z is None:
z = np.floor(size[2]/2).astype(int)
if window is None:
window = np.max(img_array) - np.min(img_array)
if level is None:
level = window / 2 + np.min(img_array)
low,high = wl_to_lh(window,level)
# Display the orthogonal slices
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(10, 4))
ax1.imshow(img_array[z,:,:], cmap=colormap, clim=(low, high), extent=(0, width, height, 0))
ax2.imshow(img_array[:,y,:], origin='lower', cmap=colormap, clim=(low, high), extent=(0, width, 0, depth))
ax3.imshow(img_array[:,:,x], origin='lower', cmap=colormap, clim=(low, high), extent=(0, height, 0, depth))
# Additionally display crosshairs
if crosshair:
ax1.axhline(y * spacing[1], lw=1)
ax1.axvline(x * spacing[0], lw=1)
ax2.axhline(z * spacing[2], lw=1)
ax2.axvline(x * spacing[0], lw=1)
ax3.axhline(z * spacing[2], lw=1)
ax3.axvline(y * spacing[1], lw=1)
plt.show()
def interactive_view(img):
size = img.GetSize()
img_array = sitk.GetArrayFromImage(img)
interact(display_image,img=fixed(img),
x=(0, size[0] - 1),
y=(0, size[1] - 1),
z=(0, size[2] - 1),
window=(0,np.max(img_array) - np.min(img_array)),
level=(np.min(img_array),np.max(img_array)));
|
[
"matplotlib.pyplot.show",
"numpy.floor",
"SimpleITK.GetArrayFromImage",
"numpy.max",
"numpy.min",
"ipywidgets.fixed",
"matplotlib.pyplot.subplots"
] |
[((629, 656), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (651, 656), True, 'import SimpleITK as sitk\n'), ((1317, 1352), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 4)'}), '(1, 3, figsize=(10, 4))\n', (1329, 1352), True, 'import matplotlib.pyplot as plt\n'), ((1988, 1998), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1996, 1998), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2100), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (2095, 2100), True, 'import SimpleITK as sitk\n'), ((1098, 1115), 'numpy.max', 'np.max', (['img_array'], {}), '(img_array)\n', (1104, 1115), True, 'import numpy as np\n'), ((1118, 1135), 'numpy.min', 'np.min', (['img_array'], {}), '(img_array)\n', (1124, 1135), True, 'import numpy as np\n'), ((1192, 1209), 'numpy.min', 'np.min', (['img_array'], {}), '(img_array)\n', (1198, 1209), True, 'import numpy as np\n'), ((2132, 2142), 'ipywidgets.fixed', 'fixed', (['img'], {}), '(img)\n', (2137, 2142), False, 'from ipywidgets import interact, fixed\n'), ((897, 918), 'numpy.floor', 'np.floor', (['(size[0] / 2)'], {}), '(size[0] / 2)\n', (905, 918), True, 'import numpy as np\n'), ((959, 980), 'numpy.floor', 'np.floor', (['(size[1] / 2)'], {}), '(size[1] / 2)\n', (967, 980), True, 'import numpy as np\n'), ((1021, 1042), 'numpy.floor', 'np.floor', (['(size[2] / 2)'], {}), '(size[2] / 2)\n', (1029, 1042), True, 'import numpy as np\n'), ((2326, 2343), 'numpy.min', 'np.min', (['img_array'], {}), '(img_array)\n', (2332, 2343), True, 'import numpy as np\n'), ((2344, 2361), 'numpy.max', 'np.max', (['img_array'], {}), '(img_array)\n', (2350, 2361), True, 'import numpy as np\n'), ((2266, 2283), 'numpy.max', 'np.max', (['img_array'], {}), '(img_array)\n', (2272, 2283), True, 'import numpy as np\n'), ((2286, 2303), 'numpy.min', 'np.min', (['img_array'], {}), '(img_array)\n', (2292, 2303), True, 'import numpy as np\n')]
|
import numpy as np
from multiprocessing import Pool, cpu_count
import statsmodels.api as sm
from tqdm import tqdm
from itertools import product
import pandas as pd
# Load files from parent folders
import os
import sys
try:sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
except NameError: print("Cannot load testing module")
from wrapper_resampler import ShiftedTester
# Help functions
def coin(n, p=0.5): return 1*(np.random.uniform(size=(n,1)) < p)
def cb(*args): return np.concatenate(args, axis=1) # Col bind
def to_r(x): return robj.FloatVector(x)  # requires rpy2 (`import rpy2.robjects as robj`), which is not imported here
np.random.seed(1)
# Simulate data from a Gaussian SCM
def scm(n, q=0.1):
X1 = coin(n, q)
return cb(X1)
q = 0.1
p = 0.9
# Weight
def weight(X):
return (X*p/q + (1-X)*(1-p)/(1-q)).ravel()
# Test if proportion of 1 is larger than p - 0.1 (this is true in target dist, where proportion is p, but may fail in resample)
def T(X):
return 1*(sm.stats.ztest(X, value=p-0.1, alternative="smaller")[1] < 0.05)[0]
# Define rates for resampling
def rate(pow, c=1):
def f(n): return c*n**pow
return f
# Loop parameters
pow_range = [a/20 for a in range(4, 20)]
n_range = [int(10**(x/2)) for x in range(4, 11)]
rep_range = [False, True]
combinations = list(product(n_range, pow_range, rep_range))
def conduct_experiment(i=None):
out = []
for n, pow, repl in combinations:
X = scm(n, q)
if rate(pow)(n) >= 3 and (rate(pow)(n) <= n):
try:
psi = ShiftedTester(weight, T, rate(pow), replacement=repl, reject_retries=5000, verbose=True)
out.append(psi.test(X))
except:
# Catch errors from test statistic
print(f"Error occurred {pow}, {n}")
out.append(np.nan)
else:
# print(f"Sampling {pow}, {n}")
out.append(np.nan)
return out
## Conduct multiple experiments with multiprocessing and export to R for plotting:
if __name__ == '__main__':
repeats = 200
# Multiprocess
pool = Pool(cpu_count()-2)
res = np.array(
list(tqdm(pool.imap_unordered(conduct_experiment, range(repeats)), total=repeats)))
pool.close()
# Count non-nas, to be used for binomial confidence intervals
counts = (~np.isnan(res)).sum(axis=0)
res = np.nansum(res, axis=0)
# Pack as data frame
df = pd.DataFrame(
[(x/c, *v, c) for x, v, c in zip(res, combinations, counts)],
columns=["RejectRate", "n", "Power", "SamplingScheme", "Count"])
# Export to R for ggplotting
df['RejectRate'] = df["RejectRate"].replace(np.NaN, "NA")
df.to_csv("experiment-a2-binary.csv")
|
[
"numpy.random.uniform",
"numpy.nansum",
"os.path.abspath",
"numpy.random.seed",
"numpy.isnan",
"statsmodels.api.stats.ztest",
"itertools.product",
"numpy.concatenate",
"multiprocessing.cpu_count"
] |
[((585, 602), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (599, 602), True, 'import numpy as np\n'), ((503, 531), 'numpy.concatenate', 'np.concatenate', (['args'], {'axis': '(1)'}), '(args, axis=1)\n', (517, 531), True, 'import numpy as np\n'), ((1256, 1294), 'itertools.product', 'product', (['n_range', 'pow_range', 'rep_range'], {}), '(n_range, pow_range, rep_range)\n', (1263, 1294), False, 'from itertools import product\n'), ((2315, 2337), 'numpy.nansum', 'np.nansum', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (2324, 2337), True, 'import numpy as np\n'), ((446, 476), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, 1)'}), '(size=(n, 1))\n', (463, 476), True, 'import numpy as np\n'), ((2052, 2063), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2061, 2063), False, 'from multiprocessing import Pool, cpu_count\n'), ((271, 296), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (286, 296), False, 'import os\n'), ((2278, 2291), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (2286, 2291), True, 'import numpy as np\n'), ((939, 994), 'statsmodels.api.stats.ztest', 'sm.stats.ztest', (['X'], {'value': '(p - 0.1)', 'alternative': '"""smaller"""'}), "(X, value=p - 0.1, alternative='smaller')\n", (953, 994), True, 'import statsmodels.api as sm\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 16:49:29 2020
Copyright 2020 by <NAME>.
"""
# Standard imports:
import numpy as np
def fpcond(F):
"""Enforce the pole condition for the DFS coefficients F."""
# Get the dimension:
n = len(F)
Fp = np.zeros([n, n], dtype=complex)
# Negative wavenumbers:
G = F[:, :int(n/2)]
A = np.ones([2, n])
A[1, :] = (-1)**np.arange(0, n)
C = A.T @ (np.linalg.inv(A @ A.T) @ A @ G)
Fp[:, :int(n/2)] = F[:, :int(n/2)] - C
# Positive wavenumbers:
G = F[:, int(n/2)+1:]
A = np.ones([2, n])
A[1, :] = (-1)**np.arange(1, n+1)
C = A.T @ (np.linalg.inv(A @ A.T) @ A @ G)
Fp[:, int(n/2)+1:] = F[:, int(n/2)+1:] - C
return Fp
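# Illustrative usage (added; the 8x8 size is an arbitrary, even example):
# F = np.random.randn(8, 8) + 1j * np.random.randn(8, 8)
# Fp = fpcond(F)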
|
[
"numpy.arange",
"numpy.linalg.inv",
"numpy.zeros",
"numpy.ones"
] |
[((292, 323), 'numpy.zeros', 'np.zeros', (['[n, n]'], {'dtype': 'complex'}), '([n, n], dtype=complex)\n', (300, 323), True, 'import numpy as np\n'), ((389, 404), 'numpy.ones', 'np.ones', (['[2, n]'], {}), '([2, n])\n', (396, 404), True, 'import numpy as np\n'), ((598, 613), 'numpy.ones', 'np.ones', (['[2, n]'], {}), '([2, n])\n', (605, 613), True, 'import numpy as np\n'), ((425, 440), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (434, 440), True, 'import numpy as np\n'), ((634, 653), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (643, 653), True, 'import numpy as np\n'), ((456, 478), 'numpy.linalg.inv', 'np.linalg.inv', (['(A @ A.T)'], {}), '(A @ A.T)\n', (469, 478), True, 'import numpy as np\n'), ((667, 689), 'numpy.linalg.inv', 'np.linalg.inv', (['(A @ A.T)'], {}), '(A @ A.T)\n', (680, 689), True, 'import numpy as np\n')]
|
import os
import pickle
import numpy as np
import errno
def do_pickle(pickle_bool, pickle_name, num_args, func, *args, **kwargs):
'''
General function to handle pickling.
@func: call this guy to get the result if pickle file not available.
'''
if not pickle_bool:
rets = func(*args, **kwargs)
elif os.path.isfile(pickle_name):
#pickle exists!
with open(pickle_name, 'rb') as handle:
rets = pickle.load(handle)
print("successfully loaded pickle file!", pickle_name)
handle.close()
else:
rets = func(*args, **kwargs)
# dump it for future
        with open(pickle_name, 'wb') as handle:  # binary mode is required by pickle
pickle.dump(rets, handle, protocol=pickle.HIGHEST_PROTOCOL)
handle.close()
return rets
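# Example (added; names are hypothetical): cache an expensive computation across runs.
# features = do_pickle(True, 'features.pickle', 1, extract_features, raw_samples)
# Note that `num_args` is currently not used inside do_pickle.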
def mix_samples(train_genuine, train_impostors):
"""
Returns a single np.array with samples, and corresponding labels.
"""
samples = np.vstack((train_genuine, train_impostors))
labels = []
# Add labels: 1 - user, and 0 - impostor.
for i in train_genuine:
labels.append(1)
for i in train_impostors:
labels.append(0)
labels = np.array(labels)
return samples, labels
# Taken from http://stackoverflow.com/a/600612/119527
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
|
[
"pickle.dump",
"os.makedirs",
"os.path.isdir",
"os.path.isfile",
"pickle.load",
"numpy.array",
"numpy.vstack"
] |
[((971, 1014), 'numpy.vstack', 'np.vstack', (['(train_genuine, train_impostors)'], {}), '((train_genuine, train_impostors))\n', (980, 1014), True, 'import numpy as np\n'), ((1200, 1216), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1208, 1216), True, 'import numpy as np\n'), ((334, 361), 'os.path.isfile', 'os.path.isfile', (['pickle_name'], {}), '(pickle_name)\n', (348, 361), False, 'import os\n'), ((1336, 1353), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1347, 1353), False, 'import os\n'), ((454, 473), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (465, 473), False, 'import pickle\n'), ((718, 777), 'pickle.dump', 'pickle.dump', (['rets', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(rets, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (729, 777), False, 'import pickle\n'), ((1436, 1455), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1449, 1455), False, 'import os\n')]
|
from time import time
import numpy as np
from models import convolutional_model
from pre_process import next_batch
from triplet_loss import deep_speaker_loss
from constants import BATCH_NUM_TRIPLETS
if __name__ == '__main__':
b = next_batch()
num_frames = b.shape[0]
model = convolutional_model(batch_input_shape=[BATCH_NUM_TRIPLETS * num_frames] + list(b.shape[1:]),
batch_size=BATCH_NUM_TRIPLETS, num_frames=num_frames)
model.compile(optimizer='adam',
loss=deep_speaker_loss)
print(model.summary())
grad_steps = 0
orig_time = time()
while True:
anc1 = next_batch()
anc2 = next_batch()
pos1 = next_batch()
pos2 = next_batch()
neg1 = next_batch()
neg2 = next_batch()
batch = np.concatenate([anc1, anc2, pos1, pos2, neg1, neg2], axis=0)
# this line should not raise an error
# output.shape = (3, 383, 32, 32, 3)
# explanation = (batch_size, num_frames, width, height, channels)
np.reshape(batch, (BATCH_NUM_TRIPLETS, num_frames, b.shape[2], b.shape[2], b.shape[3]))
stub_targets = np.random.uniform(size=(batch.shape[0], 1))
loss = model.train_on_batch(batch, stub_targets)
print('batch #{0} processed in {1:.2f}s, training loss = {2}.'.format(grad_steps, time() - orig_time, loss))
grad_steps += 1
orig_time = time()
|
[
"pre_process.next_batch",
"numpy.random.uniform",
"time.time",
"numpy.reshape",
"numpy.concatenate"
] |
[((237, 249), 'pre_process.next_batch', 'next_batch', ([], {}), '()\n', (247, 249), False, 'from pre_process import next_batch\n'), ((611, 617), 'time.time', 'time', ([], {}), '()\n', (615, 617), False, 'from time import time\n'), ((649, 661), 'pre_process.next_batch', 'next_batch', ([], {}), '()\n', (659, 661), False, 'from pre_process import next_batch\n'), ((677, 689), 'pre_process.next_batch', 'next_batch', ([], {}), '()\n', (687, 689), False, 'from pre_process import next_batch\n'), ((705, 717), 'pre_process.next_batch', 'next_batch', ([], {}), '()\n', (715, 717), False, 'from pre_process import next_batch\n'), ((733, 745), 'pre_process.next_batch', 'next_batch', ([], {}), '()\n', (743, 745), False, 'from pre_process import next_batch\n'), ((761, 773), 'pre_process.next_batch', 'next_batch', ([], {}), '()\n', (771, 773), False, 'from pre_process import next_batch\n'), ((789, 801), 'pre_process.next_batch', 'next_batch', ([], {}), '()\n', (799, 801), False, 'from pre_process import next_batch\n'), ((818, 878), 'numpy.concatenate', 'np.concatenate', (['[anc1, anc2, pos1, pos2, neg1, neg2]'], {'axis': '(0)'}), '([anc1, anc2, pos1, pos2, neg1, neg2], axis=0)\n', (832, 878), True, 'import numpy as np\n'), ((1054, 1145), 'numpy.reshape', 'np.reshape', (['batch', '(BATCH_NUM_TRIPLETS, num_frames, b.shape[2], b.shape[2], b.shape[3])'], {}), '(batch, (BATCH_NUM_TRIPLETS, num_frames, b.shape[2], b.shape[2],\n b.shape[3]))\n', (1064, 1145), True, 'import numpy as np\n'), ((1166, 1209), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(batch.shape[0], 1)'}), '(size=(batch.shape[0], 1))\n', (1183, 1209), True, 'import numpy as np\n'), ((1428, 1434), 'time.time', 'time', ([], {}), '()\n', (1432, 1434), False, 'from time import time\n'), ((1357, 1363), 'time.time', 'time', ([], {}), '()\n', (1361, 1363), False, 'from time import time\n')]
|
import numpy as np
import pytest
from pyha import Hardware, Complex, Sfix, default_complex, simulate, sims_close
from pyha.common.datavalid import DataValid, NumpyToDataValid
class FFTPower(Hardware):
"""
FFTPower
--------
Multiplies complex input by its conjugate: (a + bi)(a - bi) = a**2 + b**2
Results in a real number.
"""
def __init__(self):
self._pyha_simulation_input_callback = NumpyToDataValid(dtype=default_complex)
self.output = DataValid(Sfix(bits=36))
def main(self, input):
"""
Args:
input (DataValid): type not restricted
Returns:
DataValid: Lowest 36 bits from the result.
Example: Input is 18 bits with format 0:-17, then output is 36 bits 1:-34
"""
if not input.valid:
return DataValid(self.output.data, valid=False)
# (a + bi)(a - bi) = a**2 + b**2
self.output.data = (input.data.real * input.data.real) + (input.data.imag * input.data.imag)
self.output.valid = input.valid
return self.output
def model(self, input_list):
return (np.conjugate(input_list) * input_list).real.flatten()
@pytest.mark.parametrize("input_power", [0.5, 0.1, 0.001, 0.00001])
def test_all(input_power):
dut = FFTPower()
inp = (np.random.uniform(-1, 1, size=1280) + np.random.uniform(-1, 1, size=1280) * 1j) * input_power
inp = [complex(Complex(x, 0, -17)) for x in inp]
sims = simulate(dut, inp, pipeline_flush='auto', simulations=['MODEL', 'HARDWARE'])
assert sims_close(sims, rtol=1e-20, atol=1e-20)
def test_nonstandard_input_size():
input_power = 0.0001
dut = FFTPower()
dtype = Complex(0, -4, -21, round_style='round')
dut._pyha_simulation_input_callback = NumpyToDataValid(dtype)
inp = (np.random.uniform(-1, 1, size=64) + np.random.uniform(-1, 1, size=64) * 1j) * input_power
inp = [complex(dtype(x)) for x in inp]
sims = simulate(dut, inp, pipeline_flush='auto', conversion_path='/tmp/pyha_output')
assert sims_close(sims, rtol=1e-20, atol=1e-20)
|
[
"numpy.random.uniform",
"pyha.Complex",
"pyha.Sfix",
"pyha.common.datavalid.DataValid",
"pyha.simulate",
"pyha.sims_close",
"pytest.mark.parametrize",
"numpy.conjugate",
"pyha.common.datavalid.NumpyToDataValid"
] |
[((1201, 1265), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_power"""', '[0.5, 0.1, 0.001, 1e-05]'], {}), "('input_power', [0.5, 0.1, 0.001, 1e-05])\n", (1224, 1265), False, 'import pytest\n'), ((1485, 1561), 'pyha.simulate', 'simulate', (['dut', 'inp'], {'pipeline_flush': '"""auto"""', 'simulations': "['MODEL', 'HARDWARE']"}), "(dut, inp, pipeline_flush='auto', simulations=['MODEL', 'HARDWARE'])\n", (1493, 1561), False, 'from pyha import Hardware, Complex, Sfix, default_complex, simulate, sims_close\n'), ((1573, 1613), 'pyha.sims_close', 'sims_close', (['sims'], {'rtol': '(1e-20)', 'atol': '(1e-20)'}), '(sims, rtol=1e-20, atol=1e-20)\n', (1583, 1613), False, 'from pyha import Hardware, Complex, Sfix, default_complex, simulate, sims_close\n'), ((1710, 1750), 'pyha.Complex', 'Complex', (['(0)', '(-4)', '(-21)'], {'round_style': '"""round"""'}), "(0, -4, -21, round_style='round')\n", (1717, 1750), False, 'from pyha import Hardware, Complex, Sfix, default_complex, simulate, sims_close\n'), ((1794, 1817), 'pyha.common.datavalid.NumpyToDataValid', 'NumpyToDataValid', (['dtype'], {}), '(dtype)\n', (1810, 1817), False, 'from pyha.common.datavalid import DataValid, NumpyToDataValid\n'), ((1973, 2050), 'pyha.simulate', 'simulate', (['dut', 'inp'], {'pipeline_flush': '"""auto"""', 'conversion_path': '"""/tmp/pyha_output"""'}), "(dut, inp, pipeline_flush='auto', conversion_path='/tmp/pyha_output')\n", (1981, 2050), False, 'from pyha import Hardware, Complex, Sfix, default_complex, simulate, sims_close\n'), ((2062, 2102), 'pyha.sims_close', 'sims_close', (['sims'], {'rtol': '(1e-20)', 'atol': '(1e-20)'}), '(sims, rtol=1e-20, atol=1e-20)\n', (2072, 2102), False, 'from pyha import Hardware, Complex, Sfix, default_complex, simulate, sims_close\n'), ((428, 467), 'pyha.common.datavalid.NumpyToDataValid', 'NumpyToDataValid', ([], {'dtype': 'default_complex'}), '(dtype=default_complex)\n', (444, 467), False, 'from pyha.common.datavalid import DataValid, NumpyToDataValid\n'), ((500, 513), 'pyha.Sfix', 'Sfix', ([], {'bits': '(36)'}), '(bits=36)\n', (504, 513), False, 'from pyha import Hardware, Complex, Sfix, default_complex, simulate, sims_close\n'), ((843, 883), 'pyha.common.datavalid.DataValid', 'DataValid', (['self.output.data'], {'valid': '(False)'}), '(self.output.data, valid=False)\n', (852, 883), False, 'from pyha.common.datavalid import DataValid, NumpyToDataValid\n'), ((1327, 1362), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1280)'}), '(-1, 1, size=1280)\n', (1344, 1362), True, 'import numpy as np\n'), ((1440, 1458), 'pyha.Complex', 'Complex', (['x', '(0)', '(-17)'], {}), '(x, 0, -17)\n', (1447, 1458), False, 'from pyha import Hardware, Complex, Sfix, default_complex, simulate, sims_close\n'), ((1829, 1862), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(64)'}), '(-1, 1, size=64)\n', (1846, 1862), True, 'import numpy as np\n'), ((1365, 1400), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(1280)'}), '(-1, 1, size=1280)\n', (1382, 1400), True, 'import numpy as np\n'), ((1865, 1898), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(64)'}), '(-1, 1, size=64)\n', (1882, 1898), True, 'import numpy as np\n'), ((1144, 1168), 'numpy.conjugate', 'np.conjugate', (['input_list'], {}), '(input_list)\n', (1156, 1168), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu 01/10/2020
----------------------------
@author: <NAME>
PLASMON Data Analysis
class dataset & roi
The Dataset and ROI classes of v2 of the program. A Dataset is one nd2 file; ROIs are regions of interest.
-----------------
v2.0: part of v2.0: 15/10/2020
"""
# GENERAL IMPORTS
import scipy.fft as fft
from skimage.feature import match_template
import numpy as np
__self_made__ = True
# %% ROI
class Roi:
"""
ROI class. Used to determine region of interest
"""
def __init__(self, x, y):
"""
Initialization of ROI class.
---------------------------
:param x: x position
:param y: y position
"""
self.x = x
self.y = y
self.index = None
self.results = {}
def set_index(self, index):
"""
Sets index of ROI
----------------
:param index: index to set
"""
self.index = index
def get_roi(self, frame, roi_size_1d, offset):
"""
Gets ROI for a certain frame, offset, and ROI size
------------------------------------
:param frame: frame to get ROI of
:param roi_size_1d: ROI size
:param offset: offset of current ROI in that frame
        :return: (2*roi_size_1d+1) by (2*roi_size_1d+1) ROI around the x/y position of this ROI
"""
return frame[self.y + offset[0] - roi_size_1d:self.y + offset[0] + roi_size_1d + 1,
self.x + offset[1] - roi_size_1d:self.x + offset[1] + roi_size_1d + 1]
def get_frame_stack(self, frames, roi_size_1d, offset):
"""
Gets ROI for a certain frame stack, offset, and ROI size
------------------------------------
:param frames: frames to get ROI of
:param roi_size_1d: ROI size
:param offset: offset of current ROI in that frame
        :return: (2*roi_size_1d+1) by (2*roi_size_1d+1) ROI around the x/y position of this ROI, for every frame in the stack
"""
return frames[:, self.y + offset[0] - roi_size_1d:self.y + offset[0] + roi_size_1d + 1,
self.x + offset[1] - roi_size_1d:self.x + offset[1] + roi_size_1d + 1]
def in_frame(self, shape, offset, margin):
"""
Checks whether or not this ROI is in the frame
--------------------------
:param shape: Shape of frame
:param offset: offset of frame
:param margin: margin required to edge
:return: in_frame_boolean: whether or not in frame
"""
if self.x + offset[1] < margin or self.x + offset[1] > shape[1] - margin:
in_frame_boolean = False
elif self.y + offset[0] < margin or self.y + offset[0] > shape[0] - margin:
in_frame_boolean = False
else:
in_frame_boolean = True
return in_frame_boolean
# %% Dataset
class Dataset:
"""
Base dataset class. Each dataset type (HSM / TT) inherits from this
"""
def __init__(self, experiment, nd2, name):
"""
Init for dataset class. Sets name, type, name_result (for MATLAB) and some other base things to None
-----------------------------------
:param experiment: parent experiment
:param name: name of dataset
"""
self.type = "Dataset"
self.experiment = experiment
self.data_type = nd2.pixel_type
bits = int("".join([s for s in str(self.data_type) if s.isdigit()])) # complicated way to get #bits
if bits < 16:
self.data_type_signed = np.int16
elif bits < 32:
self.data_type_signed = np.int32
else:
self.data_type_signed = np.int64
self.name = name[:-4].split("/")[-1]
self.filename = name
self.name_result = self.set_result_name(self.name)
self.frames = None
self.frame_for_rois = None
self.metadata = None
self.fitter = None
self.roi_offset = None
self.settings = None
self.active_rois = []
@staticmethod
def check_name_validity(new_name):
"""
Check if name for a dataset is conform MATLAB requirements
---------------------------
:param new_name: string of new name
:return: return if valid or not
"""
chars = set(new_name)
for i in range(0, 10):
chars.discard(str(i))
# loop over all letters
from string import ascii_letters
for char in ascii_letters:
chars.discard(char)
chars.discard('-')
chars.discard('_')
chars.discard(' ')
if len(chars) > 0:
return False
else:
return True
@staticmethod
def set_result_name(new_name):
"""
Set name for result struct
--------------------
:param new_name: new name to adapt to self.name
:return:
"""
# check matlab rules
tmp_name = new_name.replace(' ', '_') # no spaces
tmp_name = tmp_name.replace('-', '_') # no -
tmp_name = tmp_name[-59:] # take only last 59 characters
return "res_" + tmp_name
def set_name(self, new_name):
"""
Set a name
-----------------
:param new_name: new name
:return: None. Edits class
"""
self.name = new_name
self.name_result = self.set_result_name(new_name)
@staticmethod
def parse_start_end(start, end):
"""
Parses start and end values often used in program to create a slice
---------------------------
:param start: Start value
:param end: End value
:return: Slice from start to end value
"""
if start == "Leave empty for start" and end == "Leave empty for end":
return slice(0, None), 0
elif start == "Leave empty for start" and end != "Leave empty for end":
return slice(0, int(end)), 0
elif start != "Leave empty for start" and end == "Leave empty for end":
return slice(int(start), None), int(start)
else: # start != "Leave empty for start" and end != "Leave empty for end":
return slice(int(start), int(end)), int(start)
@staticmethod
def correlate_frames_other_size(frame_small, frame_big):
"""
        Correlates a small and a big frame of different sizes and finds the offset between the two
-----------------------
:param frame_small: Previous frame
:param frame_big: New frame to correlate with
:return: offset: the offset between the two frames
"""
corr = match_template(frame_big, frame_small)
offset = -np.transpose(np.asarray(np.where(corr == np.amax(corr))))[0]
return offset
@staticmethod
def correlate_frames_same_size(frame_old, frame_new, range=None):
"""
        Correlates an old and a new frame of the same size and finds the offset between the two
-----------------------
:param frame_old: Previous frame
:param frame_new: New frame to correlate with
:param range: The maximum possible range of drift between the two frames
:return: offset: the offset between the two frames
"""
# if same matrix
if np.array_equal(frame_new, frame_old):
offset = np.asarray([0, 0])
else:
corr = normxcorr2(frame_old, frame_new)
if range is None:
maxima = np.transpose(np.asarray(np.where(corr == np.amax(corr))))[0]
else:
# cut out center with range. Only check center for maximum value
small_corr = corr[frame_old.shape[0] - range:frame_old.shape[0] + range,
frame_old.shape[1] - range:frame_old.shape[1] + range]
maxima = np.transpose(np.asarray(np.where(corr == np.amax(small_corr))))[0]
offset = maxima - np.asarray(frame_old.shape) + np.asarray([1, 1])
return offset
@staticmethod
def check_slice_validity(total_frame, x_slice, y_slice):
if x_slice.stop is None:
x_slice = slice(x_slice.start, total_frame.shape[1])
if y_slice.stop is None:
y_slice = slice(y_slice.start, total_frame.shape[0])
if x_slice.start > x_slice.stop or x_slice.start < 0 or x_slice.stop > total_frame.shape[1]:
return False
if y_slice.start > y_slice.stop or y_slice.start < 0 or y_slice.stop > total_frame.shape[0]:
return False
return True
def correlate(self, settings):
"""
The overall correlate function. Calls the above functions
----------------------------------
:param settings: settings from user
:return: offset: offset between new and old frame
"""
# finds slices and offset
x_slice, x_offset = self.parse_start_end(settings['x_min'], settings['x_max'])
y_slice, y_offset = self.parse_start_end(settings['y_min'], settings['y_max'])
offset_crop = np.asarray([y_offset, x_offset])
experiment_frame_shape = self.experiment.frame_for_rois.shape
frame_shape = self.frame_for_rois.shape
# if frame is larger than experiment frame
if frame_shape[0] > experiment_frame_shape[0] or frame_shape[1] > experiment_frame_shape[1]:
small_frame = self.experiment.frame_for_rois
cropped_frame = self.frame_for_rois[y_slice, x_slice]
# if slices are present, check validity
if self.check_slice_validity(self.frame_for_rois, x_slice, y_slice) is False:
self.experiment.error_func("Slice invalid", "Slice size is not valid")
return None
# check if sliced frame is valid
if small_frame.shape[0] > cropped_frame.shape[0] or small_frame.shape[1] > cropped_frame.shape[1]:
self.experiment.error_func("Crop too tight", "Cropped frame now smaller than previously smaller frame")
return None
offset = -self.correlate_frames_other_size(small_frame, cropped_frame) + offset_crop
elif frame_shape[0] < experiment_frame_shape[0] or frame_shape[1] < experiment_frame_shape[1]:
# if other way around
small_frame = self.frame_for_rois
cropped_frame = self.experiment.frame_for_rois[y_slice, x_slice]
# if slices are present, check validity
if self.check_slice_validity(self.experiment.frame_for_rois, x_slice, y_slice) is False:
self.experiment.error_func("Slice invalid", "Slice size is not valid")
return None
# check if sliced frame is valid
if small_frame.shape[0] > cropped_frame.shape[0] or small_frame.shape[1] > cropped_frame.shape[1]:
self.experiment.error_func("Crop too tight", "Cropped frame now smaller than previously smaller frame")
return None
offset = self.correlate_frames_other_size(small_frame, cropped_frame) - offset_crop
else:
# if same size
old_frame = self.experiment.frame_for_rois
new_frame = self.frame_for_rois
offset = self.correlate_frames_same_size(old_frame, new_frame)
return offset
def find_rois(self, settings):
"""
Finds the ROIs within a new dataset. First correlates frames, then uses roi_in_frame func
--------------------
:param settings: settings from user
:return: None. Edits dataset class by self.active_rois and self.roi_offset
"""
self.roi_offset = self.correlate(settings)
if self.roi_offset is None:
self.active_rois = []
return
self.active_rois = [roi for roi in self.experiment.rois if roi.in_frame(self.frame_for_rois.shape,
self.roi_offset,
self.experiment.
roi_finder.side_distance)]
# %% correlation
def normxcorr2(b, a):
"""
Correlation of similar size frames
"""
def conv2(a, b):
ma, na = a.shape
mb, nb = b.shape
return fft.ifft2(fft.fft2(a, [2 * ma - 1, 2 * na - 1]) * fft.fft2(b, [2 * mb - 1, 2 * nb - 1]))
c = conv2(a, np.flipud(np.fliplr(b)))
a = conv2(a ** 2, np.ones(b.shape))
b = int(sum(b.flatten().astype(np.int64) ** 2))
c = c / np.sqrt(a * b)
return c
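# Illustrative sketch (not part of the original module): a quick check of the
# static helpers above; the demo frame and the end value "8" are made up.
if __name__ == '__main__':
    demo_frame = np.arange(100, dtype=float).reshape(10, 10)
    # Identical frames short-circuit to a zero offset.
    print(Dataset.correlate_frames_same_size(demo_frame, demo_frame))  # -> [0 0]
    # The sentinel "Leave empty for start" parses to a slice starting at index 0.
    print(Dataset.parse_start_end("Leave empty for start", "8"))  # -> (slice(0, 8, None), 0)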
|
[
"numpy.asarray",
"skimage.feature.match_template",
"numpy.ones",
"numpy.amax",
"numpy.fliplr",
"numpy.array_equal",
"scipy.fft.fft2",
"numpy.sqrt"
] |
[((6604, 6642), 'skimage.feature.match_template', 'match_template', (['frame_big', 'frame_small'], {}), '(frame_big, frame_small)\n', (6618, 6642), False, 'from skimage.feature import match_template\n'), ((7247, 7283), 'numpy.array_equal', 'np.array_equal', (['frame_new', 'frame_old'], {}), '(frame_new, frame_old)\n', (7261, 7283), True, 'import numpy as np\n'), ((9029, 9061), 'numpy.asarray', 'np.asarray', (['[y_offset, x_offset]'], {}), '([y_offset, x_offset])\n', (9039, 9061), True, 'import numpy as np\n'), ((12483, 12499), 'numpy.ones', 'np.ones', (['b.shape'], {}), '(b.shape)\n', (12490, 12499), True, 'import numpy as np\n'), ((12565, 12579), 'numpy.sqrt', 'np.sqrt', (['(a * b)'], {}), '(a * b)\n', (12572, 12579), True, 'import numpy as np\n'), ((7306, 7324), 'numpy.asarray', 'np.asarray', (['[0, 0]'], {}), '([0, 0])\n', (7316, 7324), True, 'import numpy as np\n'), ((12446, 12458), 'numpy.fliplr', 'np.fliplr', (['b'], {}), '(b)\n', (12455, 12458), True, 'import numpy as np\n'), ((7936, 7954), 'numpy.asarray', 'np.asarray', (['[1, 1]'], {}), '([1, 1])\n', (7946, 7954), True, 'import numpy as np\n'), ((12339, 12376), 'scipy.fft.fft2', 'fft.fft2', (['a', '[2 * ma - 1, 2 * na - 1]'], {}), '(a, [2 * ma - 1, 2 * na - 1])\n', (12347, 12376), True, 'import scipy.fft as fft\n'), ((12379, 12416), 'scipy.fft.fft2', 'fft.fft2', (['b', '[2 * mb - 1, 2 * nb - 1]'], {}), '(b, [2 * mb - 1, 2 * nb - 1])\n', (12387, 12416), True, 'import scipy.fft as fft\n'), ((7906, 7933), 'numpy.asarray', 'np.asarray', (['frame_old.shape'], {}), '(frame_old.shape)\n', (7916, 7933), True, 'import numpy as np\n'), ((6702, 6715), 'numpy.amax', 'np.amax', (['corr'], {}), '(corr)\n', (6709, 6715), True, 'import numpy as np\n'), ((7487, 7500), 'numpy.amax', 'np.amax', (['corr'], {}), '(corr)\n', (7494, 7500), True, 'import numpy as np\n'), ((7850, 7869), 'numpy.amax', 'np.amax', (['small_corr'], {}), '(small_corr)\n', (7857, 7869), True, 'import numpy as np\n')]
|
"""gauss_mod_p.py
This module implements Gaussian elimination by columns modulo a prime
number p.
"""
import numpy as np
from .arithmetic_mod_p import add_arrays_mod_c, inv_mod_p
###############################################################################
# Index searching function
def _index_pivot(l):
"""Returns the pivot of a 1D array
Parameters
----------
l : :obj:`list(int)`
List of integers to compute pivot from.
Returns
-------
int
        Index of the last nonzero entry of `l`. Returns -1 if the list is all zeros.
"""
l_bool = np.nonzero(l)
if len(l_bool[0]) > 0:
return l_bool[0][-1]
return -1
assert _index_pivot(np.array([0, 1, 0, 1, 0])) == 3
assert _index_pivot(np.array([0, 0, 0])) == -1
###############################################################################
# Gaussian elimination procedure
def gauss_col(A, p):
"""This function implements the Gaussian elimination by columns.
A is reduced by left to right column additions. The reduced matrix has
unique column pivots.
Parameters
----------
A : :obj:`Numpy Array`
Matrix to be reduced
p : `int(prime)`
Prime number. The corresponding field will be Z mod p.
Returns
-------
R : :obj:`Numpy Array`
Reduced matrix by left to right column additions.
T : :obj:`Numpy Array`
Matrix recording additions performed, so that AT = R
"""
if np.size(A, 0) == 0:
return np.array([]), np.array([])
# number of columns in A
N = np.size(A, 1)
# copy of matrix to be reduced
# The matrix is transposed for more computational efficiency
R = np.copy(np.transpose(A))
T = np.identity(N)
# iterate over all columns
for j in range(N):
pivot = _index_pivot(R[j])
# Assume that the j-column is not reduced
reduced = False
while (pivot > -1) & (not reduced):
reduced = True
# look for previous columns to j
for k in range(j):
# if the pivots coincide, subtract column k to column j
# multiplied by a suitable coefficient q
if _index_pivot(R[k]) == pivot:
q = (R[j][pivot] * inv_mod_p(R[k][pivot], p)) % p
R[j] = add_arrays_mod_c(R[j], -q * R[k], p)
T[j] = add_arrays_mod_c(T[j], -q * T[k], p)
# reset pivot
if np.any(R[j]):
pivot = _index_pivot(R[j])
reduced = False
break
# end if
# end for
# end while
# end for
return np.transpose(R), np.transpose(T)
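# Illustrative sketch (not part of the original module): gauss_col is documented
# to satisfy A T = R (mod p), which can be spot-checked once the package's
# arithmetic_mod_p module is importable, e.g.
#
#     A = np.random.randint(0, 5, size=(4, 4))
#     R, T = gauss_col(A, 5)
#     assert np.array_equal(np.matmul(A, T) % 5, R % 5)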
|
[
"numpy.size",
"numpy.transpose",
"numpy.identity",
"numpy.nonzero",
"numpy.any",
"numpy.array"
] |
[((584, 597), 'numpy.nonzero', 'np.nonzero', (['l'], {}), '(l)\n', (594, 597), True, 'import numpy as np\n'), ((1562, 1575), 'numpy.size', 'np.size', (['A', '(1)'], {}), '(A, 1)\n', (1569, 1575), True, 'import numpy as np\n'), ((1717, 1731), 'numpy.identity', 'np.identity', (['N'], {}), '(N)\n', (1728, 1731), True, 'import numpy as np\n'), ((691, 716), 'numpy.array', 'np.array', (['[0, 1, 0, 1, 0]'], {}), '([0, 1, 0, 1, 0])\n', (699, 716), True, 'import numpy as np\n'), ((743, 762), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (751, 762), True, 'import numpy as np\n'), ((1462, 1475), 'numpy.size', 'np.size', (['A', '(0)'], {}), '(A, 0)\n', (1469, 1475), True, 'import numpy as np\n'), ((1692, 1707), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (1704, 1707), True, 'import numpy as np\n'), ((2698, 2713), 'numpy.transpose', 'np.transpose', (['R'], {}), '(R)\n', (2710, 2713), True, 'import numpy as np\n'), ((2715, 2730), 'numpy.transpose', 'np.transpose', (['T'], {}), '(T)\n', (2727, 2730), True, 'import numpy as np\n'), ((1497, 1509), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1505, 1509), True, 'import numpy as np\n'), ((1511, 1523), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1519, 1523), True, 'import numpy as np\n'), ((2474, 2486), 'numpy.any', 'np.any', (['R[j]'], {}), '(R[j])\n', (2480, 2486), True, 'import numpy as np\n')]
|
# Python Standard Libraries
import numpy as np
# grAdapt
from .base import Equidistributed
from grAdapt.utils.sampling import sample_points_bounds
from grAdapt.utils.math.spatial import pairwise_distances
class MaximalMinDistance(Equidistributed):
"""Maximal min distance sampling method
    A fixed number of points are drawn as candidates. The candidate with the
    largest minimal epsilon margin among the existing points is chosen as the
    next point, where the minimal epsilon margin of a point is its smallest
    distance to any other point. For better performance, only a fixed number
    of the latest points are considered.
Has a disadvantage: creates 'evil' neighbours.
"""
def __init__(self, n_candidates=10, window_size=500):
"""
Parameters
----------
n_candidates : integer
number of candidates
window_size : integer
            number of most recent history points to consider; smaller is faster but worsens the results.
"""
super().__init__()
self.n_candidates = n_candidates
self.window_size = window_size
def sample(self, bounds, n, x_history=None):
"""Samples low discrepancy/equidistributed sequences
Method has to handle with new bounds and n.
Parameters
----------
bounds : list of tuples or list of grAdapt.space.datatype.base
Each tuple in the list defines the bounds for the corresponding variable
Example: [(1, 2), (2, 3), (-1, 4)...]
n : int
number of points to be sampled
x_history : array-like (2d)
History points. Consider those to prevent sampling in dense regions.
Returns
-------
array-like (n, len(bounds))
Returns a 2D array. dim is the dimension of a single point
Each row corresponds to a single point.
Each column corresponds to a dimension.
"""
# set to new variables
super().sample(bounds, n, x_history)
if x_history is None:
x_history = sample_points_bounds(self.bounds, 1)
x_history_list = list(x_history)
else:
x_history_list = list(x_history)
for i in range(self.n):
x_history_sublist = x_history_list[-self.window_size:]
candidates = sample_points_bounds(self.bounds, self.n_candidates)
dists_matrix = pairwise_distances(candidates, np.array(x_history_sublist))
min_dists = np.min(dists_matrix, axis=1)
max_min_dists = np.argmax(min_dists)
x_history_list.append(candidates[max_min_dists])
return np.array(x_history_list)[-self.n:]
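# Illustrative usage sketch (not part of the original module), assuming the grAdapt
# package is installed so the relative imports above resolve:
#
#     sampler = MaximalMinDistance(n_candidates=10, window_size=500)
#     points = sampler.sample(bounds=[(0.0, 1.0), (0.0, 1.0)], n=100)
#     print(points.shape)  # (100, 2): one row per sampled point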
|
[
"numpy.min",
"numpy.array",
"grAdapt.utils.sampling.sample_points_bounds",
"numpy.argmax"
] |
[((2098, 2134), 'grAdapt.utils.sampling.sample_points_bounds', 'sample_points_bounds', (['self.bounds', '(1)'], {}), '(self.bounds, 1)\n', (2118, 2134), False, 'from grAdapt.utils.sampling import sample_points_bounds\n'), ((2364, 2416), 'grAdapt.utils.sampling.sample_points_bounds', 'sample_points_bounds', (['self.bounds', 'self.n_candidates'], {}), '(self.bounds, self.n_candidates)\n', (2384, 2416), False, 'from grAdapt.utils.sampling import sample_points_bounds\n'), ((2528, 2556), 'numpy.min', 'np.min', (['dists_matrix'], {'axis': '(1)'}), '(dists_matrix, axis=1)\n', (2534, 2556), True, 'import numpy as np\n'), ((2585, 2605), 'numpy.argmax', 'np.argmax', (['min_dists'], {}), '(min_dists)\n', (2594, 2605), True, 'import numpy as np\n'), ((2683, 2707), 'numpy.array', 'np.array', (['x_history_list'], {}), '(x_history_list)\n', (2691, 2707), True, 'import numpy as np\n'), ((2475, 2502), 'numpy.array', 'np.array', (['x_history_sublist'], {}), '(x_history_sublist)\n', (2483, 2502), True, 'import numpy as np\n')]
|
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import pandas as pd
import numpy as np
import datetime
def load_data(filename, training=True):
data = pd.read_csv(filename)
flight_code = data['flight_no'].to_numpy()
week = data['Week'].to_numpy()
destination = data['Arrival'].to_numpy()
std_hour = data['std_hour'].to_numpy()
flight_date = data['flight_date'].to_numpy()
if training:
is_claim = data['is_claim'].to_numpy().reshape(-1,1)
else:
flight_id = data['flight_id'].to_numpy()
N = data.shape[0]
year = np.full(N, np.nan, dtype=int)
month = np.full(N, np.nan, dtype=int)
day_of_month = np.full(N, np.nan, dtype=int)
day_of_week = np.full(N, np.nan, dtype=int)
carrier = np.full(N, '', dtype='S2')
for i in range(N):
        this_date = list(map(int, flight_date[i].split('-')))  # list() so it can be unpacked and reused below
year[i], month[i], day_of_month[i] = this_date
day_of_week[i] = datetime.date(*this_date).weekday() ## Monday=0, ..., Sunday=6
carrier[i] = flight_code[i][:2] ## doing this because there are some flights whose "Airline" entry is NULL
le, ohe = LabelEncoder(), OneHotEncoder(categories='auto')
encode = lambda x : ohe.fit_transform(le.fit_transform(x).reshape(-1,1)).toarray()
year = encode(year)
month = encode(month)
day_of_month = encode(day_of_month)
day_of_week = encode(day_of_week)
std_hour = encode(std_hour)
carrier = encode(carrier)
destination = encode(destination)
nn_input = np.concatenate((year.T, month.T, day_of_month.T, day_of_week.T, std_hour.T, carrier.T, destination.T)).T
print('input shape: {}'.format(nn_input.shape))
if training: return nn_input, is_claim
else: return nn_input, flight_id
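# Illustrative sketch (not part of the original script): the LabelEncoder +
# OneHotEncoder combination used in load_data, run on a tiny made-up array of
# carrier codes.
if __name__ == '__main__':
    demo_carriers = np.array(['CX', 'KA', 'CX', 'UO'])
    le_demo, ohe_demo = LabelEncoder(), OneHotEncoder(categories='auto')
    one_hot = ohe_demo.fit_transform(le_demo.fit_transform(demo_carriers).reshape(-1, 1)).toarray()
    print(one_hot.shape)  # (4, 3): three distinct carriers -> three one-hot columns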
|
[
"numpy.full",
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder",
"datetime.date",
"sklearn.preprocessing.LabelEncoder",
"numpy.concatenate"
] |
[((172, 193), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (183, 193), True, 'import pandas as pd\n'), ((622, 651), 'numpy.full', 'np.full', (['N', 'np.nan'], {'dtype': 'int'}), '(N, np.nan, dtype=int)\n', (629, 651), True, 'import numpy as np\n'), ((671, 700), 'numpy.full', 'np.full', (['N', 'np.nan'], {'dtype': 'int'}), '(N, np.nan, dtype=int)\n', (678, 700), True, 'import numpy as np\n'), ((720, 749), 'numpy.full', 'np.full', (['N', 'np.nan'], {'dtype': 'int'}), '(N, np.nan, dtype=int)\n', (727, 749), True, 'import numpy as np\n'), ((769, 798), 'numpy.full', 'np.full', (['N', 'np.nan'], {'dtype': 'int'}), '(N, np.nan, dtype=int)\n', (776, 798), True, 'import numpy as np\n'), ((818, 844), 'numpy.full', 'np.full', (['N', '""""""'], {'dtype': '"""S2"""'}), "(N, '', dtype='S2')\n", (825, 844), True, 'import numpy as np\n'), ((1212, 1226), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1224, 1226), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((1228, 1260), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (1241, 1260), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((1623, 1729), 'numpy.concatenate', 'np.concatenate', (['(year.T, month.T, day_of_month.T, day_of_week.T, std_hour.T, carrier.T,\n destination.T)'], {}), '((year.T, month.T, day_of_month.T, day_of_week.T, std_hour.T,\n carrier.T, destination.T))\n', (1637, 1729), True, 'import numpy as np\n'), ((1010, 1035), 'datetime.date', 'datetime.date', (['*this_date'], {}), '(*this_date)\n', (1023, 1035), False, 'import datetime\n')]
|
import numpy as np
from scipy.integrate import odeint
from scipy.optimize import brentq
logistic = lambda x : 4*x*(1-x)
class ChaosGenerator () :
"""
Base class for the chaotic generator
Contains functions for generating chaotic numbers and subsequently
evolving the states of the internal generators
"""
def getGen (shape, gentype) :
"""Returns a generator of the given shape and underlying map"""
return (lambda s : lambda i : ChaosGenerator.cgen[gentype](s).chaosPoints(i))(shape)
def __init__ (self, oshape, gshape=None, cascade=True, gens=2) :
"""
Child classes use this constructor to initialise essential parameters
and the internal generators
oshape - Shape that object owner uses
        gshape - Internal shape (per generator), as the chaotic map/flow
                 can be multi-dimensional
cascade - If cascade=False, then each point in the (Np, D) matrix
evolves independently of other points according to the map.
For the CPSO, this amounts to having a certain correlation
between the random numbers r1, r2 per iteration of the CPSO
- If cascade=True, then each of the Np particles is connected
to the previous one via the chaotic map. Every dimension is
independent of the other, however!
gens - Number of independent internal chaotic generators. Two by
default for chaotic pso
"""
self.oshape = oshape
######################################################################
# (Np, D, cdims) --> (D, cdims)
# where 'cdims' is the number of dimensions of the chaotic map/flow
#
# NOTE - By default, if map is single dimensional, then the last shape
# dimension (of 1) is omitted
######################################################################
self.gshape = (lambda s: s[1:] if cascade else s)(oshape if gshape is None else gshape)
self.cascade = cascade
self.gens = gens
# Creating the list of generators with shape (gens, Np, D, cdims)
self.cgens = np.array([
np.random.random_sample(self.gshape)
for i in range(gens)
])
def getCgens (self) :
""" Returns a copy of the internal generators """
return np.copy (self.cgens)
def chaosPoints (self, gno=0) :
"""
Returns numbers based on the underlying chaotic map/flow and depending
on the value of gno
gno - If ==0 means to evolve all generators and return them as a matrix of
shape (gens, Np, D)
        - If !=0 means to evolve a particular generator (indexed from 1) and
return a matrix of shape (Np, D)
"""
if gno :
if self.cascade :
# Evolve per particle
return np.array ([
self.evolve(gno-1) for i in range(self.oshape[0])
])
else :
return self.evolve(gno-1)
else :
# Evolve per generator (independent of 'cascade') --> Recursive call
return np.array ([
self.chaosPoints(i+1) for i in range(self.gens)
])
class Logistic (ChaosGenerator) :
"""
Logistic map --> f(x) = r*x*(1-x)
r = 4 for full chaos
"""
def __init__ (self, oshape, r=4, cascade=True, gens=2) :
"""
r - logistic bifurcation parameter
Rest is defined in the parent class
"""
super().__init__(oshape, None, cascade, gens)
self.r = r
def evolve (self, gind) :
""" Evolves according to the logistic map """
# Copying is necessary
x, r = self.cgens[gind], self.r
ret = np.copy(x)
self.cgens[gind] = r*x*(1-x)
return ret
class InverseLE (ChaosGenerator) :
"""
    Finds a uni-dimensional map with a pre-determined Lyapunov
exponent and evolves points according to it
Check the paper 'The problem of the inverse Lyapunov exponent and its applications'
by <NAME>
"""
def __invmap__ (self, eps=1e-4) :
if self.le < np.log(2) :
lep = lambda p : self.le + p*np.log(p) + (1-p)*np.log(1-p)
lo, mid, hi = 0, 0.5, 1
cmap = lambda p : lambda x : x/p if x <= p else (1-x)/(1-p)
else :
n = np.ceil(np.exp(self.le)).astype(np.int)
lep = lambda p : self.le - (n-2)/n*np.log(n) + p*np.log(p) + (2/n - p)*np.log(2/n - p)
lo, mid, hi = 0, 1/n, 2/n
def cmap (p) :
def _cmap(x) :
nx = n*x
nums = np.arange(0, n-2)
sub = nums[np.argmin(np.where(nx - nums > 0, nx - nums, n))]
if sub < n-3 or nx < n-2 :
return nx - sub
elif nx < n-2 + p*n : # sub == n-3
return (nx - (sub+1))/(n*p)
else :
return (nx - (sub+1) - n*p)/(2 - n*p)
return _cmap
plist = [brentq(lep, lo+eps, mid-eps), brentq(lep, mid+eps, hi-eps)]
self.invmap = np.vectorize(cmap(plist[
1 if np.random.rand() >= 0.5 else 0
]))
def __init__ (self, oshape, le=1.28991999999, cascade=True, gens=2) :
"""
le - The lyapunov exponent whose map has to be found
Rest is defined in the base class
"""
super().__init__(oshape, None, cascade, gens)
self.le = le
if le == np.log(2) :
mu = 0.49999
self.invmap = lambda x : np.where(x <= mu, x/mu, (1-x)/(1-mu))
else :
self.__invmap__()
def evolve (self, gind) :
""" Evolves according to the calculated inverse map """
# Copying is necessary
x = self.cgens[gind]
ret = np.copy(x)
self.cgens[gind] = self.invmap(x)
return ret
class Tent (ChaosGenerator) :
"""Tent map --> f(x) = 2*x , x <= 0.5 ; 2*(1-x) , x > 0.5
mu = 0.49999 in the equivalent form for numerical stability"""
def __init__ (self, oshape, mu=0.49999, cascade=True, gens=2) :
"""mu - Tent bifurcation paramater
Rest is defined in the parent class"""
super().__init__(oshape, None, cascade, gens)
self.mu = mu
def evolve (self, gind) :
"""Evolves according to the tent map"""
# Copying is necessary
x, mu = self.cgens[gind], self.mu
ret = np.copy(x)
self.cgens[gind] = np.where(x <= mu, x/mu, (1-x)/(1-mu))
return ret
class Lorenz (ChaosGenerator) :
"""
Lorenz flow --> xdot = sigma*(y-x)
ydot = x*(rho-z) - y
zdot = x*y - beta*z
sigma, beta, rho = 10, 8/3, 28
"""
# lims is a dictonary containing {(sigma, beta, rho) : limits(3,2)} pairs
lims = {}
def lorenz (X, t, sigma, beta, rho) :
""" lorenz differential equation needed by scipy odeint """
x, y, z = X
dXdt = [sigma*(y-x), x*(rho-z) - y, x*y - beta*z]
return dXdt
def setLimits (params) :
"""
        No need to recalculate limits of the Lorenz flow every time for the
same set of parameters
"""
if params not in Lorenz.lims :
# Argument to lambda - (Time series of lorenz flow in all three dimensions)
Lorenz.lims[params] = (lambda s:np.array([
[np.min(s[:,i]), np.max(s[:,i])] for i in [0, 1, 2]
]))\
(odeint (Lorenz.lorenz, np.random.rand(3), np.linspace (0, 9999, 999999), args = params))
def __init__ (self, oshape, params=(10, 8.0/3, 28), cascade=True, comp=0, h=0.01, gens=2) :
""""
params - (sigma, beta, rho) of lorenz parameters
comp - which cdim to consider for chaotic numbers
h - Time step of evolution
Rest is defined in the parent class
"""
super().__init__ (oshape, oshape+(3,), cascade, gens)
self.params = params
self.comp = comp
self.h = h
# Set limits if not set already
Lorenz.setLimits (params)
######################################################################
# !!! IDEA FOR OOP
# Introduce two subclasses - Normalised, and unnormalised
# The unnormalised class will have normalisation functions like the one
# below (Also seen in Henon map)
######################################################################
# Per generator
for i in range(0, self.gens) :
# Per dimension of lorenz flow
for j in [0, 1, 2] :
self.cgens[i,...,j] = (lambda st,mn,mx : mn + (mx - mn)*st)\
(self.cgens[i,...,j], Lorenz.lims[params][j,0], Lorenz.lims[params][j,1])
# Argument to lambda - (ith generator jth cdim, min of jth cdim, max of jth cdim)
def evolveT (self, gind, T=1) :
"""
Evolves the lorenz map for T timesteps
and sets the internal generator
"""
for pt in np.ndindex(self.gshape[:-1]) :
# Per index in (Np, D)
self.cgens[gind][pt] = odeint(Lorenz.lorenz, self.cgens[gind][pt],
np.arange(0,self.h*(T+1),self.h), args=self.params)[-1]
def evolve (self, gind) :
"""
Evolves the internal generators 1 h-timestep according to the
Lorenz flow equations
"""
######################################################################
# If the limits defined in the dict 'lims' are exceeded, then
# corresponding chaotic points are replaced with eps or (1-eps) depending
# on whether its exceeding below or above, respectively
######################################################################
eps = 1e-5
# Copying is not necessary as it is being scaled
ret = (lambda n2 : np.where (n2 > 1, 1-eps, n2))(
(lambda n1 : np.where (n1 < 0, eps, n1))(
(lambda st, mn, mx : (st - mn)/(mx - mn))
(self.cgens[gind,...,self.comp],
Lorenz.lims[self.params][self.comp,0],
Lorenz.lims[self.params][self.comp,1])
))
self.evolveT (gind)
return ret
class Henon (ChaosGenerator) :
"""
Henon map (Simplified model of the poincare section of Lorenz model)
(x,y) -> (1-ax^2+y, bx)
"""
lims = {}
def setLimits (params) :
""" Sets the x, y limits of a run of iterates of the Henon map """
if not params in Henon.lims :
a, b = params
x, y = np.random.rand(), np.random.rand()
minx, maxx, miny, maxy = x, x, y, y
for _ in range(999999) :
tmp = x
x = 1 - a*x*x + y
y = b*tmp
minx = min(minx, x)
miny = min(miny, y)
maxx = max(maxx, x)
maxy = max(maxy, y)
Henon.lims[params] = np.array([
[minx, maxx], [miny, maxy]
])
def __init__ (self, oshape, params=(1.4, 0.3), cascade=True, comp=0, gens=2) :
"""
Constructor for the Henon chaotic map object
params - (a, b) parameters of the Henon map
"""
super().__init__ (oshape, oshape+(2,), cascade, gens)
self.params = params
self.comp = comp
# Setting the limits for the Henon map
Henon.setLimits(params)
# Per generator
for i in range(0, self.gens) :
# Per dimension of Henon map
for j in [0, 1] :
self.cgens[i,...,j] = (lambda st,mn,mx : mn + (mx - mn)*st)\
(self.cgens[i,...,j], Henon.lims[params][j,0], Henon.lims[params][j,1])
def evolve (self, gind) :
""" Evolves the Henon map by one iterate """
# Tolerance to set back if iterate is beyond bounds
eps = 1e-5
# Copying is not necessary as it is being scaled
ret = (lambda n2 : np.where (n2 > 1, 1-eps, n2))(
(lambda n1 : np.where (n1 < 0, eps, n1))(
(lambda st, mn, mx : (st - mn)/(mx - mn))
(self.cgens[gind,...,self.comp],
Henon.lims[self.params][self.comp,0],
Henon.lims[self.params][self.comp,1])
))
a, b = self.params
x, y = np.copy(self.cgens[gind,...,0]), self.cgens[gind,...,1]
x2 = np.square(x)
self.cgens[gind,...,0] = 1 - a*x2 + y
self.cgens[gind,...,1] = b*x
return ret
class Baker (ChaosGenerator) :
"""
Baker map --> (2x, y/2) if 0 <= x < 1/2
(2-2x, 1-y/2) 1/2 <= x < 1
"""
def __init__ (self, oshape, mu=0.49999, cascade=True, comp=0, gens=2) :
super().__init__ (oshape, oshape+(2,), cascade, gens)
self.mu = mu
self.comp = comp
def evolve (self, gind) :
""" Evolves one time-step according to the baker map """
ret = np.copy(self.cgens[gind,...,self.comp])
x, y = np.copy(self.cgens[gind,...,0]), np.copy(self.cgens[gind,...,1])
less = x < self.mu
more = np.invert(less)
self.cgens[gind,less,0] = 2*x[less]
self.cgens[gind,less,1] = y[less]/2
self.cgens[gind,more,0] = 2 - 2*x[more]
self.cgens[gind,more,1] = 1 - y[more]/2
return ret
class Beach () :
"""
Implements the BEACH pseudo-randon number generator
Name - B-Exponential All-Chaotic Map Hopping
Author - <NAME> et. al.
Link - https://arxiv.org/abs/cs/0607069
"""
# Limit below which B-exponential map is not surjective
robust_lim = np.exp(-4)
def __seedb__ (self) :
"""
Seeds the initial b parameter
of BEACH until it is legal
"""
self.x = np.random.rand()
r = np.random.rand()
b = Beach.robust_lim + r*(self.blim - Beach.robust_lim)
while r == 0.75 :
r = np.random.rand()
b = Beach.robust_lim + r*(self.blim - Beach.robust_lim)
self.r = r
self.b = b
def __init__ (self, oshape, R=20, blim=1e4) :
""" Beach constructor """
self.oshape = oshape
self.R = R
self.blim = blim
self.blim_inv = 1/blim
self.__seedb__()
def bmap (self, x, b) :
if np.abs(b - 1) < 1e-4 :
return 4*x*(1-x)
else :
return (b - x*np.power(b,x) - (1-x)*np.power(b, 1-x))/(b - np.sqrt(b))
def getrand (self) :
""" Returns a random number according to the BEACH PSRN algorithm """
ret = self.x
for _ in range(self.R) :
self.x = self.bmap(self.x, self.b)
self.r = logistic(self.r)
if self.r <= self.blim_inv :
self.r = self.x if self.x >= self.blim_inv else self.blim_inv
self.b = Beach.robust_lim + self.r*(self.blim - Beach.robust_lim)
return ret
def chaosPoints (self, _) :
""" Generates PSRNs with a given shape """
Np, D = self.oshape
return np.array([
self.getrand() for _ in range(Np*D)
]).reshape(self.oshape)
# Used by CPSO for generating swarms
cgen = {
"log" : Logistic,
"lorenz" : Lorenz,
"tent" : Tent,
"henon" : Henon,
"baker" : Baker,
"inverse" : InverseLE,
"beach" : Beach
}
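# Illustrative sketch (not part of the original module): drawing chaotic numbers for
# a made-up swarm of 4 particles in 3 dimensions using the logistic map generator.
if __name__ == '__main__':
    log_gen = Logistic((4, 3))        # oshape = (Np, D) = (4, 3), two internal generators
    points = log_gen.chaosPoints(0)   # gno=0 evolves and returns every generator
    print(points.shape)               # (2, 4, 3) -> (gens, Np, D)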
|
[
"numpy.ndindex",
"numpy.abs",
"numpy.log",
"numpy.copy",
"numpy.invert",
"scipy.optimize.brentq",
"numpy.random.random_sample",
"numpy.power",
"numpy.square",
"numpy.min",
"numpy.where",
"numpy.array",
"numpy.exp",
"numpy.arange",
"numpy.linspace",
"numpy.random.rand",
"numpy.max",
"numpy.sqrt"
] |
[((14043, 14053), 'numpy.exp', 'np.exp', (['(-4)'], {}), '(-4)\n', (14049, 14053), True, 'import numpy as np\n'), ((2446, 2465), 'numpy.copy', 'np.copy', (['self.cgens'], {}), '(self.cgens)\n', (2453, 2465), True, 'import numpy as np\n'), ((3903, 3913), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (3910, 3913), True, 'import numpy as np\n'), ((6037, 6047), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (6044, 6047), True, 'import numpy as np\n'), ((6673, 6683), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (6680, 6683), True, 'import numpy as np\n'), ((6712, 6757), 'numpy.where', 'np.where', (['(x <= mu)', '(x / mu)', '((1 - x) / (1 - mu))'], {}), '(x <= mu, x / mu, (1 - x) / (1 - mu))\n', (6720, 6757), True, 'import numpy as np\n'), ((9315, 9343), 'numpy.ndindex', 'np.ndindex', (['self.gshape[:-1]'], {}), '(self.gshape[:-1])\n', (9325, 9343), True, 'import numpy as np\n'), ((12828, 12840), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (12837, 12840), True, 'import numpy as np\n'), ((13383, 13424), 'numpy.copy', 'np.copy', (['self.cgens[gind, ..., self.comp]'], {}), '(self.cgens[gind, ..., self.comp])\n', (13390, 13424), True, 'import numpy as np\n'), ((13545, 13560), 'numpy.invert', 'np.invert', (['less'], {}), '(less)\n', (13554, 13560), True, 'import numpy as np\n'), ((14163, 14179), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14177, 14179), True, 'import numpy as np\n'), ((14186, 14202), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14200, 14202), True, 'import numpy as np\n'), ((4313, 4322), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4319, 4322), True, 'import numpy as np\n'), ((5258, 5290), 'scipy.optimize.brentq', 'brentq', (['lep', '(lo + eps)', '(mid - eps)'], {}), '(lep, lo + eps, mid - eps)\n', (5264, 5290), False, 'from scipy.optimize import brentq\n'), ((5288, 5320), 'scipy.optimize.brentq', 'brentq', (['lep', '(mid + eps)', '(hi - eps)'], {}), '(lep, mid + eps, hi - eps)\n', (5294, 5320), False, 'from scipy.optimize import brentq\n'), ((5710, 5719), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (5716, 5719), True, 'import numpy as np\n'), ((11317, 11355), 'numpy.array', 'np.array', (['[[minx, maxx], [miny, maxy]]'], {}), '([[minx, maxx], [miny, maxy]])\n', (11325, 11355), True, 'import numpy as np\n'), ((12759, 12792), 'numpy.copy', 'np.copy', (['self.cgens[gind, ..., 0]'], {}), '(self.cgens[gind, ..., 0])\n', (12766, 12792), True, 'import numpy as np\n'), ((13438, 13471), 'numpy.copy', 'np.copy', (['self.cgens[gind, ..., 0]'], {}), '(self.cgens[gind, ..., 0])\n', (13445, 13471), True, 'import numpy as np\n'), ((13471, 13504), 'numpy.copy', 'np.copy', (['self.cgens[gind, ..., 1]'], {}), '(self.cgens[gind, ..., 1])\n', (13478, 13504), True, 'import numpy as np\n'), ((14288, 14304), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14302, 14304), True, 'import numpy as np\n'), ((14598, 14611), 'numpy.abs', 'np.abs', (['(b - 1)'], {}), '(b - 1)\n', (14604, 14611), True, 'import numpy as np\n'), ((2265, 2301), 'numpy.random.random_sample', 'np.random.random_sample', (['self.gshape'], {}), '(self.gshape)\n', (2288, 2301), True, 'import numpy as np\n'), ((5784, 5829), 'numpy.where', 'np.where', (['(x <= mu)', '(x / mu)', '((1 - x) / (1 - mu))'], {}), '(x <= mu, x / mu, (1 - x) / (1 - mu))\n', (5792, 5829), True, 'import numpy as np\n'), ((10188, 10217), 'numpy.where', 'np.where', (['(n2 > 1)', '(1 - eps)', 'n2'], {}), '(n2 > 1, 1 - eps, n2)\n', (10196, 10217), True, 'import numpy as np\n'), ((10933, 10949), 'numpy.random.rand', 
'np.random.rand', ([], {}), '()\n', (10947, 10949), True, 'import numpy as np\n'), ((10951, 10967), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (10965, 10967), True, 'import numpy as np\n'), ((12375, 12404), 'numpy.where', 'np.where', (['(n2 > 1)', '(1 - eps)', 'n2'], {}), '(n2 > 1, 1 - eps, n2)\n', (12383, 12404), True, 'import numpy as np\n'), ((4827, 4846), 'numpy.arange', 'np.arange', (['(0)', '(n - 2)'], {}), '(0, n - 2)\n', (4836, 4846), True, 'import numpy as np\n'), ((7747, 7764), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (7761, 7764), True, 'import numpy as np\n'), ((7766, 7794), 'numpy.linspace', 'np.linspace', (['(0)', '(9999)', '(999999)'], {}), '(0, 9999, 999999)\n', (7777, 7794), True, 'import numpy as np\n'), ((9498, 9536), 'numpy.arange', 'np.arange', (['(0)', '(self.h * (T + 1))', 'self.h'], {}), '(0, self.h * (T + 1), self.h)\n', (9507, 9536), True, 'import numpy as np\n'), ((10248, 10273), 'numpy.where', 'np.where', (['(n1 < 0)', 'eps', 'n1'], {}), '(n1 < 0, eps, n1)\n', (10256, 10273), True, 'import numpy as np\n'), ((12435, 12460), 'numpy.where', 'np.where', (['(n1 < 0)', 'eps', 'n1'], {}), '(n1 < 0, eps, n1)\n', (12443, 12460), True, 'import numpy as np\n'), ((14712, 14722), 'numpy.sqrt', 'np.sqrt', (['b'], {}), '(b)\n', (14719, 14722), True, 'import numpy as np\n'), ((4384, 4397), 'numpy.log', 'np.log', (['(1 - p)'], {}), '(1 - p)\n', (4390, 4397), True, 'import numpy as np\n'), ((4543, 4558), 'numpy.exp', 'np.exp', (['self.le'], {}), '(self.le)\n', (4549, 4558), True, 'import numpy as np\n'), ((4658, 4675), 'numpy.log', 'np.log', (['(2 / n - p)'], {}), '(2 / n - p)\n', (4664, 4675), True, 'import numpy as np\n'), ((14689, 14707), 'numpy.power', 'np.power', (['b', '(1 - x)'], {}), '(b, 1 - x)\n', (14697, 14707), True, 'import numpy as np\n'), ((4366, 4375), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (4372, 4375), True, 'import numpy as np\n'), ((4636, 4645), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (4642, 4645), True, 'import numpy as np\n'), ((4886, 4923), 'numpy.where', 'np.where', (['(nx - nums > 0)', '(nx - nums)', 'n'], {}), '(nx - nums > 0, nx - nums, n)\n', (4894, 4923), True, 'import numpy as np\n'), ((5382, 5398), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5396, 5398), True, 'import numpy as np\n'), ((14667, 14681), 'numpy.power', 'np.power', (['b', 'x'], {}), '(b, x)\n', (14675, 14681), True, 'import numpy as np\n'), ((4622, 4631), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (4628, 4631), True, 'import numpy as np\n'), ((7643, 7658), 'numpy.min', 'np.min', (['s[:, i]'], {}), '(s[:, i])\n', (7649, 7658), True, 'import numpy as np\n'), ((7659, 7674), 'numpy.max', 'np.max', (['s[:, i]'], {}), '(s[:, i])\n', (7665, 7674), True, 'import numpy as np\n')]
|
from PIL import Image
import numpy as np
im = Image.open('../bbtor.jpg').convert('L')
a = np.array(im)[::2, ::2]
gx = np.array([[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]])
gy = np.array([[-1, -2, -1],
[ 0, 0, 0],
[ 1, 2, 1]])
sobel = np.zeros(a.shape)
for y in range(a.shape[0]-2):
for x in range(a.shape[1]-2):
sx = np.sum(gx * a[y:y+3, x:x+3])
        sy = np.sum(gy * a[y:y+3, x:x+3])
sobel[y, x] = np.sqrt(sx**2 + sy**2)
snorm = 255 * sobel / sobel.max()
b = snorm.astype(np.uint8)
im = Image.fromarray(b)
im.save('sobel.png')
|
[
"numpy.sum",
"numpy.zeros",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"numpy.sqrt"
] |
[((122, 168), 'numpy.array', 'np.array', (['[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]'], {}), '([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\n', (130, 168), True, 'import numpy as np\n'), ((205, 251), 'numpy.array', 'np.array', (['[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]'], {}), '([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])\n', (213, 251), True, 'import numpy as np\n'), ((297, 314), 'numpy.zeros', 'np.zeros', (['a.shape'], {}), '(a.shape)\n', (305, 314), True, 'import numpy as np\n'), ((579, 597), 'PIL.Image.fromarray', 'Image.fromarray', (['b'], {}), '(b)\n', (594, 597), False, 'from PIL import Image\n'), ((93, 105), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (101, 105), True, 'import numpy as np\n'), ((48, 74), 'PIL.Image.open', 'Image.open', (['"""../bbtor.jpg"""'], {}), "('../bbtor.jpg')\n", (58, 74), False, 'from PIL import Image\n'), ((393, 425), 'numpy.sum', 'np.sum', (['(gx * a[y:y + 3, x:x + 3])'], {}), '(gx * a[y:y + 3, x:x + 3])\n', (399, 425), True, 'import numpy as np\n'), ((435, 467), 'numpy.sum', 'np.sum', (['(gx * a[y:y + 3, x:x + 3])'], {}), '(gx * a[y:y + 3, x:x + 3])\n', (441, 467), True, 'import numpy as np\n'), ((487, 513), 'numpy.sqrt', 'np.sqrt', (['(sx ** 2 + sy ** 2)'], {}), '(sx ** 2 + sy ** 2)\n', (494, 513), True, 'import numpy as np\n')]
|
"""Simulate Lorentz's system ODE and discover edes.
Script accepts also optional comand line arguments:
arg0 -- number of samples/models
arg1 -- custom nickname of log that is added to the log filename, which is of
the form: log_lorenz_<custom nickname><random number>.log
"""
import time
import os
import sys # To import from parent directory.
import ProGED.examples.tee_so as te # Log using manually copied class from a forum.
import numpy as np
# import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp, odeint
# # 0.) Log output to lorenz_log_<random>.log file
start = time.perf_counter()
# # # Input: # # #
eqation = "123" # Code for eq_disco([1], [2,3]).
# sample_size = 5
sample_size = 30 # It finds the equation at 30.
log_nickname = ""
isTee = False
# is_chaotic_wiki = "cw"
# is_chaotic_wiki = "c0" # chaotic but not wiki
is_chaotic_wiki = "__" # For hyperopt: not chaotic, not wiki.
if len(sys.argv) >= 2:
sample_size = int(sys.argv[1])
if len(sys.argv) >= 3:
isTee = True
log_nickname = sys.argv[2]
if len(sys.argv) >= 4:
eqation = sys.argv[3]
if len(sys.argv) >= 5:
is_chaotic_wiki = sys.argv[4]
aux = [int(i) for i in eqation]
aquation = (aux[:1], aux[1:])
random = str(np.random.random())
if isTee:
print("Filename id: " + log_nickname + random)
try:
log_object = te.Tee("examples/log_lorenz_" + log_nickname + random + ".txt")
except FileNotFoundError:
log_object = te.Tee("log_lorenz_" + log_nickname + random + ".txt")
if len(is_chaotic_wiki) != 2:
# print("Wrong cmd argument for chaotic/calm wiki/my diff. eq. configuration")
print("Wrong 5th chaotic+wiki cmd argument: should be of length 2, e.g. cw or 0w.")
else:
c, w = is_chaotic_wiki[0], is_chaotic_wiki[1]
is_chaotic = True if c == "c" else False
is_wiki = True if w == "w" else False
# Log signature.
print(f"Settings of this execution:\n"
f"equation: {eqation} aka. target index\n"
f"sample_size: {sample_size}\n"
f"is_chaotic: {is_chaotic}\n"
f"is_wiki: {is_wiki}\n"
)
# # 1.) Data construction (simulation of Lorenz):
np.random.seed(0)
T = np.linspace(0.48, 0.85, 1000) # Times currently run at.
if is_wiki:
T = np.linspace(0, 40, 4000) # Chaotic Lorenz times noted on Wiki.
# # Lorenz's sode:
# dx/dt = \sigma * (y-x)
# dy/dt = x*(\rho-z) - y
# dz/dt = x*y - \beta*z
# non-chaotic configuration:
sigma = 1.3 # 1 # 0
rho = -15 # 1 # 0
beta = 3.4 # 1 # 0
# Chaotic configuration:
if is_chaotic:
sigma = 10 # 1 # 0
rho = 28 # 1 # 0
beta = 8/3 # 1 # 0
y0 = [0.1, 0.4, 0.5] # Lorenz initial values run at.
if is_wiki:
y0 = [1, 1, 1] # Chaotic Lorenz initial values noted on Wiki.
def dy_dt(t, ys): # \frac{dy}{dt} ; # y = [y1,y2,y3,...] # ( shape= (n,) )
# \dot{y} = y^. = [y1^., y2^., y3^., ...]
x, y, z = ys
return [sigma * (y-x), x*(rho-z) - y, x*y - beta*z]
# Yode = solve_ivp(dy_dt, (T[0], T[-1]), y0, t_eval=T, atol=0)
max_steps = 10**6
# Convert max_steps to min_step:
min_step_from_max_steps = abs(T[-1] - T[0])/max_steps
# The minimal min_step to avoid min step error in LSODA:
min_step_error = 10**(-15)
min_step = max(min_step_from_max_steps, min_step_error) # Force them both.
rtol=10**(-6)
Yode = solve_ivp(dy_dt, (T[0], T[-1]), y0, method="LSODA", min_step=min_step, t_eval=T, rtol=rtol, atol=0).y
# Yode = odeint(dy_dt, y0, T, rtol=rtol, atol=0, tfirst=True, printmessg=0, hmin=min_step).T
# # Plot simulated data:
# plt.xlabel("T [time]")
# plt.ylabel("solutions [ys(t)]")
# plt.plot(T, Yode[0], label="solution x")
# plt.plot(T, Yode[1], label="solution y")
# plt.plot(T, Yode[2], label="solution z")
# plt.legend()
# plt.show()
data = np.concatenate((T[:, np.newaxis], Yode.T), axis=1) # Embed Time column into dataset.
# # # # 2.) Discover one ode at a time.
# sys.path += ['.','..']
from hyperopt import hp
from ProGED.equation_discoverer import EqDisco
from ProGED.parameter_estimation import DE_fit, hyperopt_fit #, DE_fit_metamodel
np.random.seed(0)
ED = EqDisco(data = data,
task = None,
task_type = "differential",
time_index = 0,
target_variable_index = aquation[0][0], # aquation = [123] -> target = 1 -> ([t,x,y,z]->x)
variable_names=["t", "x", "y", "z"],
generator = "grammar",
generator_template_name = "polynomial",
generator_settings={
# "variables": ["'x'", "'y'"],
"p_S": [0.4, 0.6],
"p_T": [0.4, 0.6],
"p_vars": [0.33, 0.33, 0.34],
"p_R": [1, 0],
"p_F": [],
"functions": [],
},
sample_size = sample_size,
verbosity = 4)
ED.generate_models()
ED.fit_models(
estimation_settings={
"timeout": 115,
"max_ode_steps": 10**6,
# "lower_upper_bounds": (-30, 30),
"lower_upper_bounds": (-11, 11),
"optimizer": 'differential_evolution',
# "optimizer": 'hyperopt',
## "hyperopt_space_fn": hp.uniform, # Works at nocw (non chaotic and non-wiki).
"hyperopt_max_evals": 150,
# "hyperopt_space_fn": hp.qnormal,
# "hyperopt_space_kwargs": {"mu": 0, "sigma": 1, "q": 1/30},
"verbosity": 1,
})
print(ED.models)
print("\nFinal score:")
for m in ED.models:
if m.get_error() < 10**(-3) or True:
print(f"model: {str(m.get_full_expr()):<70}; "
+ f"p: {m.p:<23}; "
+ f"error: {m.get_error()}")
finnish = time.perf_counter()
print(f"Finnished in {round(finnish-start, 2)} seconds")
|
[
"numpy.random.seed",
"scipy.integrate.solve_ivp",
"time.perf_counter",
"ProGED.equation_discoverer.EqDisco",
"numpy.random.random",
"numpy.linspace",
"ProGED.examples.tee_so.Tee",
"numpy.concatenate"
] |
[((604, 623), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (621, 623), False, 'import time\n'), ((2135, 2152), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2149, 2152), True, 'import numpy as np\n'), ((2157, 2186), 'numpy.linspace', 'np.linspace', (['(0.48)', '(0.85)', '(1000)'], {}), '(0.48, 0.85, 1000)\n', (2168, 2186), True, 'import numpy as np\n'), ((3712, 3762), 'numpy.concatenate', 'np.concatenate', (['(T[:, np.newaxis], Yode.T)'], {'axis': '(1)'}), '((T[:, np.newaxis], Yode.T), axis=1)\n', (3726, 3762), True, 'import numpy as np\n'), ((4020, 4037), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4034, 4037), True, 'import numpy as np\n'), ((4044, 4438), 'ProGED.equation_discoverer.EqDisco', 'EqDisco', ([], {'data': 'data', 'task': 'None', 'task_type': '"""differential"""', 'time_index': '(0)', 'target_variable_index': 'aquation[0][0]', 'variable_names': "['t', 'x', 'y', 'z']", 'generator': '"""grammar"""', 'generator_template_name': '"""polynomial"""', 'generator_settings': "{'p_S': [0.4, 0.6], 'p_T': [0.4, 0.6], 'p_vars': [0.33, 0.33, 0.34], 'p_R':\n [1, 0], 'p_F': [], 'functions': []}", 'sample_size': 'sample_size', 'verbosity': '(4)'}), "(data=data, task=None, task_type='differential', time_index=0,\n target_variable_index=aquation[0][0], variable_names=['t', 'x', 'y',\n 'z'], generator='grammar', generator_template_name='polynomial',\n generator_settings={'p_S': [0.4, 0.6], 'p_T': [0.4, 0.6], 'p_vars': [\n 0.33, 0.33, 0.34], 'p_R': [1, 0], 'p_F': [], 'functions': []},\n sample_size=sample_size, verbosity=4)\n", (4051, 4438), False, 'from ProGED.equation_discoverer import EqDisco\n'), ((5585, 5604), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5602, 5604), False, 'import time\n'), ((1240, 1258), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1256, 1258), True, 'import numpy as np\n'), ((2234, 2258), 'numpy.linspace', 'np.linspace', (['(0)', '(40)', '(4000)'], {}), '(0, 40, 4000)\n', (2245, 2258), True, 'import numpy as np\n'), ((3267, 3370), 'scipy.integrate.solve_ivp', 'solve_ivp', (['dy_dt', '(T[0], T[-1])', 'y0'], {'method': '"""LSODA"""', 'min_step': 'min_step', 't_eval': 'T', 'rtol': 'rtol', 'atol': '(0)'}), "(dy_dt, (T[0], T[-1]), y0, method='LSODA', min_step=min_step,\n t_eval=T, rtol=rtol, atol=0)\n", (3276, 3370), False, 'from scipy.integrate import solve_ivp, odeint\n'), ((1351, 1414), 'ProGED.examples.tee_so.Tee', 'te.Tee', (["('examples/log_lorenz_' + log_nickname + random + '.txt')"], {}), "('examples/log_lorenz_' + log_nickname + random + '.txt')\n", (1357, 1414), True, 'import ProGED.examples.tee_so as te\n'), ((1466, 1520), 'ProGED.examples.tee_so.Tee', 'te.Tee', (["('log_lorenz_' + log_nickname + random + '.txt')"], {}), "('log_lorenz_' + log_nickname + random + '.txt')\n", (1472, 1520), True, 'import ProGED.examples.tee_so as te\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 15 10:09:44 2012
@author: schelle
"""
import matplotlib.pyplot as plt
import numpy as np
def sCurve(X,a=0.0,b=1.0,c=1.0):
s = 1.0/(b + np.exp(-c * (X-a)))
return s
dem50 = 300
dem90 = 313
dem10 = 275
perc = 0.9
C = -np.log(1.0/(perc) - 1)/(dem90 - dem50)
CC = -np.log(1.0/(1-perc) - 1)/(dem10 - dem50)
CCC = (C + CC ) * 0.5
zz = np.array(np.arange(250,380))
S = sCurve(zz,a=dem50,c=C)
SS = sCurve(zz,a=dem50,c=CC)
SSS = sCurve(zz,a=dem50,c=CCC)
plt.plot(zz,S,label='fitted to 90%')
plt.plot(zz,SS,label='fitted to 10%')
plt.plot(zz,SSS,label='fitted to average')
plt.plot(dem90,0.9,'p')
plt.plot(dem10,0.1,'p')
plt.legend()
plt.show()
|
[
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.arange",
"numpy.exp"
] |
[((509, 547), 'matplotlib.pyplot.plot', 'plt.plot', (['zz', 'S'], {'label': '"""fitted to 90%"""'}), "(zz, S, label='fitted to 90%')\n", (517, 547), True, 'import matplotlib.pyplot as plt\n'), ((546, 585), 'matplotlib.pyplot.plot', 'plt.plot', (['zz', 'SS'], {'label': '"""fitted to 10%"""'}), "(zz, SS, label='fitted to 10%')\n", (554, 585), True, 'import matplotlib.pyplot as plt\n'), ((584, 628), 'matplotlib.pyplot.plot', 'plt.plot', (['zz', 'SSS'], {'label': '"""fitted to average"""'}), "(zz, SSS, label='fitted to average')\n", (592, 628), True, 'import matplotlib.pyplot as plt\n'), ((627, 652), 'matplotlib.pyplot.plot', 'plt.plot', (['dem90', '(0.9)', '"""p"""'], {}), "(dem90, 0.9, 'p')\n", (635, 652), True, 'import matplotlib.pyplot as plt\n'), ((651, 676), 'matplotlib.pyplot.plot', 'plt.plot', (['dem10', '(0.1)', '"""p"""'], {}), "(dem10, 0.1, 'p')\n", (659, 676), True, 'import matplotlib.pyplot as plt\n'), ((675, 687), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (685, 687), True, 'import matplotlib.pyplot as plt\n'), ((688, 698), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (696, 698), True, 'import matplotlib.pyplot as plt\n'), ((402, 421), 'numpy.arange', 'np.arange', (['(250)', '(380)'], {}), '(250, 380)\n', (411, 421), True, 'import numpy as np\n'), ((280, 302), 'numpy.log', 'np.log', (['(1.0 / perc - 1)'], {}), '(1.0 / perc - 1)\n', (286, 302), True, 'import numpy as np\n'), ((325, 353), 'numpy.log', 'np.log', (['(1.0 / (1 - perc) - 1)'], {}), '(1.0 / (1 - perc) - 1)\n', (331, 353), True, 'import numpy as np\n'), ((193, 213), 'numpy.exp', 'np.exp', (['(-c * (X - a))'], {}), '(-c * (X - a))\n', (199, 213), True, 'import numpy as np\n')]
|
# Copyright (c) 2018, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import numpy as np
import FINN.core.layers as lb
import FINN.core.quantize as qnt
# Transformations are a key part of FINN. Each transformation takes in a QNN
# represented in the FINN IR (and possibly some other parameters), and returns
# a transformed variant also represented in the FINN IR.
# Each transformation returns a tuple (transformed_qnn, num_changes) where
# num_changes is the number of alterations (>=0) applied on the input.
# Thus, it is vital that a transformation returns 0 when it made no changes, OR
# when it is sure that no more calls to this transform is needed, in order to
# avoid infinite loops.
# Based on this mechanic, we use the following function to repeatedly apply the
# same transform on the graph, until everything that can be transformed has been
# transformed:
def apply_repeated(pipeline, pass_to_apply):
"Repeatedly applies a transform until there is nothing left to change."
ret = copy.deepcopy(pipeline)
while True:
(ret, numChanges) = pass_to_apply(ret)
if numChanges == 0:
break
return ret
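# An illustrative sketch (not part of the original file): the smallest possible pass
# obeying the (transformed_qnn, num_changes) contract described above -- it reports
# zero changes, so apply_repeated stops after a single call.
def passIdentity(pipeline):
    "Example no-op pass: copies the pipeline and changes nothing."
    return (copy.deepcopy(pipeline), 0)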
# general-purpose (device-neutral) transformations
def directlyQuantizeLayer(layer, bits):
"Apply direct quantization to given layer, returns [quantized layer, scaling layer]"
assert(lb.isMatrixLayer(layer))
qlayer = copy.deepcopy(layer)
(Wint, alpha) = qnt.quantize_matrix(qlayer.W, bits)
qlayer.W = Wint
qlayer.wbits = bits
slayer = lb.LinearLayer(A = alpha, B = np.zeros(alpha.shape))
return [qlayer, slayer]
def directlyQuantizeAllFloatWeights(pipeline, bits):
"Quantize all float weights in network to given number of bits."
ret = []
pipeline_copy = copy.deepcopy(pipeline)
for L in pipeline_copy:
if lb.isMatrixLayer(L):
if L.wbits == 32:
ret += directlyQuantizeLayer(L, bits)
else:
ret += [L]
else:
ret += [L]
return ret
# TODO change name of this transform to "streamline" to avoid
# overloading <NAME>'s "cromulent networks"
def makeCromulent(pipeline, reorder_maxpool=True):
"Simplifies a QNN by absorbing linear operators into thresholds."
ret = pipeline
# ret = apply_repeated(ret, passRemoveOneDimMaxPool)
# ret = apply_repeated(ret, passUpdateChannels)
ret = apply_repeated(ret, passFwdPropagateLinear)
ret = apply_repeated(ret, passCollapseLinear)
ret = apply_repeated(ret, passAbsorbLinearIntoThreshold)
if reorder_maxpool:
ret = apply_repeated(ret, passReorderMaxPooling)
ret = apply_repeated(ret, passAbsorbLinearIntoThreshold)
ret = apply_repeated(ret, passUpdateBitwidths)
ret = apply_repeated(ret, passRoundUpIntThresholds)
return ret
def passThresholdSetter(pipeline):
inStages = pipeline
ret = []
lnum = 0
for L in inStages:
ltypename = L.__class__.__name__
if ltypename == "ThresholdLayer":
pass
ret += [L]
lnum += 1
return (ret, 0)
def passGiveUniqueNames(pipeline):
"Give unique name to each layer using simple enumeration."
inStages = pipeline
ret = []
lnum = 0
for L in inStages:
ltypename = L.__class__.__name__
L.name = "%s_%d" % (ltypename, lnum)
ret += [L]
lnum += 1
return (ret, 0)
def passFuseActivations(pipeline):
"Replace (Matrix, Threshold) layer pairs with fused equivalents."
inStages = pipeline
inStages.reverse()
numChanges = 0
ret = []
while len(inStages) > 1:
layerA = inStages.pop()
layerB = inStages.pop()
if lb.isMatrixLayer(layerA) and lb.isThresholdLayer(layerB):
ret += [lb.MatrixThresholdLayer("", layerA, layerB)]
numChanges += 1
else:
ret += [layerA]
inStages.append(layerB)
# pop final element, if any left
if len(inStages) == 1:
ret += [inStages.pop()]
return (ret, numChanges)
def passRoundUpIntThresholds(pipeline):
"Round up thresholds of ThresholdingLayers with integer inputs."
inStages = pipeline
ret = []
for L in inStages:
# TODO this is not a good way to check for integer input --
# fix this once we have i/o data types specified
if lb.isThresholdLayer(L) and L.ibits <= 16:
L.thresholds = np.ceil(L.thresholds).astype(np.int16)
ret += [L]
return (ret, 0)
def passUpdateBitwidths(pipeline, inputBitWidth = 1):
"Update the input/output bitwidths throughout the graph."
inStages = pipeline
numChanges = 0
ret = []
lastBitWidth = inputBitWidth
for L in inStages:
iprev = L.ibits
oprev = L.obits
lastBitWidth = L.updateBitwidths(lastBitWidth)
ret += [L]
return (ret, 0)
def passCollapseLinear(pipeline):
"Collapse neighboring linear (non-matrix) layers into a single linear layer."
inStages = pipeline
inStages.reverse()
numChanges = 0
ret = []
while len(inStages) > 1:
layerA = inStages.pop()
layerB = inStages.pop()
if lb.isLinearLayer(layerA) and lb.isLinearLayer(layerB):
# let layerA be Jx + K and layerB be Mx + N
# the output is M(Jx + K) + N = MJx + MK + N
# so the collapsed layer will be (MJ)x + (MK + N)
scaleNew = layerA.A * layerB.A
shiftNew = layerB.A * layerA.B + layerB.B
# TODO emit Scalarlb.LinearLayer, or just do shape=1 for those
ret += [lb.LinearLayer(scaleNew, shiftNew)]
numChanges += 1
else:
ret += [layerA]
inStages.append(layerB)
# pop final element, if any left
if len(inStages) == 1:
ret += [inStages.pop()]
return (ret, numChanges)
def passFwdPropagateLinear(pipeline):
"Move linear layers past matrix and pooling layers."
inStages = pipeline
inStages.reverse()
numChanges = 0
ret = []
while len(inStages) > 1:
layerA = inStages.pop()
layerB = inStages.pop()
if lb.isLinearLayer(layerA) and lb.isMatrixLayer(layerB):
# move the scalar ax+b to after the matrix layer Wx
# originally we have W(ax+b) = Wax + Wb
# desired: Mx+N = a(Wx) + Wb
# repeat a and b to make appropriately-sized vectors
a = layerA.A
b = layerA.B
W = layerB.W
matrixLayerOutSize = W.shape[0]
scaleNew = a*np.ones(matrixLayerOutSize)
shiftNew = np.dot(W, b*np.ones(W.shape[1]))
ret += [layerB, lb.LinearLayer(scaleNew, shiftNew)]
numChanges += 1
elif lb.isLinearLayer(layerA) and lb.isPoolingLayer(layerB):
# TODO do we need to check layerA.A < 0 and maxpooling here?
ret += [layerB, layerA]
numChanges += 1
else:
ret += [layerA]
inStages.append(layerB)
# pop final element, if any left
if len(inStages) == 1:
ret += [inStages.pop()]
return (ret, numChanges)
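# Editorial sanity check (not invoked by any pass): verifies the identity used above,
# W @ (a*x + b) == a*(W @ x) + W @ (b*ones), i.e. a scalar linear op can be pushed
# past a matrix layer by rescaling its output and folding the bias through W.
def _demo_fwd_propagate_identity(rows=3, cols=5, a=0.5, b=1.25):
    W = np.random.rand(rows, cols)
    x = np.random.rand(cols)
    lhs = np.dot(W, a * x + b)
    rhs = a * np.dot(W, x) + np.dot(W, b * np.ones(cols))
    assert np.allclose(lhs, rhs)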
def passAbsorbLinearIntoThreshold(pipeline):
"Absorb linear transformations into the following threshold layer."
inStages = pipeline
inStages.reverse()
numChanges = 0
ret = []
while len(inStages) > 1:
layerA = inStages.pop()
layerB = inStages.pop()
if lb.isLinearLayer(layerA) and lb.isThresholdLayer(layerB):
# absorb the linear transform Ax+B into the threshold layer
# by updating each threshold T as Tnew = (T-B)/A
A = layerA.A
B = layerA.B
T = layerB.thresholds
Tnew = np.asarray([(t-B)/A for t in T])
layerBThresClass = layerB.__class__
ret += [layerBThresClass(Tnew)]
numChanges += 1
else:
ret += [layerA]
inStages.append(layerB)
# pop final element, if any left
if len(inStages) == 1:
ret += [inStages.pop()]
return (ret, numChanges)
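# Editorial sanity check (not invoked by any pass): for A > 0, comparing A*x + B against
# a threshold T is equivalent to comparing x against (T - B) / A, which is the update
# applied above. (A negative A would also flip the comparison direction.)
def _demo_threshold_absorb(A=2.0, B=-0.5, T=1.0):
    x = np.linspace(-3, 3, 13)
    assert np.array_equal(A * x + B >= T, x >= (T - B) / A)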
def passReorderMaxPooling(pipeline):
"Move max pooling layers past thresholding layers."
inStages = pipeline
inStages.reverse()
numChanges = 0
ret = []
while len(inStages) > 1:
layerA = inStages.pop()
layerB = inStages.pop()
if lb.isMaxPoolingLayer(layerA) and lb.isThresholdLayer(layerB):
            # need to check that the thresholding preserves max in order to reorder
for t in range(len(layerB.thresholds)-1):
if not (layerB.thresholds[t+1] >= layerB.thresholds[t]).all():
raise Exception("Threshold does not preserve max")
# re-insert layers in reversed order
ret += [layerB, layerA]
numChanges += 1
else:
ret += [layerA]
inStages.append(layerB)
# pop final element, if any left
if len(inStages) == 1:
ret += [inStages.pop()]
return (ret, numChanges)
def passInterleaveChannels(pipeline):
"""Interleave the weight matrices of all convolutional layers, plus the first
subsequent fully-connected layer."""
ret = []
numChanges = 0
# whether the inputs to the current layer were interleaved
last_output_interleaved = 0
# the interleave factor for the inputs to the current layer
last_output_interleave_factor = 0
for L in pipeline:
if lb.isConvLayer(L):
# interleave the conv weight matrix
# originally, we assume the data layout is [ofm][ifm][k][k]
W = L.W.reshape(L.ofm, L.ifm, L.get_filter_dim(), L.get_filter_dim())
# transpose the weight tensor to be [ofm][k][k][ifm]
W = W.transpose(0, 2, 3, 1)
# put back into matrix form and set layer weight matrix
L.W = W.reshape(L.ofm, -1)
ret += [L]
last_output_interleaved = 1
last_output_interleave_factor = L.ofm
elif lb.isFCLayer(L) and last_output_interleaved == 1:
# interleave the columns in the weight matrix of the first FC
# layer
r = L.W.shape[0]
c = L.W.shape[1]
W = L.W.reshape(r, last_output_interleave_factor, -1)
L.W = W.transpose(0, 2, 1).reshape(r, c)
ret += [L]
# output is no longer interleaved
last_output_interleaved = 0
else:
# copy layer as-is
ret += [L]
return (ret, numChanges)
|
[
"copy.deepcopy",
"FINN.core.layers.isMatrixLayer",
"FINN.core.layers.isConvLayer",
"numpy.ceil",
"numpy.asarray",
"FINN.core.layers.MatrixThresholdLayer",
"numpy.zeros",
"numpy.ones",
"FINN.core.quantize.quantize_matrix",
"FINN.core.layers.isFCLayer",
"FINN.core.layers.isLinearLayer",
"FINN.core.layers.isThresholdLayer",
"FINN.core.layers.isMaxPoolingLayer",
"FINN.core.layers.LinearLayer",
"FINN.core.layers.isPoolingLayer"
] |
[((2486, 2509), 'copy.deepcopy', 'copy.deepcopy', (['pipeline'], {}), '(pipeline)\n', (2499, 2509), False, 'import copy\n'), ((2827, 2850), 'FINN.core.layers.isMatrixLayer', 'lb.isMatrixLayer', (['layer'], {}), '(layer)\n', (2843, 2850), True, 'import FINN.core.layers as lb\n'), ((2865, 2885), 'copy.deepcopy', 'copy.deepcopy', (['layer'], {}), '(layer)\n', (2878, 2885), False, 'import copy\n'), ((2906, 2941), 'FINN.core.quantize.quantize_matrix', 'qnt.quantize_matrix', (['qlayer.W', 'bits'], {}), '(qlayer.W, bits)\n', (2925, 2941), True, 'import FINN.core.quantize as qnt\n'), ((3236, 3259), 'copy.deepcopy', 'copy.deepcopy', (['pipeline'], {}), '(pipeline)\n', (3249, 3259), False, 'import copy\n'), ((3299, 3318), 'FINN.core.layers.isMatrixLayer', 'lb.isMatrixLayer', (['L'], {}), '(L)\n', (3315, 3318), True, 'import FINN.core.layers as lb\n'), ((10922, 10939), 'FINN.core.layers.isConvLayer', 'lb.isConvLayer', (['L'], {}), '(L)\n', (10936, 10939), True, 'import FINN.core.layers as lb\n'), ((3029, 3050), 'numpy.zeros', 'np.zeros', (['alpha.shape'], {}), '(alpha.shape)\n', (3037, 3050), True, 'import numpy as np\n'), ((5159, 5183), 'FINN.core.layers.isMatrixLayer', 'lb.isMatrixLayer', (['layerA'], {}), '(layerA)\n', (5175, 5183), True, 'import FINN.core.layers as lb\n'), ((5188, 5215), 'FINN.core.layers.isThresholdLayer', 'lb.isThresholdLayer', (['layerB'], {}), '(layerB)\n', (5207, 5215), True, 'import FINN.core.layers as lb\n'), ((5819, 5841), 'FINN.core.layers.isThresholdLayer', 'lb.isThresholdLayer', (['L'], {}), '(L)\n', (5838, 5841), True, 'import FINN.core.layers as lb\n'), ((6637, 6661), 'FINN.core.layers.isLinearLayer', 'lb.isLinearLayer', (['layerA'], {}), '(layerA)\n', (6653, 6661), True, 'import FINN.core.layers as lb\n'), ((6666, 6690), 'FINN.core.layers.isLinearLayer', 'lb.isLinearLayer', (['layerB'], {}), '(layerB)\n', (6682, 6690), True, 'import FINN.core.layers as lb\n'), ((7606, 7630), 'FINN.core.layers.isLinearLayer', 'lb.isLinearLayer', (['layerA'], {}), '(layerA)\n', (7622, 7630), True, 'import FINN.core.layers as lb\n'), ((7635, 7659), 'FINN.core.layers.isMatrixLayer', 'lb.isMatrixLayer', (['layerB'], {}), '(layerB)\n', (7651, 7659), True, 'import FINN.core.layers as lb\n'), ((8914, 8938), 'FINN.core.layers.isLinearLayer', 'lb.isLinearLayer', (['layerA'], {}), '(layerA)\n', (8930, 8938), True, 'import FINN.core.layers as lb\n'), ((8943, 8970), 'FINN.core.layers.isThresholdLayer', 'lb.isThresholdLayer', (['layerB'], {}), '(layerB)\n', (8962, 8970), True, 'import FINN.core.layers as lb\n'), ((9208, 9246), 'numpy.asarray', 'np.asarray', (['[((t - B) / A) for t in T]'], {}), '([((t - B) / A) for t in T])\n', (9218, 9246), True, 'import numpy as np\n'), ((9841, 9869), 'FINN.core.layers.isMaxPoolingLayer', 'lb.isMaxPoolingLayer', (['layerA'], {}), '(layerA)\n', (9861, 9869), True, 'import FINN.core.layers as lb\n'), ((9874, 9901), 'FINN.core.layers.isThresholdLayer', 'lb.isThresholdLayer', (['layerB'], {}), '(layerB)\n', (9893, 9901), True, 'import FINN.core.layers as lb\n'), ((5237, 5280), 'FINN.core.layers.MatrixThresholdLayer', 'lb.MatrixThresholdLayer', (['""""""', 'layerA', 'layerB'], {}), "('', layerA, layerB)\n", (5260, 5280), True, 'import FINN.core.layers as lb\n'), ((7059, 7093), 'FINN.core.layers.LinearLayer', 'lb.LinearLayer', (['scaleNew', 'shiftNew'], {}), '(scaleNew, shiftNew)\n', (7073, 7093), True, 'import FINN.core.layers as lb\n'), ((8027, 8054), 'numpy.ones', 'np.ones', (['matrixLayerOutSize'], {}), '(matrixLayerOutSize)\n', (8034, 8054), True, 'import numpy 
as np\n'), ((8139, 8173), 'FINN.core.layers.LinearLayer', 'lb.LinearLayer', (['scaleNew', 'shiftNew'], {}), '(scaleNew, shiftNew)\n', (8153, 8173), True, 'import FINN.core.layers as lb\n'), ((8216, 8240), 'FINN.core.layers.isLinearLayer', 'lb.isLinearLayer', (['layerA'], {}), '(layerA)\n', (8232, 8240), True, 'import FINN.core.layers as lb\n'), ((8245, 8270), 'FINN.core.layers.isPoolingLayer', 'lb.isPoolingLayer', (['layerB'], {}), '(layerB)\n', (8262, 8270), True, 'import FINN.core.layers as lb\n'), ((11481, 11496), 'FINN.core.layers.isFCLayer', 'lb.isFCLayer', (['L'], {}), '(L)\n', (11493, 11496), True, 'import FINN.core.layers as lb\n'), ((5888, 5909), 'numpy.ceil', 'np.ceil', (['L.thresholds'], {}), '(L.thresholds)\n', (5895, 5909), True, 'import numpy as np\n'), ((8090, 8109), 'numpy.ones', 'np.ones', (['W.shape[1]'], {}), '(W.shape[1])\n', (8097, 8109), True, 'import numpy as np\n')]
|
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import datasets
import sys
import os
import numpy as np
import pandas as pd
from transformers import AutoModel, BertTokenizerFast, AutoModelForSequenceClassification, BertConfig, DataCollatorWithPadding
from transformers import AutoTokenizer
from torch.utils.data import TensorDataset, DataLoader
import random
from transformers import AdamW, set_seed
import time
import ipdb
from utils import normalize_text
from sklearn.metrics import roc_auc_score
from scipy.special import softmax
import wandb
def set_seeds(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a Classification task")
parser.add_argument(
"--train_file", type=str, default='./data/Train_risk_classification_ans.csv', help="A file containing the training data."
)
parser.add_argument(
"--eval_file", type=str, default='./data/Develop_risk_classification.csv', help="A file containing the eval data."
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="Preproc num workers"
)
parser.add_argument(
"--max_seq_length",
type=int,
default=512,
help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed.",
)
parser.add_argument(
"--model_name",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
default='ckiplab/albert-base-chinese',
)
parser.add_argument(
"--tokenizer_name",
type=str,
default='bert-base-chinese',
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=6,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=6,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=5e-6, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=10, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=4,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--save_model_dir", type=str, default='./task1_model', help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=1027, help="A seed for reproducible training.")
parser.add_argument(
"--doc_stride",
type=int,
default=200,
help="When splitting up a long document into chunks how much stride to take between chunks.",
)
parser.add_argument(
"--debug",
action="store_true",
help="Activate debug mode and run training only with a subset of data.",
)
parser.add_argument(
"--device",
default="cuda:1",
help="The ID of the device you would like to use.",
)
parser.add_argument(
"--train_full",
action="store_true",
help="Training with full training data.",
)
parser.add_argument(
"--dropout",
type=float,
default=0.1,
help="Dropout probability to apply.",
)
args = parser.parse_args()
if args.save_model_dir is not None:
os.makedirs(args.save_model_dir, exist_ok=True)
return args
def main(args):
wandb.init(project='ntu_nlp_risk', entity='pwlin')
config = wandb.config
config.learning_rate = args.learning_rate
config.weight_decay = args.weight_decay
config.dropout = args.dropout
config.model = args.model_name
config.batch_size = args.per_device_train_batch_size
config.device = args.device
tokenizer = BertTokenizerFast.from_pretrained(args.tokenizer_name)
def prepare_train_features(examples):
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples['text'],
truncation=True,
max_length=args.max_seq_length, # max_seq_length
stride=args.doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding='max_length' # "max_length"
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
tokenized_examples.pop("offset_mapping")
# Let's label those examples!
tokenized_examples["labels"] = [] # 0 or 1
tokenized_examples["example_id"] = []
for sample_index in sample_mapping:
            # One example can give several overlapping features; sample_index maps this
            # feature back to the example (transcript row) it was generated from.
tokenized_examples["example_id"].append(examples["article_id"][sample_index])
tokenized_examples['labels'].append(int(examples['label'][sample_index]))
return tokenized_examples
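    # Editorial note: with max_seq_length=512 and doc_stride=200, a long transcript is split
    # into several overlapping features; overflow_to_sample_mapping ties each feature back to
    # its source row so chunk-level predictions can later be pooled per article_id (see the
    # log-probability sums in the train/eval loops below).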
raw_dataset = datasets.load_dataset("csv", data_files=args.train_file)
train_dataset = raw_dataset['train'].map(normalize_text)
raw_dataset = raw_dataset['train'].train_test_split(test_size=0.03)
# if args.debug:
# for split in raw_dataset.keys():
# raw_dataset[split] = raw_dataset[split].select(range(20))
if not args.train_full:
train_dataset = raw_dataset['train'].map(normalize_text)
eval_dataset = raw_dataset['test'].map(normalize_text)
column_names = raw_dataset["train"].column_names
train_ids = train_dataset['article_id']
eval_ids = eval_dataset['article_id']
num_train_samples = len(train_dataset)
num_eval_samples = len(eval_dataset)
num_samples = num_train_samples + num_eval_samples
train_dataset = train_dataset.map(
prepare_train_features,
batched=True,
num_proc=4,
remove_columns=column_names,
)
eval_dataset = eval_dataset.map(
prepare_train_features,
batched=True,
num_proc=4,
remove_columns=column_names,
)
data_collator = DataCollatorWithPadding(tokenizer)
train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
num_train_batch = len(train_dataloader)
num_eval_batch = len(eval_dataloader)
model = AutoModelForSequenceClassification.from_pretrained(args.model_name, num_labels=2).to(args.device)
model.config.attention_probs_dropout_prob = args.dropout
model.config.hidden_dropout_prob = args.dropout
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
#optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
optimizer = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
torch_softmax = nn.Softmax(dim=1)
best_auroc = float('-inf')
wandb.watch(model)
for epoch in range(args.num_train_epochs):
epoch_start_time = time.time()
model.train()
train_loss = 0
y_preds = np.zeros((num_samples+1, 2))
y_trues = np.zeros(num_samples+1)
for step, batch in enumerate(train_dataloader):
example_ids = batch.pop('example_id').tolist()
# print(example_ids)
# exit()
for i in batch.keys():
batch[i] = batch[i].to(args.device)
outputs = model(**batch)
y_pred = torch_softmax(outputs.logits).cpu().data.numpy()
y = batch.labels.cpu().data.numpy()
for i, example_id in enumerate(example_ids):
y_preds[example_id][0] += np.log(y_pred[i][0])
y_preds[example_id][1] += np.log(y_pred[i][1])
y_trues[example_id] = y[i]
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
loss.backward()
train_loss += loss.item()
if (step+1) % args.gradient_accumulation_steps == 0 or (step+1) == len(train_dataloader):
optimizer.step()
optimizer.zero_grad()
print(f'[{step:3d}/{num_train_batch}]',end='\r')
train_acc = (np.sum(np.argmax(y_preds, axis=1) == y_trues) - num_eval_samples - 1)/num_train_samples
train_loss /= num_train_batch
model.eval()
eval_loss = 0
y_preds = np.zeros((num_samples+1,2))
y_trues = np.zeros(num_samples+1)
y_preds = []
y_trues = []
last_example_id = -1
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
example_ids = batch.pop('example_id')
for i in batch.keys():
batch[i] = batch[i].to(args.device)
outputs = model(**batch)
y_pred = torch_softmax(outputs.logits).cpu().data.numpy()
y = batch.labels.cpu().data.numpy()
for i, example_id in enumerate(example_ids):
# y_preds[example_id][0] += np.log(y_pred[i][0])
# y_preds[example_id][1] += np.log(y_pred[i][1])
if example_id != last_example_id:
y_trues.append(y[i])
y_preds.append([0, 0])
# zero_score = 1 if y_pred[i][0] > y_pred[i][1] else 0
# one_score = 1 - zero_score
y_preds[-1][0] += np.log(y_pred[i][0])
y_preds[-1][1] += np.log(y_pred[i][1])
last_example_id = example_id
loss = outputs.loss
eval_loss += loss.item()
# sum logP
# eval_acc = (np.sum(np.argmax(y_preds, axis=1) == y_trues) - num_train_samples - 1)/num_eval_samples
try:
assert len(y_trues) == len(y_preds)
except:
ipdb.set_trace()
eval_acc = np.sum(np.argmax(np.array(y_preds), axis=1) == np.array(y_trues)) / len(y_trues)
eval_auroc = roc_auc_score(np.array(y_trues), softmax(np.array(y_preds), axis=1)[:, 1])
eval_loss /= num_eval_batch
print(f'epoch [{epoch+1:02d}/{args.num_train_epochs:02d}]: {time.time()-epoch_start_time:.2f} sec(s)')
print(f'train loss: {train_loss:.4f}, train acc: {train_acc:.4f}')
print(f' eval loss: {eval_loss:.4f}, eval acc: {eval_acc:.4f}, eval auroc: {eval_auroc:.4f}')
wandb.log({
"train loss": train_loss,
"train acc": train_acc,
"eval loss": eval_loss,
"eval acc": eval_acc,
"eval auroc": eval_auroc
})
if eval_auroc > best_auroc:
best_auroc = eval_auroc
model.save_pretrained(args.save_model_dir)
print(f"Saving model at eval auroc {eval_auroc:.4f}")
print('Done')
return
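# Editorial sketch (not called by the script): pools per-chunk softmax outputs into one
# article-level prediction by summing log-probabilities, mirroring the accumulation done
# inside the train/eval loops above.
def _pool_chunk_probs(chunk_probs):
    """chunk_probs: iterable of (p0, p1) class-probability pairs, one per chunk."""
    log_scores = np.log(np.asarray(chunk_probs)).sum(axis=0)
    return int(np.argmax(log_scores))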
if __name__ == "__main__":
args = parse_args()
if args.seed is not None:
set_seed(args.seed)
set_seeds(args.seed)
main(args)
|
[
"wandb.log",
"numpy.random.seed",
"argparse.ArgumentParser",
"ipdb.set_trace",
"wandb.watch",
"numpy.argmax",
"torch.nn.Softmax",
"transformers.DataCollatorWithPadding",
"torch.no_grad",
"datasets.load_dataset",
"torch.utils.data.DataLoader",
"transformers.BertTokenizerFast.from_pretrained",
"random.seed",
"torch.manual_seed",
"torch.cuda.manual_seed",
"transformers.set_seed",
"torch.cuda.is_available",
"transformers.AutoModelForSequenceClassification.from_pretrained",
"os.makedirs",
"numpy.log",
"numpy.zeros",
"time.time",
"torch.cuda.manual_seed_all",
"wandb.init",
"numpy.array"
] |
[((609, 632), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (626, 632), False, 'import torch\n'), ((640, 665), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (663, 665), False, 'import torch\n'), ((749, 769), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (763, 769), True, 'import numpy as np\n'), ((774, 791), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (785, 791), False, 'import random\n'), ((915, 1013), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Finetune a transformers model on a Classification task"""'}), "(description=\n 'Finetune a transformers model on a Classification task')\n", (938, 1013), False, 'import argparse\n'), ((4456, 4506), 'wandb.init', 'wandb.init', ([], {'project': '"""ntu_nlp_risk"""', 'entity': '"""pwlin"""'}), "(project='ntu_nlp_risk', entity='pwlin')\n", (4466, 4506), False, 'import wandb\n'), ((4799, 4853), 'transformers.BertTokenizerFast.from_pretrained', 'BertTokenizerFast.from_pretrained', (['args.tokenizer_name'], {}), '(args.tokenizer_name)\n', (4832, 4853), False, 'from transformers import AutoModel, BertTokenizerFast, AutoModelForSequenceClassification, BertConfig, DataCollatorWithPadding\n'), ((6526, 6582), 'datasets.load_dataset', 'datasets.load_dataset', (['"""csv"""'], {'data_files': 'args.train_file'}), "('csv', data_files=args.train_file)\n", (6547, 6582), False, 'import datasets\n'), ((7650, 7684), 'transformers.DataCollatorWithPadding', 'DataCollatorWithPadding', (['tokenizer'], {}), '(tokenizer)\n', (7673, 7684), False, 'from transformers import AutoModel, BertTokenizerFast, AutoModelForSequenceClassification, BertConfig, DataCollatorWithPadding\n'), ((7708, 7822), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'shuffle': '(True)', 'collate_fn': 'data_collator', 'batch_size': 'args.per_device_train_batch_size'}), '(train_dataset, shuffle=True, collate_fn=data_collator,\n batch_size=args.per_device_train_batch_size)\n', (7718, 7822), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((7841, 7940), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_dataset'], {'collate_fn': 'data_collator', 'batch_size': 'args.per_device_eval_batch_size'}), '(eval_dataset, collate_fn=data_collator, batch_size=args.\n per_device_eval_batch_size)\n', (7851, 7940), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((8849, 8866), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (8859, 8866), True, 'import torch.nn as nn\n'), ((8902, 8920), 'wandb.watch', 'wandb.watch', (['model'], {}), '(model)\n', (8913, 8920), False, 'import wandb\n'), ((675, 703), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (697, 703), False, 'import torch\n'), ((712, 744), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (738, 744), False, 'import torch\n'), ((4370, 4417), 'os.makedirs', 'os.makedirs', (['args.save_model_dir'], {'exist_ok': '(True)'}), '(args.save_model_dir, exist_ok=True)\n', (4381, 4417), False, 'import os\n'), ((8995, 9006), 'time.time', 'time.time', ([], {}), '()\n', (9004, 9006), False, 'import time\n'), ((9070, 9100), 'numpy.zeros', 'np.zeros', (['(num_samples + 1, 2)'], {}), '((num_samples + 1, 2))\n', (9078, 9100), True, 'import numpy as np\n'), ((9117, 9142), 'numpy.zeros', 'np.zeros', (['(num_samples + 1)'], {}), '(num_samples + 1)\n', (9125, 9142), True, 'import numpy as np\n'), ((10400, 10430), 
'numpy.zeros', 'np.zeros', (['(num_samples + 1, 2)'], {}), '((num_samples + 1, 2))\n', (10408, 10430), True, 'import numpy as np\n'), ((10446, 10471), 'numpy.zeros', 'np.zeros', (['(num_samples + 1)'], {}), '(num_samples + 1)\n', (10454, 10471), True, 'import numpy as np\n'), ((12425, 12562), 'wandb.log', 'wandb.log', (["{'train loss': train_loss, 'train acc': train_acc, 'eval loss': eval_loss,\n 'eval acc': eval_acc, 'eval auroc': eval_auroc}"], {}), "({'train loss': train_loss, 'train acc': train_acc, 'eval loss':\n eval_loss, 'eval acc': eval_acc, 'eval auroc': eval_auroc})\n", (12434, 12562), False, 'import wandb\n'), ((12951, 12970), 'transformers.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (12959, 12970), False, 'from transformers import AdamW, set_seed\n'), ((8034, 8119), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['args.model_name'], {'num_labels': '(2)'}), '(args.model_name,\n num_labels=2)\n', (8084, 8119), False, 'from transformers import AutoModel, BertTokenizerFast, AutoModelForSequenceClassification, BertConfig, DataCollatorWithPadding\n'), ((12030, 12047), 'numpy.array', 'np.array', (['y_trues'], {}), '(y_trues)\n', (12038, 12047), True, 'import numpy as np\n'), ((9664, 9684), 'numpy.log', 'np.log', (['y_pred[i][0]'], {}), '(y_pred[i][0])\n', (9670, 9684), True, 'import numpy as np\n'), ((9727, 9747), 'numpy.log', 'np.log', (['y_pred[i][1]'], {}), '(y_pred[i][1])\n', (9733, 9747), True, 'import numpy as np\n'), ((10613, 10628), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10626, 10628), False, 'import torch\n'), ((11878, 11894), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (11892, 11894), False, 'import ipdb\n'), ((11453, 11473), 'numpy.log', 'np.log', (['y_pred[i][0]'], {}), '(y_pred[i][0])\n', (11459, 11473), True, 'import numpy as np\n'), ((11512, 11532), 'numpy.log', 'np.log', (['y_pred[i][1]'], {}), '(y_pred[i][1])\n', (11518, 11532), True, 'import numpy as np\n'), ((11961, 11978), 'numpy.array', 'np.array', (['y_trues'], {}), '(y_trues)\n', (11969, 11978), True, 'import numpy as np\n'), ((12057, 12074), 'numpy.array', 'np.array', (['y_preds'], {}), '(y_preds)\n', (12065, 12074), True, 'import numpy as np\n'), ((11931, 11948), 'numpy.array', 'np.array', (['y_preds'], {}), '(y_preds)\n', (11939, 11948), True, 'import numpy as np\n'), ((12196, 12207), 'time.time', 'time.time', ([], {}), '()\n', (12205, 12207), False, 'import time\n'), ((10210, 10236), 'numpy.argmax', 'np.argmax', (['y_preds'], {'axis': '(1)'}), '(y_preds, axis=1)\n', (10219, 10236), True, 'import numpy as np\n')]
|
from enum import Enum
import operator
import itertools
import collections
import sys
import numpy as np
import re
from .alignment import AlnStats
class Operation(Enum):
AlnMatch = 0
Insertion = 1
Deletion = 2
Skip = 3
Soft = 4
Hard = 5
Padding = 6
SeqMatch = 7
SeqMismatch = 8
def consumes_read(self):
return _consumes_read[self.value]
def consumes_ref(self):
return _consumes_ref[self.value]
def consumes_both(self):
return _consumes_both[self.value]
def __str__(self):
return _letters[self.value]
_letters = 'MIDNSHP=X'
_operations = dict((letter, Operation(num)) for num, letter in enumerate(_letters))
_consumes_read = [True, True, False, False, True, False, False, True, True]
_consumes_ref = [True, False, True, True, False, False, False, True, True]
_consumes_both = list(itertools.starmap(operator.and_, zip(_consumes_read, _consumes_ref)))
AlignedRegion = collections.namedtuple('AlignedRegion', 'start1 end1 start2 end2')
class Cigar:
def __init__(self, line):
self._tuples = []
current = 0
for char in line:
if char.isdigit():
current = current * 10 + int(char)
else:
self._tuples.append((current, _operations[char]))
current = 0
self._tuples = tuple(self._tuples)
self.__init_lengths()
self._index = None
@classmethod
def from_tuples(cls, tuples, lengths_from=None):
self = cls.__new__(cls)
self._tuples = tuple(tuples)
if lengths_from:
self._aligned_len = lengths_from._aligned_len
self._ref_len = lengths_from._ref_len
self._read_len = lengths_from._read_len
else:
self.__init_lengths()
self._index = None
return self
@classmethod
def from_pysam_tuples(cls, tuples):
self = cls.__new__(cls)
self._tuples = tuple((length, Operation(op)) for op, length in tuples)
self.__init_lengths()
self._index = None
return self
def init_index(self):
self._index = CigarIndex(self)
def init_proxy_index(self):
"""
Initializes fake proxy index, that does not do anything, but allows to run functions such as aligned_region().
"""
self._index = ProxyIndex()
def __init_lengths(self):
self._read_len = 0
self._ref_len = 0
self._aligned_len = 0
for len, op in self._tuples:
if op.consumes_read():
self._read_len += len
if op.consumes_ref():
self._ref_len += len
if op.consumes_both():
self._aligned_len += len
def reversed(self):
"""
Returns reversed cigar.
"""
return Cigar.from_tuples(self._tuples[::-1], self)
def __iter__(self):
return iter(self._tuples)
def iter_from(self, start_ix):
return itertools.islice(self._tuples, start_ix, len(self._tuples))
@property
def read_len(self):
return self._read_len
@property
def ref_len(self):
return self._ref_len
@property
def aligned_len(self):
return self._aligned_len
@property
def index(self):
return self._index
"""
Returns i-th pair (length: int, operation: Operation).
"""
def __getitem__(self, i):
return self._tuples[i]
"""
Finds aligned region in two sequences.
Parameters:
- ref_start: 0-based start of the region in the first sequence (should be >= 0),
- ref_end: 0-based exclusive end of the region in the first sequence (should be <= ref_len),
- alt_size_diff: difference between reference and alternative variants. Positive if alternative is longer.
Returns pair `read_start, read_end`, output interval may be empty.
TODO:
Process case when the variant overlaps homopolymer: for example
chr1:160696714-160696714 catatGtcatg
chr22:18944546-18944546 catatGttcat
can be stored as GT-, GTT or as G-T, GTT. We need to account for both cases.
"""
def aligned_region(self, ref_start, ref_end, alt_size_diff=0):
cigar_start, ref_pos, read_pos = self._index.find_by_ref(ref_start)
read_start = None
for cigar_ix in range(cigar_start, len(self._tuples)):
length, op = self._tuples[cigar_ix]
if op.consumes_ref():
if read_start is None and ref_pos <= ref_start < ref_pos + length:
# Found start in the first sequence.
if op.consumes_read():
read_start = read_pos + ref_start - ref_pos
else:
# this position is deleted in the second sequence.
read_start = read_pos
if ref_pos < ref_end <= ref_pos + length:
# Found end in the first sequence.
if op.consumes_read():
read_end = read_pos + ref_end - ref_pos
else:
read_end = read_pos
if alt_size_diff > 0 and ref_end - ref_pos == length and cigar_ix + 1 < len(self._tuples):
# Include case when the alternative variant is in the insertion in the second sequence.
next_len, next_op = self._tuples[cigar_ix + 1]
if not next_op.consumes_ref():
read_end += min(next_len, alt_size_diff)
assert read_start is not None
return (read_start, read_end)
ref_pos += length
if op.consumes_read():
read_pos += length
assert False
"""
Should be extended CIGAR: with X/= instead of M.
Returns iterator, each element: `AlignedRegion`.
"""
def find_differences(self):
pos1 = 0
pos2 = 0
curr = None
for length, op in self._tuples:
if op.consumes_both():
if op == Operation.SeqMismatch:
if curr:
yield curr
curr = AlignedRegion(start1=pos1, end1=pos1 + length, start2=pos2, end2=pos2 + length)
elif op == Operation.AlnMatch:
raise ValueError('Cannot call find_differences() on non-extended CIGAR')
pos1 += length
pos2 += length
# Both positions should be non-zero
elif op.consumes_ref():
assert pos1 and pos2
if curr:
if curr.start1 < pos1 - 1:
yield curr
yield AlignedRegion(start1=pos1 - 1, end1=pos1 + length, start2=pos2 - 1, end2=pos2)
curr = None
pos1 += length
elif op.consumes_read():
assert pos1 and pos2
if curr:
if curr.start1 < pos1 - 1:
yield curr
yield AlignedRegion(start1=pos1 - 1, end1=pos1, start2=pos2 - 1, end2=pos2 + length)
curr = None
pos2 += length
if curr:
yield curr
def remove_clipping(self):
"""
Returns tuple `(left_clipping, right_clipping, new_cigar)`.
"""
left = 0
right = 0
new_tuples = []
for length, op in self._tuples:
if op == Operation.Soft or op == Operation.Hard:
if new_tuples:
right += length
else:
left += length
else:
new_tuples.append((length, op))
return left, right, Cigar.from_tuples(new_tuples)
def get_clipping(self):
left_len, left_op = self._tuples[0]
right_len, right_op = self._tuples[-1]
# Do not expect to see hard clipping.
assert left_op.consumes_read() and right_op.consumes_read()
left = 0 if left_op.consumes_ref() else left_len
right = 0 if right_op.consumes_ref() else right_len
return left, right
def __str__(self):
return ''.join('%d%s' % t for t in self._tuples)
def to_str(self, delim=' '):
return delim.join('%d%s' % t for t in self._tuples)
def __len__(self):
return len(self._tuples)
def to_full(self, ref_seq, read_seq):
new_tuples = []
pos1 = 0
pos2 = 0
for length, op in self._tuples:
if op.consumes_both():
curr_len = 0
curr_op = Operation.SeqMatch
for i in range(length):
new_op = Operation.SeqMatch if read_seq[pos1 + i] == ref_seq[pos2 + i] else Operation.SeqMismatch
if curr_op == new_op:
curr_len += 1
else:
if curr_len:
new_tuples.append((curr_len, curr_op))
curr_op = new_op
curr_len = 1
new_tuples.append((curr_len, curr_op))
pos1 += length
pos2 += length
elif op.consumes_read():
new_tuples.append((length, op))
pos1 += length
elif op.consumes_ref():
new_tuples.append((length, op))
pos2 += length
return Cigar.from_tuples(new_tuples, lengths_from=self)
def to_extended_with_md(self, read_seq, md_tag):
"""
Returns pair (extended CIGAR, reference_sequence).
"""
new_tuples = []
read_pos = 0
ref_pos = 0
ref_seq = ''
# Use [A-Z^] instead of [0-9] because tag starts and ends with a number.
# Parentheses needed to capture [A-Z^] as well as numbers.
md_tag = re.split(r'([A-Z^]+)', md_tag)
for i in range(0, len(md_tag), 2):
md_tag[i] = int(md_tag[i])
# Skip first, if it is == 0.
md_ix = int(md_tag[0] == 0)
md_shift = 0
for length, op in self._tuples:
if op.consumes_both():
while length > 0:
# Sequence
if md_ix % 2:
subseq = md_tag[md_ix]
entry_len = len(subseq) - md_shift
pos_inc = min(length, entry_len)
new_op = Operation.SeqMismatch
ref_seq += subseq[md_shift : md_shift + pos_inc]
if entry_len > length:
md_shift += length
else:
md_ix += 1 + int(md_tag[md_ix + 1] == 0)
md_shift = 0
# Number
else:
new_op = Operation.SeqMatch
entry_len = md_tag[md_ix] - md_shift
pos_inc = min(length, entry_len)
ref_seq += read_seq[read_pos : read_pos + pos_inc]
if entry_len > length:
md_shift += length
else:
md_ix += 1
md_shift = 0
ref_pos += pos_inc
read_pos += pos_inc
if new_tuples and new_tuples[-1][1] == new_op:
new_tuples[-1] = (pos_inc + new_tuples[-1][0], new_op)
else:
new_tuples.append((pos_inc, new_op))
length -= pos_inc
elif op.consumes_read():
read_pos += length
new_tuples.append((length, op))
elif op.consumes_ref():
assert md_tag[md_ix][0] == '^'
ref_seq += md_tag[md_ix][1:]
# Add 2 if next entry is 0. We should not get out of the list because it should end with a number.
md_ix += 1 + int(md_tag[md_ix + 1] == 0)
ref_pos += length
new_tuples.append((length, op))
assert md_ix == len(md_tag) and read_pos == self._read_len \
and ref_pos == self._ref_len and len(ref_seq) == ref_pos
return Cigar.from_tuples(new_tuples, lengths_from=self), ref_seq
def calculate_stats(self):
"""
Returns `AlnStats`. The Cigar should be full.
"""
stats = AlnStats()
for length, op in self._tuples:
if op.consumes_both():
if op == Operation.SeqMismatch:
stats.add_mismatches(length)
else:
assert op == Operation.SeqMatch
elif op.consumes_read() and op != Operation.Soft:
stats.add_insertions(length)
elif op.consumes_ref():
stats.add_deletions(length)
stats.update_from_cigar(self)
return stats
def calculate_stats_with_seqs(self, read_seq, ref_seq):
"""
Returns `AlnStats`. The Cigar can contain M operations as well as X and =.
"""
stats = AlnStats()
pos1 = 0
pos2 = 0
for length, op in self._tuples:
if op.consumes_both():
stats.add_mismatches(sum(read_seq[pos1 + i] != ref_seq[pos2 + i] for i in range(length)))
pos1 += length
pos2 += length
elif op.consumes_read():
if op != Operation.Soft:
stats.add_insertions(length)
pos1 += length
elif op.consumes_ref():
stats.add_deletions(length)
pos2 += length
stats.update_from_cigar(self)
return stats
def no_gaps(self):
return all(op.consumes_both() for _, op in self._tuples[1:-1])
def to_short(self):
new_tuples = []
curr_match = 0
for length, op in self._tuples:
if op.consumes_both():
curr_match += length
else:
if curr_match:
new_tuples.append((curr_match, Operation.AlnMatch))
curr_match = 0
new_tuples.append((length, op))
if curr_match:
new_tuples.append((curr_match, Operation.AlnMatch))
return Cigar.from_tuples(new_tuples, lengths_from=self)
def to_pysam_tuples(self):
return [(op.value, len) for len, op in self._tuples]
def aligned_seqs(self, read_seq, ref_seq):
read_res = ''
ref_res = ''
pos1 = 0
pos2 = 0
for length, op in self._tuples:
if op.consumes_both():
subs1 = read_seq[pos1 : pos1 + length]
subs2 = ref_seq[pos2 : pos2 + length]
if op == Operation.SeqMismatch:
read_res += subs1.lower()
ref_res += subs2.lower()
if subs1 == subs2:
sys.stderr.write('Error: operation %dX at positions %d,%d corresponds to sequence match %s\n'
% (length, pos1, pos2, subs1))
else:
read_res += subs1
ref_res += subs2
if op == Operation.SeqMatch and subs1 != subs2:
sys.stderr.write('Error: operation %d= at positions %d,%d corresponds to sequence '
'mismatch %s != %s\n' % (length, pos1, pos2, subs1, subs2))
pos1 += length
pos2 += length
elif op.consumes_read():
ref_res += '-' * length
read_res += read_seq[pos1 : pos1 + length]
pos1 += length
elif op.consumes_ref():
ref_res += ref_seq[pos2 : pos2 + length]
read_res += '-' * length
pos2 += length
if pos1 != len(read_seq) or pos2 != len(ref_seq):
sys.stderr.write('Error: CIGAR length does not match sequences length: '
'read_len = %d, read_seq = %d, ref_len = %d, ref_seq = %d\n'
% (pos1, len(read_seq), pos2, len(ref_seq)))
return read_res, ref_res
def aligned_pairs(self, ref_start=0, read_start=0):
"""
Returns iterator over pairs of indices, aligned to each other.
"""
assert not (ref_start and read_start)
if ref_start:
cigar_start, ref_pos, read_pos = self._index.find_by_ref(ref_start)
elif read_start:
cigar_start, ref_pos, read_pos = self._index.find_by_read(read_start)
else:
cigar_start = ref_pos = read_pos = 0
for length, op in itertools.islice(self._tuples, cigar_start, len(self._tuples)):
if op.consumes_both():
for j in range(max(ref_start - ref_pos, 0), length):
yield (read_pos + j, ref_pos + j)
read_pos += length
ref_pos += length
elif op.consumes_read():
read_pos += length
elif op.consumes_ref():
ref_pos += length
def read_region(self, ref_start, ref_end):
cigar_start, ref_pos, read_pos = self._index.find_by_ref(ref_start)
read_start = None
for length, op in itertools.islice(self._tuples, cigar_start, len(self._tuples)):
if ref_pos == ref_end:
assert read_start is not None
return read_start, read_pos
cons_read = op.consumes_read()
if op.consumes_ref():
if ref_pos <= ref_start < ref_pos + length:
assert read_start is None
read_start = read_pos + (ref_start - ref_pos if cons_read else 1)
if ref_pos <= ref_end < ref_pos + length:
assert read_start is not None
read_end = read_pos + (ref_end - ref_pos if cons_read else 1)
return read_start, read_end
ref_pos += length
if cons_read:
read_pos += length
assert ref_end == ref_pos
assert read_start is not None
return read_start, read_pos
def subcigar(self, ref_start, ref_end):
"""
Returns tuple (ref_start, ref_end, read_start, read_end, subcigar).
        Note that if the first or last operation within the region of interest is not M,
returned ref_start or ref_end may be different from input ref_start or ref_end.
If the resulting alignment will be empty, returns None.
"""
assert ref_start >= 0 and ref_end <= self._ref_len
cigar_start, ref_pos, read_pos = self._index.find_by_ref(ref_start)
read_start = None
new_tuples = []
for length, op in itertools.islice(self._tuples, cigar_start, len(self._tuples)):
if ref_pos >= ref_end:
assert ref_pos == ref_end
break
cons_ref = op.consumes_ref()
cons_read = op.consumes_read()
if cons_ref and cons_read:
if ref_pos <= ref_start < ref_pos + length:
assert read_start is None
read_start = read_pos + ref_start - ref_pos
intersection = min(ref_end, ref_pos + length) - max(ref_start, ref_pos)
if intersection > 0:
new_tuples.append((intersection, op))
if ref_pos < ref_end <= ref_pos + length:
read_pos += ref_end - ref_pos
break
ref_pos += length
read_pos += length
elif cons_ref:
if ref_pos <= ref_start < ref_pos + length:
# Alignment starts with Deletion.
ref_start = ref_pos + length
if ref_start >= ref_end:
return None
elif ref_pos < ref_end <= ref_pos + length:
# Alignment ends with Deletion.
ref_end = ref_pos
break
elif ref_pos >= ref_start:
new_tuples.append((length, op))
ref_pos += length
elif cons_read:
if read_start is not None:
new_tuples.append((length, op))
read_pos += length
read_end = read_pos
new_cigar = Cigar.from_tuples(new_tuples)
assert new_cigar.ref_len == ref_end - ref_start
assert new_cigar.read_len == read_end - read_start
return ref_start, ref_end, read_start, read_end, new_cigar
def revert(self, strand):
new_tuples = []
for length, op in self._tuples:
if op.consumes_both():
new_tuples.append((length, op))
elif op == Operation.Insertion:
new_tuples.append((length, Operation.Deletion))
elif op == Operation.Deletion:
new_tuples.append((length, Operation.Insertion))
else:
raise RuntimeError('Encountered operation %s while trying to revert cigar "%s"' % (op, self.to_str()))
res = Cigar.from_tuples(new_tuples if strand else reversed(new_tuples), self)
res._read_len = self._ref_len
res._ref_len = self._read_len
return res
def __eq__(self, other):
return self._tuples == other._tuples
def has_true_clipping(self, base_qualities, max_len=2, low_bq=10):
"""
Suppose a read has C clipped bases and there are B bases with quality <= low_bq (on the same side of the read).
Returns True if C > B + max_len (for left OR for right side).
"""
left, right = self.get_clipping()
if base_qualities is None:
return left > max_len or right > max_len
if left:
low_bq_left = sum(bq <= low_bq for bq in base_qualities[:left])
if left > low_bq_left + max_len:
return True
if right:
low_bq_right = sum(bq <= low_bq for bq in base_qualities[-right:])
if right > low_bq_right + max_len:
return True
return False
class CigarIndex:
def __init__(self, cigar, step_size=100):
"""
Saves indices of CIGAR tuples to allow faster search within duplication.
"""
cigar_indices = []
ref_positions = []
read_positions = []
prev_ref_pos = ref_pos = 0
prev_read_pos = read_pos = 0
for cigar_ix, (length, op) in enumerate(cigar):
if ref_pos > prev_ref_pos + step_size or read_pos > prev_read_pos + step_size:
cigar_indices.append(cigar_ix)
ref_positions.append(ref_pos)
read_positions.append(read_pos)
prev_ref_pos = ref_pos
prev_read_pos = read_pos
if op.consumes_ref():
ref_pos += length
if op.consumes_read():
read_pos += length
self._cigar_indices = tuple(cigar_indices)
self._ref_positions = tuple(ref_positions)
self._read_positions = tuple(read_positions)
def find_by_ref(self, ref_pos):
"""
Returns tuple (cigar_ix, ref_pos', read_pos') with the biggest cigar_ix such that ref_pos' <= ref_pos.
"""
i = np.searchsorted(self._ref_positions, ref_pos, side='right') - 1
if i >= 0:
return (self._cigar_indices[i], self._ref_positions[i], self._read_positions[i])
return (0, 0, 0)
def find_by_read(self, read_pos):
"""
Returns tuple (cigar_ix, ref_pos', read_pos') with the biggest cigar_ix such that read_pos' <= read_pos.
"""
i = np.searchsorted(self._read_positions, read_pos, side='right') - 1
if i >= 0:
return (self._cigar_indices[i], self._ref_positions[i], self._read_positions[i])
return (0, 0, 0)
class ProxyIndex:
def __init__(self):
pass
def find_by_ref(self, _ref_pos):
return (0, 0, 0)
def find_by_read(self, read_pos):
return (0, 0, 0)
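# Editorial usage sketch (not part of the library): parse a toy CIGAR string and run a
# couple of coordinate queries. The sequences below are made up for illustration only.
def _demo_cigar():
    cigar = Cigar('3M1I4M')
    assert cigar.read_len == 8 and cigar.ref_len == 7 and cigar.aligned_len == 7
    cigar.init_index()
    # Read interval aligned to the full reference interval [0, 7).
    assert cigar.read_region(0, 7) == (0, 8)
    # Expand M into =/X using the actual sequences (the read carries one inserted base).
    assert str(cigar.to_full(ref_seq='ACGTAGA', read_seq='ACGXTAGA')) == '3=1I4='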
|
[
"numpy.searchsorted",
"sys.stderr.write",
"re.split",
"collections.namedtuple"
] |
[((960, 1026), 'collections.namedtuple', 'collections.namedtuple', (['"""AlignedRegion"""', '"""start1 end1 start2 end2"""'], {}), "('AlignedRegion', 'start1 end1 start2 end2')\n", (982, 1026), False, 'import collections\n'), ((9949, 9978), 're.split', 're.split', (['"""([A-Z^]+)"""', 'md_tag'], {}), "('([A-Z^]+)', md_tag)\n", (9957, 9978), False, 'import re\n'), ((23509, 23568), 'numpy.searchsorted', 'np.searchsorted', (['self._ref_positions', 'ref_pos'], {'side': '"""right"""'}), "(self._ref_positions, ref_pos, side='right')\n", (23524, 23568), True, 'import numpy as np\n'), ((23898, 23959), 'numpy.searchsorted', 'np.searchsorted', (['self._read_positions', 'read_pos'], {'side': '"""right"""'}), "(self._read_positions, read_pos, side='right')\n", (23913, 23959), True, 'import numpy as np\n'), ((15099, 15233), 'sys.stderr.write', 'sys.stderr.write', (["('Error: operation %dX at positions %d,%d corresponds to sequence match %s\\n' %\n (length, pos1, pos2, subs1))"], {}), "(\n 'Error: operation %dX at positions %d,%d corresponds to sequence match %s\\n'\n % (length, pos1, pos2, subs1))\n", (15115, 15233), False, 'import sys\n'), ((15441, 15594), 'sys.stderr.write', 'sys.stderr.write', (["('Error: operation %d= at positions %d,%d corresponds to sequence mismatch %s != %s\\n'\n % (length, pos1, pos2, subs1, subs2))"], {}), '(\n """Error: operation %d= at positions %d,%d corresponds to sequence mismatch %s != %s\n"""\n % (length, pos1, pos2, subs1, subs2))\n', (15457, 15594), False, 'import sys\n')]
|
import os
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.externals import joblib
from skimage.io import imread
from skimage.filters import threshold_otsu
letters = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z'
]
def read_training_data(training_directory):
image_data = []
target_data = []
for each_letter in letters:
for each in range(10):
image_path = os.path.join(training_directory, each_letter, each_letter + '_' + str(each) + '.jpg')
img_details = imread(image_path, as_grey=True)
# converts each character image to binary image
binary_image = img_details < threshold_otsu(img_details)
flat_bin_image = binary_image.reshape(-1)
image_data.append(flat_bin_image)
target_data.append(each_letter)
return (np.array(image_data), np.array(target_data))
def cross_validation(model, num_of_fold, train_data, train_label):
accuracy_result = cross_val_score(model, train_data, train_label,
cv=num_of_fold)
print("Cross Validation Result for ", str(num_of_fold), " -fold")
print(accuracy_result * 100)
# current_dir = os.path.dirname(os.path.realpath(__file__))
#
# training_dataset_dir = os.path.join(current_dir, 'train')
print('reading data')
training_dataset_dir = os.path.dirname(os.path.realpath(__file__))
training_dataset_dir =training_dataset_dir+"/train20X20"
print(training_dataset_dir)
image_data, target_data = read_training_data(training_dataset_dir)
print('reading data completed')
svc_model = SVC(kernel='linear', probability=True)
cross_validation(svc_model, 4, image_data, target_data)
print('training model')
svc_model.fit(image_data, target_data)
import pickle
print("model trained.saving model..")
filename = os.path.dirname(os.path.realpath(__file__))+"/model.sav"
pickle.dump(svc_model, open(filename, 'wb'))
print("model saved")
|
[
"skimage.filters.threshold_otsu",
"sklearn.model_selection.cross_val_score",
"os.path.realpath",
"numpy.array",
"sklearn.svm.SVC",
"skimage.io.imread"
] |
[((1807, 1845), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""', 'probability': '(True)'}), "(kernel='linear', probability=True)\n", (1810, 1845), False, 'from sklearn.svm import SVC\n'), ((1191, 1254), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'train_data', 'train_label'], {'cv': 'num_of_fold'}), '(model, train_data, train_label, cv=num_of_fold)\n', (1206, 1254), False, 'from sklearn.model_selection import cross_val_score\n'), ((1582, 1608), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1598, 1608), False, 'import os\n'), ((1052, 1072), 'numpy.array', 'np.array', (['image_data'], {}), '(image_data)\n', (1060, 1072), True, 'import numpy as np\n'), ((1074, 1095), 'numpy.array', 'np.array', (['target_data'], {}), '(target_data)\n', (1082, 1095), True, 'import numpy as np\n'), ((2049, 2075), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2065, 2075), False, 'import os\n'), ((733, 765), 'skimage.io.imread', 'imread', (['image_path'], {'as_grey': '(True)'}), '(image_path, as_grey=True)\n', (739, 765), False, 'from skimage.io import imread\n'), ((867, 894), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['img_details'], {}), '(img_details)\n', (881, 894), False, 'from skimage.filters import threshold_otsu\n')]
|
import numpy as np
import itertools
import gpuscheduler
import argparse
import os
import uuid
import hashlib
import glob
import math
from itertools import product
from torch.optim.lr_scheduler import OneCycleLR
from os.path import join
parser = argparse.ArgumentParser(description='Compute script.')
parser.add_argument('--dry', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a configuration.')
parser.add_argument('--baseline', action='store_true', help='Run baseline transformer')
args = parser.parse_args()
gpus = 1
cmd = 'python the_pile/pile.py --force-write '
#cmd = 'MKL_THREADING_LAYER=GNU fairseq-preprocess --task language_modeling --bpe hf_byte_bpe --nwordssrc 50176 --workers 20 --only-source --srcdict /private/home/timdettmers/data/The.Pile.hf.byte.bpe.50k/dict.txt'
cmd = 'MKL_THREADING_LAYER=GNU fairseq-preprocess --task language_modeling --bpe gpt2 --nwordssrc 50176 --workers 20 --only-source --srcdict /private/home/timdettmers/data/The.Pile.gpt2.byte.bpe.50k/dict.txt'
args2 = {}
name = 'preprocess_sub_datasets'
constraint = ''
logfolder = 'pile/{0}'.format(name)
cores_per_job = 10
mem = 64
num_seeds = 1
seed_offset = 0
time_hours = 24
time_minutes = 0
#account = 'cse'
#account = 'stf'
#account = 'ark'
#partition = 'scavenge'
#partition = 'scavenge,learnfair'
partition = 'learnfair'
#partition = 'uninterrupted'
#partition = 'dev'
change_dir = 'pile_private/'
repo = 'pile_private'
exclude = ''
s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False)
#s = gpuscheduler.SshScheduler(verbose=args.verbose)
fp16 = True
args3 = {}
#args3[('destdir', 'trainpref', 'validpref', 'testpref')] = [('~/data/The.Pile.gpt2.byte.bpe.50k/{0}'.format(i), 'data/The_Pile/segments/train{0}.txt'.format(i), 'data/The_Pile/valid.txt', 'data/The_Pile/test.txt') for i in range(0, 30)]
#names = ['ArXiv', 'NIH.ExPorter', '"Wikipedia.(en)"', 'Bibliotik', 'OpenSubtitles', 'YoutubeSubtitles', 'BookCorpus', 'OpenWebText2', 'DM.Mathematics', 'PhilPapers', 'Enron.Emails', 'PubMed.Abstracts', 'EuroParl', 'PubMed.Central','FreeLaw', 'StackExchange','Github', '"Gutenberg.(PG-19)"', 'USPTO','HackerNews', 'Ubuntu.IRC']
names = ['Wikipedia.en', 'Gutenberg.PG19']
args3[('destdir', 'trainpref', 'validpref', 'testpref')] = []
for name in names:
args3[('destdir', 'trainpref', 'validpref', 'testpref')].append(('~/data/{0}'.format(name), 'data/{0}/train.txt'.format(name), 'data/{0}/valid.txt'.format(name), 'data/{0}/test.txt'.format(name)))
#args3[('destdir', 'trainpref')] = [('~/data/pile_hf_bpe/hf_bpe-{0}'.format(i), 'data/The_Pile/segments/train{0}.txt'.format(i)) for i in range(1)]
#args3[('destdir', 'validpref', 'testpref')] = [('~/data/pile_hf_bpe/hf_bpe-validtest', 'data/The_Pile/valid.txt', 'data/The_Pile/test.txt')]
#grgs3[('destdir', 'validpref', 'testpref')] = [('~/data/pile_gpt2_raw/hf_bpe-validtest', 'data/The_Pile/valid.txt', 'data/The_Pile/test.txt')]
args4 = []
args5 = {}
args6 = {}
rdm = np.random.RandomState(5345)
for key, value in args2.items():
cmd = cmd + ' --{0} {1}'.format(key, value)
args_prod = []
for key, values in args3.items():
if isinstance(key, tuple):
keyvalues = []
for tups in values:
arg = ''
for i, v in enumerate(tups):
if v is True: v = ''
if v is False: continue
if len(key[i]) == 0:
arg += '{0} '.format(v)
else:
arg += '--{0} {1} '.format(key[i], v)
keyvalues.append(arg)
elif isinstance(key, str):
keyvalues = []
for v in values:
if v is True: v = ''
if v is False:
keyvalues.append('')
else:
keyvalues.append(' --{0} {1}'.format(key, v))
args_prod.append(keyvalues)
if len(args_prod) >= 2:
args_prod = list(product(*args_prod))
else:
new_args = []
if len(args_prod) > 0:
for arg in args_prod[0]:
new_args.append([arg])
args_prod = new_args
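# Editorial illustration: with e.g. args3 = {'lr': [0.1, 0.2], 'seed': [1]} the block above
# yields args_prod = [(' --lr 0.1', ' --seed 1'), (' --lr 0.2', ' --seed 1')], i.e. one tuple
# of formatted argument strings per hyperparameter combination.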
jobs = []
cmds = ['mv ~/git/data/pile_bin/gpt2-{0}/train.txt ~/data/gpt2/train{0}.txt']
if len(args4) == 0: args4.append('')
for seed in range(num_seeds):
seed = seed + seed_offset
for arg4 in args4:
if len(args_prod) == 0: args_prod.append(('', ''))
for i, values in enumerate(args_prod):
job_cmd = cmd + arg4
for val in values:
job_cmd += ' {0}' .format(val)
if rdm.rand(1) <= args.p:
jobs.append(job_cmd)
s.add_job(logfolder, repo, change_dir, [job_cmd], time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus)
if args.dry:
for i, job in enumerate(jobs):
print(i, job)
print('')
print('Total jobs', len(jobs))
print('Time hours: {0}'.format(time_hours))
print('GPUs: {0}'.format(gpus))
print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder)))
print('Jobs will be run on: {0}'.format(partition))
print('Run in folder: {0}'.format(change_dir))
if not args.dry:
s.run_jobs()
|
[
"gpuscheduler.HyakScheduler",
"argparse.ArgumentParser",
"numpy.random.RandomState",
"itertools.product",
"os.path.join"
] |
[((247, 301), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute script."""'}), "(description='Compute script.')\n", (270, 301), False, 'import argparse\n'), ((1553, 1655), 'gpuscheduler.HyakScheduler', 'gpuscheduler.HyakScheduler', ([], {'verbose': 'args.verbose', 'account': '""""""', 'partition': 'partition', 'use_gres': '(False)'}), "(verbose=args.verbose, account='', partition=\n partition, use_gres=False)\n", (1579, 1655), False, 'import gpuscheduler\n'), ((3096, 3123), 'numpy.random.RandomState', 'np.random.RandomState', (['(5345)'], {}), '(5345)\n', (3117, 3123), True, 'import numpy as np\n'), ((4006, 4025), 'itertools.product', 'product', (['*args_prod'], {}), '(*args_prod)\n', (4013, 4025), False, 'from itertools import product\n'), ((5132, 5182), 'os.path.join', 'join', (['"""/private/home/timdettmers/logs/"""', 'logfolder'], {}), "('/private/home/timdettmers/logs/', logfolder)\n", (5136, 5182), False, 'from os.path import join\n')]
|
#!/usr/bin/env python
# encoding:utf-8
# @Time : 2019/10/4
# @Author : 茶葫芦
# @Site :
# @File : pca.py
import numpy as np
import matplotlib.pyplot as plt
class pca():
def __init__(self,initial_w,n_compents,eta =0.1,epsilon=1e-10,n_iters=1e8):
self.initial_w=initial_w
self.n_compents=n_compents
self.eta=eta
self.epsilon=epsilon
self.n_iters=n_iters
def demean(self,X):
        # Zero-center the data: subtract the per-feature mean.
return X -np.mean(X,axis=0)
def first_compent(self,X):
def f(w,X):
            # Objective function (the quantity to maximize; X is the demeaned sample matrix).
return np.sum((X.dot(w))**2)/len(X)
def df(w,X):
            # Gradient of the objective function.
return X.T.dot(X.dot(w))* 2./len(X)
def df_debug(w,X,epsilon=0.00001):
            # Generic numerical gradient based on the definition of the derivative
            # (independent of the function form; useful for verifying the analytic gradient).
res = np.empty(len(w))
for i in range(len(w)):
w_1 =w.copy()
w_1[i]+=epsilon
w_2=w.copy()
w_2[i] -=epsilon
res[i] =(f(w_1,X) -f(w_2,X)) / (2 * epsilon)
return res
def unit_direction(w):
            # Normalize a vector to unit length.
return w /np.linalg.norm(w)
        # Gradient ascent to maximize the projected variance.
        cur_iter = 0
        self.initial_w = unit_direction(self.initial_w)
        while cur_iter < self.n_iters:
            last_y = f(self.initial_w, X)
            gradient = df(self.initial_w, X)
            self.initial_w = self.initial_w + gradient * self.eta
            self.initial_w = unit_direction(self.initial_w)
            # stop once the objective no longer changes appreciably
            if abs(f(self.initial_w, X) - last_y) <= self.epsilon:
                break
            cur_iter += 1
        return self.initial_w
    def first_n_compents(self, X):
        res = []
        X = X.copy()
        for i in range(self.n_compents):
            if i > 0:
                # restart from a fresh random direction for each further component
                self.initial_w = np.random.random(X.shape[1])
            w = self.first_compent(X)
            res.append(w)
            # deflate: remove the component just found before searching for the next one
            X = X - X.dot(w).reshape(-1, 1) * w
        return res
if __name__ == '__main__':
np.random.seed(1000)
x = np.empty((100,2))
x[:,0]= np.random.uniform(0,100,size=100)
x[:,1]=0.75 * x[:,0] +3.
print(x)
w =np.random.random(2)
p=pca(initial_w=w,n_compents=2)
res =p.first_n_compents(x)
print(res)
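    # Editorial sanity check: after deflating X by the first component, the second direction
    # found by gradient ascent should be numerically orthogonal to the first one.
    if len(res) == 2:
        print('|<w1, w2>| =', abs(np.dot(res[0], res[1])))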
|
[
"numpy.random.uniform",
"numpy.random.seed",
"numpy.empty",
"numpy.random.random",
"numpy.mean",
"numpy.linalg.norm"
] |
[((1929, 1949), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (1943, 1949), True, 'import numpy as np\n'), ((1958, 1976), 'numpy.empty', 'np.empty', (['(100, 2)'], {}), '((100, 2))\n', (1966, 1976), True, 'import numpy as np\n'), ((1988, 2023), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(100)'], {'size': '(100)'}), '(0, 100, size=100)\n', (2005, 2023), True, 'import numpy as np\n'), ((2072, 2091), 'numpy.random.random', 'np.random.random', (['(2)'], {}), '(2)\n', (2088, 2091), True, 'import numpy as np\n'), ((468, 486), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (475, 486), True, 'import numpy as np\n'), ((1173, 1190), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (1187, 1190), True, 'import numpy as np\n')]
|
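The pca class in the row above recovers principal directions by gradient ascent on the projected variance. As a hedged cross-check (not part of the original file; all names below are illustrative), the sketch computes the leading direction of noise-free data lying on y = 0.75*x + 3 via an eigen-decomposition of the covariance matrix, which the gradient-ascent result is expected to match up to sign.
import numpy as np

# Exact leading principal direction for points on y = 0.75*x + 3,
# obtained from the covariance matrix for comparison with the class above.
rng = np.random.RandomState(0)
x0 = rng.uniform(0, 100, size=100)
data = np.column_stack([x0, 0.75 * x0 + 3.0])
data = data - data.mean(axis=0)                 # demean, as pca.demean does
eigvals, eigvecs = np.linalg.eigh(np.cov(data.T))
leading = eigvecs[:, np.argmax(eigvals)]
expected = np.array([1.0, 0.75]) / np.linalg.norm([1.0, 0.75])
print(abs(leading @ expected))                  # ~1.0: same direction up to sign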
"""Contains functions for tasks related to file system.
"""
import os
import shutil
import re
import datetime
import logging
import errno
import numpy as np
import mne
def open_raw(fname, preload=True, verbose='info'):
"""Reads a raw from file.
Parameters
----------
fname : str
Path to the raw file.
preload : bool
Should the data be loaded as well or just the metadata.
verbose : str
Verbose level for the read_raw call.
Returns
-------
mne.io.Raw
The raw object read from the file.
"""
try:
if verbose == 'info' or verbose == 'debug':
logging.getLogger('ui_logger').info('Reading ' + fname)
raw = mne.io.read_raw(fname, preload=preload)
else:
raw = mne.io.read_raw(fname, preload=preload, verbose=verbose)
return raw
except Exception as exc:
logging.getLogger('ui_logger').exception('')
raise Exception('Could not read the raw file: ' + str(fname))
def save_raw(raw, path, overwrite=True):
"""Saves a raw to file(s).
After witnessing several corruptions along the way,
this was made more atomic by saving the raw first to a tmp file
and then moving with shutil.
Parameters
----------
raw : mne.io.Raw
The raw file to be saved.
path : str
The path where to save.
overwrite : bool
Whether to overwrite.
"""
folder = os.path.dirname(path)
bname = os.path.basename(path)
exists = False
if os.path.exists(path):
if not overwrite:
raise IOError('File already exists.')
exists = True
# be protective and save with other name first and move afterwards
temp_path = os.path.join(folder, '_' + bname)
raw.save(temp_path, overwrite=True)
stem, ext = os.path.splitext(bname)
ext_len = len(ext)
# assumes filename ends with .fif
pat_old = re.compile(bname[:-ext_len] + r'(-[0-9]+)?' + bname[-ext_len:])
pat_new = re.compile('_' + bname[:-ext_len] + r'(-[0-9]+)?' + bname[-ext_len:])
contents = os.listdir(folder)
old_files = [fname_ for fname_ in contents if pat_old.match(fname_)]
new_files = [fname_ for fname_ in contents if pat_new.match(fname_)]
if len(old_files) != len(new_files):
logger = logging.getLogger('ui_logger')
if exists:
logger.warning("Be warned, amount of parts has changed!")
logger.debug("Old parts: ")
for part in old_files:
logger.debug(part)
logger.debug("New parts: ")
for part in new_files:
logger.debug(part)
moved_paths = []
for file_ in new_files:
tmp_path = os.path.join(folder, os.path.basename(file_))
new_path = os.path.join(folder, os.path.basename(file_)[1:])
shutil.move(tmp_path, new_path)
moved_paths.append(new_path)
for file_ in old_files:
old_file_path = os.path.join(folder, os.path.basename(file_))
if old_file_path not in moved_paths:
logger.warning('Removing unused part: ' + str(old_file_path))
os.remove(old_file_path)
raw._filenames[0] = path
def ensure_folders(paths):
"""Ensures that paths in specified in the paths param exist.
Parameters
----------
paths : list
List of folder paths.
"""
for path in paths:
if not os.path.exists(path):
os.makedirs(path)
def create_timestamped_folder(experiment):
"""Creates folder with a timestamp inside the output folder in the
experiment folder.
Parameters
----------
experiment : meggie.experiment.Experiment
The experiment where to create the folder.
Returns
-------
str
The path to the folder.
"""
current_time_str = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
path = os.path.join(experiment.path, 'output')
timestamped_folder = os.path.join(path, current_time_str)
try:
os.makedirs(timestamped_folder)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return timestamped_folder
def save_csv(path, data, column_names, row_descs):
""" Saves tabular data to csv.
Parameters
----------
path : str
Where to save.
data : a numpy array of shape (n_rows, n_cols)
Data to save.
column_names : list
List of column names.
row_descs : list
List of row descriptions that can be tuples like
('EEG', 'Left-frontal'), which are then put to the
csv as multiple columns.
"""
# gather all the data to list of rows
all_data = []
if type(data) == np.ndarray:
data = data.tolist()
# freqs data, assume same lengths
all_data.append(['']*len(row_descs[0]) + column_names)
for idx in range(len(data)):
row = list(row_descs[idx]) + data[idx]
all_data.append(row)
# save to file
all_data = np.array(all_data)
np.savetxt(path, all_data, fmt='%s', delimiter=', ')
def load_csv(path):
"""Loads tabular data from csv.
Parameters
----------
path : str
Path to the csv file.
Returns
-------
list
Column names.
list
Row descriptions.
np.array
The data.
"""
all_data = np.loadtxt(path, dtype=np.str, delimiter=', ')
data = []
column_names = []
row_descs = []
first_data_idx = np.min(np.where(all_data[0] != '')[0])
column_names = all_data[0, first_data_idx:].tolist()
row_descs = [tuple(elems) for elems in all_data[1:, :first_data_idx]]
data = all_data[1:, first_data_idx:].astype(np.float)
return column_names, row_descs, data
def tail(f, lines=1, _buffer=4098):
"""Tail a file and get `lines` lines from the end,
See https://stackoverflow.com/a/13790289
Parameters
----------
f : file descriptor
The file descriptor already opened.
lines : int
How many lines to read from the end.
_buffer : int
What buffer size to use.
Returns
-------
list
Lines from the end.
"""
# place holder for the lines found
lines_found = []
# block counter will be multiplied by buffer
# to get the block size from the end
block_counter = -1
# loop until we find X lines
while len(lines_found) < lines:
try:
f.seek(block_counter * _buffer, os.SEEK_END)
except IOError: # either file is too small, or too many lines requested
f.seek(0)
lines_found = f.readlines()
break
lines_found = f.readlines()
block_counter -= 1
return lines_found[-lines:]
def homepath():
"""Tries to find correct path for home folder.
Returns
-------
str
Path to home directory.
"""
from os.path import expanduser
home = expanduser("~")
if not home:
return '.'
return home
|
[
"os.path.expanduser",
"mne.io.read_raw",
"os.remove",
"os.makedirs",
"os.path.basename",
"os.path.dirname",
"numpy.savetxt",
"os.path.exists",
"datetime.datetime.now",
"numpy.where",
"numpy.array",
"os.path.splitext",
"numpy.loadtxt",
"shutil.move",
"os.path.join",
"os.listdir",
"logging.getLogger",
"re.compile"
] |
[((1453, 1474), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1468, 1474), False, 'import os\n'), ((1487, 1509), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1503, 1509), False, 'import os\n'), ((1537, 1557), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1551, 1557), False, 'import os\n'), ((1745, 1778), 'os.path.join', 'os.path.join', (['folder', "('_' + bname)"], {}), "(folder, '_' + bname)\n", (1757, 1778), False, 'import os\n'), ((1836, 1859), 'os.path.splitext', 'os.path.splitext', (['bname'], {}), '(bname)\n', (1852, 1859), False, 'import os\n'), ((1936, 1998), 're.compile', 're.compile', (["(bname[:-ext_len] + '(-[0-9]+)?' + bname[-ext_len:])"], {}), "(bname[:-ext_len] + '(-[0-9]+)?' + bname[-ext_len:])\n", (1946, 1998), False, 'import re\n'), ((2014, 2082), 're.compile', 're.compile', (["('_' + bname[:-ext_len] + '(-[0-9]+)?' + bname[-ext_len:])"], {}), "('_' + bname[:-ext_len] + '(-[0-9]+)?' + bname[-ext_len:])\n", (2024, 2082), False, 'import re\n'), ((2100, 2118), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (2110, 2118), False, 'import os\n'), ((3884, 3923), 'os.path.join', 'os.path.join', (['experiment.path', '"""output"""'], {}), "(experiment.path, 'output')\n", (3896, 3923), False, 'import os\n'), ((3949, 3985), 'os.path.join', 'os.path.join', (['path', 'current_time_str'], {}), '(path, current_time_str)\n', (3961, 3985), False, 'import os\n'), ((4975, 4993), 'numpy.array', 'np.array', (['all_data'], {}), '(all_data)\n', (4983, 4993), True, 'import numpy as np\n'), ((4998, 5050), 'numpy.savetxt', 'np.savetxt', (['path', 'all_data'], {'fmt': '"""%s"""', 'delimiter': '""", """'}), "(path, all_data, fmt='%s', delimiter=', ')\n", (5008, 5050), True, 'import numpy as np\n'), ((5330, 5376), 'numpy.loadtxt', 'np.loadtxt', (['path'], {'dtype': 'np.str', 'delimiter': '""", """'}), "(path, dtype=np.str, delimiter=', ')\n", (5340, 5376), True, 'import numpy as np\n'), ((6908, 6923), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (6918, 6923), False, 'from os.path import expanduser\n'), ((2324, 2354), 'logging.getLogger', 'logging.getLogger', (['"""ui_logger"""'], {}), "('ui_logger')\n", (2341, 2354), False, 'import logging\n'), ((2832, 2863), 'shutil.move', 'shutil.move', (['tmp_path', 'new_path'], {}), '(tmp_path, new_path)\n', (2843, 2863), False, 'import shutil\n'), ((4004, 4035), 'os.makedirs', 'os.makedirs', (['timestamped_folder'], {}), '(timestamped_folder)\n', (4015, 4035), False, 'import os\n'), ((714, 753), 'mne.io.read_raw', 'mne.io.read_raw', (['fname'], {'preload': 'preload'}), '(fname, preload=preload)\n', (729, 753), False, 'import mne\n'), ((786, 842), 'mne.io.read_raw', 'mne.io.read_raw', (['fname'], {'preload': 'preload', 'verbose': 'verbose'}), '(fname, preload=preload, verbose=verbose)\n', (801, 842), False, 'import mne\n'), ((2730, 2753), 'os.path.basename', 'os.path.basename', (['file_'], {}), '(file_)\n', (2746, 2753), False, 'import os\n'), ((2975, 2998), 'os.path.basename', 'os.path.basename', (['file_'], {}), '(file_)\n', (2991, 2998), False, 'import os\n'), ((3131, 3155), 'os.remove', 'os.remove', (['old_file_path'], {}), '(old_file_path)\n', (3140, 3155), False, 'import os\n'), ((3404, 3424), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3418, 3424), False, 'import os\n'), ((3438, 3455), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3449, 3455), False, 'import os\n'), ((3819, 3842), 'datetime.datetime.now', 
'datetime.datetime.now', ([], {}), '()\n', (3840, 3842), False, 'import datetime\n'), ((5462, 5489), 'numpy.where', 'np.where', (["(all_data[0] != '')"], {}), "(all_data[0] != '')\n", (5470, 5489), True, 'import numpy as np\n'), ((2795, 2818), 'os.path.basename', 'os.path.basename', (['file_'], {}), '(file_)\n', (2811, 2818), False, 'import os\n'), ((640, 670), 'logging.getLogger', 'logging.getLogger', (['"""ui_logger"""'], {}), "('ui_logger')\n", (657, 670), False, 'import logging\n'), ((899, 929), 'logging.getLogger', 'logging.getLogger', (['"""ui_logger"""'], {}), "('ui_logger')\n", (916, 929), False, 'import logging\n')]
|
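The save_csv/load_csv pair in the row above stores a table together with tuple-valued row descriptions. A minimal round-trip sketch follows; it assumes the two functions are in scope (e.g. imported from the module above) and, because load_csv uses np.str/np.float, an older NumPy release. The file name is a placeholder.
import numpy as np

# Illustrative round trip for the save_csv/load_csv helpers defined above.
data = np.array([[1.0, 2.0], [3.0, 4.0]])
column_names = ['alpha', 'beta']
row_descs = [('EEG', 'Left-frontal'), ('EEG', 'Right-frontal')]
save_csv('example.csv', data, column_names, row_descs)
names, descs, loaded = load_csv('example.csv')
print(names)   # ['alpha', 'beta']
print(descs)   # [('EEG', 'Left-frontal'), ('EEG', 'Right-frontal')]
print(loaded)  # [[1. 2.]
               #  [3. 4.]]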
#from unittest import TestCase
from animator.plotter import ScatterAnimation
from matplotlib.animation import PillowWriter
import numpy as np
x = np.linspace(0, 10, 100)
Y = [np.sin(x - 0.1 * t) for t in range(10)]
animation = ScatterAnimation(x, Y)
writer = PillowWriter(fps=5)
animation.anim.save("test.gif", writer=writer)
|
[
"matplotlib.animation.PillowWriter",
"animator.plotter.ScatterAnimation",
"numpy.sin",
"numpy.linspace"
] |
[((146, 169), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (157, 169), True, 'import numpy as np\n'), ((227, 249), 'animator.plotter.ScatterAnimation', 'ScatterAnimation', (['x', 'Y'], {}), '(x, Y)\n', (243, 249), False, 'from animator.plotter import ScatterAnimation\n'), ((259, 278), 'matplotlib.animation.PillowWriter', 'PillowWriter', ([], {'fps': '(5)'}), '(fps=5)\n', (271, 278), False, 'from matplotlib.animation import PillowWriter\n'), ((175, 194), 'numpy.sin', 'np.sin', (['(x - 0.1 * t)'], {}), '(x - 0.1 * t)\n', (181, 194), True, 'import numpy as np\n')]
|
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Use the numpy package to convert supported pixel data to an ndarray.
**Supported transfer syntaxes**
* 1.2.840.10008.1.2 : Implicit VR Little Endian
* 1.2.840.10008.1.2.1 : Explicit VR Little Endian
* 1.2.840.10008.1.2.1.99 : Deflated Explicit VR Little Endian
* 1.2.840.10008.1.2.2 : Explicit VR Big Endian
**Supported data**
The numpy handler supports the conversion of data in the (7fe0,0010)
*Pixel Data* element to a numpy ndarray provided the related Image Pixel module
elements have values given in the table below.
+------------------------------------------------+--------------+----------+
| Element | Supported | |
+-------------+---------------------------+------+ values | |
| Tag | Keyword | Type | | |
+=============+===========================+======+==============+==========+
| (0028,0002) | SamplesPerPixel | 1 | N | Required |
+-------------+---------------------------+------+--------------+----------+
| (0028,0006) | PlanarConfiguration | 1C | 0, 1 | Optional |
+-------------+---------------------------+------+--------------+----------+
| (0028,0008) | NumberOfFrames | 1C | N | Optional |
+-------------+---------------------------+------+--------------+----------+
| (0028,0010) | Rows | 1 | N | Required |
+-------------+---------------------------+------+--------------+----------+
| (0028,0011) | Columns | 1 | N | Required |
+-------------+---------------------------+------+--------------+----------+
| (0028,0100) | BitsAllocated | 1 | 1, 8, 16, 32 | Required |
+-------------+---------------------------+------+--------------+----------+
| (0028,0103) | PixelRepresentation | 1 | 0, 1 | Required |
+-------------+---------------------------+------+--------------+----------+
"""
try:
import numpy as np
HAVE_NP = True
except ImportError:
HAVE_NP = False
import warnings
from pydicom.pixel_data_handlers.util import pixel_dtype, get_expected_length
import pydicom.uid
HANDLER_NAME = 'Numpy'
DEPENDENCIES = {
'numpy': ('http://www.numpy.org/', 'NumPy'),
}
SUPPORTED_TRANSFER_SYNTAXES = [
pydicom.uid.ExplicitVRLittleEndian,
pydicom.uid.ImplicitVRLittleEndian,
pydicom.uid.DeflatedExplicitVRLittleEndian,
pydicom.uid.ExplicitVRBigEndian,
]
def is_available():
"""Return True if the handler has its dependencies met."""
return HAVE_NP
def supports_transfer_syntax(transfer_syntax):
"""Return True if the handler supports the `transfer_syntax`.
Parameters
----------
transfer_syntax : UID
The Transfer Syntax UID of the Pixel Data that is to be used with
the handler.
"""
return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES
def needs_to_convert_to_RGB(ds):
"""Return True if the pixel data should to be converted from YCbCr to RGB.
This affects JPEG transfer syntaxes.
"""
return False
def should_change_PhotometricInterpretation_to_RGB(ds):
"""Return True if the PhotometricInterpretation should be changed to RGB.
This affects JPEG transfer syntaxes.
"""
return False
def pack_bits(arr):
"""Pack a binary numpy ndarray into bytes for use with Pixel Data.
Should be used in conjunction with (0028,0100) *BitsAllocated* = 1.
Parameters
----------
arr : numpy.ndarray
The ndarray containing 1-bit data as ints. The array must only contain
integer values of 0 and 1 and must have an 'uint' or 'int' dtype. For
        the sake of efficiency it's recommended that the array length be a
multiple of 8 (i.e. that any empty bit-padding to round out the byte
has already been added).
Returns
-------
bytes
The bit packed data.
Raises
------
ValueError
If `arr` contains anything other than 0 or 1.
References
----------
DICOM Standard, Part 5, Section 8.1.1 and Annex D
"""
if arr.shape == (0,):
return bytes()
# Test array
if not np.array_equal(arr, arr.astype(bool)):
raise ValueError(
"Only binary arrays (containing ones or zeroes) can be packed."
)
if len(arr.shape) > 1:
raise ValueError("Only 1D arrays are supported.")
# The array length must be a multiple of 8, pad the end
if arr.shape[0] % 8:
arr = np.append(arr, np.zeros(8 - arr.shape[0] % 8))
# Reshape so each row is 8 bits
arr = np.reshape(arr, (-1, 8))
arr = np.fliplr(arr)
arr = np.packbits(arr.astype('uint8'))
return arr.tobytes()
def unpack_bits(bytestream):
"""Unpack bit packed pixel data into a numpy ndarray.
    Suitable for use when (0028,0100) *Bits Allocated* is 1.
Parameters
----------
bytestream : bytes
The bit packed pixel data.
Returns
-------
numpy.ndarray
The unpacked pixel data as a 1D array.
Notes
-----
The implementation for PyPy is roughly 100 times slower than the
standard ``numpy.unpackbits`` method.
References
----------
DICOM Standard, Part 5, Section 8.1.1 and Annex D
"""
# Thanks to @sbrodehl (#643)
# e.g. b'\xC0\x09' -> [192, 9]
arr = np.frombuffer(bytestream, dtype='uint8')
# -> [1 1 0 0 0 0 0 0 0 0 0 0 1 0 0 1]
arr = np.unpackbits(arr)
# -> [[1 1 0 0 0 0 0 0],
# [0 0 0 0 1 0 0 1]]
arr = np.reshape(arr, (-1, 8))
# -> [[0 0 0 0 0 0 1 1],
# [1 0 0 1 0 0 0 0]]
arr = np.fliplr(arr)
# -> [0 0 0 0 0 0 1 1 1 0 0 1 0 0 0 0]
arr = np.ravel(arr)
return arr
def get_pixeldata(ds, read_only=False):
"""Return an ndarray of the Pixel Data.
Parameters
----------
ds : dataset.Dataset
The DICOM dataset containing an Image Pixel module and the Pixel Data
to be converted.
read_only : bool, optional
If False (default) then returns a writeable array that no longer uses
the original memory. If True and the value of (0028,0100) *Bits
Allocated* > 1 then returns a read-only array that uses the original
memory buffer of the pixel data. If *Bits Allocated* = 1 then always
returns a writeable array.
Returns
-------
np.ndarray
The contents of the Pixel Data element (7FE0,0010) as a 1D array.
Raises
------
AttributeError
If the dataset is missing a required element.
NotImplementedError
If the dataset contains pixel data in an unsupported format.
ValueError
If the actual length of the pixel data doesn't match the expected
length.
"""
transfer_syntax = ds.file_meta.TransferSyntaxUID
# The check of transfer syntax must be first
if transfer_syntax not in SUPPORTED_TRANSFER_SYNTAXES:
raise NotImplementedError(
"Unable to convert the pixel data as the transfer syntax "
"is not supported by the numpy pixel data handler."
)
# Check required elements
required_elements = ['PixelData', 'BitsAllocated', 'Rows', 'Columns',
'PixelRepresentation', 'SamplesPerPixel']
missing = [elem for elem in required_elements if elem not in ds]
if missing:
raise AttributeError(
"Unable to convert the pixel data as the following required "
"elements are missing from the dataset: " + ", ".join(missing)
)
# Calculate the expected length of the pixel data (in bytes)
# Note: this does NOT include the trailing null byte for odd length data
expected_len = get_expected_length(ds)
# Check that the actual length of the pixel data is as expected
actual_length = len(ds.PixelData)
# Correct for the trailing NULL byte padding for odd length data
padded_expected_len = expected_len + expected_len % 2
if actual_length < padded_expected_len:
raise ValueError(
"The length of the pixel data in the dataset doesn't match the "
"expected amount ({0} vs. {1} bytes). The dataset may be "
"corrupted or there may be an issue with the pixel data handler."
.format(actual_length, padded_expected_len)
)
elif actual_length > padded_expected_len:
# PS 3.5, Section 8.1.1
msg = (
"The length of the pixel data in the dataset ({} bytes) indicates "
"it contains excess padding. {} bytes will be removed from the "
"end of the data"
.format(actual_length, actual_length - expected_len)
)
warnings.warn(msg)
# Unpack the pixel data into a 1D ndarray
if ds.BitsAllocated == 1:
# Skip any trailing padding bits
nr_pixels = get_expected_length(ds, unit='pixels')
arr = unpack_bits(ds.PixelData)[:nr_pixels]
else:
# Skip the trailing padding byte if present
arr = np.frombuffer(ds.PixelData[:expected_len],
dtype=pixel_dtype(ds))
if should_change_PhotometricInterpretation_to_RGB(ds):
ds.PhotometricInterpretation = "RGB"
if not read_only and ds.BitsAllocated > 1:
return arr.copy()
return arr
|
[
"numpy.ravel",
"numpy.frombuffer",
"numpy.zeros",
"pydicom.pixel_data_handlers.util.pixel_dtype",
"pydicom.pixel_data_handlers.util.get_expected_length",
"numpy.fliplr",
"numpy.reshape",
"numpy.unpackbits",
"warnings.warn"
] |
[((4705, 4729), 'numpy.reshape', 'np.reshape', (['arr', '(-1, 8)'], {}), '(arr, (-1, 8))\n', (4715, 4729), True, 'import numpy as np\n'), ((4740, 4754), 'numpy.fliplr', 'np.fliplr', (['arr'], {}), '(arr)\n', (4749, 4754), True, 'import numpy as np\n'), ((5457, 5497), 'numpy.frombuffer', 'np.frombuffer', (['bytestream'], {'dtype': '"""uint8"""'}), "(bytestream, dtype='uint8')\n", (5470, 5497), True, 'import numpy as np\n'), ((5551, 5569), 'numpy.unpackbits', 'np.unpackbits', (['arr'], {}), '(arr)\n', (5564, 5569), True, 'import numpy as np\n'), ((5638, 5662), 'numpy.reshape', 'np.reshape', (['arr', '(-1, 8)'], {}), '(arr, (-1, 8))\n', (5648, 5662), True, 'import numpy as np\n'), ((5731, 5745), 'numpy.fliplr', 'np.fliplr', (['arr'], {}), '(arr)\n', (5740, 5745), True, 'import numpy as np\n'), ((5799, 5812), 'numpy.ravel', 'np.ravel', (['arr'], {}), '(arr)\n', (5807, 5812), True, 'import numpy as np\n'), ((7811, 7834), 'pydicom.pixel_data_handlers.util.get_expected_length', 'get_expected_length', (['ds'], {}), '(ds)\n', (7830, 7834), False, 'from pydicom.pixel_data_handlers.util import pixel_dtype, get_expected_length\n'), ((8952, 8990), 'pydicom.pixel_data_handlers.util.get_expected_length', 'get_expected_length', (['ds'], {'unit': '"""pixels"""'}), "(ds, unit='pixels')\n", (8971, 8990), False, 'from pydicom.pixel_data_handlers.util import pixel_dtype, get_expected_length\n'), ((4626, 4656), 'numpy.zeros', 'np.zeros', (['(8 - arr.shape[0] % 8)'], {}), '(8 - arr.shape[0] % 8)\n', (4634, 4656), True, 'import numpy as np\n'), ((8795, 8813), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (8808, 8813), False, 'import warnings\n'), ((9196, 9211), 'pydicom.pixel_data_handlers.util.pixel_dtype', 'pixel_dtype', (['ds'], {}), '(ds)\n', (9207, 9211), False, 'from pydicom.pixel_data_handlers.util import pixel_dtype, get_expected_length\n')]
|
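The pack_bits/unpack_bits helpers in the row above convert between a 1-D binary ndarray and the bit-packed bytes used when Bits Allocated is 1. The sketch below (not part of the handler) exercises the round trip with the same 16-bit pattern worked through in the unpack_bits comments; both functions are assumed to be in scope.
import numpy as np

# pack_bits followed by unpack_bits returns the original bits
# when the array length is a multiple of 8.
bits = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0], dtype='uint8')
packed = pack_bits(bits)              # b'\xc0\x09', matching the worked comments above
unpacked = unpack_bits(packed)
assert np.array_equal(unpacked, bits)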
#!/usr/bin/env python3
'''Create light curves from UVIT data.
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import os
import sys
import ntpath
import random
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from astropy.io import fits
from collections import Counter
from scipy.spatial import KDTree
from scipy.interpolate import interp1d
from matplotlib.colors import LogNorm
from photutils import DAOStarFinder, CircularAperture
from astropy.convolution import Gaussian2DKernel, convolve
from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma, sigma_clip
#######################################################################
# Initial set of parameters.
'''Window size Vs Framecount rate dictionary (approximate).
The most accurate way to get the rate would be to take the value
of (1 / INT_TIME). INT_TIME value can be found from the image header
window_rate_dict = {'512 x 512': 28.7185,
'350 x 350': 61.0,
'300 x 300': 82.0,
'250 x 250': 115.0,
'200 x 200': 180.0,
'150 x 150': 300.0,
'100 x 100' : 640.0}
'''
events_list = '' #events file
radius = 6 # radius of aperture in pixels.
sky_radius = 12 # radius of background aperture in pixels.
bwidth = 50 # bin width in seconds, change as you please.
framecount_per_sec = 28.7185 # 28.7185 frames / second for 512x512 mode.
# The following is only required for makecurves.
detection_method = 'daofind' # valid inputs are 'daofind' / 'kdtree'.
threshold = 4 # threshold ('daofind').
how_many = 4 # limit ('kdtree').
# The coordinates are only required for curves.
xp = None
yp = None
'''The following parameter affects how the background estimation is done.
The default value is 'None' and no background estimation is carried out.
If you prefer to manually specify a background region, then give 'manual'
as the value. Also, you can provide 'auto' and background region
will be automatically selected.'''
background = None # valid inputs are None / 'manual' / 'auto'.
# If 'manual', PLEASE FILL the following.
x_bg = None # background X-coordinate.
y_bg = None # background Y-coordinate.
'''The following parameters determines whether corrections are
applied to the CPF. They are aperture-correction and
saturation-correction.'''
aperture_correction = None # valid inputs are None / 'fuv' / 'nuv'.
saturation_correction = False # True or False.
# Following parameters need not be changed (unless you want to).
whole_figure_resolution = 256 # resolution of full figure.
sub_fig_size = 40 # size of sub figure.
fontsize = 9 #fontsize for plots.
# Encircled energy data (https://doi.org/10.3847/1538-3881/ab72a3).
radius_pixels = np.array([ 1.5, 2, 2.5, 3, 4,
5, 7, 9, 12, 15,
20, 30, 40, 50, 70,
80, 95 ])
nuv_energy_percentage = np.array([ 29.9, 42.0, 52.0, 59.3, 68.8,
74.5, 81.3, 85.1, 89.3, 92.1,
95.2, 97.6, 98.4, 98.8, 99.4,
99.6, 100.0 ])
fuv_energy_percentage = np.array([ 28.1, 40.7, 51.1, 59.1, 68.9,
74.6, 81.4, 85.0, 88.6, 91.3,
94.5, 96.9, 97.7, 98.3, 99.1,
99.5, 100.0 ])
#ratio = measured CPF / actual CPF
nuv_ratio = nuv_energy_percentage / 100.
fuv_ratio = fuv_energy_percentage / 100.
fuv_ratio_function = interp1d(radius_pixels, fuv_ratio, kind = 'cubic')
nuv_ratio_function = interp1d(radius_pixels, nuv_ratio, kind = 'cubic')
#######################################################################
def read_columns(events_list):
# Reading few columns.
f = fits.open(events_list)
time = f[1].data['MJD_L2']
fx = f[1].data['Fx']
fy = f[1].data['Fy']
photons = f[1].data['EFFECTIVE_NUM_PHOTONS']
mask = photons > 0
time = time[mask]
fx = fx[mask]
fy = fy[mask]
photons = photons[mask]
return time, fx, fy, photons
def tobe_or_notobe(time, bwidth,
detection_method,
threshold,
how_many,
background,
x_bg, y_bg,
aperture_correction, radius,
saturation_correction):
sanity = (time.max() - time.min()) / bwidth
if int(sanity) < 1:
        print('\nEvents list contains little data OR check the bwidth parameter.\n')
if detection_method not in ['daofind', 'kdtree']:
print('\nInvalid input for "detection_method" parameter.\n')
sanity = 0
if threshold == 0:
print('\nThe "threshold" parameter is set at 0.\n')
sanity = 0
if how_many == 0:
print('\nThe "how_many" parameter is set at 0.\n')
sanity = 0
if background not in [None, 'auto', 'manual']:
print('\nInvalid input for "background" parameter.\n')
sanity = 0
if background == 'manual':
if None in [x_bg, y_bg]:
print('\nPlease provide values for both "x_bg" and "y_bg".\n')
sanity = 0
if aperture_correction not in [None, 'fuv', 'nuv']:
print('\nInvalid input for "aperture_correction" parameter.\n')
sanity = 0
if saturation_correction not in [True, False]:
print('\nInvalid input for "saturation_correction" parameter.\n')
sanity = 0
if aperture_correction != None:
if 1.5 <= radius <= 95:
pass
else:
print('\nThe "radius" parameter should be in the range {1.5, 95}')
sanity = 0
return int(sanity)
def modify_string(events_list):
if events_list[-5:] == '.fits':
events_list = events_list[:-5]
if events_list[-8:] == '.fits.gz':
events_list = events_list[:-8]
return events_list
# To automatically choose background region.
def auto_bg(fx, fy, time, photons, radius, framecount_per_sec, sky_radius):
weights = photons / framecount_per_sec
bins = np.arange(0, 4801, 16)
lowres_counts, lowres_xedges, lowres_yedges = np.histogram2d(fx, fy,
bins = (bins, bins),
weights = weights)
lowres_xcentres = (lowres_xedges[:-1] + lowres_xedges[1:]) / 2.
lowres_ycentres = (lowres_yedges[:-1] + lowres_yedges[1:]) / 2.
flat_counts = lowres_counts.flatten()
x_mesh, y_mesh = np.meshgrid(lowres_ycentres, lowres_xcentres) #Notice the swap.
x_mesh = x_mesh.flatten()
y_mesh = y_mesh.flatten()
# To avoid the edges.
mask_radius = 1700
mask = (x_mesh - 2400) ** 2 + (y_mesh - 2400) ** 2 <= mask_radius ** 2
array = np.array([flat_counts, x_mesh, y_mesh]).T
polished_array = array[mask]
bg_mask = sigma_clip(polished_array[:, 0], sigma = 3, maxiters = 5)
bg_mask = np.logical_not(bg_mask.mask)
bg_CPS_sample = []
bg_CPS_e_sample = []
sample_size = 100
for d in range(sample_size):
r_count, x_bg, y_bg = random.choice(polished_array[bg_mask])
bg_CPS, bg_CPS_e = bg_estimate(fx, fy, time, photons, framecount_per_sec,
radius, x_bg, y_bg, sky_radius)
bg_CPS_sample.append(bg_CPS)
bg_CPS_e_sample.append(bg_CPS_e)
bg_CPS_sample = np.array(bg_CPS_sample)
bg_CPS_e_sample = np.array(bg_CPS_e_sample)
bg_CPS_mask = sigma_clip(bg_CPS_sample, sigma = 3, maxiters = 5)
bg_CPS_mask = np.logical_not(bg_CPS_mask.mask)
bg_CPS = np.mean(bg_CPS_sample[bg_CPS_mask])
bg_CPS_e = np.mean(bg_CPS_e_sample[bg_CPS_mask])
return lowres_counts, bg_CPS, bg_CPS_e
# To estimate background CPS.
def bg_estimate(fx, fy, time, photons, framecount_per_sec, radius, x_bg, y_bg, sky_radius):
weights = photons / framecount_per_sec
mask = ((fx - x_bg) ** 2 + (fy - y_bg) ** 2) <= sky_radius ** 2
T = time[mask]
W = weights[mask]
if len(T) != 0:
scaled_events = (np.sum(W) * radius ** 2) / float(sky_radius ** 2)
scaled_events_e = (np.sqrt(len(T)) * radius ** 2) / float(sky_radius ** 2)
else:
scaled_events = 0
scaled_events_e = 0
unique_time = np.unique(time)
Number_of_frames = float(len(unique_time))
bg_CPS = (scaled_events * framecount_per_sec) / Number_of_frames
bg_CPS_e = (scaled_events_e * framecount_per_sec) / Number_of_frames
return bg_CPS, bg_CPS_e
# To create subset images.
def create_sub_image(pos_x, pos_y,
sub_size,
cir_rad,
sub_name,
fx, fy,
path_to_events_list,
events_list):
mask = np.logical_and(np.abs(fx - pos_x) <= sub_size,
np.abs(fy - pos_y) <= sub_size)
obj_fx = fx[mask]
obj_fy = fy[mask]
obj_circle = plt.Circle((pos_x, pos_y), cir_rad,
color = 'k', fill = False)
plt.hist2d(obj_fx, obj_fy, bins = sub_size * 2, norm = LogNorm())
plt.gcf().gca().add_artist(obj_circle)
source_png_name = os.path.join(path_to_events_list, sub_name + events_list + '.png')
plt.savefig(source_png_name, format = 'png', bbox_inches = 'tight')
plt.clf()
return source_png_name
# To find positions of interest (using daofind algorithm).
def detect_sources_daofind(fx, fy, photons, threshold):
mask_radius = 1700
kernel = Gaussian2DKernel(x_stddev = 4 * gaussian_fwhm_to_sigma)
aperture = CircularAperture((2400, 2400), r = mask_radius)
mask = aperture.to_mask(method = 'center')
mask = mask.to_image(shape = ((4800, 4800)))
weights = photons / framecount_per_sec
bins = np.arange(0, 4801)
ndarray, yedges, xedges = np.histogram2d(fy, fx, bins = (bins, bins), weights = weights)
data = ndarray * mask
mean, _, std = sigma_clipped_stats(data, sigma = 5., maxiters = 1)
# to avoid std becoming zero.
if std == 0:
mean = np.mean(data)
std = np.std(data)
data = convolve(data, kernel)
daofind = DAOStarFinder(fwhm = 3.0, threshold = threshold * std, exclude_border = True)
sources = daofind(data - mean)
sources.sort('mag')
uA = np.array([sources['xcentroid'].data, sources['ycentroid'].data]).T
uA = np.round(uA, 2)
return uA
# To find positions of interest (positions with maximum events).
def detect_sources_kdtree(fx, fy, how_many):
fxi = [int(round(s)) for s in fx]
fyi = [int(round(s)) for s in fy]
# Counting stuff to know who all are popular.
counter = Counter(zip(fxi, fyi))
most_common_limit = int(100 * how_many)
A = np.array(list(zip(*counter.most_common(most_common_limit)))[0])
# Sieving out the duplicates.
uA = []
while len(A) != 0:
uA.append(A[0])
A = np.array([x for x in A
if x not in A[KDTree(A).query_ball_point(uA[-1], 16)]])
uA = np.array(uA)[:how_many]
uA = uA
if len(uA) != 0:
# To avoid sources which have coordinates 0 or 4800.
mask = np.isin(uA, [0, 4800], invert = True)
mask = mask[:, 0] * mask[:, 1]
uA = uA[mask]
return uA
def get_counts(fx, fy, time, photons, framecount_per_sec, xp, yp, radius):
weights = photons / framecount_per_sec
# selecting events within a circular region.
mask = ((fx - xp) ** 2 + (fy - yp) ** 2) <= radius ** 2
T = time[mask]
W = weights[mask]
# To find Counts per Frame (CPF).
unique_time = np.unique(time)
Number_of_frames = float(len(unique_time))
CPF = np.sum(W) / Number_of_frames
CPF_err = np.sqrt(len(T)) / Number_of_frames
return CPF, CPF_err
# To change mission elapsed time in seconds to modified julian date.
def met_to_mjd(met):
jan2010 = 55197.0 # 2010.0(UTC) expressed with MJD format and scale UTC.
mjd = (met / 86400.0) + jan2010 # 1 julian day = 86400 seconds.
return mjd
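# Added illustration (not in the original module): met_to_mjd(86400.0) == 55198.0,
# i.e. one full day (86400 s) after the 2010.0 epoch at MJD 55197.0.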
def apply_aperture_correction(CPF, CPF_err, radius, aperture_correction):
if aperture_correction == 'fuv':
CPF = CPF / fuv_ratio_function(radius)
CPF_err = CPF_err / fuv_ratio_function(radius)
elif aperture_correction == 'nuv':
CPF = CPF / nuv_ratio_function(radius)
CPF_err = CPF_err / nuv_ratio_function(radius)
return CPF, CPF_err
def apply_saturation_correction(CPF5, CPF5_err, saturation_correction):
if saturation_correction == True:
if np.sum(CPF5 >= 0.6) != 0:
print("\nCounts per frame exeeds 0.6; saturation correction cannot be applied")
return
ICPF5 = -1 * np.log(1 - CPF5)
ICPF5_err = CPF5_err / CPF5
ICORR = ICPF5 - CPF5
ICORR_err = np.sqrt((ICPF5_err ** 2) + (CPF5_err ** 2))
RCORR = ICORR * (0.89 - (0.30 * (ICORR ** 2)))
RCORR_err = RCORR * np.sqrt((ICORR_err ** 2) + ((0.30 * 2 * ICORR * ICORR_err) ** 2))
CPF5 = CPF5 + RCORR
CPF5_err = np.sqrt((CPF5_err ** 2) + (RCORR_err ** 2))
return CPF5, CPF5_err
def makecurves(events_list = events_list,
radius = radius,
detection_method = detection_method,
threshold = threshold,
how_many = how_many,
bwidth = bwidth,
framecount_per_sec = framecount_per_sec,
background = background,
sky_radius = sky_radius,
x_bg = x_bg,
y_bg = y_bg,
aperture_correction = aperture_correction,
saturation_correction = saturation_correction,
whole_figure_resolution = whole_figure_resolution,
sub_fig_size = sub_fig_size,
fontsize = fontsize):
"""Automatically detect sources amd create light curves.
Parameters
----------
events_list : file path
The name of the events list FITS file.
radius : float, optional
The source aperture radius in pixels.
This parameter has a default value of 6.
detection_method : {'daofind', 'kdtree'}, optional
The parameter to choose between available detection methods.
* ``'daofind'``: To use the DAOFIND algorithm. This is the default method.
* ``'kdtree'``: Source detection method based on a k-d tree implementation.
threshold : float, optional
The threshold parameter associated with the ``'daofind'`` method.
The default value is 4.
how_many : int, optional
The limit for the number of sources to be detected using
the ``'kdtree'`` method.
The default value is 4.
bwidth : float, optional
Time bin width in seconds.
        The default value is 50.
framecount_per_sec : float, optional
The framerate of the observation, with a default value of 28.7185
frames per second for 512 x 512 window mode.
The most accurate way to get the framerate would be to take the value
of (``1 / INT_TIME``).
``INT_TIME`` value can be found from the corresponding image header.
Approximate values of framerate for different window modes of UVIT
are given in the table below.
+---------------+---------------------+
| window mode | frames per second |
+===============+=====================+
| 512 x 512 | 28.7 |
+---------------+---------------------+
| 350 x 350 | 61 |
+---------------+---------------------+
| 300 x 300 | 82 |
+---------------+---------------------+
| 250 x 250 | 115 |
+---------------+---------------------+
| 200 x 200 | 180 |
+---------------+---------------------+
| 150 x 150 | 300 |
+---------------+---------------------+
| 100 x 100 | 640 |
+---------------+---------------------+
background : {'auto', 'manual', None}, optional
The parameter affects how the background count-rate estimation is done.
* ``'auto'``: Automatic estimation of the background count-rate.
* ``'manual'``: To manually specify a background region using **x_bg** and **y_bg** parameters.
* ``None``: No background estimation is carried out. This is the default method.
sky_radius: float, optional
The background aperture radius in pixels.
The default value is 12.
x_bg : float, optional
The X-coordinate of the background region.
y_bg : float, optional
The Y-coordinate of the background region.
aperture_correction : {'fuv', 'nuv', None}, optional
The parameter affects how the aperture correction is done.
* ``'fuv'``: Aperture correction for the FUV channel is applied.
* ``'nuv'``: Aperture correction for the NUV channel is applied.
* ``None``: No aperture correction is applied. This is the default method.
saturation_correction : bool, optional
If `True`, saturation correction is applied.
The default value is `False`.
Note
----
It is essential to set the correct value of the framerate.
Most UVIT observations are carried out in 512 x 512 window mode.
Example
--------
>>> import curvit
>>> curvit.makecurves(events_list = 'AS1G06_084T01_9000000710uvtNIIPC00F2_l2ce.fits.gz',
... threshold = 5)
::
Detected source coordinates saved in file:
* sources_AS1G06_084T01_9000000710uvtNIIPC00F2_l2ce.coo
Detected sources are plotted in the image:
* sources_AS1G06_084T01_9000000710uvtNIIPC00F2_l2ce.png
---------------------- light curves ----------------------
* makecurves_3136.64_3651.08_AS1G06_084T01_9000000710uvtNIIPC00F2_l2ce.png
* makecurves_2530.02_1442.18_AS1G06_084T01_9000000710uvtNIIPC00F2_l2ce.png
* makecurves_2912.31_3657.17_AS1G06_084T01_9000000710uvtNIIPC00F2_l2ce.png
...
...
Done!
"""
time, fx, fy, photons = read_columns(events_list)
weights = photons / framecount_per_sec
sanity = tobe_or_notobe(time, bwidth,
detection_method,
threshold,
how_many,
background,
x_bg, y_bg,
aperture_correction, radius,
saturation_correction)
if sanity < 1:
return
original_input = events_list
path_to_events_list, events_list = ntpath.split(events_list)
events_list = modify_string(events_list)
if detection_method == 'daofind':
uA = detect_sources_daofind(fx, fy, photons, threshold)
else:
uA = detect_sources_kdtree(fx, fy, how_many)
if len(uA) == 0:
print('No sources, try changing the detection threshold parameter.')
return
coo_file = os.path.join(path_to_events_list, 'sources_' + events_list +'.coo')
np.savetxt(coo_file, uA, fmt = '%4.2f\t%4.2f')
print('\nDetected source coordinates saved in file:\n* {}'.format(coo_file))
# To automatically choose background region.
plt.figure(figsize = (10.5, 10))
if background == 'auto':
lowres_counts, bg_CPS, bg_CPS_e = auto_bg(fx, fy, time, photons, radius,
framecount_per_sec, sky_radius)
# To create a quick look figure marking sources and background.
bins = np.arange(0, 4801, 4096 / whole_figure_resolution)
plt.hist2d(fx, fy,
bins = (bins, bins),
weights = weights,
norm = LogNorm())
plt.tick_params(axis = 'both', labelsize = fontsize)
for u in uA:
plt.annotate('Source', u,
size = 13, color = 'black', fontweight = 'bold')
obj_circle = plt.Circle(u, 100, color = 'k', fill = False)
plt.gcf().gca().add_artist(obj_circle)
if background == 'manual':
plt.annotate('Background', (x_bg, y_bg),
size = 13, color = 'black', fontweight = 'bold')
bg_circle = plt.Circle((x_bg, y_bg), 100, color = 'k', fill = False)
plt.gcf().gca().add_artist(bg_circle)
png_name = os.path.join(path_to_events_list, 'sources_' + events_list + '.png')
plt.savefig( png_name, format = 'png', bbox_inches = 'tight')
plt.clf()
print('Detected sources are plotted in the image:\n* {}'.format(png_name))
if background != None:
if background == 'auto':
print('\nThe estimated background CPS = {:.5f} +/-{:.5f}'.format(bg_CPS, bg_CPS_e))
if background == 'manual':
# To estimate Background CPS.
bg_CPS, bg_CPS_e = bg_estimate(fx, fy, time, photons, framecount_per_sec,
radius, x_bg, y_bg, sky_radius)
bg_png = create_sub_image(x_bg, y_bg,
sub_fig_size,
sky_radius,
'background_',
fx, fy,
path_to_events_list,
events_list)
print('\nThe estimated background CPS = {:.5f} +/-{:.5f}'.format(bg_CPS, bg_CPS_e))
print('Region selected for background estimate:\n* {}'.format(bg_png))
else:
bg_CPS, bg_CPS_e = 0, 0
# Calculating number of bins.
time_width = time.max() - time.min()
nbin = time_width / bwidth
nbin = int(nbin)
unique_time = np.unique(time)
till_here = time.min() + (bwidth * nbin)
# Changing mission elapsed time in seconds to modified julian date.
till_here = met_to_mjd(till_here)
time_start = met_to_mjd(time.min())
unique_time = met_to_mjd(unique_time)
# Getting the number of unique frames within a bin.
u_counts, u_bin_edges = np.histogram(unique_time, bins = nbin,
range = (time_start, till_here))
# selecting events within a circular region.
print('\n---------------------- light curves ----------------------')
plt.figure(figsize = (8, 5))
for uaxy in uA:
xp, yp = uaxy
# selecting events within a circular region.
mask = ((fx - xp) ** 2 + (fy - yp) ** 2) <= radius ** 2
T = time[mask]
W = weights[mask]
T = met_to_mjd(T)
plt.title("X = %s, Y = %s, bin = %ss, radius = %spx" \
%(xp, yp, bwidth, radius), fontsize = fontsize)
plt.xlabel("Time (Julian Date)", fontsize = fontsize)
plt.ylabel("Counts per second", fontsize = fontsize)
plt.tick_params(axis = 'both', labelsize = fontsize)
weighted_counts, bin_edges = np.histogram(T, bins = nbin,
range = (time_start, till_here),
weights = W)
counts, _ = np.histogram(T, bins = nbin,
range = (time_start, till_here))
bin_centres = (bin_edges[:-1] + bin_edges[1:]) / 2.
if np.array_equal(bin_edges, u_bin_edges) == True:
count_mask = counts != 0
else:
print('\nThis happens when bwidth is too small\n')
return
if np.sum(count_mask) != 0:
mcentres = bin_centres[count_mask]
weighted_mcounts = weighted_counts[count_mask]
mcounts = counts[count_mask]
frames_in_bin = u_counts[count_mask]
else:
print('No counts for the source at %s' %uaxy)
continue
CPF = weighted_mcounts / frames_in_bin
CPF_err = np.sqrt(mcounts) / frames_in_bin
# Background subtraction.
CPF = CPF - (bg_CPS / framecount_per_sec)
CPF_err = np.sqrt(CPF_err ** 2 + (bg_CPS_e / framecount_per_sec) ** 2)
CPF, CPF_err = apply_aperture_correction(CPF, CPF_err, radius, aperture_correction)
CPF, CPF_err = apply_saturation_correction(CPF, CPF_err, saturation_correction)
CPS = CPF * framecount_per_sec
CPS_err = CPF_err * framecount_per_sec
plt.scatter(mcentres, CPS)
plt.errorbar(mcentres, CPS, yerr = CPS_err, linestyle = "None")
empty_space = (till_here - time_start) / 25.0
plt.xlim(time_start - empty_space, till_here + empty_space)
#To write the array to output.
data_to_output = list(zip(mcentres, CPS, CPS_err))
output_prefix = 'makecurves_' + str(xp) + '_' + str(yp) + '_' + events_list
datname = os.path.join(path_to_events_list, output_prefix + '.dat')
np.savetxt(datname, data_to_output,
fmt = '%10.11f\t%.5e\t%.5e',
header = 'MJD\t\t\tCPS (bin=%ss)\tCPS_error' %bwidth)
output_prefix = 'makecurves_' + str(xp) + '_' + str(yp) + '_' + events_list
figname = os.path.join(path_to_events_list, output_prefix + '.png')
plt.savefig(figname, format = 'png', bbox_inches = 'tight', dpi = 150)
print('* {}'.format(figname))
plt.clf()
print('\nDone!\n')
plt.close('all')
def curve(events_list = events_list,
xp = xp,
yp = yp,
radius = radius,
bwidth = bwidth,
framecount_per_sec = framecount_per_sec,
background = background,
sky_radius = sky_radius,
x_bg = x_bg,
y_bg = y_bg,
aperture_correction = aperture_correction,
saturation_correction = saturation_correction,
whole_figure_resolution = whole_figure_resolution,
sub_fig_size = sub_fig_size,
fontsize = fontsize):
"""Create light curve for a source.
Parameters
----------
events_list : file path
The name of the events list FITS file.
xp : float
The X-coordinate of the source.
yp : float
The Y-coordinate of the source.
radius : float, optional
The source aperture radius in pixels.
This parameter has a default value of 6.
bwidth : float, optional
Time bin width in seconds.
        The default value is 50.
framecount_per_sec : float, optional
The framerate of the observation, with a default value of 28.7185
frames per second for 512 x 512 window mode.
The most accurate way to get the framerate would be to take the value
of (``1 / INT_TIME``).
``INT_TIME`` value can be found from the corresponding image header.
Approximate values of framerate for different window modes of UVIT
are given in the table below.
+---------------+---------------------+
| window mode | frames per second |
+===============+=====================+
| 512 x 512 | 28.7 |
+---------------+---------------------+
| 350 x 350 | 61 |
+---------------+---------------------+
| 300 x 300 | 82 |
+---------------+---------------------+
| 250 x 250 | 115 |
+---------------+---------------------+
| 200 x 200 | 180 |
+---------------+---------------------+
| 150 x 150 | 300 |
+---------------+---------------------+
| 100 x 100 | 640 |
+---------------+---------------------+
background : {'auto', 'manual', None}, optional
The parameter affects how the background count-rate estimation is done.
* ``'auto'``: Automatic estimation of the background count-rate.
* ``'manual'``: To manually specify a background region using **x_bg** and **y_bg** parameters.
* ``None``: No background estimation is carried out. This is the default method.
sky_radius: float, optional
The background aperture radius in pixels.
The default value is 12.
x_bg : float, optional
The X-coordinate of the background region.
y_bg : float, optional
The Y-coordinate of the background region.
aperture_correction : {'fuv', 'nuv', None}, optional
The parameter affects how the aperture correction is done.
* ``'fuv'``: Aperture correction for the FUV channel is applied.
* ``'nuv'``: Aperture correction for the NUV channel is applied.
* ``None``: No aperture correction is applied. This is the default method.
saturation_correction : bool, optional
If `True`, saturation correction is applied.
The default value is `False`.
Note
----
It is essential to set the correct value of the framerate.
Most UVIT observations are carried out in 512 x 512 window mode.
Example
--------
>>> curvit.curve(events_list = 'AS1G06_084T01_9000000710uvtFIIPC00F1_l2ce.fits.gz',
... xp = 2636.71, yp = 907.91,
... radius = 15,
... bwidth = 50,
... background = 'auto')
::
The estimated background CPS = 0.02155 +/-0.00425
-------------------------- curve --------------------------
source: source_AS1G06_084T01_9000000710uvtFIIPC00F1_l2ce.png
source_zoomed_AS1G06_084T01_9000000710uvtFIIPC00F1_l2ce.png
data: curve_2636.71_907.91_AS1G06_084T01_9000000710uvtFIIPC00F1_l2ce.dat
plot: curve_2636.71_907.91_AS1G06_084T01_9000000710uvtFIIPC00F1_l2ce.png
Done!
"""
time, fx, fy, photons = read_columns(events_list)
weights = photons / framecount_per_sec
if None in [xp, yp]:
print('\nPlease provide values for both "xp" and "yp".\n')
return
sanity = tobe_or_notobe(time, bwidth,
detection_method,
threshold,
how_many,
background,
x_bg, y_bg,
aperture_correction, radius,
saturation_correction)
if sanity < 1:
return
path_to_events_list, events_list = ntpath.split(events_list)
events_list = modify_string(events_list)
# To automatically choose background region.
plt.figure(figsize = (10.5, 10))
if background == 'auto':
lowres_counts, bg_CPS, bg_CPS_e = auto_bg(fx, fy, time, photons, radius,
framecount_per_sec, sky_radius)
# To create a quick look figure marking sources and background.
bins = np.arange(0, 4801, 4096 / whole_figure_resolution)
plt.hist2d(fx, fy,
bins = (bins, bins),
weights = weights,
norm = LogNorm())
plt.tick_params(axis = 'both', labelsize = fontsize)
plt.annotate("Source", (xp, yp),
size = 13, color = 'black', fontweight = 'bold')
obj_circle = plt.Circle((xp, yp), 100, color = 'k', fill = False)
plt.gcf().gca().add_artist(obj_circle)
if background == 'manual':
plt.annotate('Background', (x_bg, y_bg),
size = 13, color = 'black', fontweight = 'bold')
bg_circle = plt.Circle((x_bg, y_bg), 100, color = 'k', fill = False)
plt.gcf().gca().add_artist(bg_circle)
png_name = os.path.join(path_to_events_list, 'source_' + events_list + '.png')
plt.savefig(png_name, format = 'png', bbox_inches = 'tight')
plt.clf()
source_png = create_sub_image(xp, yp,
sub_fig_size,
radius,
'source_zoomed_',
fx, fy,
path_to_events_list,
events_list)
if background != None:
if background == 'auto':
print('\nThe estimated background CPS = {:.5f} +/-{:.5f}'.format(bg_CPS, bg_CPS_e))
if background == 'manual':
# To estimate Background CPS.
bg_CPS, bg_CPS_e = bg_estimate(fx, fy, time, photons, framecount_per_sec,
radius, x_bg, y_bg, sky_radius)
bg_png = create_sub_image(x_bg, y_bg,
sub_fig_size,
sky_radius,
'background_',
fx, fy,
path_to_events_list,
events_list)
print('\nThe estimated background CPS = {:.5f} +/-{:.5f}'.format(bg_CPS, bg_CPS_e))
print('Region selected for background estimate:\n* {}'.format(bg_png))
else:
bg_CPS, bg_CPS_e = 0, 0
# selecting events within a circular region.
mask = ((fx - xp) ** 2 + (fy - yp) ** 2) <= radius ** 2
T = time[mask]
W = weights[mask]
# Calculating number of bins.
time_width = time.max() - time.min()
nbin = time_width / bwidth
nbin = int(nbin)
unique_time = np.unique(time)
till_here = time.min() + (bwidth * nbin)
# Changing mission elapsed time in seconds to modified julian date.
T = met_to_mjd(T)
till_here = met_to_mjd(till_here)
time_start = met_to_mjd(time.min())
unique_time = met_to_mjd(unique_time)
# Binning stuff, plotting stuff.
plt.figure(figsize = (8, 5))
plt.title('bin = %ss, radius = %spx' %(bwidth, radius), fontsize = fontsize)
plt.xlabel("Time (Julian Date)", fontsize = fontsize)
plt.ylabel("Counts per second", fontsize = fontsize)
plt.tick_params(axis = 'both', labelsize = fontsize)
u_counts, u_bin_edges = np.histogram(unique_time, bins = nbin,
range = (time_start, till_here))
weighted_counts, bin_edges = np.histogram(T, bins = nbin,
range = (time_start, till_here),
weights = W)
counts, _ = np.histogram(T, bins = nbin,
range = (time_start, till_here))
bin_centres = (bin_edges[:-1] + bin_edges[1:]) / 2.
if np.array_equal(bin_edges, u_bin_edges) == True:
count_mask = counts != 0
else:
print('\nThis happens when bwidth is too small\n')
return
if np.sum(count_mask) != 0:
mcentres = bin_centres[count_mask]
weighted_mcounts = weighted_counts[count_mask]
mcounts = counts[count_mask]
frames_in_bin = u_counts[count_mask]
else:
        print('No counts inside the aperture!')
        return
CPF = weighted_mcounts / frames_in_bin
CPF_err = np.sqrt(mcounts) / frames_in_bin
# Background subtraction.
CPF = CPF - (bg_CPS / framecount_per_sec)
CPF_err = np.sqrt(CPF_err ** 2 + (bg_CPS_e / framecount_per_sec) ** 2)
CPF, CPF_err = apply_aperture_correction(CPF, CPF_err, radius, aperture_correction)
CPF, CPF_err = apply_saturation_correction(CPF, CPF_err, saturation_correction)
CPS = CPF * framecount_per_sec
CPS_err = CPF_err * framecount_per_sec
# Let us get on with the plot.
plt.scatter(mcentres, CPS)
plt.errorbar(mcentres, CPS, yerr = CPS_err, linestyle = "None")
# To make the plot look good.
empty_space = (till_here - time_start) / 25.0
plt.xlim(time_start - empty_space, till_here + empty_space)
#To write the array to output.
data_to_output = list(zip(mcentres, CPS, CPS_err))
output_prefix = 'curve_' + str(xp) + '_' + str(yp) + '_' + events_list
datname = os.path.join(path_to_events_list, output_prefix + '.dat')
np.savetxt(datname, data_to_output,
fmt = '%10.11f\t%.5e\t%.5e',
header = 'MJD\t\t\tCPS (bin=%ss)\tCPS_error' %bwidth)
figname = os.path.join(path_to_events_list, output_prefix + '.png')
plt.savefig(figname, format = 'png', bbox_inches = 'tight', dpi = 150)
print('\n-------------------------- curve --------------------------')
print('source: {}\n {}'.format(png_name, source_png))
print('data: {}'.format(datname))
print('plot: {}'.format(figname))
print("\nDone!\n")
plt.close('all')
# Function to convert CCDLAB XYFrac and XYInts to X, Y positions in 4k.
def CCDLAB_to_4k(Int, Frac):
coo_in_4k = ((Int + Frac) - 16) / 4.0
return coo_in_4k
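# Added illustration (not in the original module): CCDLAB integer/fraction
# values of (2064, 0.5) map to ((2064 + 0.5) - 16) / 4.0 = 512.125 in 4k coordinates.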
# Function to convert CCDLAB files to a compatible events list.
def process_ccdlab(output = None,
time_list = None,
XY_integers = None,
XY_fractions = None,
flat_list = None,
framecount_per_sec = framecount_per_sec):
"""Generate a Curvit compatible events list from CCDLAB files.
Parameters
----------
output : file path
The name of the output events list FITS file.
time_list : file path
The name of the CCDLAB time list FITS file
XY_integers : file path
The name of the CCDLAB XY integers FITS file
XY_fractions : file path
The name of the CCDLAB XY fractions FITS file
flat_list : file path
The name of the CCDLAB flat list FITS file
framecount_per_sec : float, optional
The framerate of the observation, with a default value of 28.7185
frames per second for 512 x 512 window mode.
The most accurate way to get the framerate would be to take the value
of (``1 / INT_TIME``).
``INT_TIME`` value can be found from the corresponding image header.
Approximate values of framerate for different window modes of UVIT
are given in the table below.
+---------------+---------------------+
| window mode | frames per second |
+===============+=====================+
| 512 x 512 | 28.7 |
+---------------+---------------------+
| 350 x 350 | 61 |
+---------------+---------------------+
| 300 x 300 | 82 |
+---------------+---------------------+
| 250 x 250 | 115 |
+---------------+---------------------+
| 200 x 200 | 180 |
+---------------+---------------------+
| 150 x 150 | 300 |
+---------------+---------------------+
| 100 x 100 | 640 |
+---------------+---------------------+
Note
----
It is essential to set the correct value of the framerate.
Most UVIT observations are carried out in 512 x 512 window mode.
Warning
-------
This function is new; please report if you find any bugs.
Example
--------
>>> import curvit
>>> process_ccdlab(output = 'output_events_list.fits',
... time_list = 'sample_TimeList.fits',
... XY_integers = 'sample_XYInts_List.fits',
... XY_fractions = 'sample_XYFrac_List.fits',
... flat_list = 'sample_FlatList.fits',
... framecount_per_sec = 28.7185)
The above script will generate a FITS table called ``output_events_list.fits``.
You may then use it as input to ``curve`` or ``makecurves``.
"""
time = fits.open(time_list)[0].data / 1000
XYFrac = fits.open(XY_fractions)[0].data
XYInts = fits.open(XY_integers)[0].data
weight = fits.open(flat_list)[0].data
photons = weight * framecount_per_sec
fx = CCDLAB_to_4k(XYInts[:,0], XYFrac[:,0])
fy = CCDLAB_to_4k(XYInts[:,1], XYFrac[:,1])
col1 = fits.Column(name = 'MJD_L2', format = 'D', array = time)
col2 = fits.Column(name = 'Fx', format = 'D', array = fx)
col3 = fits.Column(name = 'Fy', format = 'D', array = fy)
col4 = fits.Column(name = 'EFFECTIVE_NUM_PHOTONS', format = 'D', array = photons)
cols = fits.ColDefs([col1, col2, col3, col4])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.writeto(output, overwrite = True)
return
def makefits(events_list = events_list,
framecount_per_sec = framecount_per_sec):
"""Create a quick look FITS image from the input events list.
Parameters
----------
events_list : file path
The name of the events list FITS file.
framecount_per_sec : float, optional
The framerate of the observation, with a default value of 28.7185
frames per second for 512 x 512 window mode.
The most accurate way to get the framerate would be to take the value
of (``1 / INT_TIME``).
``INT_TIME`` value can be found from the corresponding image header.
Approximate values of framerate for different window modes of UVIT
are given in the table below.
+---------------+---------------------+
| window mode | frames per second |
+===============+=====================+
| 512 x 512 | 28.7 |
+---------------+---------------------+
| 350 x 350 | 61 |
+---------------+---------------------+
| 300 x 300 | 82 |
+---------------+---------------------+
| 250 x 250 | 115 |
+---------------+---------------------+
| 200 x 200 | 180 |
+---------------+---------------------+
| 150 x 150 | 300 |
+---------------+---------------------+
| 100 x 100 | 640 |
+---------------+---------------------+
Warning
-------
If you plan to use the generated FITS image for science,
make sure to give the proper framerate value.
Example
--------
>>> import curvit
>>> curvit.makefits('test_events_list.fits', 28.7185)
The above script will generate a FITS image called ``test_events_list_quick_look.fits``.
You may open it in software such as DS9 to view the image.
"""
time, fx, fy, photons = read_columns(events_list)
weights = photons / framecount_per_sec
bins = np.arange(0, 4801)
ndarray, yedges, xedges = np.histogram2d(fy, fx, bins = (bins, bins), weights = weights)
fits_name = events_list.replace('.fits', '_quick_look.fits')
hdu = fits.PrimaryHDU(data = ndarray)
hdu.writeto(fits_name, overwrite = True)
|
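apply_saturation_correction in the curvit module above corrects the measured counts per frame for pile-up. A small numerical sketch of the same arithmetic (the 0.1 counts-per-frame input is made up for illustration; this is not curvit code):
import numpy as np

# The correction applied for a hypothetical measured rate of 0.1 counts per frame,
# well below the 0.6 counts-per-frame limit checked in the function.
cpf = np.array([0.1])
icpf = -np.log(1 - cpf)                    # estimated incident CPF from the measured CPF
icorr = icpf - cpf                          # raw correction term
rcorr = icorr * (0.89 - 0.30 * icorr ** 2)  # empirically scaled correction
print(cpf + rcorr)                          # corrected CPF, ~0.1048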
[
"astropy.convolution.Gaussian2DKernel",
"astropy.convolution.convolve",
"matplotlib.pyplot.title",
"numpy.isin",
"numpy.sum",
"numpy.abs",
"matplotlib.pyplot.clf",
"astropy.stats.sigma_clipped_stats",
"astropy.io.fits.ColDefs",
"astropy.io.fits.PrimaryHDU",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.histogram",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.tick_params",
"scipy.interpolate.interp1d",
"os.path.join",
"numpy.round",
"numpy.unique",
"astropy.stats.sigma_clip",
"numpy.meshgrid",
"numpy.std",
"numpy.histogram2d",
"matplotlib.pyplot.close",
"numpy.logical_not",
"numpy.savetxt",
"ntpath.split",
"astropy.io.fits.Column",
"matplotlib.pyplot.errorbar",
"photutils.CircularAperture",
"matplotlib.use",
"astropy.io.fits.open",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlim",
"numpy.log",
"matplotlib.pyplot.annotate",
"astropy.io.fits.BinTableHDU.from_columns",
"matplotlib.pyplot.scatter",
"random.choice",
"numpy.array",
"scipy.spatial.KDTree",
"photutils.DAOStarFinder",
"numpy.array_equal",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((725, 746), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (739, 746), False, 'import matplotlib\n'), ((3405, 3479), 'numpy.array', 'np.array', (['[1.5, 2, 2.5, 3, 4, 5, 7, 9, 12, 15, 20, 30, 40, 50, 70, 80, 95]'], {}), '([1.5, 2, 2.5, 3, 4, 5, 7, 9, 12, 15, 20, 30, 40, 50, 70, 80, 95])\n', (3413, 3479), True, 'import numpy as np\n'), ((3590, 3707), 'numpy.array', 'np.array', (['[29.9, 42.0, 52.0, 59.3, 68.8, 74.5, 81.3, 85.1, 89.3, 92.1, 95.2, 97.6, \n 98.4, 98.8, 99.4, 99.6, 100.0]'], {}), '([29.9, 42.0, 52.0, 59.3, 68.8, 74.5, 81.3, 85.1, 89.3, 92.1, 95.2,\n 97.6, 98.4, 98.8, 99.4, 99.6, 100.0])\n', (3598, 3707), True, 'import numpy as np\n'), ((3842, 3959), 'numpy.array', 'np.array', (['[28.1, 40.7, 51.1, 59.1, 68.9, 74.6, 81.4, 85.0, 88.6, 91.3, 94.5, 96.9, \n 97.7, 98.3, 99.1, 99.5, 100.0]'], {}), '([28.1, 40.7, 51.1, 59.1, 68.9, 74.6, 81.4, 85.0, 88.6, 91.3, 94.5,\n 96.9, 97.7, 98.3, 99.1, 99.5, 100.0])\n', (3850, 3959), True, 'import numpy as np\n'), ((4241, 4289), 'scipy.interpolate.interp1d', 'interp1d', (['radius_pixels', 'fuv_ratio'], {'kind': '"""cubic"""'}), "(radius_pixels, fuv_ratio, kind='cubic')\n", (4249, 4289), False, 'from scipy.interpolate import interp1d\n'), ((4313, 4361), 'scipy.interpolate.interp1d', 'interp1d', (['radius_pixels', 'nuv_ratio'], {'kind': '"""cubic"""'}), "(radius_pixels, nuv_ratio, kind='cubic')\n", (4321, 4361), False, 'from scipy.interpolate import interp1d\n'), ((4504, 4526), 'astropy.io.fits.open', 'fits.open', (['events_list'], {}), '(events_list)\n', (4513, 4526), False, 'from astropy.io import fits\n'), ((6842, 6864), 'numpy.arange', 'np.arange', (['(0)', '(4801)', '(16)'], {}), '(0, 4801, 16)\n', (6851, 6864), True, 'import numpy as np\n'), ((6919, 6977), 'numpy.histogram2d', 'np.histogram2d', (['fx', 'fy'], {'bins': '(bins, bins)', 'weights': 'weights'}), '(fx, fy, bins=(bins, bins), weights=weights)\n', (6933, 6977), True, 'import numpy as np\n'), ((7315, 7360), 'numpy.meshgrid', 'np.meshgrid', (['lowres_ycentres', 'lowres_xcentres'], {}), '(lowres_ycentres, lowres_xcentres)\n', (7326, 7360), True, 'import numpy as np\n'), ((7667, 7720), 'astropy.stats.sigma_clip', 'sigma_clip', (['polished_array[:, 0]'], {'sigma': '(3)', 'maxiters': '(5)'}), '(polished_array[:, 0], sigma=3, maxiters=5)\n', (7677, 7720), False, 'from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma, sigma_clip\n'), ((7739, 7767), 'numpy.logical_not', 'np.logical_not', (['bg_mask.mask'], {}), '(bg_mask.mask)\n', (7753, 7767), True, 'import numpy as np\n'), ((8205, 8228), 'numpy.array', 'np.array', (['bg_CPS_sample'], {}), '(bg_CPS_sample)\n', (8213, 8228), True, 'import numpy as np\n'), ((8251, 8276), 'numpy.array', 'np.array', (['bg_CPS_e_sample'], {}), '(bg_CPS_e_sample)\n', (8259, 8276), True, 'import numpy as np\n'), ((8295, 8341), 'astropy.stats.sigma_clip', 'sigma_clip', (['bg_CPS_sample'], {'sigma': '(3)', 'maxiters': '(5)'}), '(bg_CPS_sample, sigma=3, maxiters=5)\n', (8305, 8341), False, 'from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma, sigma_clip\n'), ((8364, 8396), 'numpy.logical_not', 'np.logical_not', (['bg_CPS_mask.mask'], {}), '(bg_CPS_mask.mask)\n', (8378, 8396), True, 'import numpy as np\n'), ((8410, 8445), 'numpy.mean', 'np.mean', (['bg_CPS_sample[bg_CPS_mask]'], {}), '(bg_CPS_sample[bg_CPS_mask])\n', (8417, 8445), True, 'import numpy as np\n'), ((8461, 8498), 'numpy.mean', 'np.mean', (['bg_CPS_e_sample[bg_CPS_mask]'], {}), '(bg_CPS_e_sample[bg_CPS_mask])\n', (8468, 8498), True, 'import numpy 
as np\n'), ((9095, 9110), 'numpy.unique', 'np.unique', (['time'], {}), '(time)\n', (9104, 9110), True, 'import numpy as np\n'), ((9777, 9835), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(pos_x, pos_y)', 'cir_rad'], {'color': '"""k"""', 'fill': '(False)'}), "((pos_x, pos_y), cir_rad, color='k', fill=False)\n", (9787, 9835), True, 'import matplotlib.pyplot as plt\n'), ((10005, 10071), 'os.path.join', 'os.path.join', (['path_to_events_list', "(sub_name + events_list + '.png')"], {}), "(path_to_events_list, sub_name + events_list + '.png')\n", (10017, 10071), False, 'import os\n'), ((10076, 10139), 'matplotlib.pyplot.savefig', 'plt.savefig', (['source_png_name'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(source_png_name, format='png', bbox_inches='tight')\n", (10087, 10139), True, 'import matplotlib.pyplot as plt\n'), ((10148, 10157), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (10155, 10157), True, 'import matplotlib.pyplot as plt\n'), ((10339, 10392), 'astropy.convolution.Gaussian2DKernel', 'Gaussian2DKernel', ([], {'x_stddev': '(4 * gaussian_fwhm_to_sigma)'}), '(x_stddev=4 * gaussian_fwhm_to_sigma)\n', (10355, 10392), False, 'from astropy.convolution import Gaussian2DKernel, convolve\n'), ((10411, 10456), 'photutils.CircularAperture', 'CircularAperture', (['(2400, 2400)'], {'r': 'mask_radius'}), '((2400, 2400), r=mask_radius)\n', (10427, 10456), False, 'from photutils import DAOStarFinder, CircularAperture\n'), ((10610, 10628), 'numpy.arange', 'np.arange', (['(0)', '(4801)'], {}), '(0, 4801)\n', (10619, 10628), True, 'import numpy as np\n'), ((10659, 10717), 'numpy.histogram2d', 'np.histogram2d', (['fy', 'fx'], {'bins': '(bins, bins)', 'weights': 'weights'}), '(fy, fx, bins=(bins, bins), weights=weights)\n', (10673, 10717), True, 'import numpy as np\n'), ((10770, 10818), 'astropy.stats.sigma_clipped_stats', 'sigma_clipped_stats', (['data'], {'sigma': '(5.0)', 'maxiters': '(1)'}), '(data, sigma=5.0, maxiters=1)\n', (10789, 10818), False, 'from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma, sigma_clip\n'), ((10950, 10972), 'astropy.convolution.convolve', 'convolve', (['data', 'kernel'], {}), '(data, kernel)\n', (10958, 10972), False, 'from astropy.convolution import Gaussian2DKernel, convolve\n'), ((10987, 11058), 'photutils.DAOStarFinder', 'DAOStarFinder', ([], {'fwhm': '(3.0)', 'threshold': '(threshold * std)', 'exclude_border': '(True)'}), '(fwhm=3.0, threshold=threshold * std, exclude_border=True)\n', (11000, 11058), False, 'from photutils import DAOStarFinder, CircularAperture\n'), ((11214, 11229), 'numpy.round', 'np.round', (['uA', '(2)'], {}), '(uA, 2)\n', (11222, 11229), True, 'import numpy as np\n'), ((12436, 12451), 'numpy.unique', 'np.unique', (['time'], {}), '(time)\n', (12445, 12451), True, 'import numpy as np\n'), ((19825, 19850), 'ntpath.split', 'ntpath.split', (['events_list'], {}), '(events_list)\n', (19837, 19850), False, 'import ntpath\n'), ((20197, 20265), 'os.path.join', 'os.path.join', (['path_to_events_list', "('sources_' + events_list + '.coo')"], {}), "(path_to_events_list, 'sources_' + events_list + '.coo')\n", (20209, 20265), False, 'import os\n'), ((20269, 20313), 'numpy.savetxt', 'np.savetxt', (['coo_file', 'uA'], {'fmt': '"""%4.2f\t%4.2f"""'}), "(coo_file, uA, fmt='%4.2f\\t%4.2f')\n", (20279, 20313), True, 'import numpy as np\n'), ((20451, 20481), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10.5, 10)'}), '(figsize=(10.5, 10))\n', (20461, 20481), True, 'import matplotlib.pyplot as plt\n'), ((20763, 
20813), 'numpy.arange', 'np.arange', (['(0)', '(4801)', '(4096 / whole_figure_resolution)'], {}), '(0, 4801, 4096 / whole_figure_resolution)\n', (20772, 20813), True, 'import numpy as np\n'), ((20969, 21017), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'labelsize': 'fontsize'}), "(axis='both', labelsize=fontsize)\n", (20984, 21017), True, 'import matplotlib.pyplot as plt\n'), ((21559, 21627), 'os.path.join', 'os.path.join', (['path_to_events_list', "('sources_' + events_list + '.png')"], {}), "(path_to_events_list, 'sources_' + events_list + '.png')\n", (21571, 21627), False, 'import os\n'), ((21632, 21688), 'matplotlib.pyplot.savefig', 'plt.savefig', (['png_name'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(png_name, format='png', bbox_inches='tight')\n", (21643, 21688), True, 'import matplotlib.pyplot as plt\n'), ((21698, 21707), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (21705, 21707), True, 'import matplotlib.pyplot as plt\n'), ((22921, 22936), 'numpy.unique', 'np.unique', (['time'], {}), '(time)\n', (22930, 22936), True, 'import numpy as np\n'), ((23260, 23327), 'numpy.histogram', 'np.histogram', (['unique_time'], {'bins': 'nbin', 'range': '(time_start, till_here)'}), '(unique_time, bins=nbin, range=(time_start, till_here))\n', (23272, 23327), True, 'import numpy as np\n'), ((23501, 23527), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (23511, 23527), True, 'import matplotlib.pyplot as plt\n'), ((26569, 26585), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (26578, 26585), True, 'import matplotlib.pyplot as plt\n'), ((31762, 31787), 'ntpath.split', 'ntpath.split', (['events_list'], {}), '(events_list)\n', (31774, 31787), False, 'import ntpath\n'), ((31887, 31917), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10.5, 10)'}), '(figsize=(10.5, 10))\n', (31897, 31917), True, 'import matplotlib.pyplot as plt\n'), ((32200, 32250), 'numpy.arange', 'np.arange', (['(0)', '(4801)', '(4096 / whole_figure_resolution)'], {}), '(0, 4801, 4096 / whole_figure_resolution)\n', (32209, 32250), True, 'import numpy as np\n'), ((32387, 32435), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'labelsize': 'fontsize'}), "(axis='both', labelsize=fontsize)\n", (32402, 32435), True, 'import matplotlib.pyplot as plt\n'), ((32445, 32520), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""Source"""', '(xp, yp)'], {'size': '(13)', 'color': '"""black"""', 'fontweight': '"""bold"""'}), "('Source', (xp, yp), size=13, color='black', fontweight='bold')\n", (32457, 32520), True, 'import matplotlib.pyplot as plt\n'), ((32563, 32611), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(xp, yp)', '(100)'], {'color': '"""k"""', 'fill': '(False)'}), "((xp, yp), 100, color='k', fill=False)\n", (32573, 32611), True, 'import matplotlib.pyplot as plt\n'), ((32958, 33025), 'os.path.join', 'os.path.join', (['path_to_events_list', "('source_' + events_list + '.png')"], {}), "(path_to_events_list, 'source_' + events_list + '.png')\n", (32970, 33025), False, 'import os\n'), ((33030, 33086), 'matplotlib.pyplot.savefig', 'plt.savefig', (['png_name'], {'format': '"""png"""', 'bbox_inches': '"""tight"""'}), "(png_name, format='png', bbox_inches='tight')\n", (33041, 33086), True, 'import matplotlib.pyplot as plt\n'), ((33095, 33104), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (33102, 33104), True, 'import matplotlib.pyplot as plt\n'), ((34748, 34763), 
'numpy.unique', 'np.unique', (['time'], {}), '(time)\n', (34757, 34763), True, 'import numpy as np\n'), ((35066, 35092), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (35076, 35092), True, 'import matplotlib.pyplot as plt\n'), ((35099, 35174), 'matplotlib.pyplot.title', 'plt.title', (["('bin = %ss, radius = %spx' % (bwidth, radius))"], {'fontsize': 'fontsize'}), "('bin = %ss, radius = %spx' % (bwidth, radius), fontsize=fontsize)\n", (35108, 35174), True, 'import matplotlib.pyplot as plt\n'), ((35180, 35231), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Julian Date)"""'], {'fontsize': 'fontsize'}), "('Time (Julian Date)', fontsize=fontsize)\n", (35190, 35231), True, 'import matplotlib.pyplot as plt\n'), ((35238, 35288), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts per second"""'], {'fontsize': 'fontsize'}), "('Counts per second', fontsize=fontsize)\n", (35248, 35288), True, 'import matplotlib.pyplot as plt\n'), ((35295, 35343), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'labelsize': 'fontsize'}), "(axis='both', labelsize=fontsize)\n", (35310, 35343), True, 'import matplotlib.pyplot as plt\n'), ((35377, 35444), 'numpy.histogram', 'np.histogram', (['unique_time'], {'bins': 'nbin', 'range': '(time_start, till_here)'}), '(unique_time, bins=nbin, range=(time_start, till_here))\n', (35389, 35444), True, 'import numpy as np\n'), ((35524, 35592), 'numpy.histogram', 'np.histogram', (['T'], {'bins': 'nbin', 'range': '(time_start, till_here)', 'weights': 'W'}), '(T, bins=nbin, range=(time_start, till_here), weights=W)\n', (35536, 35592), True, 'import numpy as np\n'), ((35713, 35770), 'numpy.histogram', 'np.histogram', (['T'], {'bins': 'nbin', 'range': '(time_start, till_here)'}), '(T, bins=nbin, range=(time_start, till_here))\n', (35725, 35770), True, 'import numpy as np\n'), ((36493, 36553), 'numpy.sqrt', 'np.sqrt', (['(CPF_err ** 2 + (bg_CPS_e / framecount_per_sec) ** 2)'], {}), '(CPF_err ** 2 + (bg_CPS_e / framecount_per_sec) ** 2)\n', (36500, 36553), True, 'import numpy as np\n'), ((36855, 36881), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mcentres', 'CPS'], {}), '(mcentres, CPS)\n', (36866, 36881), True, 'import matplotlib.pyplot as plt\n'), ((36886, 36945), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['mcentres', 'CPS'], {'yerr': 'CPS_err', 'linestyle': '"""None"""'}), "(mcentres, CPS, yerr=CPS_err, linestyle='None')\n", (36898, 36945), True, 'import matplotlib.pyplot as plt\n'), ((37038, 37097), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(time_start - empty_space)', '(till_here + empty_space)'], {}), '(time_start - empty_space, till_here + empty_space)\n', (37046, 37097), True, 'import matplotlib.pyplot as plt\n'), ((37278, 37335), 'os.path.join', 'os.path.join', (['path_to_events_list', "(output_prefix + '.dat')"], {}), "(path_to_events_list, output_prefix + '.dat')\n", (37290, 37335), False, 'import os\n'), ((37340, 37460), 'numpy.savetxt', 'np.savetxt', (['datname', 'data_to_output'], {'fmt': '"""%10.11f\t%.5e\t%.5e"""', 'header': "('MJD\\t\\t\\tCPS (bin=%ss)\\tCPS_error' % bwidth)"}), "(datname, data_to_output, fmt='%10.11f\\t%.5e\\t%.5e', header=\n 'MJD\\t\\t\\tCPS (bin=%ss)\\tCPS_error' % bwidth)\n", (37350, 37460), True, 'import numpy as np\n'), ((37504, 37561), 'os.path.join', 'os.path.join', (['path_to_events_list', "(output_prefix + '.png')"], {}), "(path_to_events_list, output_prefix + '.png')\n", (37516, 37561), False, 'import os\n'), ((37566, 37630), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {'format': '"""png"""', 'bbox_inches': '"""tight"""', 'dpi': '(150)'}), "(figname, format='png', bbox_inches='tight', dpi=150)\n", (37577, 37630), True, 'import matplotlib.pyplot as plt\n'), ((37882, 37898), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (37891, 37898), True, 'import matplotlib.pyplot as plt\n'), ((41421, 41471), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""MJD_L2"""', 'format': '"""D"""', 'array': 'time'}), "(name='MJD_L2', format='D', array=time)\n", (41432, 41471), False, 'from astropy.io import fits\n'), ((41489, 41533), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""Fx"""', 'format': '"""D"""', 'array': 'fx'}), "(name='Fx', format='D', array=fx)\n", (41500, 41533), False, 'from astropy.io import fits\n'), ((41551, 41595), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""Fy"""', 'format': '"""D"""', 'array': 'fy'}), "(name='Fy', format='D', array=fy)\n", (41562, 41595), False, 'from astropy.io import fits\n'), ((41613, 41681), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""EFFECTIVE_NUM_PHOTONS"""', 'format': '"""D"""', 'array': 'photons'}), "(name='EFFECTIVE_NUM_PHOTONS', format='D', array=photons)\n", (41624, 41681), False, 'from astropy.io import fits\n'), ((41700, 41738), 'astropy.io.fits.ColDefs', 'fits.ColDefs', (['[col1, col2, col3, col4]'], {}), '([col1, col2, col3, col4])\n', (41712, 41738), False, 'from astropy.io import fits\n'), ((41751, 41786), 'astropy.io.fits.BinTableHDU.from_columns', 'fits.BinTableHDU.from_columns', (['cols'], {}), '(cols)\n', (41780, 41786), False, 'from astropy.io import fits\n'), ((43947, 43965), 'numpy.arange', 'np.arange', (['(0)', '(4801)'], {}), '(0, 4801)\n', (43956, 43965), True, 'import numpy as np\n'), ((43996, 44054), 'numpy.histogram2d', 'np.histogram2d', (['fy', 'fx'], {'bins': '(bins, bins)', 'weights': 'weights'}), '(fy, fx, bins=(bins, bins), weights=weights)\n', (44010, 44054), True, 'import numpy as np\n'), ((44136, 44165), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'ndarray'}), '(data=ndarray)\n', (44151, 44165), False, 'from astropy.io import fits\n'), ((7577, 7616), 'numpy.array', 'np.array', (['[flat_counts, x_mesh, y_mesh]'], {}), '([flat_counts, x_mesh, y_mesh])\n', (7585, 7616), True, 'import numpy as np\n'), ((7906, 7944), 'random.choice', 'random.choice', (['polished_array[bg_mask]'], {}), '(polished_array[bg_mask])\n', (7919, 7944), False, 'import random\n'), ((10889, 10902), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (10896, 10902), True, 'import numpy as np\n'), ((10917, 10929), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (10923, 10929), True, 'import numpy as np\n'), ((11138, 11202), 'numpy.array', 'np.array', (["[sources['xcentroid'].data, sources['ycentroid'].data]"], {}), "([sources['xcentroid'].data, sources['ycentroid'].data])\n", (11146, 11202), True, 'import numpy as np\n'), ((11854, 11866), 'numpy.array', 'np.array', (['uA'], {}), '(uA)\n', (11862, 11866), True, 'import numpy as np\n'), ((11988, 12023), 'numpy.isin', 'np.isin', (['uA', '[0, 4800]'], {'invert': '(True)'}), '(uA, [0, 4800], invert=True)\n', (11995, 12023), True, 'import numpy as np\n'), ((12509, 12518), 'numpy.sum', 'np.sum', (['W'], {}), '(W)\n', (12515, 12518), True, 'import numpy as np\n'), ((13662, 13701), 'numpy.sqrt', 'np.sqrt', (['(ICPF5_err ** 2 + CPF5_err ** 2)'], {}), '(ICPF5_err ** 2 + CPF5_err ** 2)\n', (13669, 13701), True, 'import numpy as np\n'), 
((13920, 13959), 'numpy.sqrt', 'np.sqrt', (['(CPF5_err ** 2 + RCORR_err ** 2)'], {}), '(CPF5_err ** 2 + RCORR_err ** 2)\n', (13927, 13959), True, 'import numpy as np\n'), ((21048, 21116), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""Source"""', 'u'], {'size': '(13)', 'color': '"""black"""', 'fontweight': '"""bold"""'}), "('Source', u, size=13, color='black', fontweight='bold')\n", (21060, 21116), True, 'import matplotlib.pyplot as plt\n'), ((21167, 21208), 'matplotlib.pyplot.Circle', 'plt.Circle', (['u', '(100)'], {'color': '"""k"""', 'fill': '(False)'}), "(u, 100, color='k', fill=False)\n", (21177, 21208), True, 'import matplotlib.pyplot as plt\n'), ((21300, 21388), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""Background"""', '(x_bg, y_bg)'], {'size': '(13)', 'color': '"""black"""', 'fontweight': '"""bold"""'}), "('Background', (x_bg, y_bg), size=13, color='black', fontweight\n ='bold')\n", (21312, 21388), True, 'import matplotlib.pyplot as plt\n'), ((21432, 21484), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(x_bg, y_bg)', '(100)'], {'color': '"""k"""', 'fill': '(False)'}), "((x_bg, y_bg), 100, color='k', fill=False)\n", (21442, 21484), True, 'import matplotlib.pyplot as plt\n'), ((23801, 23904), 'matplotlib.pyplot.title', 'plt.title', (["('X = %s, Y = %s, bin = %ss, radius = %spx' % (xp, yp, bwidth, radius))"], {'fontsize': 'fontsize'}), "('X = %s, Y = %s, bin = %ss, radius = %spx' % (xp, yp, bwidth,\n radius), fontsize=fontsize)\n", (23810, 23904), True, 'import matplotlib.pyplot as plt\n'), ((23931, 23982), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (Julian Date)"""'], {'fontsize': 'fontsize'}), "('Time (Julian Date)', fontsize=fontsize)\n", (23941, 23982), True, 'import matplotlib.pyplot as plt\n'), ((23993, 24043), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts per second"""'], {'fontsize': 'fontsize'}), "('Counts per second', fontsize=fontsize)\n", (24003, 24043), True, 'import matplotlib.pyplot as plt\n'), ((24054, 24102), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'labelsize': 'fontsize'}), "(axis='both', labelsize=fontsize)\n", (24069, 24102), True, 'import matplotlib.pyplot as plt\n'), ((24145, 24213), 'numpy.histogram', 'np.histogram', (['T'], {'bins': 'nbin', 'range': '(time_start, till_here)', 'weights': 'W'}), '(T, bins=nbin, range=(time_start, till_here), weights=W)\n', (24157, 24213), True, 'import numpy as np\n'), ((24346, 24403), 'numpy.histogram', 'np.histogram', (['T'], {'bins': 'nbin', 'range': '(time_start, till_here)'}), '(T, bins=nbin, range=(time_start, till_here))\n', (24358, 24403), True, 'import numpy as np\n'), ((25233, 25293), 'numpy.sqrt', 'np.sqrt', (['(CPF_err ** 2 + (bg_CPS_e / framecount_per_sec) ** 2)'], {}), '(CPF_err ** 2 + (bg_CPS_e / framecount_per_sec) ** 2)\n', (25240, 25293), True, 'import numpy as np\n'), ((25583, 25609), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mcentres', 'CPS'], {}), '(mcentres, CPS)\n', (25594, 25609), True, 'import matplotlib.pyplot as plt\n'), ((25618, 25677), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['mcentres', 'CPS'], {'yerr': 'CPS_err', 'linestyle': '"""None"""'}), "(mcentres, CPS, yerr=CPS_err, linestyle='None')\n", (25630, 25677), True, 'import matplotlib.pyplot as plt\n'), ((25744, 25803), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(time_start - empty_space)', '(till_here + empty_space)'], {}), '(time_start - empty_space, till_here + empty_space)\n', (25752, 25803), True, 'import matplotlib.pyplot as plt\n'), ((26013, 26070), 
'os.path.join', 'os.path.join', (['path_to_events_list', "(output_prefix + '.dat')"], {}), "(path_to_events_list, output_prefix + '.dat')\n", (26025, 26070), False, 'import os\n'), ((26079, 26199), 'numpy.savetxt', 'np.savetxt', (['datname', 'data_to_output'], {'fmt': '"""%10.11f\t%.5e\t%.5e"""', 'header': "('MJD\\t\\t\\tCPS (bin=%ss)\\tCPS_error' % bwidth)"}), "(datname, data_to_output, fmt='%10.11f\\t%.5e\\t%.5e', header=\n 'MJD\\t\\t\\tCPS (bin=%ss)\\tCPS_error' % bwidth)\n", (26089, 26199), True, 'import numpy as np\n'), ((26339, 26396), 'os.path.join', 'os.path.join', (['path_to_events_list', "(output_prefix + '.png')"], {}), "(path_to_events_list, output_prefix + '.png')\n", (26351, 26396), False, 'import os\n'), ((26405, 26469), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {'format': '"""png"""', 'bbox_inches': '"""tight"""', 'dpi': '(150)'}), "(figname, format='png', bbox_inches='tight', dpi=150)\n", (26416, 26469), True, 'import matplotlib.pyplot as plt\n'), ((26531, 26540), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (26538, 26540), True, 'import matplotlib.pyplot as plt\n'), ((32699, 32787), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""Background"""', '(x_bg, y_bg)'], {'size': '(13)', 'color': '"""black"""', 'fontweight': '"""bold"""'}), "('Background', (x_bg, y_bg), size=13, color='black', fontweight\n ='bold')\n", (32711, 32787), True, 'import matplotlib.pyplot as plt\n'), ((32831, 32883), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(x_bg, y_bg)', '(100)'], {'color': '"""k"""', 'fill': '(False)'}), "((x_bg, y_bg), 100, color='k', fill=False)\n", (32841, 32883), True, 'import matplotlib.pyplot as plt\n'), ((35870, 35908), 'numpy.array_equal', 'np.array_equal', (['bin_edges', 'u_bin_edges'], {}), '(bin_edges, u_bin_edges)\n', (35884, 35908), True, 'import numpy as np\n'), ((36043, 36061), 'numpy.sum', 'np.sum', (['count_mask'], {}), '(count_mask)\n', (36049, 36061), True, 'import numpy as np\n'), ((36365, 36381), 'numpy.sqrt', 'np.sqrt', (['mcounts'], {}), '(mcounts)\n', (36372, 36381), True, 'import numpy as np\n'), ((9620, 9638), 'numpy.abs', 'np.abs', (['(fx - pos_x)'], {}), '(fx - pos_x)\n', (9626, 9638), True, 'import numpy as np\n'), ((9678, 9696), 'numpy.abs', 'np.abs', (['(fy - pos_y)'], {}), '(fy - pos_y)\n', (9684, 9696), True, 'import numpy as np\n'), ((9929, 9938), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (9936, 9938), False, 'from matplotlib.colors import LogNorm\n'), ((13384, 13403), 'numpy.sum', 'np.sum', (['(CPF5 >= 0.6)'], {}), '(CPF5 >= 0.6)\n', (13390, 13403), True, 'import numpy as np\n'), ((13551, 13567), 'numpy.log', 'np.log', (['(1 - CPF5)'], {}), '(1 - CPF5)\n', (13557, 13567), True, 'import numpy as np\n'), ((13798, 13858), 'numpy.sqrt', 'np.sqrt', (['(ICORR_err ** 2 + (0.3 * 2 * ICORR * ICORR_err) ** 2)'], {}), '(ICORR_err ** 2 + (0.3 * 2 * ICORR * ICORR_err) ** 2)\n', (13805, 13858), True, 'import numpy as np\n'), ((20934, 20943), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (20941, 20943), False, 'from matplotlib.colors import LogNorm\n'), ((24515, 24553), 'numpy.array_equal', 'np.array_equal', (['bin_edges', 'u_bin_edges'], {}), '(bin_edges, u_bin_edges)\n', (24529, 24553), True, 'import numpy as np\n'), ((24708, 24726), 'numpy.sum', 'np.sum', (['count_mask'], {}), '(count_mask)\n', (24714, 24726), True, 'import numpy as np\n'), ((25089, 25105), 'numpy.sqrt', 'np.sqrt', (['mcounts'], {}), '(mcounts)\n', (25096, 25105), True, 'import numpy as np\n'), ((32371, 32380), 
'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (32378, 32380), False, 'from matplotlib.colors import LogNorm\n'), ((41148, 41171), 'astropy.io.fits.open', 'fits.open', (['XY_fractions'], {}), '(XY_fractions)\n', (41157, 41171), False, 'from astropy.io import fits\n'), ((41193, 41215), 'astropy.io.fits.open', 'fits.open', (['XY_integers'], {}), '(XY_integers)\n', (41202, 41215), False, 'from astropy.io import fits\n'), ((41237, 41257), 'astropy.io.fits.open', 'fits.open', (['flat_list'], {}), '(flat_list)\n', (41246, 41257), False, 'from astropy.io import fits\n'), ((8879, 8888), 'numpy.sum', 'np.sum', (['W'], {}), '(W)\n', (8885, 8888), True, 'import numpy as np\n'), ((41099, 41119), 'astropy.io.fits.open', 'fits.open', (['time_list'], {}), '(time_list)\n', (41108, 41119), False, 'from astropy.io import fits\n'), ((9944, 9953), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9951, 9953), True, 'import matplotlib.pyplot as plt\n'), ((32620, 32629), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (32627, 32629), True, 'import matplotlib.pyplot as plt\n'), ((21221, 21230), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (21228, 21230), True, 'import matplotlib.pyplot as plt\n'), ((21497, 21506), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (21504, 21506), True, 'import matplotlib.pyplot as plt\n'), ((32896, 32905), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (32903, 32905), True, 'import matplotlib.pyplot as plt\n'), ((11802, 11811), 'scipy.spatial.KDTree', 'KDTree', (['A'], {}), '(A)\n', (11808, 11811), False, 'from scipy.spatial import KDTree\n')]
|
import numpy as np
from typing import List, Any
import cv2
from skimage.exposure import rescale_intensity, adjust_sigmoid
from skimage.util import invert, img_as_float, img_as_ubyte
def fg_pts(mask: np.ndarray):
"""
:param mask: binary image, 2D numpy array
    :return: n * 2 numpy array of (row, column) coordinates
    Retrieves coordinates of points whose intensity is greater than 0.
"""
height, width = mask.shape
pts = [[i, j]
for i in range(height)
for j in range(width)
if mask[i, j] > 0]
return np.asarray(pts)
def get_angle(v1: List[int], v2: List[int]):
"""
:param v1: 2D vector
:param v2: 2D vector
    :return: the angle between v1 and v2 in degrees
"""
dot = np.dot(v1, v2)
norm = np.linalg.norm(v1) * np.linalg.norm(v2)
return np.degrees(np.arccos(dot / norm))
def sd_filter(image: np.ndarray, kernel: tuple) -> np.ndarray:
    """
    :param image: grayscale image, 2D numpy array
    :param kernel: 2-element tuple, the box-filter size
    :return: 2D numpy float array
    Calculate the local standard deviation of an image, based on a specific kernel size.
    """
    a = image.astype(np.float64)
    return cv2.sqrt(cv2.blur(a ** 2, kernel) - cv2.blur(a, kernel) ** 2)
def fill_hole(mask: np.ndarray) -> np.ndarray:
"""
:param mask: grayscale image, 2D numpy array
:return: binary image, 2D numpy array
    Find contours in a mask image and fill the outermost one.
"""
contour = cv2.findContours(mask, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)[0]
return cv2.drawContours(mask, contour, -1, 255, cv2.FILLED)
def saturation_rectified_intensity(image: np.ndarray) -> np.ndarray:
"""
:param image: BGR image, 3D numpy array
:return: grayscale image, 2D numpy array
    Convert a color image to grayscale, applying a sigmoid to weaken
    low-saturation pixels.
"""
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
saturation = img_as_float(image_hsv[:, :, 1])
intensity = img_as_float(image_hsv[:, :, 2])
adjust = adjust_sigmoid(saturation, 0.08, 25)
signal = invert(intensity)
image_out = invert(adjust * signal)
return img_as_ubyte(image_out)
def rescale_foreground(image: Any, mask: np.ndarray) -> Any:
"""
:param image: grayscale image, 2D numpy array
:param mask: binary image, 2D numpy array
:return: grayscale image, 2D numpy array
Linearly transform a grayscale image so that its intensity spans [0, 255].
"""
fg_intensity = image[mask > 0]
fg_range = np.min(fg_intensity), np.max(fg_intensity)
return rescale_intensity(image, fg_range)
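
# Minimal end-to-end sketch (not part of the original module). The input file name
# and the Otsu-threshold step are assumptions, used only to show how the helpers
# above can be chained on a BGR image.
if __name__ == '__main__':
    bgr = cv2.imread('sample_cells.png')           # hypothetical input image
    gray = saturation_rectified_intensity(bgr)      # colour -> saturation-weighted gray
    _, mask = cv2.threshold(gray, 0, 255,
                              cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    mask = fill_hole(mask)                          # fill the outermost contour
    stretched = rescale_foreground(gray, mask)      # stretch foreground to [0, 255]
    print(fg_pts(mask).shape)                       # (n, 2) foreground coordinates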
|
[
"skimage.exposure.adjust_sigmoid",
"cv2.cvtColor",
"skimage.util.img_as_ubyte",
"numpy.asarray",
"skimage.util.invert",
"skimage.exposure.rescale_intensity",
"numpy.arccos",
"cv2.blur",
"numpy.min",
"numpy.max",
"numpy.linalg.norm",
"cv2.drawContours",
"numpy.dot",
"skimage.util.img_as_float",
"cv2.findContours"
] |
[((533, 548), 'numpy.asarray', 'np.asarray', (['pts'], {}), '(pts)\n', (543, 548), True, 'import numpy as np\n'), ((718, 732), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (724, 732), True, 'import numpy as np\n'), ((1552, 1604), 'cv2.drawContours', 'cv2.drawContours', (['mask', 'contour', '(-1)', '(255)', 'cv2.FILLED'], {}), '(mask, contour, -1, 255, cv2.FILLED)\n', (1568, 1604), False, 'import cv2\n'), ((1891, 1929), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (1903, 1929), False, 'import cv2\n'), ((1947, 1979), 'skimage.util.img_as_float', 'img_as_float', (['image_hsv[:, :, 1]'], {}), '(image_hsv[:, :, 1])\n', (1959, 1979), False, 'from skimage.util import invert, img_as_float, img_as_ubyte\n'), ((1996, 2028), 'skimage.util.img_as_float', 'img_as_float', (['image_hsv[:, :, 2]'], {}), '(image_hsv[:, :, 2])\n', (2008, 2028), False, 'from skimage.util import invert, img_as_float, img_as_ubyte\n'), ((2042, 2078), 'skimage.exposure.adjust_sigmoid', 'adjust_sigmoid', (['saturation', '(0.08)', '(25)'], {}), '(saturation, 0.08, 25)\n', (2056, 2078), False, 'from skimage.exposure import rescale_intensity, adjust_sigmoid\n'), ((2092, 2109), 'skimage.util.invert', 'invert', (['intensity'], {}), '(intensity)\n', (2098, 2109), False, 'from skimage.util import invert, img_as_float, img_as_ubyte\n'), ((2126, 2149), 'skimage.util.invert', 'invert', (['(adjust * signal)'], {}), '(adjust * signal)\n', (2132, 2149), False, 'from skimage.util import invert, img_as_float, img_as_ubyte\n'), ((2161, 2184), 'skimage.util.img_as_ubyte', 'img_as_ubyte', (['image_out'], {}), '(image_out)\n', (2173, 2184), False, 'from skimage.util import invert, img_as_float, img_as_ubyte\n'), ((2588, 2622), 'skimage.exposure.rescale_intensity', 'rescale_intensity', (['image', 'fg_range'], {}), '(image, fg_range)\n', (2605, 2622), False, 'from skimage.exposure import rescale_intensity, adjust_sigmoid\n'), ((744, 762), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (758, 762), True, 'import numpy as np\n'), ((765, 783), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (779, 783), True, 'import numpy as np\n'), ((806, 827), 'numpy.arccos', 'np.arccos', (['(dot / norm)'], {}), '(dot / norm)\n', (815, 827), True, 'import numpy as np\n'), ((1442, 1506), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (1458, 1506), False, 'import cv2\n'), ((2534, 2554), 'numpy.min', 'np.min', (['fg_intensity'], {}), '(fg_intensity)\n', (2540, 2554), True, 'import numpy as np\n'), ((2556, 2576), 'numpy.max', 'np.max', (['fg_intensity'], {}), '(fg_intensity)\n', (2562, 2576), True, 'import numpy as np\n'), ((1159, 1183), 'cv2.blur', 'cv2.blur', (['(a ** 2)', 'kernel'], {}), '(a ** 2, kernel)\n', (1167, 1183), False, 'import cv2\n'), ((1186, 1205), 'cv2.blur', 'cv2.blur', (['a', 'kernel'], {}), '(a, kernel)\n', (1194, 1205), False, 'import cv2\n')]
|
import numpy as np
from lightfm.datasets import fetch_movielens
from lightfm import LightFM
data = fetch_movielens(min_rating = 4.0)
model = LightFM(loss = 'warp')
model.fit(data['train'], epochs=30, num_threads=2)
def sample_recommendation(model, data, user_ids):
n_users, n_items = data['train'].shape
for user_id in user_ids:
known_positives = data['item_labels'][data['train'].tocsr()
[user_id].indices]
scores = model.predict(user_id, np.arange(n_items))
top_items = data['item_labels'][np.argsort(-scores)]
print("User %s" % user_id)
print("Known positives:")
for x in known_positives[:3]:
print("%s" % x)
print("Recommended:")
for x in top_items[:3]:
print("%s" % x)
sample_recommendation(model, data, [3, 25, 451])
|
[
"numpy.argsort",
"lightfm.LightFM",
"lightfm.datasets.fetch_movielens",
"numpy.arange"
] |
[((100, 131), 'lightfm.datasets.fetch_movielens', 'fetch_movielens', ([], {'min_rating': '(4.0)'}), '(min_rating=4.0)\n', (115, 131), False, 'from lightfm.datasets import fetch_movielens\n'), ((143, 163), 'lightfm.LightFM', 'LightFM', ([], {'loss': '"""warp"""'}), "(loss='warp')\n", (150, 163), False, 'from lightfm import LightFM\n'), ((539, 557), 'numpy.arange', 'np.arange', (['n_items'], {}), '(n_items)\n', (548, 557), True, 'import numpy as np\n'), ((600, 619), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (610, 619), True, 'import numpy as np\n')]
|
import numpy as np
data = np.genfromtxt('jajka1.csv', delimiter=";", dtype='|U16')
data2 = np.array([[s.replace(',', '.') for s in line] for line in data])
# Mean price over all non-empty cells (rows 1-16 of the table, columns 1-8);
# empty cells are treated as 0 before averaging.
suma = 0
x = 0
for i in range(1, 17):
    for j in range(1, 9):
        if data2[i][j] == "":
            data2[i][j] = 0
        suma += data2[i][j].astype(float)
        x += 1
srednia = suma / x
print('Average : ', srednia)

# Highest and lowest non-zero price, together with the matching column header
# (data[0][j]) and row label (data[i][0]).
mini = 100
maxi = 0
m1 = ''
m2 = ''
s1 = ''
s2 = ''
for i in range(1, 17):
    for j in range(1, 9):
        if data2[i][j].astype(float) > maxi:
            maxi = data2[i][j].astype(float)
            m1 = data[0][j]
            s1 = data[i][0]
        if (data2[i][j].astype(float) < mini) and (data2[i][j].astype(float) != 0):
            mini = data2[i][j].astype(float)
            m2 = data[0][j]
            s2 = data[i][0]

# Assemble a 3 x 3 summary table (labels kept from the original data file).
x = np.array(['Miasto', '<NAME>', 'Ceny', m1, s1, maxi, m2, s2, mini])
x.resize(3, 3)
print(x)
|
[
"numpy.array",
"numpy.genfromtxt"
] |
[((27, 83), 'numpy.genfromtxt', 'np.genfromtxt', (['"""jajka1.csv"""'], {'delimiter': '""";"""', 'dtype': '"""|U16"""'}), "('jajka1.csv', delimiter=';', dtype='|U16')\n", (40, 83), True, 'import numpy as np\n'), ((842, 908), 'numpy.array', 'np.array', (["['Miasto', '<NAME>', 'Ceny', m1, s1, maxi, m2, s2, mini]"], {}), "(['Miasto', '<NAME>', 'Ceny', m1, s1, maxi, m2, s2, mini])\n", (850, 908), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parametric_utils."""
from absl.testing import absltest
import haiku as hk
import jax
import jax.numpy as jnp
from learned_optimization.tasks.parametric import parametric_utils
import numpy as onp
from numpy import testing
class ParametricUtilsTest(absltest.TestCase):
def test_SampleImageDataset(self):
key = jax.random.PRNGKey(0)
cfg = parametric_utils.SampleImageDataset.sample(key)
datasets = parametric_utils.SampleImageDataset.get_dataset(cfg, 8, (8, 8))
_ = datasets.train
def test_SampleActivation(self):
key = jax.random.PRNGKey(0)
cfg = parametric_utils.SampleActivation.sample(key)
act_fn = parametric_utils.SampleActivation.get_dynamic(cfg)
value = jax.jit(act_fn)(12.)
self.assertEqual(value.shape, ())
act_fn = parametric_utils.SampleActivation.get_static(cfg)
value2 = act_fn(12.)
self.assertEqual(value, value2)
def test_SampleInitializer(self):
key = jax.random.PRNGKey(0)
cfg = parametric_utils.SampleInitializer.sample(key)
def forward(cfg):
init = parametric_utils.SampleInitializer.get_dynamic(cfg)
param = hk.get_parameter('asdf', [2, 2], dtype=jnp.float32, init=init)
return param
init_fn, _ = hk.transform(forward)
val = jax.jit(init_fn)(key, cfg)
self.assertEqual(jax.tree_leaves(val)[0].shape, (2, 2))
def test_orth_init(self):
key = jax.random.PRNGKey(0)
init = parametric_utils.orth_init([16, 16], jnp.float32, key)
# Check that the initializer is orthogonal by checking if all the eval's
evals, unused_evecs = onp.linalg.eig(init)
testing.assert_allclose(onp.abs(evals), jnp.ones([16]), rtol=1e-6)
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"learned_optimization.tasks.parametric.parametric_utils.orth_init",
"numpy.abs",
"jax.jit",
"learned_optimization.tasks.parametric.parametric_utils.SampleInitializer.sample",
"learned_optimization.tasks.parametric.parametric_utils.SampleInitializer.get_dynamic",
"learned_optimization.tasks.parametric.parametric_utils.SampleImageDataset.sample",
"numpy.linalg.eig",
"haiku.transform",
"haiku.get_parameter",
"jax.random.PRNGKey",
"jax.tree_leaves",
"learned_optimization.tasks.parametric.parametric_utils.SampleActivation.sample",
"jax.numpy.ones",
"learned_optimization.tasks.parametric.parametric_utils.SampleActivation.get_static",
"learned_optimization.tasks.parametric.parametric_utils.SampleActivation.get_dynamic",
"learned_optimization.tasks.parametric.parametric_utils.SampleImageDataset.get_dataset"
] |
[((2289, 2304), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2302, 2304), False, 'from absl.testing import absltest\n'), ((923, 944), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (941, 944), False, 'import jax\n'), ((955, 1002), 'learned_optimization.tasks.parametric.parametric_utils.SampleImageDataset.sample', 'parametric_utils.SampleImageDataset.sample', (['key'], {}), '(key)\n', (997, 1002), False, 'from learned_optimization.tasks.parametric import parametric_utils\n'), ((1018, 1081), 'learned_optimization.tasks.parametric.parametric_utils.SampleImageDataset.get_dataset', 'parametric_utils.SampleImageDataset.get_dataset', (['cfg', '(8)', '(8, 8)'], {}), '(cfg, 8, (8, 8))\n', (1065, 1081), False, 'from learned_optimization.tasks.parametric import parametric_utils\n'), ((1151, 1172), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (1169, 1172), False, 'import jax\n'), ((1183, 1228), 'learned_optimization.tasks.parametric.parametric_utils.SampleActivation.sample', 'parametric_utils.SampleActivation.sample', (['key'], {}), '(key)\n', (1223, 1228), False, 'from learned_optimization.tasks.parametric import parametric_utils\n'), ((1242, 1292), 'learned_optimization.tasks.parametric.parametric_utils.SampleActivation.get_dynamic', 'parametric_utils.SampleActivation.get_dynamic', (['cfg'], {}), '(cfg)\n', (1287, 1292), False, 'from learned_optimization.tasks.parametric import parametric_utils\n'), ((1378, 1427), 'learned_optimization.tasks.parametric.parametric_utils.SampleActivation.get_static', 'parametric_utils.SampleActivation.get_static', (['cfg'], {}), '(cfg)\n', (1422, 1427), False, 'from learned_optimization.tasks.parametric import parametric_utils\n'), ((1536, 1557), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (1554, 1557), False, 'import jax\n'), ((1568, 1614), 'learned_optimization.tasks.parametric.parametric_utils.SampleInitializer.sample', 'parametric_utils.SampleInitializer.sample', (['key'], {}), '(key)\n', (1609, 1614), False, 'from learned_optimization.tasks.parametric import parametric_utils\n'), ((1817, 1838), 'haiku.transform', 'hk.transform', (['forward'], {}), '(forward)\n', (1829, 1838), True, 'import haiku as hk\n'), ((1975, 1996), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (1993, 1996), False, 'import jax\n'), ((2008, 2062), 'learned_optimization.tasks.parametric.parametric_utils.orth_init', 'parametric_utils.orth_init', (['[16, 16]', 'jnp.float32', 'key'], {}), '([16, 16], jnp.float32, key)\n', (2034, 2062), False, 'from learned_optimization.tasks.parametric import parametric_utils\n'), ((2166, 2186), 'numpy.linalg.eig', 'onp.linalg.eig', (['init'], {}), '(init)\n', (2180, 2186), True, 'import numpy as onp\n'), ((1305, 1320), 'jax.jit', 'jax.jit', (['act_fn'], {}), '(act_fn)\n', (1312, 1320), False, 'import jax\n'), ((1651, 1702), 'learned_optimization.tasks.parametric.parametric_utils.SampleInitializer.get_dynamic', 'parametric_utils.SampleInitializer.get_dynamic', (['cfg'], {}), '(cfg)\n', (1697, 1702), False, 'from learned_optimization.tasks.parametric import parametric_utils\n'), ((1717, 1779), 'haiku.get_parameter', 'hk.get_parameter', (['"""asdf"""', '[2, 2]'], {'dtype': 'jnp.float32', 'init': 'init'}), "('asdf', [2, 2], dtype=jnp.float32, init=init)\n", (1733, 1779), True, 'import haiku as hk\n'), ((1849, 1865), 'jax.jit', 'jax.jit', (['init_fn'], {}), '(init_fn)\n', (1856, 1865), False, 'import jax\n'), ((2215, 2229), 'numpy.abs', 'onp.abs', 
(['evals'], {}), '(evals)\n', (2222, 2229), True, 'import numpy as onp\n'), ((2231, 2245), 'jax.numpy.ones', 'jnp.ones', (['[16]'], {}), '([16])\n', (2239, 2245), True, 'import jax.numpy as jnp\n'), ((1897, 1917), 'jax.tree_leaves', 'jax.tree_leaves', (['val'], {}), '(val)\n', (1912, 1917), False, 'import jax\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
import numpy
import pkg_resources
from shutil import rmtree
from setuptools import setup, find_packages, Command
from distutils import util
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
from PyMieSim.Tools.utils import Print
Version = '0.3.5'
plateform = util.get_platform().replace("-", "_")
Print(msg=f' Platform: {plateform} \n Version: {Version}', title='PyMieSim')
requirementPath = os.path.join(os.path.dirname(__file__), 'requirements.txt')
here = os.path.abspath(os.path.dirname(__file__))
with open(requirementPath,'r') as requirements_txt:
REQUIRED = [ str(requirement) for requirement in pkg_resources.parse_requirements(requirements_txt) ]
class bdist_wheel(_bdist_wheel):
def finalize_options(self):
_bdist_wheel.finalize_options(self)
self.root_is_pure = False
class get_pybind11_include(object):
"""Defer numpy.get_include() until after numpy is installed."""
def __str__(self):
import pybind11
return pybind11.get_include()
class get_numpy_include(object):
"""Defer numpy.get_include() until after numpy is installed."""
def __str__(self):
import numpy
return numpy.get_include()
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(Version))
os.system('git push --tags')
sys.exit()
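
# Usage note: because ``cmdclass`` below registers this class under the name
# 'upload', publishing is triggered with:
#
#     python setup.py upload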
# Where the magic happens:
setup(
name = 'PyMieSim',
version = Version,
description = 'A package for light scattering simulations.',
long_description = long_description,
    long_description_content_type = 'text/x-rst',
author = '<NAME>',
author_email = '<EMAIL>',
setup_requires = ['numpy', 'pybind11','cython'],
python_requires = '>=3.6',#REQUIRES_PYTHON,
url = 'https://github.com/MartinPdeS/PyMieSim',
packages = find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
install_requires = REQUIRED,
extras_require = {},
dependency_links = [],
include_package_data = True,
ext_modules = [],
license = 'MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS',
'Programming Language :: C++',
'Programming Language :: Fortran',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Development Status :: 3 - Alpha',
'Topic :: Scientific/Engineering :: Physics',
'Intended Audience :: Science/Research',
],
# $ setup.py publish support.
cmdclass={'upload': UploadCommand, 'bdist_wheel': bdist_wheel}, #'build_ext': build_ext
)
|
[
"pkg_resources.parse_requirements",
"pybind11.get_include",
"setuptools.find_packages",
"wheel.bdist_wheel.bdist_wheel.finalize_options",
"os.path.dirname",
"os.system",
"PyMieSim.Tools.utils.Print",
"numpy.get_include",
"distutils.util.get_platform",
"os.path.join",
"sys.exit"
] |
[((425, 510), 'PyMieSim.Tools.utils.Print', 'Print', ([], {'msg': 'f""" Plateform: {plateform} \n Version: {Version}"""', 'title': '"""PyMieSim"""'}), '(msg=f""" Plateform: {plateform} \n Version: {Version}""", title=\'PyMieSim\'\n )\n', (430, 510), False, 'from PyMieSim.Tools.utils import Print\n'), ((535, 560), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (550, 560), False, 'import os\n'), ((605, 630), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (620, 630), False, 'import os\n'), ((386, 405), 'distutils.util.get_platform', 'util.get_platform', ([], {}), '()\n', (403, 405), False, 'from distutils import util\n'), ((866, 901), 'wheel.bdist_wheel.bdist_wheel.finalize_options', '_bdist_wheel.finalize_options', (['self'], {}), '(self)\n', (895, 901), True, 'from wheel.bdist_wheel import bdist_wheel as _bdist_wheel\n'), ((1103, 1125), 'pybind11.get_include', 'pybind11.get_include', ([], {}), '()\n', (1123, 1125), False, 'import pybind11\n'), ((1287, 1306), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1304, 1306), False, 'import numpy\n'), ((1322, 1354), 'os.path.join', 'os.path.join', (['here', '"""README.rst"""'], {}), "(here, 'README.rst')\n", (1334, 1354), False, 'import os\n'), ((2184, 2216), 'os.system', 'os.system', (['"""twine upload dist/*"""'], {}), "('twine upload dist/*')\n", (2193, 2216), False, 'import os\n'), ((2317, 2345), 'os.system', 'os.system', (['"""git push --tags"""'], {}), "('git push --tags')\n", (2326, 2345), False, 'import os\n'), ((2355, 2365), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2363, 2365), False, 'import sys\n'), ((3022, 3089), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', '*.tests', '*.tests.*', 'tests.*']"}), "(exclude=['tests', '*.tests', '*.tests.*', 'tests.*'])\n", (3035, 3089), False, 'from setuptools import setup, find_packages, Command\n'), ((738, 788), 'pkg_resources.parse_requirements', 'pkg_resources.parse_requirements', (['requirements_txt'], {}), '(requirements_txt)\n', (770, 788), False, 'import pkg_resources\n'), ((1879, 1905), 'os.path.join', 'os.path.join', (['here', '"""dist"""'], {}), "(here, 'dist')\n", (1891, 1905), False, 'import os\n')]
|
import tkinter as tk
import moderngl
import numpy as np
from tkinter_framebuffer import FramebufferImage
from hello_world import HelloWorld2D, PanTool
ctx = moderngl.create_standalone_context()
canvas = HelloWorld2D(ctx)
pan_tool = PanTool()
def vertices():
x = np.linspace(-1.0, 1.0, 50)
y = np.random.rand(50) - 0.5
r = np.ones(50)
g = np.zeros(50)
b = np.zeros(50)
a = np.ones(50)
return np.dstack([x, y, r, g, b, a])
verts = vertices()
def update(evt):
if evt.type == tk.EventType.ButtonPress:
pan_tool.start_drag(evt.x / size[0], evt.y / size[1])
if evt.type == tk.EventType.Motion:
pan_tool.dragging(evt.x / size[0], evt.y / size[1])
if evt.type == tk.EventType.ButtonRelease:
pan_tool.stop_drag(evt.x / size[0], evt.y / size[1])
canvas.pan(pan_tool.value)
with tkfbo:
ctx.clear()
canvas.plot(verts)
size = (512, 512)
root = tk.Tk()
tkfbo = FramebufferImage(root, ctx, size)
lbl = tk.Label(root, image=tkfbo)
lbl.bind("<ButtonPress-1>", update)
lbl.bind("<ButtonRelease-1>", update)
lbl.bind('<Motion>', update)
lbl.pack()
# btn = tk.Button(root, text='Hello', command=update)
# btn.pack()
root.mainloop()
|
[
"hello_world.PanTool",
"numpy.dstack",
"numpy.zeros",
"numpy.ones",
"numpy.linspace",
"moderngl.create_standalone_context",
"numpy.random.rand",
"hello_world.HelloWorld2D",
"tkinter.Label",
"tkinter.Tk",
"tkinter_framebuffer.FramebufferImage"
] |
[((160, 196), 'moderngl.create_standalone_context', 'moderngl.create_standalone_context', ([], {}), '()\n', (194, 196), False, 'import moderngl\n'), ((207, 224), 'hello_world.HelloWorld2D', 'HelloWorld2D', (['ctx'], {}), '(ctx)\n', (219, 224), False, 'from hello_world import HelloWorld2D, PanTool\n'), ((236, 245), 'hello_world.PanTool', 'PanTool', ([], {}), '()\n', (243, 245), False, 'from hello_world import HelloWorld2D, PanTool\n'), ((933, 940), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (938, 940), True, 'import tkinter as tk\n'), ((949, 982), 'tkinter_framebuffer.FramebufferImage', 'FramebufferImage', (['root', 'ctx', 'size'], {}), '(root, ctx, size)\n', (965, 982), False, 'from tkinter_framebuffer import FramebufferImage\n'), ((990, 1017), 'tkinter.Label', 'tk.Label', (['root'], {'image': 'tkfbo'}), '(root, image=tkfbo)\n', (998, 1017), True, 'import tkinter as tk\n'), ((272, 298), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(50)'], {}), '(-1.0, 1.0, 50)\n', (283, 298), True, 'import numpy as np\n'), ((340, 351), 'numpy.ones', 'np.ones', (['(50)'], {}), '(50)\n', (347, 351), True, 'import numpy as np\n'), ((360, 372), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (368, 372), True, 'import numpy as np\n'), ((381, 393), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (389, 393), True, 'import numpy as np\n'), ((402, 413), 'numpy.ones', 'np.ones', (['(50)'], {}), '(50)\n', (409, 413), True, 'import numpy as np\n'), ((425, 454), 'numpy.dstack', 'np.dstack', (['[x, y, r, g, b, a]'], {}), '([x, y, r, g, b, a])\n', (434, 454), True, 'import numpy as np\n'), ((307, 325), 'numpy.random.rand', 'np.random.rand', (['(50)'], {}), '(50)\n', (321, 325), True, 'import numpy as np\n')]
|
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split
# Code starts here
data = pd.read_csv(path)
X = data.drop(columns=['customer.id', 'paid.back.loan'])
y = data['paid.back.loan']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Code ends here
# --------------
#Importing header files
import matplotlib.pyplot as plt
# Code starts here
fully_paid = y_train.value_counts()
fully_paid.plot(kind='bar')
# Code ends here
# --------------
#Importing header files
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
X_train['int.rate'].replace(regex=True,inplace=True, to_replace=r'\D', value=r'')
X_train['int.rate'] = (X_train['int.rate'].astype(float))/10000
X_test['int.rate'].replace(regex=True,inplace=True, to_replace=r'\D', value=r'')
X_test['int.rate'] = (X_test['int.rate'].astype(float))/10000
num_df = X_train.select_dtypes(['int64','float64'])
cat_df = X_train.select_dtypes(['object'])
# Code ends here
# --------------
#Importing header files
import seaborn as sns
# Code starts here
cols = list(num_df.columns)
fig, axes = plt.subplots(nrows=9, ncols=1, figsize=(10,20))
for i in range(0,len(cols)):
sns.boxplot(x= y_train, y= num_df[cols[i]], ax=axes[i])
# Code ends here
# --------------
# Code starts here
cols = list(cat_df.columns)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10,10))
for i in range(2):
for j in range(2):
sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j])
# Code ends here
# --------------
#Importing header files
from sklearn.tree import DecisionTreeClassifier
# Code starts here
le = LabelEncoder()
X_train.fillna("NA", inplace=True)
X_test.fillna("NA", inplace=True)
for col in cat_df.columns:
X_train[col] = le.fit_transform(X_train[col])
X_test[col] = le.transform(X_test[col])
y_train.replace("No", 0, inplace=True)
y_test.replace("No", 0, inplace=True)
y_train.replace("Yes", 1, inplace=True)
y_test.replace("Yes", 1, inplace=True)
model = DecisionTreeClassifier(random_state=0)
model.fit(X_train, y_train)
acc = model.score(X_test, y_test)
# Code ends here
# --------------
#Importing header files
from sklearn.model_selection import GridSearchCV
#Parameter grid
parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}
# Code starts here
model_2 = DecisionTreeClassifier(random_state=0)
p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5)
p_tree.fit(X_train, y_train)
acc_2 = p_tree.score(X_test, y_test)
print(acc_2)
# Code ends here
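
# Optional check (not part of the original exercise): inspect the hyper-parameters
# chosen by the grid search and compare the tuned score with the plain tree.
print(p_tree.best_params_)
print(acc, acc_2)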
# --------------
#Importing header files
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus
# Code starts here
dot_data = tree.export_graphviz(decision_tree = p_tree.best_estimator_, out_file=None, feature_names=X.columns,
filled=True, class_names=['loan_paid_back_yes','loan_paid_back_no'])
graph_big = pydotplus.graph_from_dot_data(dot_data)
# show graph - do not delete/modify the code below this line
img_path = user_data_dir+'/file.png'
graph_big.write_png(img_path)
plt.figure(figsize=(20,15))
plt.imshow(plt.imread(img_path))
plt.axis('off')
plt.show()
# Code ends here
|
[
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.show",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.axis",
"sklearn.preprocessing.LabelEncoder",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.tree.export_graphviz",
"pydotplus.graph_from_dot_data",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"numpy.arange",
"seaborn.countplot",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.subplots"
] |
[((116, 133), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (127, 133), True, 'import pandas as pd\n'), ((253, 306), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (269, 306), False, 'from sklearn.model_selection import train_test_split\n'), ((1153, 1201), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(9)', 'ncols': '(1)', 'figsize': '(10, 20)'}), '(nrows=9, ncols=1, figsize=(10, 20))\n', (1165, 1201), True, 'import matplotlib.pyplot as plt\n'), ((1385, 1433), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(10, 10)'}), '(nrows=2, ncols=2, figsize=(10, 10))\n', (1397, 1433), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1694), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1692, 1694), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2021, 2059), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2043, 2059), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2361, 2399), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2383, 2399), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2409, 2473), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'model_2', 'param_grid': 'parameter_grid', 'cv': '(5)'}), '(estimator=model_2, param_grid=parameter_grid, cv=5)\n', (2421, 2473), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2812, 2988), 'sklearn.tree.export_graphviz', 'tree.export_graphviz', ([], {'decision_tree': 'p_tree.best_estimator_', 'out_file': 'None', 'feature_names': 'X.columns', 'filled': '(True)', 'class_names': "['loan_paid_back_yes', 'loan_paid_back_no']"}), "(decision_tree=p_tree.best_estimator_, out_file=None,\n feature_names=X.columns, filled=True, class_names=['loan_paid_back_yes',\n 'loan_paid_back_no'])\n", (2832, 2988), False, 'from sklearn import tree\n'), ((3026, 3065), 'pydotplus.graph_from_dot_data', 'pydotplus.graph_from_dot_data', (['dot_data'], {}), '(dot_data)\n', (3055, 3065), False, 'import pydotplus\n'), ((3195, 3223), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (3205, 3223), True, 'import matplotlib.pyplot as plt\n'), ((3256, 3271), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3264, 3271), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3282), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3280, 3282), True, 'import matplotlib.pyplot as plt\n'), ((1234, 1287), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': 'y_train', 'y': 'num_df[cols[i]]', 'ax': 'axes[i]'}), '(x=y_train, y=num_df[cols[i]], ax=axes[i])\n', (1245, 1287), True, 'import seaborn as sns\n'), ((2278, 2294), 'numpy.arange', 'np.arange', (['(3)', '(10)'], {}), '(3, 10)\n', (2287, 2294), True, 'import numpy as np\n'), ((3234, 3254), 'matplotlib.pyplot.imread', 'plt.imread', (['img_path'], {}), '(img_path)\n', (3244, 3254), True, 'import matplotlib.pyplot as plt\n'), ((1483, 1552), 'seaborn.countplot', 'sns.countplot', ([], {'x': 'X_train[cols[i * 2 + j]]', 'hue': 'y_train', 'ax': 'axes[i, j]'}), '(x=X_train[cols[i * 2 + j]], hue=y_train, ax=axes[i, j])\n', (1496, 1552), True, 'import seaborn as sns\n')]
|
import math
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# class EncoderDecoder(nn.Module):
# """
# A standard Encoder-Decoder architecture. Base for this and many
# other models.
# """
# def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
# super(EncoderDecoder, self).__init__()
# self.encoder = encoder
# self.decoder = decoder
# self.src_embed = src_embed # Embedding function
# self.tgt_embed = tgt_embed # Embedding function
# self.generator = generator
# def forward(self, src, tgt, src_mask, tgt_mask):
# "Take in and process masked src and target sequences."
# return self.decode(self.encode(src, src_mask), src_mask,
# tgt, tgt_mask)
# def encode(self, src, src_mask):
# return self.encoder(self.src_embed(src), src_mask)
# def decode(self, memory, src_mask, tgt, tgt_mask):
# return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
# class Generator(nn.Module):
# "Define standard linear + softmax generation step."
# def __init__(self, d_model, vocab):
# super(Generator, self).__init__()
# self.proj = nn.Linear(d_model, vocab)
# self.softmax = nn.Softmax(dim=-1)
# def forward(self, x):
# return F.log_softmax(self.proj(x), dim=-1)
# def scaled_forward(self, x, scale=1.0):
# return self.softmax(self.proj(x)*scale)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def make_mask_from_lens(x, lens):
if lens is None:
return None
mask = torch.zeros(x.shape[:2])
for i, j in enumerate(lens):
mask[i,:j] = 1
return mask.unsqueeze(-1)
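# Shape sketch: for x of shape (batch, time, feat) and lens = [4, 2], this returns a
# (batch, time, 1) float mask whose first two dims look like
#   [[1, 1, 1, 1],
#    [1, 1, 0, 0]]
# i.e. 1 for valid frames and 0 for padding beyond each sequence length.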
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N) # layer = EncoderLayer()
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class TransformerEncoder(nn.Module):
"""
A standard Encoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, src_embed):
super(TransformerEncoder, self).__init__()
self.encoder = encoder
self.src_embed = src_embed # Embedding function
def forward(self, src, src_lens):
src_mask = make_mask_from_lens(src, src_lens)
return self.encoder(self.src_embed(src), src_mask), src_lens
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout, first_only=False):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
self.first_only = first_only
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
if self.first_only:
w, others = sublayer(self.norm(x))
return x + self.dropout(w), others
else:
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask)[0])
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask, past=None):
if past is None:
past = [None] * len(self.layers)
new_past = []
for layer, layer_past in zip(self.layers, past):
x, new_layer_past = layer(x, memory, src_mask, tgt_mask, layer_past)
new_past.append(new_layer_past)
return self.norm(x), new_past
class TransformerDecoder(nn.Module):
"""
A standard Decoder architecture. Base for this and many
other models.
"""
def __init__(self, decoder, tgt_embed):
super(TransformerDecoder, self).__init__()
self.decoder = decoder
self.tgt_embed = tgt_embed # Embedding function
def forward(self, memory, src_lens, tgt, tgt_mask, past=None):
src_mask = make_mask_from_lens(memory, src_lens)
past_len = 0
if past is not None:
if past[0] is not None:
if past[0][0] is not None:
past_len = past[0][0].shape[-2]
return self.decoder(self.tgt_embed(tgt, past_len=past_len), memory, src_mask, tgt_mask, past)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = nn.ModuleList([SublayerConnection(size, dropout, first_only=True),
SublayerConnection(size, dropout, first_only=False),
SublayerConnection(size, dropout, first_only=False)])
def forward(self, x, memory, src_mask, tgt_mask, past=None):
"Follow Figure 1 (right) for connections."
m = memory
x, past = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask, past))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask.transpose(-2, -1))[0])
return self.sublayer[2](x, self.feed_forward), past
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
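# Example sketch: subsequent_mask(3) produces a (1, 3, 3) mask that is true on and below
# the diagonal,
#   [[1, 0, 0],
#    [1, 1, 0],
#    [1, 1, 1]]
# so during auto-regressive decoding position i may only attend to positions j <= i.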
def make_std_mask(tgt, pad):
"Create a mask to hide padding and future words."
first_col = torch.zeros_like(tgt)
first_col[:, 0] = 1
tgt_mask = ((tgt != pad) | first_col.bool()).unsqueeze(-2)
tgt_mask = tgt_mask & subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data)
return tgt_mask
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
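# Shape note: as called from MultiHeadedAttention below, query/key/value are
# (batch, heads, time, d_k); scores and p_attn are (batch, heads, time_q, time_k); the
# returned context tensor is (batch, heads, time_q, d_k).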
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None, past=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
if past is not None:
assert mask is None, "Using past state while auto-regressive decoding without mask."
past_key, past_value = past
key = torch.cat((past_key, key), dim=-2)
value = torch.cat((past_value, value), dim=-2)
temp_past = key, value
else:
temp_past = None
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask.to(query.device) if type(mask)!=type(None) else mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x), temp_past
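# Note on caching: when `past` (a previously returned key/value pair, each of shape
# (batch, heads, time, d_k)) is supplied, the new step's projections are appended to it
# and the grown cache is returned as temp_past, so earlier keys/values are not recomputed
# during incremental decoding.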
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab, pretrained_matrix=None):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
if pretrained_matrix is not None:
self.lut.weight.data = torch.tensor(pretrained_matrix)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
class Scale(nn.Module):
def __init__(self, d_model):
super(Scale, self).__init__()
self.d_model = d_model
def forward(self, x):
return x * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model, dtype=torch.float32)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x, past_len=0):
x = x.float() + self.pe[:, past_len:past_len+x.size(1)]
return self.dropout(x)
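# Note: past_len shifts the positional-encoding slice, so when decoding one token at a
# time with cached keys/values the single new token still receives the encoding for its
# absolute position rather than position 0.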
# def Transformer(N=6, d_model=1024, d_ff=2048, h=8, dropout=0.1):
# "Helper: Construct a model from hyperparameters."
# c = copy.deepcopy
# attn = MultiHeadedAttention(h, d_model)
# ff = PositionwiseFeedForward(d_model, d_ff, dropout)
# position = PositionalEncoding(d_model, dropout)
# model = EncoderDecoder(
# Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
# Decoder(DecoderLayer(d_model, c(attn), c(attn),
# c(ff), dropout), N),
# ScalePositionEmbedding(Scale(d_model), c(position)),
# ScalePositionEmbedding(Scale(d_model), c(position)), None)
# # This was important from their code.
# # Initialize parameters with Glorot / fan_avg.
# for p in model.parameters():
# if p.dim() > 1:
# nn.init.xavier_uniform_(p)
# return model
class ScalePositionEmbedding(nn.Module):
def __init__(self, scale, position):
super(ScalePositionEmbedding, self).__init__()
self.add_module('0', scale)
self.add_module('1', position)
def __getitem__(self, idx):
return self._modules[str(idx)]
def forward(self, x, past_len=0):
x = self[0](x)
x = self[1](x, past_len)
return x
def SpeechTransformerEncoder(N=6, d_model=1024, d_ff=2048, h=8, dropout=0.1):
'''Helper: Construct a model from hyperparameters.
    This encoder does not include a word embedding layer.
The input tensors need to be word embeddings/speech features.'''
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
model = TransformerEncoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
ScalePositionEmbedding(Scale(d_model), c(position)))
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def SpeechTransformerDecoder(N=6, d_model=1024, d_ff=2048, h=8, dropout=0.1):
'''Helper: Construct a model from hyperparameters.
    This decoder does not include a word embedding layer.
The input tensors need to be word embeddings/speech features.'''
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
model = TransformerDecoder(
Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
ScalePositionEmbedding(Scale(d_model), c(position)))
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
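# Minimal smoke-test sketch; the hyperparameters and shapes below are illustrative only,
# not taken from any particular experiment. Inputs are assumed to be already-projected
# d_model-dimensional features, as the docstrings above require.
if __name__ == "__main__":
    d_model = 64
    enc = SpeechTransformerEncoder(N=2, d_model=d_model, d_ff=128, h=4, dropout=0.1)
    dec = SpeechTransformerDecoder(N=2, d_model=d_model, d_ff=128, h=4, dropout=0.1)
    feats = torch.randn(2, 10, d_model)  # (batch, frames, d_model) speech features
    lens = [10, 7]                       # valid frame counts per utterance
    memory, src_lens = enc(feats, lens)
    tgt = torch.randn(2, 5, d_model)    # already-embedded target tokens
    tgt_mask = subsequent_mask(5)
    out, past = dec(memory, src_lens, tgt, tgt_mask)
    print(out.shape)                     # torch.Size([2, 5, 64])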
|
[
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.cat",
"numpy.ones",
"torch.cos",
"torch.arange",
"torch.ones",
"torch.nn.Linear",
"torch.zeros",
"math.log",
"torch.matmul",
"copy.deepcopy",
"torch.zeros_like",
"math.sqrt",
"torch.nn.init.xavier_uniform_",
"torch.from_numpy",
"torch.nn.functional.softmax",
"torch.sin",
"torch.tensor"
] |
[((1758, 1782), 'torch.zeros', 'torch.zeros', (['x.shape[:2]'], {}), '(x.shape[:2])\n', (1769, 1782), False, 'import torch\n'), ((7274, 7295), 'torch.zeros_like', 'torch.zeros_like', (['tgt'], {}), '(tgt)\n', (7290, 7295), False, 'import torch\n'), ((7791, 7816), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (7800, 7816), True, 'import torch.nn.functional as F\n'), ((3620, 3639), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3630, 3639), True, 'import torch.nn as nn\n'), ((7135, 7168), 'torch.from_numpy', 'torch.from_numpy', (['subsequent_mask'], {}), '(subsequent_mask)\n', (7151, 7168), False, 'import torch\n'), ((7685, 7699), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (7694, 7699), False, 'import math\n'), ((7891, 7918), 'torch.matmul', 'torch.matmul', (['p_attn', 'value'], {}), '(p_attn, value)\n', (7903, 7918), False, 'import torch\n'), ((8354, 8375), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (8364, 8375), True, 'import torch.nn as nn\n'), ((9900, 9924), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_ff'], {}), '(d_model, d_ff)\n', (9909, 9924), True, 'import torch.nn as nn\n'), ((9944, 9968), 'torch.nn.Linear', 'nn.Linear', (['d_ff', 'd_model'], {}), '(d_ff, d_model)\n', (9953, 9968), True, 'import torch.nn as nn\n'), ((9992, 10011), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (10002, 10011), True, 'import torch.nn as nn\n'), ((10254, 10282), 'torch.nn.Embedding', 'nn.Embedding', (['vocab', 'd_model'], {}), '(vocab, d_model)\n', (10266, 10282), True, 'import torch.nn as nn\n'), ((10901, 10922), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (10911, 10922), True, 'import torch.nn as nn\n'), ((11007, 11057), 'torch.zeros', 'torch.zeros', (['max_len', 'd_model'], {'dtype': 'torch.float32'}), '(max_len, d_model, dtype=torch.float32)\n', (11018, 11057), False, 'import torch\n'), ((11273, 11303), 'torch.sin', 'torch.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (11282, 11303), False, 'import torch\n'), ((11326, 11356), 'torch.cos', 'torch.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (11335, 11356), False, 'import torch\n'), ((1629, 1650), 'copy.deepcopy', 'copy.deepcopy', (['module'], {}), '(module)\n', (1642, 1650), False, 'import copy\n'), ((3008, 3028), 'torch.ones', 'torch.ones', (['features'], {}), '(features)\n', (3018, 3028), False, 'import torch\n'), ((3062, 3083), 'torch.zeros', 'torch.zeros', (['features'], {}), '(features)\n', (3073, 3083), False, 'import torch\n'), ((8274, 8301), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (8283, 8301), True, 'import torch.nn as nn\n'), ((9063, 9097), 'torch.cat', 'torch.cat', (['(past_key, key)'], {'dim': '(-2)'}), '((past_key, key), dim=-2)\n', (9072, 9097), False, 'import torch\n'), ((9118, 9156), 'torch.cat', 'torch.cat', (['(past_value, value)'], {'dim': '(-2)'}), '((past_value, value), dim=-2)\n', (9127, 9156), False, 'import torch\n'), ((10360, 10391), 'torch.tensor', 'torch.tensor', (['pretrained_matrix'], {}), '(pretrained_matrix)\n', (10372, 10391), False, 'import torch\n'), ((10479, 10502), 'math.sqrt', 'math.sqrt', (['self.d_model'], {}), '(self.d_model)\n', (10488, 10502), False, 'import math\n'), ((10676, 10699), 'math.sqrt', 'math.sqrt', (['self.d_model'], {}), '(self.d_model)\n', (10685, 10699), False, 'import math\n'), ((13605, 13631), 'torch.nn.init.xavier_uniform_', 
'nn.init.xavier_uniform_', (['p'], {}), '(p)\n', (13628, 13631), True, 'import torch.nn as nn\n'), ((14424, 14450), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['p'], {}), '(p)\n', (14447, 14450), True, 'import torch.nn as nn\n'), ((7082, 7101), 'numpy.ones', 'np.ones', (['attn_shape'], {}), '(attn_shape)\n', (7089, 7101), True, 'import numpy as np\n'), ((11077, 11101), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (11089, 11101), False, 'import torch\n'), ((11152, 11179), 'torch.arange', 'torch.arange', (['(0)', 'd_model', '(2)'], {}), '(0, d_model, 2)\n', (11164, 11179), False, 'import torch\n'), ((11221, 11238), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (11229, 11238), False, 'import math\n')]
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 21:43, 01/02/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Bao_Hoang19 %
# Github: https://github.com/hoangbao123 %
#-------------------------------------------------------------------------------------------------------%
"""
Get all information (best fit over the 15 runs and the losses for the best run) of all algorithms.
Save all information in overall/algo_dict_info.pkl
"""
import pickle as pkl
import numpy as np
from utils.FunctionUtil import cal_mean, cal_std
from utils.class_utils import AlgoInfor
algos = ['GA', 'PSO', 'ABFOLS', 'CRO', 'ABC', 'WOA', 'QSO', 'IQSO']
path_loss = './history/loss/'
path_best_fit = './history/best_fit/'
algo_dict = {}
# iterate over all algorithms
for name in algos:
al = AlgoInfor()
al.name = name
print(name)
    # iterate over the 30 benchmark functions
for i in range(1, 31):
function_name = 'F' + str(i)
name_file = name + "_" + function_name
loss_file = name_file + '_loss.pkl'
best_fit_file = name_file + '_best_fit.pkl'
path_file_loss = path_loss + loss_file
path_file_best_fit = path_best_fit + best_fit_file
with open(path_file_loss, 'rb') as f:
loss = pkl.load(f)
with open(path_file_best_fit, 'rb') as f:
best_fit = pkl.load(f)
if name == 'PSO':
            # PSO returns the loss and best fit in matrix form
loss = np.reshape(np.array(loss), -1)
best_fit = np.reshape(np.array(best_fit), -1)
elif name == 'ABFOLS':
            # ABFOLS returns the inverted fitness value and the loss in matrix form
best_fit = 1 / np.array(best_fit)
loss = np.array(loss)[:, 0]
        # calculate std, mean, worst, best over the 15 runs
std = cal_std(best_fit, i*100)
mean = cal_mean(best_fit, i*100)
worst = max(best_fit)
best = min(best_fit)
al.std.append(std)
al.mean.append(mean)
al.best.append(best)
al.worst.append(worst)
al.loss.append(np.array(loss))
al.best_fit.append(np.array(best_fit))
algo_dict[name] = al
# save info as a pickle file
with open('./history/overall/algo_dict_info.pkl', 'wb') as f:
pkl.dump(algo_dict, f, pkl.HIGHEST_PROTOCOL)
with open('./history/overall/algo_dict_info.pkl', 'rb') as f:
alf = pkl.load(f)
print(alf['PSO'].name)
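# Access sketch: each AlgoInfor stores its per-function statistics in benchmark order
# (index 0 -> F1, ..., index 29 -> F30), so e.g. the mean and std of IQSO on F10 would be
# read back as alf['IQSO'].mean[9] and alf['IQSO'].std[9].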
|
[
"pickle.dump",
"utils.FunctionUtil.cal_std",
"pickle.load",
"numpy.array",
"utils.class_utils.AlgoInfor",
"utils.FunctionUtil.cal_mean"
] |
[((1245, 1256), 'utils.class_utils.AlgoInfor', 'AlgoInfor', ([], {}), '()\n', (1254, 1256), False, 'from utils.class_utils import AlgoInfor\n'), ((2712, 2756), 'pickle.dump', 'pkl.dump', (['algo_dict', 'f', 'pkl.HIGHEST_PROTOCOL'], {}), '(algo_dict, f, pkl.HIGHEST_PROTOCOL)\n', (2720, 2756), True, 'import pickle as pkl\n'), ((2830, 2841), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (2838, 2841), True, 'import pickle as pkl\n'), ((2265, 2291), 'utils.FunctionUtil.cal_std', 'cal_std', (['best_fit', '(i * 100)'], {}), '(best_fit, i * 100)\n', (2272, 2291), False, 'from utils.FunctionUtil import cal_mean, cal_std\n'), ((2305, 2332), 'utils.FunctionUtil.cal_mean', 'cal_mean', (['best_fit', '(i * 100)'], {}), '(best_fit, i * 100)\n', (2313, 2332), False, 'from utils.FunctionUtil import cal_mean, cal_std\n'), ((1711, 1722), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (1719, 1722), True, 'import pickle as pkl\n'), ((1796, 1807), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (1804, 1807), True, 'import pickle as pkl\n'), ((2529, 2543), 'numpy.array', 'np.array', (['loss'], {}), '(loss)\n', (2537, 2543), True, 'import numpy as np\n'), ((2572, 2590), 'numpy.array', 'np.array', (['best_fit'], {}), '(best_fit)\n', (2580, 2590), True, 'import numpy as np\n'), ((1923, 1937), 'numpy.array', 'np.array', (['loss'], {}), '(loss)\n', (1931, 1937), True, 'import numpy as np\n'), ((1977, 1995), 'numpy.array', 'np.array', (['best_fit'], {}), '(best_fit)\n', (1985, 1995), True, 'import numpy as np\n'), ((2137, 2155), 'numpy.array', 'np.array', (['best_fit'], {}), '(best_fit)\n', (2145, 2155), True, 'import numpy as np\n'), ((2175, 2189), 'numpy.array', 'np.array', (['loss'], {}), '(loss)\n', (2183, 2189), True, 'import numpy as np\n')]
|
# %% REQUIRED LIBRARIES
import os
import pandas as pd
import numpy as np
from plotly.offline import plot
import plotly.graph_objs as go
import plotly.express as px
from pyloopkit.loop_data_manager import update
from src.input_data_tools import input_table_to_dict, dict_inputs_to_dataframes
# %% REFERENCES
"""
A version of this code, in a Google Colab notebook stored as a GitHub gist, is located here:
https://colab.research.google.com/gist/ed-nykaza/fe631aec6166a50b50ac9e8e5bc0eeb7/how-loop-dosing-decisions-work-etn-2020-05-11-v-0-1-0.ipynb
Credit for the color palette used in this animation goes to colorbrewer:
https://colorbrewer2.org/#type=qualitative&scheme=Set1&n=5
"""
# %% FUNCTIONS
def make_animation(scenario_df):
inputs_from_file = input_table_to_dict(scenario_df)
# first get the original prediction
loop_output = update(inputs_from_file)
inputs = loop_output.get("input_data")
# convert dict_inputs_to_dataframes
(
basal_rates,
carb_events,
carb_ratios,
dose_events,
blood_glucose,
df_last_temporary_basal,
df_misc,
df_sensitivity_ratio,
df_settings,
df_target_range,
) = dict_inputs_to_dataframes(inputs)
original_forecast = loop_output.get("predicted_glucose_values")
t = np.arange(0, len(original_forecast) * 5, 5)
df = pd.DataFrame(t, columns=["time"])
df["forecast"] = original_forecast
df["forecast_type"] = "original"
recommended_bolus = loop_output.get("recommended_bolus")[0]
# TODO: subtract the effect of the temp basal. In the example, the temp basal = scheduled basal rate
# so it is fine that this is not accounted for.
carbs = int(carb_events["carb_values"][0])
carb_to_insulin_ratio = np.round(carb_ratios["carb_ratio_values"][0], 2)
insulin_to_cover_carbs = np.round(carbs / carb_to_insulin_ratio, 2)
insulin_as_correction = np.round(recommended_bolus - insulin_to_cover_carbs, 2)
original_bolus_amount = inputs_from_file.get("dose_values")[0]
dosing_decision_df = pd.DataFrame()
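    # Sweep candidate doses in 0.05 U steps from 0 up to just past the larger of the
    # recommended bolus and the carb-covering insulin, re-running the Loop forecast for
    # each candidate so the animation can show how the prediction responds to the dose.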
for dose_amount in np.arange(0, np.max([recommended_bolus + 0.1, insulin_to_cover_carbs + 0.05]), 0.05):
temp_df = pd.DataFrame(t, columns=["time"])
inputs_from_file["dose_values"] = [original_bolus_amount + dose_amount]
temp_loop_output = update(inputs_from_file)
temp_forecast = temp_loop_output.get("predicted_glucose_values")
temp_df["forecast"] = temp_forecast
temp_df["Forecast updated with Dose (U)"] = np.round(dose_amount, 2)
dosing_decision_df = pd.concat([dosing_decision_df, temp_df], ignore_index=True, sort=False)
# make an animation
figure_title = "How Loop's Dosing Decision Works, Recommended Bolus of {}U =<br>{}U to cover {}g of carbs and {}U to prevent going below Dosing Safety Threshold".format(
recommended_bolus, insulin_to_cover_carbs, carbs, insulin_as_correction
)
fig = px.line(
dosing_decision_df,
x="time",
y="forecast",
line_dash="Forecast updated with Dose (U)",
line_dash_sequence=["dot"],
line_shape="spline",
animation_frame="Forecast updated with Dose (U)",
range_y=[40, dosing_decision_df["forecast"].max() + 10],
title=figure_title,
)
x_tick_vals = np.arange(-120, 400, 30)
fig.update_layout(
yaxis_title="Glucose (mg/dL)",
xaxis_title="Time Relative to Current Time (t=0)",
xaxis_tickvals=x_tick_vals,
autosize=False,
width=1200,
height=700,
)
fig.add_trace(
go.Scatter(
name="CGM data leading up to the Forecast",
x=np.arange(-(len(inputs_from_file["glucose_values"]) * 5) + 5, 5, 5),
y=inputs_from_file["glucose_values"],
mode="markers",
marker_color="rgba(97,73,246, 0.75)",
)
)
fig.add_trace(
go.Scatter(
name="Original Forecast",
x=t,
y=original_forecast,
mode="lines",
line_color="rgba(97,73,246, 0.75)",
line_width=4,
)
)
forecast_with_rec_bolus = dosing_decision_df.loc[
dosing_decision_df["Forecast updated with Dose (U)"] == np.round(recommended_bolus, 2), "forecast"
].values
fig.add_trace(
go.Scatter(
name="Forecast with Bolus Rec. of {}U".format(recommended_bolus),
x=t,
y=forecast_with_rec_bolus,
mode="lines",
line_color="rgba(77,175,74, 0.75)",
line_width=4,
)
)
forecast_with_too_much_dose = dosing_decision_df.loc[
dosing_decision_df["Forecast updated with Dose (U)"] == np.round(recommended_bolus + 0.05, 2), "forecast"
].values
fig.add_trace(
go.Scatter(
name="Forecast with Dose of {}U".format(np.round(recommended_bolus + 0.05, 2)),
x=t,
y=forecast_with_too_much_dose,
mode="lines",
line_color="rgba(255,127,0, 0.75)",
line_width=4,
)
)
# add other traces
    # this only takes a single value; TODO: update this to take a schedule
suspend_threshold = df_settings.loc["suspend_threshold", "settings"]
target_range_min = int(df_target_range["target_range_minimum_values"][0])
target_range_max = int(df_target_range["target_range_maximum_values"][0])
correction_range_mid = int(np.mean([target_range_min, target_range_max]))
t_dosing_threshold = np.arange(0, 370, 1)
dosing_threshold = np.append(
np.ones(185) * suspend_threshold, np.linspace(suspend_threshold, correction_range_mid, 185),
)
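    # The plotted dosing safety threshold is flat at the suspend threshold for the first
    # half of the window, then ramps linearly up to the midpoint of the correction range.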
# # calculate the amount of dose per time step
# asdf = pd.DataFrame(original_forecast)
# asdf.to_csv("original_forecast.csv")
df["Suspend Threshold"] = suspend_threshold
df["Correction Range Min"] = target_range_min
df["Correction Range Max"] = target_range_max
fig.add_trace(
go.Scatter(
name="Correction Min",
x=df["time"],
y=df["Correction Range Min"],
fill=None,
mode="lines",
line_color="rgba(166,206,227, 0.50)",
legendgroup="correction_range",
showlegend=False,
)
)
fig.add_trace(
go.Scatter(
name="Correction Range = {}-{} mg/dL".format(target_range_min, target_range_max),
x=df["time"],
y=df["Correction Range Max"],
fill="tonexty", # fill area between trace0 and trace1
fillcolor="rgba(166,206,227, 0.25)",
mode="lines",
line_color="rgba(166,206,227, 0.50)",
legendgroup="correction_range",
opacity=0.05,
)
)
fig.add_trace(
go.Scatter(
name="Suspend Threshold = {} mg/dL".format(suspend_threshold),
x=x_tick_vals,
y=np.repeat(suspend_threshold, len(x_tick_vals)),
mode="lines",
line_color="rgb(228,26,28)",
)
)
fig.add_trace(
go.Scatter(
name="Dosing Safety Threshold",
x=t_dosing_threshold,
y=dosing_threshold,
mode="lines",
line_color="rgb(152,78,163)",
line_width=4,
)
)
return fig
if __name__ == "__main__":
# %% LOAD DATA
# filenames = [
# "dosing-safety-threshold-example",
# "dosing-safety-threshold-example-goes-below-suspend-threshold",
# "dosing-safety-threshold-example-only-last-3-points",
# "dosing-safety-threshold-example-only-last-2-points",
# ]
filenames = ["dosing-safety-threshold-example"]
for filename in filenames:
scenario = pd.read_csv(os.path.join("data", filename + ".csv"), index_col=[0])
# %% MAKE THE FIGURE
scenario_fig = make_animation(scenario)
# %% PLOT AND SAVE FILE
figure_location = os.path.join("figures", filename + ".html")
plot(scenario_fig, filename=figure_location)
|
[
"pandas.DataFrame",
"os.path.join",
"plotly.graph_objs.Scatter",
"src.input_data_tools.dict_inputs_to_dataframes",
"numpy.ones",
"plotly.offline.plot",
"numpy.max",
"numpy.mean",
"numpy.arange",
"pyloopkit.loop_data_manager.update",
"numpy.linspace",
"src.input_data_tools.input_table_to_dict",
"numpy.round",
"pandas.concat"
] |
[((746, 778), 'src.input_data_tools.input_table_to_dict', 'input_table_to_dict', (['scenario_df'], {}), '(scenario_df)\n', (765, 778), False, 'from src.input_data_tools import input_table_to_dict, dict_inputs_to_dataframes\n'), ((838, 862), 'pyloopkit.loop_data_manager.update', 'update', (['inputs_from_file'], {}), '(inputs_from_file)\n', (844, 862), False, 'from pyloopkit.loop_data_manager import update\n'), ((1193, 1226), 'src.input_data_tools.dict_inputs_to_dataframes', 'dict_inputs_to_dataframes', (['inputs'], {}), '(inputs)\n', (1218, 1226), False, 'from src.input_data_tools import input_table_to_dict, dict_inputs_to_dataframes\n'), ((1357, 1390), 'pandas.DataFrame', 'pd.DataFrame', (['t'], {'columns': "['time']"}), "(t, columns=['time'])\n", (1369, 1390), True, 'import pandas as pd\n'), ((1764, 1812), 'numpy.round', 'np.round', (["carb_ratios['carb_ratio_values'][0]", '(2)'], {}), "(carb_ratios['carb_ratio_values'][0], 2)\n", (1772, 1812), True, 'import numpy as np\n'), ((1842, 1884), 'numpy.round', 'np.round', (['(carbs / carb_to_insulin_ratio)', '(2)'], {}), '(carbs / carb_to_insulin_ratio, 2)\n', (1850, 1884), True, 'import numpy as np\n'), ((1913, 1968), 'numpy.round', 'np.round', (['(recommended_bolus - insulin_to_cover_carbs)', '(2)'], {}), '(recommended_bolus - insulin_to_cover_carbs, 2)\n', (1921, 1968), True, 'import numpy as np\n'), ((2062, 2076), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2074, 2076), True, 'import pandas as pd\n'), ((3332, 3356), 'numpy.arange', 'np.arange', (['(-120)', '(400)', '(30)'], {}), '(-120, 400, 30)\n', (3341, 3356), True, 'import numpy as np\n'), ((5542, 5562), 'numpy.arange', 'np.arange', (['(0)', '(370)', '(1)'], {}), '(0, 370, 1)\n', (5551, 5562), True, 'import numpy as np\n'), ((2113, 2177), 'numpy.max', 'np.max', (['[recommended_bolus + 0.1, insulin_to_cover_carbs + 0.05]'], {}), '([recommended_bolus + 0.1, insulin_to_cover_carbs + 0.05])\n', (2119, 2177), True, 'import numpy as np\n'), ((2204, 2237), 'pandas.DataFrame', 'pd.DataFrame', (['t'], {'columns': "['time']"}), "(t, columns=['time'])\n", (2216, 2237), True, 'import pandas as pd\n'), ((2345, 2369), 'pyloopkit.loop_data_manager.update', 'update', (['inputs_from_file'], {}), '(inputs_from_file)\n', (2351, 2369), False, 'from pyloopkit.loop_data_manager import update\n'), ((2539, 2563), 'numpy.round', 'np.round', (['dose_amount', '(2)'], {}), '(dose_amount, 2)\n', (2547, 2563), True, 'import numpy as np\n'), ((2593, 2664), 'pandas.concat', 'pd.concat', (['[dosing_decision_df, temp_df]'], {'ignore_index': '(True)', 'sort': '(False)'}), '([dosing_decision_df, temp_df], ignore_index=True, sort=False)\n', (2602, 2664), True, 'import pandas as pd\n'), ((3935, 4065), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'name': '"""Original Forecast"""', 'x': 't', 'y': 'original_forecast', 'mode': '"""lines"""', 'line_color': '"""rgba(97,73,246, 0.75)"""', 'line_width': '(4)'}), "(name='Original Forecast', x=t, y=original_forecast, mode='lines',\n line_color='rgba(97,73,246, 0.75)', line_width=4)\n", (3945, 4065), True, 'import plotly.graph_objs as go\n'), ((5470, 5515), 'numpy.mean', 'np.mean', (['[target_range_min, target_range_max]'], {}), '([target_range_min, target_range_max])\n', (5477, 5515), True, 'import numpy as np\n'), ((5639, 5696), 'numpy.linspace', 'np.linspace', (['suspend_threshold', 'correction_range_mid', '(185)'], {}), '(suspend_threshold, correction_range_mid, 185)\n', (5650, 5696), True, 'import numpy as np\n'), ((6021, 6220), 'plotly.graph_objs.Scatter', 
'go.Scatter', ([], {'name': '"""Correction Min"""', 'x': "df['time']", 'y': "df['Correction Range Min']", 'fill': 'None', 'mode': '"""lines"""', 'line_color': '"""rgba(166,206,227, 0.50)"""', 'legendgroup': '"""correction_range"""', 'showlegend': '(False)'}), "(name='Correction Min', x=df['time'], y=df['Correction Range Min'\n ], fill=None, mode='lines', line_color='rgba(166,206,227, 0.50)',\n legendgroup='correction_range', showlegend=False)\n", (6031, 6220), True, 'import plotly.graph_objs as go\n'), ((7120, 7267), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'name': '"""Dosing Safety Threshold"""', 'x': 't_dosing_threshold', 'y': 'dosing_threshold', 'mode': '"""lines"""', 'line_color': '"""rgb(152,78,163)"""', 'line_width': '(4)'}), "(name='Dosing Safety Threshold', x=t_dosing_threshold, y=\n dosing_threshold, mode='lines', line_color='rgb(152,78,163)', line_width=4)\n", (7130, 7267), True, 'import plotly.graph_objs as go\n'), ((7982, 8025), 'os.path.join', 'os.path.join', (['"""figures"""', "(filename + '.html')"], {}), "('figures', filename + '.html')\n", (7994, 8025), False, 'import os\n'), ((8034, 8078), 'plotly.offline.plot', 'plot', (['scenario_fig'], {'filename': 'figure_location'}), '(scenario_fig, filename=figure_location)\n', (8038, 8078), False, 'from plotly.offline import plot\n'), ((5605, 5617), 'numpy.ones', 'np.ones', (['(185)'], {}), '(185)\n', (5612, 5617), True, 'import numpy as np\n'), ((7789, 7828), 'os.path.join', 'os.path.join', (['"""data"""', "(filename + '.csv')"], {}), "('data', filename + '.csv')\n", (7801, 7828), False, 'import os\n'), ((4270, 4300), 'numpy.round', 'np.round', (['recommended_bolus', '(2)'], {}), '(recommended_bolus, 2)\n', (4278, 4300), True, 'import numpy as np\n'), ((4739, 4776), 'numpy.round', 'np.round', (['(recommended_bolus + 0.05)', '(2)'], {}), '(recommended_bolus + 0.05, 2)\n', (4747, 4776), True, 'import numpy as np\n'), ((4894, 4931), 'numpy.round', 'np.round', (['(recommended_bolus + 0.05)', '(2)'], {}), '(recommended_bolus + 0.05, 2)\n', (4902, 4931), True, 'import numpy as np\n')]
|
import time
import numpy as np
import tensorflow as tf
from PIL import Image
from core import utils
import cv2
import argparse
IMAGE_H, IMAGE_W = 416, 416
parser = argparse.ArgumentParser(description="gpu模式下不能设置score_thresh和iou_thresh")
parser.add_argument("--video_id", "-vi", default=0, help="传入相机的id,可以是图片,视频,网络摄像头(eg:http://admin:admin@ip:端口/")
parser.add_argument("--model", "-m", default="cpu", choices=["cpu", "gpu"], help="选择gpu中运行还是在cpu中运行")
parser.add_argument("--score_thresh", "-st", default=0.5, type=float, help="设置score_thresh值,越高所获得的box越少(仅在cpu模式下生效)")
parser.add_argument("--iou_thresh", "-it", default=0.5, type=float, help="设置score_thresh值,越高所获得的box越少(仅在cpu模式下生效)")
flags = parser.parse_args()
classes = utils.read_coco_names('./data/coco.names')
num_classes = len(classes)
graph = tf.Graph()
if flags.model == "cpu":
input_tensor, output_tensors = utils.read_pb_return_tensors(graph, "data/checkpoint/yolov3_cpu_nms.pb",
["Placeholder:0", "concat_9:0", "mul_6:0"])
else:
input_tensor, output_tensors = utils.read_pb_return_tensors(graph, "data/checkpoint/yolov3_gpu_nms.pb",
["Placeholder:0", "concat_10:0", "concat_11:0",
"concat_12:0"])
with tf.Session(graph=graph) as sess:
vid = cv2.VideoCapture(flags.video_id)
while True:
return_value, frame = vid.read()
if return_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame)
else:
raise ValueError("No image!")
img_resized = np.array(image.resize(size=(IMAGE_H, IMAGE_W)), dtype=np.float32)
img_resized = img_resized / 255.
prev_time = time.time()
        # fetch detection results from the model
if flags.model == "cpu":
boxes, scores = sess.run(output_tensors, feed_dict={input_tensor: np.expand_dims(img_resized, axis=0)})
            boxes, scores, labels = utils.cpu_nms(boxes, scores, num_classes, score_thresh=flags.score_thresh, iou_thresh=flags.iou_thresh)
else:
boxes, scores, labels = sess.run(output_tensors,
feed_dict={input_tensor: np.expand_dims(img_resized, axis=0)})
        # draw the detections on the image
image = utils.draw_boxes(image, boxes, scores, labels, classes, (IMAGE_H, IMAGE_W), show=False)
image = utils.draw_Chinese(image, "按q退出", (0, 35))
image = utils.draw_Chinese(image, "按k截图", (0, 55))
curr_time = time.time()
exec_time = curr_time - prev_time
result = np.asarray(image)
info = "time: %.2f ms" % (1000 * exec_time)
cv2.putText(result, text=info, org=(0, 25), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
cv2.imshow("result", result)
keyboard = cv2.waitKey(10)
        # press "k" to take a screenshot
if keyboard & 0xFF == ord('k'):
now = int(round(time.time() * 1000))
now02 = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now / 1000))
filename = "screenshot/frames_%s.jpg" % now02
cv2.imwrite(filename, result)
        # press "q" to quit
if keyboard & 0xFF == ord('q'): break
|
[
"argparse.ArgumentParser",
"core.utils.cpu_nms",
"cv2.imshow",
"core.utils.draw_boxes",
"cv2.cvtColor",
"cv2.imwrite",
"time.localtime",
"core.utils.read_pb_return_tensors",
"cv2.waitKey",
"numpy.asarray",
"tensorflow.Session",
"tensorflow.Graph",
"core.utils.draw_Chinese",
"cv2.putText",
"core.utils.read_coco_names",
"numpy.expand_dims",
"time.time",
"cv2.VideoCapture",
"PIL.Image.fromarray",
"cv2.namedWindow"
] |
[((165, 237), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""gpu模式下不能设置score_thresh和iou_thresh"""'}), "(description='gpu模式下不能设置score_thresh和iou_thresh')\n", (188, 237), False, 'import argparse\n'), ((725, 767), 'core.utils.read_coco_names', 'utils.read_coco_names', (['"""./data/coco.names"""'], {}), "('./data/coco.names')\n", (746, 767), False, 'from core import utils\n'), ((803, 813), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (811, 813), True, 'import tensorflow as tf\n'), ((874, 995), 'core.utils.read_pb_return_tensors', 'utils.read_pb_return_tensors', (['graph', '"""data/checkpoint/yolov3_cpu_nms.pb"""', "['Placeholder:0', 'concat_9:0', 'mul_6:0']"], {}), "(graph, 'data/checkpoint/yolov3_cpu_nms.pb', [\n 'Placeholder:0', 'concat_9:0', 'mul_6:0'])\n", (902, 995), False, 'from core import utils\n'), ((1096, 1237), 'core.utils.read_pb_return_tensors', 'utils.read_pb_return_tensors', (['graph', '"""data/checkpoint/yolov3_gpu_nms.pb"""', "['Placeholder:0', 'concat_10:0', 'concat_11:0', 'concat_12:0']"], {}), "(graph, 'data/checkpoint/yolov3_gpu_nms.pb', [\n 'Placeholder:0', 'concat_10:0', 'concat_11:0', 'concat_12:0'])\n", (1124, 1237), False, 'from core import utils\n'), ((1368, 1391), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (1378, 1391), True, 'import tensorflow as tf\n'), ((1411, 1443), 'cv2.VideoCapture', 'cv2.VideoCapture', (['flags.video_id'], {}), '(flags.video_id)\n', (1427, 1443), False, 'import cv2\n'), ((1833, 1844), 'time.time', 'time.time', ([], {}), '()\n', (1842, 1844), False, 'import time\n'), ((2345, 2436), 'core.utils.draw_boxes', 'utils.draw_boxes', (['image', 'boxes', 'scores', 'labels', 'classes', '(IMAGE_H, IMAGE_W)'], {'show': '(False)'}), '(image, boxes, scores, labels, classes, (IMAGE_H, IMAGE_W),\n show=False)\n', (2361, 2436), False, 'from core import utils\n'), ((2449, 2491), 'core.utils.draw_Chinese', 'utils.draw_Chinese', (['image', '"""按q退出"""', '(0, 35)'], {}), "(image, '按q退出', (0, 35))\n", (2467, 2491), False, 'from core import utils\n'), ((2508, 2550), 'core.utils.draw_Chinese', 'utils.draw_Chinese', (['image', '"""按k截图"""', '(0, 55)'], {}), "(image, '按k截图', (0, 55))\n", (2526, 2550), False, 'from core import utils\n'), ((2571, 2582), 'time.time', 'time.time', ([], {}), '()\n', (2580, 2582), False, 'import time\n'), ((2642, 2659), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2652, 2659), True, 'import numpy as np\n'), ((2720, 2848), 'cv2.putText', 'cv2.putText', (['result'], {'text': 'info', 'org': '(0, 25)', 'fontFace': 'cv2.FONT_HERSHEY_SIMPLEX', 'fontScale': '(1)', 'color': '(255, 0, 0)', 'thickness': '(2)'}), '(result, text=info, org=(0, 25), fontFace=cv2.\n FONT_HERSHEY_SIMPLEX, fontScale=1, color=(255, 0, 0), thickness=2)\n', (2731, 2848), False, 'import cv2\n'), ((2873, 2919), 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('result', cv2.WINDOW_AUTOSIZE)\n", (2888, 2919), False, 'import cv2\n'), ((2937, 2976), 'cv2.cvtColor', 'cv2.cvtColor', (['result', 'cv2.COLOR_RGB2BGR'], {}), '(result, cv2.COLOR_RGB2BGR)\n', (2949, 2976), False, 'import cv2\n'), ((2985, 3013), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'result'], {}), "('result', result)\n", (2995, 3013), False, 'import cv2\n'), ((3034, 3049), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (3045, 3049), False, 'import cv2\n'), ((1546, 1584), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (1558, 
1584), False, 'import cv2\n'), ((1605, 1627), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (1620, 1627), False, 'from PIL import Image\n'), ((2050, 2125), 'core.utils.cpu_nms', 'utils.cpu_nms', (['boxes', 'scores', 'num_classes'], {'score_thresh': '(0.4)', 'iou_thresh': '(0.5)'}), '(boxes, scores, num_classes, score_thresh=0.4, iou_thresh=0.5)\n', (2063, 2125), False, 'from core import utils\n'), ((3311, 3340), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'result'], {}), '(filename, result)\n', (3322, 3340), False, 'import cv2\n'), ((3213, 3239), 'time.localtime', 'time.localtime', (['(now / 1000)'], {}), '(now / 1000)\n', (3227, 3239), False, 'import time\n'), ((1976, 2011), 'numpy.expand_dims', 'np.expand_dims', (['img_resized'], {'axis': '(0)'}), '(img_resized, axis=0)\n', (1990, 2011), True, 'import numpy as np\n'), ((2272, 2307), 'numpy.expand_dims', 'np.expand_dims', (['img_resized'], {'axis': '(0)'}), '(img_resized, axis=0)\n', (2286, 2307), True, 'import numpy as np\n'), ((3137, 3148), 'time.time', 'time.time', ([], {}), '()\n', (3146, 3148), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 10:03:29 2020
@author: sid
"""
from matplotlib import pyplot as plt
import numpy as np
import plotly.express as px
from scipy import ndimage, signal
import pandas as pd
plt.ion()
#Detector calibration and setup
import pyFAI, pyFAI.detectors, fabio
import pyFAI.distortion as dis
from pyFAI.gui import jupyter
from pyFAI.calibrant import get_calibrant
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
import time
# open file for data. Only RAW DATA has radial averaging, as unwarped data is already averaged
fig_z = fabio.open(r"C:\PhD work\PhD_May20\SAXS107cm\Box_01\Aerogel1_2_60s_107cm_01_unwarped.gfrm") #data has additional commands like shape
img_z = fig_z.data
fig_x = fabio.open(r"C:\PhD work\PhD_May20\SAXS107cm\Box_01\Aerogel1_1_60s_107cm_01_unwarped.gfrm")
img_x = fig_x.data
Mask_dat = fabio.open(r"C:\PhD work\PhD_May20\SAXS107cm\Box_01\air_60s_107cm_01_unwarped.gfrm")
msk = Mask_dat.data #Mask_correction
#Flats and background
flat_xz = fabio.open(r"C:\PhD work\PhD_May20\SAXS107cm\Box_01\2D-A5.2_01_001.gfrm") #Masks
flat = fabio.open(r"C:\PhD work\PhD_May20\SAXS107cm\Box_01\2D-A5.2_01_000.gfrm") #Marks
# fig_plt, ax = plt.subplots()
#1 = 0%
#2 = 85%
#3 = 40%
#4 = 70%
#5 = 98%
#6 = 95%
#for headers -> fig_z.header()
#Detector and measurement parameters
wl = 1.5418e-10 # X-ray wavelength in metres (1.5418 Å = 0.15418 nm)
cal = get_calibrant("AgBh") #Silver behenate calibration sample
cal.wavelength=wl
start_time = time.time()
print("PyFAI version", pyFAI.version)
Vantec = pyFAI.detectors.Detector(68e-6, 68e-6)#pixel size
Vantec.max_shape=(2048,2048)#image shape
ai = AzimuthalIntegrator(dist=1.07050, detector=Vantec, wavelength=wl)#initialization of arbitrary detector with given dimensions
#Masks and darks
Ai_mask = ai.create_mask(msk)
ai.mask = Ai_mask
#image center calculation
cent = msk.T
x_cent = np.zeros(len(cent))
x_holder = np.zeros(len(cent))
y_cent = np.zeros(len(cent))
y_holder = np.zeros(len(cent))
#image center calculation
for i in range(len(cent)):
for j in range(len(cent)):
x_holder[j] = cent[i][j] #running X center intensity loop
y_holder[j] = cent[j][i] #running Y center intensity loop
x_cent[i] = x_holder.sum()
y_cent[i] = y_holder.sum()
x_c=y_c = 0
for i in range(len(cent)):
ctr_x = (x_cent[i]*i)
ctr_y = (y_cent[i]*i)
x_c+=ctr_x
y_c+=ctr_y
xx_ctr = x_c/x_cent.sum()
yy_ctr = y_c/y_cent.sum() #weighted average for center position
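# Equivalent vectorised form of the centre-of-mass loops above (sketch only):
# xx_ctr = (np.arange(len(x_cent)) * x_cent).sum() / x_cent.sum()
# yy_ctr = (np.arange(len(y_cent)) * y_cent).sum() / y_cent.sum()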
#finding beam centre with PONI = Point Of Normal Incidence
p1 = 68e-6 * 2048/2
ai.poni1 = p1 - 0.00017
p2 = 68e-6 * 2048/2
ai.poni2 = p2
print(ai)
fake = cal.fake_calibration_image(ai)
#detector setup complete
#Fixing the peak processing with a centering kernel
size = 11 #Odd of course
center = (size-1)//2
y, x = np.ogrid[-center:center+1,-center:center+1]
r2 = x*x + y*y
kernel = (r2<=(center+0.5)**2).astype(float)
kernel /= kernel.sum()
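# The kernel above is a normalised circular (disc) averaging filter of radius
# (size-1)/2 = 5 pixels, used below to smooth the detector image before peak processing.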
fig_fix,ax_fix = plt.subplots()
# ax_fix.imshow(kernel, interpolation="nearest", origin="lower")
cnv = signal.convolve2d(img_z, kernel, mode="same")
#convolution complete, but errors are still present
#CORRECTION FACTORS:
npt = 1000
kwarg = {"npt":npt,
"correctSolidAngle":True,
"polarization_factor":None,
"safe":False}
omega = ai.solidAngleArray(Vantec.shape, absolute=True)
flat = np.ones(Vantec.shape)
res_flat1 = ai.integrate1d(flat, 1000)
res_flat2 = ai.integrate2d(flat, 1000)
# crv = jupyter.plot1d(res_flat1)
# crv.axes.set_xlim(-1,15)
# crv.axes.set_ylim(0.9,1.1)
# crv2 = jupyter.plot2d(res_flat2)
#distortion correction
distort = dis.Distortion(detector=Vantec, shape=Vantec.shape, resize = False, empty=0,mask=msk,method = 'lut')
cor_img = distort.correct_ng(img_z, solidangle = ai.solidAngleArray)
#CORRECTIONS end:
plt.rcParams['figure.dpi'] = 600 #inline plot dpi setting
plt.rcParams["figure.figsize"] = (10,10)
Desc = 1500 #Discretizer (number of radial bins passed to integrate2d) for 2D unwrapping of the scattering
#Plot 1D azimuthal and radial integration -- kept from earlier versions, but results vary; better to use for 1D integration, while the radial integration is questionable
#XZ plane plot
res_z = ai.integrate1d(img_z, 2000, unit="q_nm^-1",filename= "integrated.dat", radial_range = [0.3,2.5], correctSolidAngle=False, mask = msk)
rad_z = ai.integrate_radial(img_z, 2000, radial_range= [0,2.5],unit = "chi_deg", correctSolidAngle=True, mask = msk, npt_rad = 200)
jupyter.plot1d(res_z, label = "Compression axis")
jupyter.plot1d(rad_z)
ai.setSPD(SampleDistance = 1.070500031, Center_1=1024.6-.1, Center_2 = 1026.5+0.1) #maybe removed
# ai.setSPD(SampleDistance = 1.070500031, Center_1=xx_ctr, Center_2 = yy_ctr) #weighted average xx_ctr, yy_ctr better for some check and move
d_z = ai.integrate2d(img_z, Desc, correctSolidAngle = False, radial_range = [0.2,2.5], mask = msk, )
# jupyter.plot2d(d_z)
#XY plane plot
res_x = ai.integrate1d(img_x, 2000, unit="q_nm^-1", filename= "integrated.dat", radial_range = [0.3,5], correctSolidAngle=False, mask= msk)
rad_x = ai.integrate_radial(img_x, 500, unit = "chi_deg", radial_range= [0.2,2.5], correctSolidAngle=True, mask=msk)
jupyter.plot1d(rad_x, label = "XY axis")
# #plotting rad same way
# ai.setSPD(SampleDistance = 1.070500031, Center_1=1024.6, Center_2 = 1026.5) #different from above
d_x = ai.integrate2d(img_x, Desc, correctSolidAngle= False, radial_range = [0.2,2.5], mask = msk)
# jupyter.plot2d(d_x)
#Flats plot
# res_flat = ai.integrate1d(flat.data,2000,unit="q_nm^-1",filename="integrated.dat", radial_range = [0.01,10])
# rad_flat = ai.integrate_radial(flat.data,500, radial_range= [0,10],unit = "chi_deg")
# #raw
# res_flat_raw = ai.integrate1d(flat_xz.data,2000,unit="q_nm^-1",filename="integrated.dat", radial_range = [0.01,10])
# rad_flat_raw = ai.integrate_radial(flat_xz.data,500, radial_range= [0,10],unit = "chi_deg", )
#2D Scatter plot without imshow
jupyter.display(img_z)#2d scatter display
jupyter.display(img_x)
#plotting 2d integrated data
intensity, q, tth = d_z
intensity_x, q_x, tth_x = d_x
z_max = np.zeros(len(intensity))
x_max = np.zeros(len(intensity_x))
z_avg = np.zeros(len(intensity))
x_avg = np.zeros(len(intensity_x))
intensity_mask_x = np.zeros(360)
intensity_mask_z = np.zeros(360)
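# For each azimuthal bin, find the first radial bin whose intensity exceeds 160 counts;
# the bins before it are treated as beamstop shadow and shifted out in the corrected
# intensity arrays below.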
for i in range(len(intensity)): ## Z-axis aligned sample distortion correction for initial beamstop
z_max[i] = intensity[i].max()
z_avg[i] = intensity[i].mean()
x_max[i] = intensity_x[i].max()
x_avg[i] = intensity_x[i].mean()
for j in range(Desc):
if(int(intensity[i][j]>160)):
intensity_mask_z[i] = j
break
for i in range(len(intensity_x)): ## X-axis aligned sample distortion correction for initial beamstop
for j in range(1500):
if(int(intensity_x[i][j]>160)):
intensity_mask_x[i] = j
break
#Deletion of Z-axis limits upto mask
corr_intensity_z = np.zeros((360,Desc)) #corrected intensity
corr_intensity_x = np.zeros((360,Desc)) #corrected intensity
for i in range(len(intensity)): #rearrangement for masking Z-axis
aa = int(intensity_mask_z[i])
for j in range(Desc):
if(j>=int(intensity_mask_z[i])):
k=j-aa
corr_intensity_z[i][k] = intensity[i][j]
for i in range(len(intensity)):
aa = int(intensity_mask_x[i])
for j in range(Desc):
if(j>=int(intensity_mask_x[i])):
k=j-aa
corr_intensity_x[i][k] = intensity_x[i][j]
corr_max_x = np.zeros(len(intensity_x))
corr_avg_x = np.zeros(len(intensity_x))
corr_sum_x = np.zeros(len(intensity_x))
corr_max_z = np.zeros(len(intensity))
corr_avg_z = np.zeros(len(intensity))
corr_sum_z = np.zeros(len(intensity))
for i in range(len(intensity)):
corr_avg_x[i] = corr_intensity_x[i].mean()
corr_avg_z[i] = corr_intensity_z[i].mean()
corr_max_x[i] = corr_intensity_x[i].max()
corr_max_z[i] = corr_intensity_z[i].max()
corr_sum_x[i] = corr_intensity_x[i].sum()
corr_sum_z[i] = corr_intensity_z[i].sum()
z_sum = np.zeros(len(intensity))
x_sum = np.zeros(len(intensity_x))
for i in range(len(intensity)): ## Z-axis aligned sample distortion correction for initial beamstop
z_sum[i] = intensity[i].sum()
x_sum[i] = intensity_x[i].sum()
#moving averaging for correction
def moving_average(x): #(x,w)
mv = np.zeros(len(intensity))
for i in range(len(mv)):
mv[i] = (x[i-2] + x[i-1] +x[i])/3
return mv
# return np.convolve(x, np.ones(w), 'valid') / w
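# Note: x[i-2] and x[i-1] wrap around for i = 0 and 1, so this is effectively a circular
# 3-point average, which suits the 360-degree azimuthal data used here.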
mv_avg = moving_average(x_sum)
# fig_avg = px.scatter(x= tth, y = mv_avg, height = 1200, width = 1200,labels = 'Moving average')
error = (x_sum.max() + x_sum.min())/2
err= np.zeros(len(intensity))
for i in range(360):
err[i] = abs(np.average(x_sum)-x_sum[i])
print (err.sum(), np.average(x_sum))
index_z = np.argmin(z_sum)
index_x = np.argmin(x_sum) # index of the minimum of the array
rotate_img_z = ndimage.rotate(img_z, 45, reshape=False) #rotation to index of minimum theta where we can shift/rotate image
rotate_img_x = ndimage.rotate(img_x, abs(tth[index_x]), reshape=False) #rotation to minimum
proc_img_z = rotate_img_z[800:1248, 800:1248]
proc_img_x = rotate_img_x[800:1248, 800:1248]
# rot_z = integrate2d(rotate_img_z, Desc, radialrange=[0.2, 2.5], mask=msk)
fig_int_azi_z = px.scatter(x = tth, y = z_sum, height = 1200, width = 1200,labels = 'Intensity_z', title="Z-axis orientation")
fig_int_azi_z.update_yaxes(title_font=dict(size=24, family='Courier', color='crimson'), title ='Intensity (a.U.)')
fig_int_azi_z.update_xaxes(title_font=dict(size=24, family='Courier', color='crimson'), title ='Azimuthal angle, 𝛘 (°)')
fig_int_azi_z.update_xaxes(showline=True, linewidth=1, linecolor='black', mirror=True)
fig_int_azi_z.update_yaxes(showline=True, linewidth=1, linecolor='black', mirror=True)
fig_int_azi_x = px.scatter(x = tth_x, y = x_sum, height = 1200, width = 1200, labels = 'Intensity_x', title="X-axis orientation")
fig_int_azi_x.update_yaxes(title_font=dict(size=24, family='Courier', color='crimson'), title ='Intensity (a.U.)')
fig_int_azi_x.update_xaxes(title_font=dict(size=24, family='Courier', color='crimson'), title ='Azimuthal angle, 𝛘 (°)')
fig_int_azi_x.update_xaxes(showline=True, linewidth=1, linecolor='black', mirror=True)
fig_int_azi_x.update_yaxes(showline=True, linewidth=1, linecolor='black', mirror=True) #y=corr_sum_x
fig_int_azi_z.show()
fig_int_azi_x.show()
#showing the 2D scatter plot better than jupyter
plt.figure(figsize=(16,9))
plt.axis("off")
z = plt.imshow(proc_img_z, cmap = 'gnuplot2', vmin=0, vmax = 1000)
plt.colorbar() # Show color bar of above image
# patch = patches.Circle((2048, 2048), radius=500)
# x.set_clip_path(patch)
# plt.imsave('98_z', img_z, cmap='gnuplot', dpi = 1200)
plt.show()
x = plt.imshow(proc_img_x, cmap = 'gnuplot2', vmin=0, vmax = 1000)
a,b = res_z
fig, ax = plt.subplots()
q1 = pd.read_excel(r"C:\Users\sid\Desktop\Cellulose aerogel paper\Lambda_Compression.xlsx", sheet_name="q vs Intensity_SAXS")
plt.loglog(q1.Q_range, q1.Aerogel_5_z, label='95% Compressed_XZ', linewidth = 2.0)
plt.loglog(q1.Q_range, q1.Aerogel_5_x, label='95% Compressed_XY', linewidth = 2.0)
plt.loglog(q1.Q_range, q1.Aerogel_1_z, label='0% Compressed_XZ', linewidth = 2.0)
plt.loglog(q1.Q_range, q1.Aerogel_1_x, label='0% Compressed_XY', linewidth = 2.0)
plt.legend(fontsize=22, frameon=False, loc='lower left')
plt.xlabel('Scattering vector, q ($\mathregular {nm^{-1}}$)', labelpad=20, fontsize=32)
# plt.legend(fontsize=22, frameon=False, loc='best', bbox_to_anchor=(0.6, 0.4))
plt.ylabel('Intensity, a.U.', labelpad=20, fontsize = 32)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.xlim([0.1,5])
plt.ylim([0, 120])
plt.tick_params(axis='both', pad = 10, top=True, right=True)
plt.grid(which='major', color='#DDDDDD', linewidth=1.25)
plt.grid(which='minor', color='#EEEEEE', linestyle=':', linewidth=1.0)
plt.minorticks_on()
for axis in ['top', 'bottom', 'left','right']:
ax.spines[axis].set_linewidth(2.0)
|
[
"matplotlib.pyplot.loglog",
"numpy.ones",
"numpy.argmin",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"plotly.express.scatter",
"pyFAI.azimuthalIntegrator.AzimuthalIntegrator",
"scipy.signal.convolve2d",
"pyFAI.calibrant.get_calibrant",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"fabio.open",
"pyFAI.distortion.Distortion",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"pyFAI.gui.jupyter.display",
"matplotlib.pyplot.show",
"numpy.average",
"pyFAI.gui.jupyter.plot1d",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"pandas.read_excel",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlim",
"numpy.zeros",
"matplotlib.pyplot.axis",
"time.time",
"matplotlib.pyplot.minorticks_on",
"matplotlib.pyplot.xlabel",
"scipy.ndimage.rotate",
"pyFAI.detectors.Detector"
] |
[((233, 242), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (240, 242), True, 'from matplotlib import pyplot as plt\n'), ((592, 697), 'fabio.open', 'fabio.open', (['"""C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\Aerogel1_2_60s_107cm_01_unwarped.gfrm"""'], {}), "(\n 'C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\Aerogel1_2_60s_107cm_01_unwarped.gfrm'\n )\n", (602, 697), False, 'import pyFAI, pyFAI.detectors, fabio\n'), ((754, 859), 'fabio.open', 'fabio.open', (['"""C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\Aerogel1_1_60s_107cm_01_unwarped.gfrm"""'], {}), "(\n 'C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\Aerogel1_1_60s_107cm_01_unwarped.gfrm'\n )\n", (764, 859), False, 'import pyFAI, pyFAI.detectors, fabio\n'), ((878, 976), 'fabio.open', 'fabio.open', (['"""C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\air_60s_107cm_01_unwarped.gfrm"""'], {}), "(\n 'C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\air_60s_107cm_01_unwarped.gfrm'\n )\n", (888, 976), False, 'import pyFAI, pyFAI.detectors, fabio\n'), ((1035, 1112), 'fabio.open', 'fabio.open', (['"""C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\2D-A5.2_01_001.gfrm"""'], {}), "('C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\2D-A5.2_01_001.gfrm')\n", (1045, 1112), False, 'import pyFAI, pyFAI.detectors, fabio\n'), ((1124, 1201), 'fabio.open', 'fabio.open', (['"""C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\2D-A5.2_01_000.gfrm"""'], {}), "('C:\\\\PhD work\\\\PhD_May20\\\\SAXS107cm\\\\Box_01\\\\2D-A5.2_01_000.gfrm')\n", (1134, 1201), False, 'import pyFAI, pyFAI.detectors, fabio\n'), ((1414, 1435), 'pyFAI.calibrant.get_calibrant', 'get_calibrant', (['"""AgBh"""'], {}), "('AgBh')\n", (1427, 1435), False, 'from pyFAI.calibrant import get_calibrant\n'), ((1493, 1504), 'time.time', 'time.time', ([], {}), '()\n', (1502, 1504), False, 'import time\n'), ((1554, 1596), 'pyFAI.detectors.Detector', 'pyFAI.detectors.Detector', (['(6.8e-05)', '(6.8e-05)'], {}), '(6.8e-05, 6.8e-05)\n', (1578, 1596), False, 'import pyFAI, pyFAI.detectors, fabio\n'), ((1652, 1716), 'pyFAI.azimuthalIntegrator.AzimuthalIntegrator', 'AzimuthalIntegrator', ([], {'dist': '(1.0705)', 'detector': 'Vantec', 'wavelength': 'wl'}), '(dist=1.0705, detector=Vantec, wavelength=wl)\n', (1671, 1716), False, 'from pyFAI.azimuthalIntegrator import AzimuthalIntegrator\n'), ((2992, 3006), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3004, 3006), True, 'from matplotlib import pyplot as plt\n'), ((3080, 3125), 'scipy.signal.convolve2d', 'signal.convolve2d', (['img_z', 'kernel'], {'mode': '"""same"""'}), "(img_z, kernel, mode='same')\n", (3097, 3125), False, 'from scipy import ndimage, signal\n'), ((3402, 3423), 'numpy.ones', 'np.ones', (['Vantec.shape'], {}), '(Vantec.shape)\n', (3409, 3423), True, 'import numpy as np\n'), ((3668, 3770), 'pyFAI.distortion.Distortion', 'dis.Distortion', ([], {'detector': 'Vantec', 'shape': 'Vantec.shape', 'resize': '(False)', 'empty': '(0)', 'mask': 'msk', 'method': '"""lut"""'}), "(detector=Vantec, shape=Vantec.shape, resize=False, empty=0,\n mask=msk, method='lut')\n", (3682, 3770), True, 'import pyFAI.distortion as dis\n'), ((4476, 4523), 'pyFAI.gui.jupyter.plot1d', 'jupyter.plot1d', (['res_z'], {'label': '"""Compression axis"""'}), "(res_z, label='Compression axis')\n", (4490, 4523), False, 'from pyFAI.gui import jupyter\n'), ((4527, 4548), 'pyFAI.gui.jupyter.plot1d', 'jupyter.plot1d', (['rad_z'], {}), '(rad_z)\n', (4541, 4548), False, 'from pyFAI.gui import 
jupyter\n'), ((5196, 5234), 'pyFAI.gui.jupyter.plot1d', 'jupyter.plot1d', (['rad_x'], {'label': '"""XY axis"""'}), "(rad_x, label='XY axis')\n", (5210, 5234), False, 'from pyFAI.gui import jupyter\n'), ((5963, 5985), 'pyFAI.gui.jupyter.display', 'jupyter.display', (['img_z'], {}), '(img_z)\n', (5978, 5985), False, 'from pyFAI.gui import jupyter\n'), ((6006, 6028), 'pyFAI.gui.jupyter.display', 'jupyter.display', (['img_x'], {}), '(img_x)\n', (6021, 6028), False, 'from pyFAI.gui import jupyter\n'), ((6279, 6292), 'numpy.zeros', 'np.zeros', (['(360)'], {}), '(360)\n', (6287, 6292), True, 'import numpy as np\n'), ((6313, 6326), 'numpy.zeros', 'np.zeros', (['(360)'], {}), '(360)\n', (6321, 6326), True, 'import numpy as np\n'), ((6999, 7020), 'numpy.zeros', 'np.zeros', (['(360, Desc)'], {}), '((360, Desc))\n', (7007, 7020), True, 'import numpy as np\n'), ((7061, 7082), 'numpy.zeros', 'np.zeros', (['(360, Desc)'], {}), '((360, Desc))\n', (7069, 7082), True, 'import numpy as np\n'), ((8982, 8998), 'numpy.argmin', 'np.argmin', (['z_sum'], {}), '(z_sum)\n', (8991, 8998), True, 'import numpy as np\n'), ((9010, 9026), 'numpy.argmin', 'np.argmin', (['x_sum'], {}), '(x_sum)\n', (9019, 9026), True, 'import numpy as np\n'), ((9081, 9121), 'scipy.ndimage.rotate', 'ndimage.rotate', (['img_z', '(45)'], {'reshape': '(False)'}), '(img_z, 45, reshape=False)\n', (9095, 9121), False, 'from scipy import ndimage, signal\n'), ((9475, 9580), 'plotly.express.scatter', 'px.scatter', ([], {'x': 'tth', 'y': 'z_sum', 'height': '(1200)', 'width': '(1200)', 'labels': '"""Intensity_z"""', 'title': '"""Z-axis orientation"""'}), "(x=tth, y=z_sum, height=1200, width=1200, labels='Intensity_z',\n title='Z-axis orientation')\n", (9485, 9580), True, 'import plotly.express as px\n'), ((10017, 10124), 'plotly.express.scatter', 'px.scatter', ([], {'x': 'tth_x', 'y': 'x_sum', 'height': '(1200)', 'width': '(1200)', 'labels': '"""Intensity_x"""', 'title': '"""X-axis orientation"""'}), "(x=tth_x, y=x_sum, height=1200, width=1200, labels='Intensity_x',\n title='X-axis orientation')\n", (10027, 10124), True, 'import plotly.express as px\n'), ((10656, 10683), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (10666, 10683), True, 'from matplotlib import pyplot as plt\n'), ((10684, 10699), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10692, 10699), True, 'from matplotlib import pyplot as plt\n'), ((10705, 10763), 'matplotlib.pyplot.imshow', 'plt.imshow', (['proc_img_z'], {'cmap': '"""gnuplot2"""', 'vmin': '(0)', 'vmax': '(1000)'}), "(proc_img_z, cmap='gnuplot2', vmin=0, vmax=1000)\n", (10715, 10763), True, 'from matplotlib import pyplot as plt\n'), ((10769, 10783), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (10781, 10783), True, 'from matplotlib import pyplot as plt\n'), ((10952, 10962), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10960, 10962), True, 'from matplotlib import pyplot as plt\n'), ((10970, 11028), 'matplotlib.pyplot.imshow', 'plt.imshow', (['proc_img_x'], {'cmap': '"""gnuplot2"""', 'vmin': '(0)', 'vmax': '(1000)'}), "(proc_img_x, cmap='gnuplot2', vmin=0, vmax=1000)\n", (10980, 11028), True, 'from matplotlib import pyplot as plt\n'), ((11059, 11073), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11071, 11073), True, 'from matplotlib import pyplot as plt\n'), ((11080, 11214), 'pandas.read_excel', 'pd.read_excel', (['"""C:\\\\Users\\\\sid\\\\Desktop\\\\Cellulose aerogel paper\\\\Lambda_Compression.xlsx"""'], 
{'sheet_name': '"""q vs Intensity_SAXS"""'}), "(\n 'C:\\\\Users\\\\sid\\\\Desktop\\\\Cellulose aerogel paper\\\\Lambda_Compression.xlsx'\n , sheet_name='q vs Intensity_SAXS')\n", (11093, 11214), True, 'import pandas as pd\n'), ((11202, 11287), 'matplotlib.pyplot.loglog', 'plt.loglog', (['q1.Q_range', 'q1.Aerogel_5_z'], {'label': '"""95% Compressed_XZ"""', 'linewidth': '(2.0)'}), "(q1.Q_range, q1.Aerogel_5_z, label='95% Compressed_XZ', linewidth=2.0\n )\n", (11212, 11287), True, 'from matplotlib import pyplot as plt\n'), ((11286, 11371), 'matplotlib.pyplot.loglog', 'plt.loglog', (['q1.Q_range', 'q1.Aerogel_5_x'], {'label': '"""95% Compressed_XY"""', 'linewidth': '(2.0)'}), "(q1.Q_range, q1.Aerogel_5_x, label='95% Compressed_XY', linewidth=2.0\n )\n", (11296, 11371), True, 'from matplotlib import pyplot as plt\n'), ((11370, 11449), 'matplotlib.pyplot.loglog', 'plt.loglog', (['q1.Q_range', 'q1.Aerogel_1_z'], {'label': '"""0% Compressed_XZ"""', 'linewidth': '(2.0)'}), "(q1.Q_range, q1.Aerogel_1_z, label='0% Compressed_XZ', linewidth=2.0)\n", (11380, 11449), True, 'from matplotlib import pyplot as plt\n'), ((11453, 11532), 'matplotlib.pyplot.loglog', 'plt.loglog', (['q1.Q_range', 'q1.Aerogel_1_x'], {'label': '"""0% Compressed_XY"""', 'linewidth': '(2.0)'}), "(q1.Q_range, q1.Aerogel_1_x, label='0% Compressed_XY', linewidth=2.0)\n", (11463, 11532), True, 'from matplotlib import pyplot as plt\n'), ((11536, 11592), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(22)', 'frameon': '(False)', 'loc': '"""lower left"""'}), "(fontsize=22, frameon=False, loc='lower left')\n", (11546, 11592), True, 'from matplotlib import pyplot as plt\n'), ((11594, 11686), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Scattering vector, q ($\\\\mathregular {nm^{-1}}$)"""'], {'labelpad': '(20)', 'fontsize': '(32)'}), "('Scattering vector, q ($\\\\mathregular {nm^{-1}}$)', labelpad=20,\n fontsize=32)\n", (11604, 11686), True, 'from matplotlib import pyplot as plt\n'), ((11764, 11819), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity, a.U."""'], {'labelpad': '(20)', 'fontsize': '(32)'}), "('Intensity, a.U.', labelpad=20, fontsize=32)\n", (11774, 11819), True, 'from matplotlib import pyplot as plt\n'), ((11823, 11846), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (11833, 11846), True, 'from matplotlib import pyplot as plt\n'), ((11848, 11871), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (11858, 11871), True, 'from matplotlib import pyplot as plt\n'), ((11873, 11891), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.1, 5]'], {}), '([0.1, 5])\n', (11881, 11891), True, 'from matplotlib import pyplot as plt\n'), ((11892, 11910), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 120]'], {}), '([0, 120])\n', (11900, 11910), True, 'from matplotlib import pyplot as plt\n'), ((11912, 11970), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'pad': '(10)', 'top': '(True)', 'right': '(True)'}), "(axis='both', pad=10, top=True, right=True)\n", (11927, 11970), True, 'from matplotlib import pyplot as plt\n'), ((11974, 12030), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'color': '"""#DDDDDD"""', 'linewidth': '(1.25)'}), "(which='major', color='#DDDDDD', linewidth=1.25)\n", (11982, 12030), True, 'from matplotlib import pyplot as plt\n'), ((12032, 12102), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'color': '"""#EEEEEE"""', 'linestyle': '""":"""', 'linewidth': 
'(1.0)'}), "(which='minor', color='#EEEEEE', linestyle=':', linewidth=1.0)\n", (12040, 12102), True, 'from matplotlib import pyplot as plt\n'), ((12104, 12123), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (12121, 12123), True, 'from matplotlib import pyplot as plt\n'), ((8950, 8967), 'numpy.average', 'np.average', (['x_sum'], {}), '(x_sum)\n', (8960, 8967), True, 'import numpy as np\n'), ((8903, 8920), 'numpy.average', 'np.average', (['x_sum'], {}), '(x_sum)\n', (8913, 8920), True, 'import numpy as np\n')]
|
import numpy as np
from network import Netowork
from layers import FCLayer, ActivationLayer
from activations import tanh, tanh_prime
from loss import mse, mse_prime
# training data
x_train = np.array([[[0,0]],[[0,1]],[[1,0]],[[1,1]]])
y_train = np.array([[[0]],[[1]],[[1]],[[0]]])
# network
net = Netowork()
net.add(FCLayer(2,3))
net.add(ActivationLayer(tanh, tanh_prime))
net.add(FCLayer(3,1))
net.add(ActivationLayer(tanh, tanh_prime))
# Train
net.use(mse,mse_prime)
net.fit(x_train, y_train, epochs=100, learning_rate=0.1)
# Test
print('testing with training data')
out = net.predict(x_train)
print(out)
|
[
"network.Netowork",
"layers.FCLayer",
"layers.ActivationLayer",
"numpy.array"
] |
[((193, 243), 'numpy.array', 'np.array', (['[[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]]'], {}), '([[[0, 0]], [[0, 1]], [[1, 0]], [[1, 1]]])\n', (201, 243), True, 'import numpy as np\n'), ((247, 285), 'numpy.array', 'np.array', (['[[[0]], [[1]], [[1]], [[0]]]'], {}), '([[[0]], [[1]], [[1]], [[0]]])\n', (255, 285), True, 'import numpy as np\n'), ((300, 310), 'network.Netowork', 'Netowork', ([], {}), '()\n', (308, 310), False, 'from network import Netowork\n'), ((319, 332), 'layers.FCLayer', 'FCLayer', (['(2)', '(3)'], {}), '(2, 3)\n', (326, 332), False, 'from layers import FCLayer, ActivationLayer\n'), ((341, 374), 'layers.ActivationLayer', 'ActivationLayer', (['tanh', 'tanh_prime'], {}), '(tanh, tanh_prime)\n', (356, 374), False, 'from layers import FCLayer, ActivationLayer\n'), ((384, 397), 'layers.FCLayer', 'FCLayer', (['(3)', '(1)'], {}), '(3, 1)\n', (391, 397), False, 'from layers import FCLayer, ActivationLayer\n'), ((406, 439), 'layers.ActivationLayer', 'ActivationLayer', (['tanh', 'tanh_prime'], {}), '(tanh, tanh_prime)\n', (421, 439), False, 'from layers import FCLayer, ActivationLayer\n')]
|
import numpy as np
import warnings
import copy
from scipy.special import expit
from .stratification import Strata
def verify_positive(value):
"""Throws exception if value is not positive"""
if not value > 0:
raise ValueError("expected positive integer")
return value
def verify_predictions(predictions):
"""Ensures that predictions is stored as a numpy array and checks that
all values are either 0 or 1.
"""
# Check that it contains only zeros and ones
predictions = np.array(predictions, copy=False)
if not np.array_equal(predictions, predictions.astype(bool)):
raise ValueError("predictions contains invalid values. " +
"The only permitted values are 0 or 1.")
if predictions.ndim == 1:
predictions = predictions[:,np.newaxis]
return predictions
def verify_scores(scores):
"""Ensures that scores is stored as a numpy array and checks that all
values are finite.
"""
scores = np.array(scores, copy=False)
if np.any(~np.isfinite(scores)):
raise ValueError("scores contains invalid values. " +
"Please check that all values are finite.")
if scores.ndim == 1:
scores = scores[:,np.newaxis]
return scores
def verify_consistency(predictions, scores, proba, opt_class):
"""Verifies that all arrays have consistent dimensions. Also verifies
that the scores are consistent with proba.
Returns
-------
proba, opt_class
"""
if predictions.shape != scores.shape:
raise ValueError("predictions and scores arrays have inconsistent " +
"dimensions.")
n_class = scores.shape[1] if scores.ndim > 1 else 1
# If proba not given, default to False for all classifiers
if proba is None:
proba = np.repeat(False, n_class)
# If opt_class is not given, default to True for all classifiers
if opt_class is None:
opt_class = np.repeat(True, n_class)
# Convert to numpy arrays if necessary
proba = np.array(proba, dtype=bool, ndmin=1)
opt_class = np.array(opt_class, dtype=bool, ndmin=1)
if np.sum(opt_class) < 1:
raise ValueError("opt_class should contain at least one True value.")
if predictions.shape[1] != len(proba):
raise ValueError("mismatch in shape of proba and predictions.")
if predictions.shape[1] != len(opt_class):
raise ValueError("mismatch in shape of opt_class and predictions.")
for m in range(n_class):
if (np.any(np.logical_or(scores[:,m] < 0, scores[:,m] > 1)) and proba[m]):
warnings.warn("scores fall outside the [0,1] interval for " +
"classifier {}. Setting proba[m]=False.".format(m))
proba[m] = False
return proba, opt_class
def verify_unit_interval(value):
"""Throw an exception if the value is not on the unit interval [0,1].
"""
if not (value >= 0 and value <= 1):
raise ValueError("expected value on the interval [0, 1].")
return value
def verify_boolean(value):
"""Throws an exception if value is not a bool
"""
if type(value)!=bool:
raise ValueError("expected boolean value.")
return value
def verify_identifiers(identifiers, n_items):
"""Ensure that identifiers has a compatible length and that its elements
are unique"""
if identifiers is None:
return identifiers
identifiers = np.array(identifiers, copy=False)
# Check length for consistency
if len(identifiers) != n_items:
raise ValueError("identifiers has inconsistent dimension.")
# Check that identifiers are unique
if len(np.unique(identifiers)) != n_items:
raise ValueError("identifiers contains duplicate values.")
return identifiers
def verify_strata(strata):
"""Ensure that input is of type `Strata`"""
if strata is None:
return strata
if not isinstance(strata, Strata):
raise ValueError("expected an instance of the Strata class")
return strata
def scores_to_probs(scores, proba, eps=0.01):
"""Transforms scores to probabilities by applying the logistic function"""
if np.any(~proba):
# Need to convert some of the scores into probabilities
probs = copy.deepcopy(scores)
n_class = len(proba)
for m in range(n_class):
if not proba[m]:
#TODO: incorporate threshold (currently assuming zero)
# find most extreme absolute score
max_extreme_score = max(np.abs(np.min(scores[:,m])),\
np.abs(np.max(scores[:,m])))
k = np.log((1-eps)/eps)/max_extreme_score # scale factor
# self._probs[:,m] = expit(k * self.scores[:,m])
probs[:,m] = expit(k * scores[:,m])
else:
probs[:,m] = scores[:,m]
return probs
else:
return scores
|
[
"copy.deepcopy",
"numpy.sum",
"numpy.log",
"numpy.isfinite",
"numpy.any",
"scipy.special.expit",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.logical_or",
"numpy.unique",
"numpy.repeat"
] |
[((510, 543), 'numpy.array', 'np.array', (['predictions'], {'copy': '(False)'}), '(predictions, copy=False)\n', (518, 543), True, 'import numpy as np\n'), ((990, 1018), 'numpy.array', 'np.array', (['scores'], {'copy': '(False)'}), '(scores, copy=False)\n', (998, 1018), True, 'import numpy as np\n'), ((2049, 2085), 'numpy.array', 'np.array', (['proba'], {'dtype': 'bool', 'ndmin': '(1)'}), '(proba, dtype=bool, ndmin=1)\n', (2057, 2085), True, 'import numpy as np\n'), ((2102, 2142), 'numpy.array', 'np.array', (['opt_class'], {'dtype': 'bool', 'ndmin': '(1)'}), '(opt_class, dtype=bool, ndmin=1)\n', (2110, 2142), True, 'import numpy as np\n'), ((3451, 3484), 'numpy.array', 'np.array', (['identifiers'], {'copy': '(False)'}), '(identifiers, copy=False)\n', (3459, 3484), True, 'import numpy as np\n'), ((4184, 4198), 'numpy.any', 'np.any', (['(~proba)'], {}), '(~proba)\n', (4190, 4198), True, 'import numpy as np\n'), ((1826, 1851), 'numpy.repeat', 'np.repeat', (['(False)', 'n_class'], {}), '(False, n_class)\n', (1835, 1851), True, 'import numpy as np\n'), ((1968, 1992), 'numpy.repeat', 'np.repeat', (['(True)', 'n_class'], {}), '(True, n_class)\n', (1977, 1992), True, 'import numpy as np\n'), ((2151, 2168), 'numpy.sum', 'np.sum', (['opt_class'], {}), '(opt_class)\n', (2157, 2168), True, 'import numpy as np\n'), ((4280, 4301), 'copy.deepcopy', 'copy.deepcopy', (['scores'], {}), '(scores)\n', (4293, 4301), False, 'import copy\n'), ((1034, 1053), 'numpy.isfinite', 'np.isfinite', (['scores'], {}), '(scores)\n', (1045, 1053), True, 'import numpy as np\n'), ((3677, 3699), 'numpy.unique', 'np.unique', (['identifiers'], {}), '(identifiers)\n', (3686, 3699), True, 'import numpy as np\n'), ((2540, 2589), 'numpy.logical_or', 'np.logical_or', (['(scores[:, m] < 0)', '(scores[:, m] > 1)'], {}), '(scores[:, m] < 0, scores[:, m] > 1)\n', (2553, 2589), True, 'import numpy as np\n'), ((4817, 4840), 'scipy.special.expit', 'expit', (['(k * scores[:, m])'], {}), '(k * scores[:, m])\n', (4822, 4840), False, 'from scipy.special import expit\n'), ((4670, 4693), 'numpy.log', 'np.log', (['((1 - eps) / eps)'], {}), '((1 - eps) / eps)\n', (4676, 4693), True, 'import numpy as np\n'), ((4562, 4582), 'numpy.min', 'np.min', (['scores[:, m]'], {}), '(scores[:, m])\n', (4568, 4582), True, 'import numpy as np\n'), ((4628, 4648), 'numpy.max', 'np.max', (['scores[:, m]'], {}), '(scores[:, m])\n', (4634, 4648), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import math
import sys
import os
data_dir = sys.argv[1]
out_dir = sys.argv[2]
dataX = os.path.join(sys.argv[1], 'logisticX.csv')
dataY = os.path.join(sys.argv[1], 'logisticY.csv')
out = os.path.join(sys.argv[2], 'Q3a.txt')
outfile = open(out, "w")
# 3. Logistic Regression
print("################ 3. Logistic Regression ################\n", file=outfile)
trainX = np.loadtxt(dataX, delimiter=',')
trainY = np.loadtxt(dataY)
def normalize(X):
mu = np.mean(X)
sigma = np.std(X)
return (X - mu)/sigma
X1 = normalize(trainX[:,0])
X2 = normalize(trainX[:,1])
X = np.column_stack((X1, X2))
X = np.column_stack((np.ones(X.shape[0]), X))
Y = trainY.reshape(-1,1)
m = len(trainY)
# (a) Logistic Regression
def sigmoid(z):
return 1.0/(1 + np.exp(-z))
def hw(theta, x):
return sigmoid(np.dot(x, theta))
def LLw(y, h):
return -1*np.sum(y*np.log(h) + (1-y)*np.log(1-h))
def dJw(x, y, h):
return np.dot(x.T, (h - y))
def Hessian(x, h):
A = np.diag((h*(1-h)).reshape(-1,))
return np.dot(np.dot(x.T, A), x)
def LogisticRegression(x, y):
theta = np.zeros((x.shape[1], 1))
h = hw(theta, x)
prevCost = LLw(y, h)
converged = False
itr = 0
while not converged:
H = Hessian(x, h)
theta = theta - np.dot(np.linalg.pinv(H), dJw(x, y, h))
h = hw(theta, x)
cost = LLw(y, h)
error = abs(cost - prevCost)
prevCost = cost
itr += 1
if error < 1e-10 or itr > 10:
converged = True
print('iteration {}: cost = {} error = {} '.format(itr, cost, error), end = '', file=outfile)
print('w = {0},{1},{2}'.format(theta[0], theta[1], theta[2]), file=outfile)
print("Final Cost =", cost, file=outfile)
print("Final Parameters = {0},{1},{2}\n".format(theta[0], theta[1], theta[2]), file=outfile)
return theta
theta = LogisticRegression(X, Y)
|
[
"numpy.log",
"numpy.std",
"numpy.zeros",
"numpy.ones",
"numpy.linalg.pinv",
"numpy.mean",
"matplotlib.use",
"numpy.loadtxt",
"numpy.exp",
"numpy.column_stack",
"numpy.dot",
"os.path.join"
] |
[((37, 58), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (51, 58), False, 'import matplotlib\n'), ((244, 286), 'os.path.join', 'os.path.join', (['sys.argv[1]', '"""logisticX.csv"""'], {}), "(sys.argv[1], 'logisticX.csv')\n", (256, 286), False, 'import os\n'), ((295, 337), 'os.path.join', 'os.path.join', (['sys.argv[1]', '"""logisticY.csv"""'], {}), "(sys.argv[1], 'logisticY.csv')\n", (307, 337), False, 'import os\n'), ((344, 380), 'os.path.join', 'os.path.join', (['sys.argv[2]', '"""Q3a.txt"""'], {}), "(sys.argv[2], 'Q3a.txt')\n", (356, 380), False, 'import os\n'), ((524, 556), 'numpy.loadtxt', 'np.loadtxt', (['dataX'], {'delimiter': '""","""'}), "(dataX, delimiter=',')\n", (534, 556), True, 'import numpy as np\n'), ((566, 583), 'numpy.loadtxt', 'np.loadtxt', (['dataY'], {}), '(dataY)\n', (576, 583), True, 'import numpy as np\n'), ((723, 748), 'numpy.column_stack', 'np.column_stack', (['(X1, X2)'], {}), '((X1, X2))\n', (738, 748), True, 'import numpy as np\n'), ((609, 619), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (616, 619), True, 'import numpy as np\n'), ((629, 638), 'numpy.std', 'np.std', (['X'], {}), '(X)\n', (635, 638), True, 'import numpy as np\n'), ((1056, 1074), 'numpy.dot', 'np.dot', (['x.T', '(h - y)'], {}), '(x.T, h - y)\n', (1062, 1074), True, 'import numpy as np\n'), ((1208, 1233), 'numpy.zeros', 'np.zeros', (['(x.shape[1], 1)'], {}), '((x.shape[1], 1))\n', (1216, 1233), True, 'import numpy as np\n'), ((770, 789), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (777, 789), True, 'import numpy as np\n'), ((944, 960), 'numpy.dot', 'np.dot', (['x', 'theta'], {}), '(x, theta)\n', (950, 960), True, 'import numpy as np\n'), ((1149, 1163), 'numpy.dot', 'np.dot', (['x.T', 'A'], {}), '(x.T, A)\n', (1155, 1163), True, 'import numpy as np\n'), ((897, 907), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (903, 907), True, 'import numpy as np\n'), ((1370, 1387), 'numpy.linalg.pinv', 'np.linalg.pinv', (['H'], {}), '(H)\n', (1384, 1387), True, 'import numpy as np\n'), ((998, 1007), 'numpy.log', 'np.log', (['h'], {}), '(h)\n', (1004, 1007), True, 'import numpy as np\n'), ((1016, 1029), 'numpy.log', 'np.log', (['(1 - h)'], {}), '(1 - h)\n', (1022, 1029), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.spatial.distance import cdist
# ========================================================================
# USAGE: [Coeff]=LLC_coding_appr(B,X,knn,lambda)
# Approximated Locality-constraint Linear Coding
#
# Inputs
# B -M x d codebook, M entries in a d-dim space
# X -N x d matrix, N data points in a d-dim space
# knn -number of nearest neighboring
# lambda -regularization to improve condition
#
# Outputs
# Coeff -N x M matrix, each row is a code for corresponding X
#
# <NAME>, March 19, 2010
# ========================================================================
def llc_coding_approx(B, X, k_nn=5, beta=1e-4):
D = cdist(X, B, 'euclidean')
N = X.shape[0]
I = np.zeros((N, k_nn), 'int32')
for i in range(N):
d = D[i, :]
idx = np.argsort(d)
I[i, :] = idx[:k_nn]
II = np.eye(k_nn)
coeffs = np.zeros((N, B.shape[0]))
for i in range(N):
idx = I[i, :]
z = B[idx, :] - np.tile(X[i, :], (k_nn, 1)) # shift ith point to origin
z = z.dot(z.transpose())
z = z + II * beta * np.trace(z) # regularization (K>D)
w = np.linalg.solve(z, np.ones((k_nn, 1)))
w = w / np.sum(w) # enforce sum(w) = 1
coeffs[i, idx] = w.ravel()
return coeffs
|
[
"scipy.spatial.distance.cdist",
"numpy.trace",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.argsort",
"numpy.tile",
"numpy.eye"
] |
[((707, 731), 'scipy.spatial.distance.cdist', 'cdist', (['X', 'B', '"""euclidean"""'], {}), "(X, B, 'euclidean')\n", (712, 731), False, 'from scipy.spatial.distance import cdist\n'), ((759, 787), 'numpy.zeros', 'np.zeros', (['(N, k_nn)', '"""int32"""'], {}), "((N, k_nn), 'int32')\n", (767, 787), True, 'import numpy as np\n'), ((897, 909), 'numpy.eye', 'np.eye', (['k_nn'], {}), '(k_nn)\n', (903, 909), True, 'import numpy as np\n'), ((923, 948), 'numpy.zeros', 'np.zeros', (['(N, B.shape[0])'], {}), '((N, B.shape[0]))\n', (931, 948), True, 'import numpy as np\n'), ((845, 858), 'numpy.argsort', 'np.argsort', (['d'], {}), '(d)\n', (855, 858), True, 'import numpy as np\n'), ((1018, 1045), 'numpy.tile', 'np.tile', (['X[i, :]', '(k_nn, 1)'], {}), '(X[i, :], (k_nn, 1))\n', (1025, 1045), True, 'import numpy as np\n'), ((1213, 1231), 'numpy.ones', 'np.ones', (['(k_nn, 1)'], {}), '((k_nn, 1))\n', (1220, 1231), True, 'import numpy as np\n'), ((1249, 1258), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (1255, 1258), True, 'import numpy as np\n'), ((1135, 1146), 'numpy.trace', 'np.trace', (['z'], {}), '(z)\n', (1143, 1146), True, 'import numpy as np\n')]
|
import numpy as np
import copy
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from betl.linear_system import DiscreteTimeLinearSystem as LinearSystem
from betl.linear_system import StateFeedbackLaw, ExcitingStateFeedbackLaw
from betl.synthesis.robust_lqr_synth import RLQRSyntheziser
from betl.uncertain_state_space_model import MatrixNormal, UncertainStateSpaceModel
from betl.cost_analysis import LinearQuadraticCostAnalysis, EmpiricalQuadraticCostAnalysis
from betl.excitation_strategy import optimal_signal
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.WARN)
def plot_1d_result(result):
import matplotlib as mpl
from utils.postprocessing_utils import initialize_plot, set_size
# Use the pgf backend (must be set before pyplot imported)
mpl.use('pgf')
# You can also load latex packages
mpl.rcParams.update({
"pgf.preamble": '\\usepackage[utf8x]{inputenc}\\usepackage[light]{kpfonts}\\usepackage{amsfonts}\\usepackage{amsmath}\\usepackage{amssymb}',
})
import seaborn as sns
import matplotlib.pyplot as plt
settings = result['settings']
system = LinearSystem(settings['system']['A'], settings['system']['B'], settings['system']['V'])
K_prior = result['K0']
controller = StateFeedbackLaw(K=K_prior)
system.controller = controller
T = settings['T']
ussm_prior = result['prior_ussm']
N_s = result['Ns']
K_s = result['Ks']
beta_data = result['beta_data']
synthesis_settings = result['synthesis_settings']
J_K0_true = result['J_K0_true']
J_pi_true = result['J_pi_true']
J_K_opt_true = result['J_K_opt_true']
J_K0 = result['J_K0']
G = result['G']
J_pi = result['J_pi']
E_J_K0 = result['E_J_K0']
E_G = result['E_G']
factor = result['factor']
E_J_pi = result['E_J_pi']
print('-----------------------------------------------------------')
A_prior, B_prior, V_prior = ussm_prior.sample(2000, c=synthesis_settings['confidence_interval'])
data_list = list()
for A, B, V in zip(A_prior, B_prior, V_prior):
system_sample = dict()
system_sample['A'] = A[0][0]
system_sample['B'] = B[0][0]
system_sample['V'] = V[0][0]
system_sample['dist'] = 'prior'
data_list.append(system_sample)
systems_prior = pd.DataFrame.from_dict(data_list)
# prior_plot = sns.kdeplot(data=systems_prior, x="A", y="B", fill=True)
# plt.plot(system.A, system.B, 'rx', ms=10)
# plt.show()
def plot_1d_ussm(axis, ussm, prior_list, color_prior, color_post, color_true):
A_post, B_post, V_post = ussm.sample(2000, c=synthesis_settings['confidence_interval'])
# data_list = list()
data_list = copy.deepcopy(prior_list)
for A, B, V in zip(A_post, B_post, V_post):
system_sample = dict()
system_sample['A'] = A[0][0]
system_sample['B'] = B[0][0]
system_sample['V'] = V[0][0]
system_sample['dist'] = 'posterior'
data_list.append(system_sample)
import pandas as pd
systems_post = pd.DataFrame.from_dict(data_list)
prior = systems_post.loc[systems_post['dist'] == 'prior']
post = systems_post.loc[systems_post['dist'] == 'posterior']
# plot = sns.kdeplot(ax=axis, data=prior, x="A", y="B", levels=10, fill=True, color=color_prior, alpha=.5)
plot = sns.kdeplot(ax=axis, data=post, x="A", y="B", levels=10, fill=True, color=color_post)
# plot.set_xlim(0.88, 1.12)
# plot.set_ylim(0.18, 1.82)
plot.plot(system.A, system.B, 'x', mew=2.5, ms=8, color=color_true)
# plt.show()
return plot
from matplotlib.gridspec import GridSpec
# ADJUST PATH IN "initialize_plot"
c, params = initialize_plot('CDC_paper') # specify font size etc.,
plt.rcParams.update(params)
# CDC column width is 245pt, for double column plot change to 505pt
x, y = set_size(245,
subplots=(1, 1), # specify subplot layout for nice scaling
fraction=1.) # scale width/height
colors = sns.color_palette("deep", 5)
colors[4] = 'black'
sns.set_palette(colors)
sns.set_style("whitegrid")
sns.set_context("paper", rc={"font.size": 8, "axes.titlesize": 8, "axes.labelsize": 8})
# def format_axes(fig):
# for i, ax in enumerate(fig.axes):
# ax.text(0.5, 0.5, "ax%d" % (i+1), va="center", ha="center")
# ax.tick_params(labelbottom=False, labelleft=False)
fig = plt.figure(figsize=(x, y))
gs = GridSpec(1, 2, figure=fig)
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1], sharey=ax1, sharex=ax1)
ax2.set(ylabel=None)
ax2.tick_params('y', labelleft=False)
prior_samples = data_list
plt_p = plot_1d_ussm(ax1, result['post_ussm'][0]['ussm'], prior_samples, color_prior=colors[0], color_post=colors[0], color_true=colors[4])
plt_i = plot_1d_ussm(ax2, result['post_ussm'][2]['ussm'], prior_samples, color_prior=colors[0], color_post=colors[2], color_true=colors[4])
plt_i.plot(system.A - 0.15, system.B + 0.2, 'x', mew=2.5, ms=8, color=colors[3], alpha=0.7)
# ax2.set(ylabel=None)
# ax2.set_yticklabels([])
#### BOTTOM ARROW
# Create the arrow
# 1. Get transformation operators for axis and figure
ax0tr = ax1.transData # Axis 0 -> Display
ax1tr = ax2.transData # Axis 1 -> Display
figtr = fig.transFigure.inverted() # Display -> Figure
# 2. Transform arrow start point from axis 0 to figure coordinates
ptB = figtr.transform(ax0tr.transform((1.2, 0.8)))
# 3. Transform arrow end point from axis 1 to figure coordinates
ptE = figtr.transform(ax1tr.transform((0.9, 0.8)))
# 4. Create the patch
from matplotlib.patches import FancyArrowPatch
arrow = FancyArrowPatch(
ptB, ptE, transform=fig.transFigure, # Place arrow in figure coord system
fc="k", connectionstyle="arc3,rad=0.3", arrowstyle='simple', alpha=0.7,
mutation_scale=20.
)
ax1.text(.95, 0.07, "learn", size=8, ha="center",
transform=ax1.transAxes)
ax1.text(.95, 0.73, "trigger", size=8, ha="center",
transform=ax1.transAxes)
# 5. Add patch to list of objects to draw onto the figure
fig.patches.append(arrow)
# UPPER ARROW
ax0tr = ax1.transData # Axis 0 -> Display
ax1tr = ax2.transData # Axis 1 -> Display
figtr = fig.transFigure.inverted() # Display -> Figure
# 2. Transform arrow start point from axis 0 to figure coordinates
ptB2 = figtr.transform(ax1tr.transform((0.9, 1.2)))
# 3. Transform arrow end point from axis 1 to figure coordinates
ptE2 = figtr.transform(ax0tr.transform((1.2, 1.2)))
arrow2 = FancyArrowPatch(
ptB2, ptE2, transform=fig.transFigure, # Place arrow in figure coord system
fc="k", connectionstyle="arc3,rad=0.3", arrowstyle='simple', alpha=0.7,
mutation_scale=20.
)
# 5. Add patch to list of objects to draw onto the figure
fig.patches.append(arrow2)
# change ARROW
ax0tr = ax2.transData # Axis 0 -> Display
ax1tr = ax2.transData # Axis 1 -> Display
figtr = fig.transFigure.inverted() # Display -> Figure
# 2. Transform arrow start point from axis 0 to figure coordinates
ptB2 = figtr.transform(ax1tr.transform((system.A[0][0], system.B[0][0])))
# 3. Transform arrow end point from axis 1 to figure coordinates
ptE2 = figtr.transform(ax0tr.transform((system.A[0][0] - 0.15, system.B[0][0] + 0.2)))
arrow2 = FancyArrowPatch(
(system.A[0][0], system.B[0][0]), (system.A[0][0] - 0.15, system.B[0][0] + 0.2), # transform=fig.transFigure, # Place arrow in figure coord system
fc=colors[3], connectionstyle="arc3,rad=0.3", arrowstyle='simple', alpha=0.7,
mutation_scale=12.
)
# 5. Add patch to list of objects to draw onto the figure
ax2.add_patch(arrow2)
ax2.text(1.1, 1.1, "change", size=8, ha="center")
ax1.title.set_text(r'Robust control $K_0$')
ax2.title.set_text(r'Learned control $K_N$')
ax1.set_xlabel("$A$")
ax1.set_ylabel("$B$")
ax2.set_xlabel("$A$")
gs.update(left=0.15, right=0.99, top=0.9, bottom=0.2, wspace=0.1, hspace=0)
# plt.savefig('1d-example-300dpi.eps', dpi=300)
# plt.savefig('1d-example-300dpi.pdf', dpi=300)
# plt.show()
plt.savefig('figures/algo.pgf', format='pgf')
plt.close()
if __name__ == "__main__":
np.random.seed(2)
import pickle
with open(f'data/1d-result-data.pickle', 'rb') as file:
result = pickle.load(file)
plot_1d_result(result)
|
[
"numpy.random.seed",
"seaborn.kdeplot",
"matplotlib.pyplot.figure",
"pickle.load",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"matplotlib.patches.FancyArrowPatch",
"matplotlib.pyplot.rcParams.update",
"utils.postprocessing_utils.initialize_plot",
"betl.linear_system.DiscreteTimeLinearSystem",
"seaborn.set_context",
"betl.linear_system.StateFeedbackLaw",
"seaborn.set_style",
"copy.deepcopy",
"pandas.DataFrame.from_dict",
"matplotlib.use",
"seaborn.set_palette",
"logging.basicConfig",
"utils.postprocessing_utils.set_size",
"seaborn.color_palette",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.savefig"
] |
[((569, 627), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.WARN'}), '(stream=sys.stdout, level=logging.WARN)\n', (588, 627), False, 'import logging\n'), ((825, 839), 'matplotlib.use', 'mpl.use', (['"""pgf"""'], {}), "('pgf')\n", (832, 839), True, 'import matplotlib as mpl\n'), ((884, 1055), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'pgf.preamble':\n '\\\\usepackage[utf8x]{inputenc}\\\\usepackage[light]{kpfonts}\\\\usepackage{amsfonts}\\\\usepackage{amsmath}\\\\usepackage{amssymb}'\n }"], {}), "({'pgf.preamble':\n '\\\\usepackage[utf8x]{inputenc}\\\\usepackage[light]{kpfonts}\\\\usepackage{amsfonts}\\\\usepackage{amsmath}\\\\usepackage{amssymb}'\n })\n", (903, 1055), True, 'import matplotlib as mpl\n'), ((1174, 1266), 'betl.linear_system.DiscreteTimeLinearSystem', 'LinearSystem', (["settings['system']['A']", "settings['system']['B']", "settings['system']['V']"], {}), "(settings['system']['A'], settings['system']['B'], settings[\n 'system']['V'])\n", (1186, 1266), True, 'from betl.linear_system import DiscreteTimeLinearSystem as LinearSystem\n'), ((1306, 1333), 'betl.linear_system.StateFeedbackLaw', 'StateFeedbackLaw', ([], {'K': 'K_prior'}), '(K=K_prior)\n', (1322, 1333), False, 'from betl.linear_system import StateFeedbackLaw, ExcitingStateFeedbackLaw\n'), ((2365, 2398), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_list'], {}), '(data_list)\n', (2387, 2398), True, 'import pandas as pd\n'), ((3830, 3858), 'utils.postprocessing_utils.initialize_plot', 'initialize_plot', (['"""CDC_paper"""'], {}), "('CDC_paper')\n", (3845, 3858), False, 'from utils.postprocessing_utils import initialize_plot, set_size\n'), ((3890, 3917), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (3909, 3917), True, 'import matplotlib.pyplot as plt\n'), ((4002, 4046), 'utils.postprocessing_utils.set_size', 'set_size', (['(245)'], {'subplots': '(1, 1)', 'fraction': '(1.0)'}), '(245, subplots=(1, 1), fraction=1.0)\n', (4010, 4046), False, 'from utils.postprocessing_utils import initialize_plot, set_size\n'), ((4165, 4193), 'seaborn.color_palette', 'sns.color_palette', (['"""deep"""', '(5)'], {}), "('deep', 5)\n", (4182, 4193), True, 'import seaborn as sns\n'), ((4222, 4245), 'seaborn.set_palette', 'sns.set_palette', (['colors'], {}), '(colors)\n', (4237, 4245), True, 'import seaborn as sns\n'), ((4250, 4276), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (4263, 4276), True, 'import seaborn as sns\n'), ((4281, 4372), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'rc': "{'font.size': 8, 'axes.titlesize': 8, 'axes.labelsize': 8}"}), "('paper', rc={'font.size': 8, 'axes.titlesize': 8,\n 'axes.labelsize': 8})\n", (4296, 4372), True, 'import seaborn as sns\n'), ((4593, 4619), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(x, y)'}), '(figsize=(x, y))\n', (4603, 4619), True, 'import matplotlib.pyplot as plt\n'), ((4630, 4656), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(1)', '(2)'], {'figure': 'fig'}), '(1, 2, figure=fig)\n', (4638, 4656), False, 'from matplotlib.gridspec import GridSpec\n'), ((5895, 6048), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['ptB', 'ptE'], {'transform': 'fig.transFigure', 'fc': '"""k"""', 'connectionstyle': '"""arc3,rad=0.3"""', 'arrowstyle': '"""simple"""', 'alpha': '(0.7)', 'mutation_scale': '(20.0)'}), "(ptB, ptE, transform=fig.transFigure, fc='k',\n connectionstyle='arc3,rad=0.3', 
arrowstyle='simple', alpha=0.7,\n mutation_scale=20.0)\n", (5910, 6048), False, 'from matplotlib.patches import FancyArrowPatch\n'), ((6826, 6981), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['ptB2', 'ptE2'], {'transform': 'fig.transFigure', 'fc': '"""k"""', 'connectionstyle': '"""arc3,rad=0.3"""', 'arrowstyle': '"""simple"""', 'alpha': '(0.7)', 'mutation_scale': '(20.0)'}), "(ptB2, ptE2, transform=fig.transFigure, fc='k',\n connectionstyle='arc3,rad=0.3', arrowstyle='simple', alpha=0.7,\n mutation_scale=20.0)\n", (6841, 6981), False, 'from matplotlib.patches import FancyArrowPatch\n'), ((7630, 7834), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['(system.A[0][0], system.B[0][0])', '(system.A[0][0] - 0.15, system.B[0][0] + 0.2)'], {'fc': 'colors[3]', 'connectionstyle': '"""arc3,rad=0.3"""', 'arrowstyle': '"""simple"""', 'alpha': '(0.7)', 'mutation_scale': '(12.0)'}), "((system.A[0][0], system.B[0][0]), (system.A[0][0] - 0.15, \n system.B[0][0] + 0.2), fc=colors[3], connectionstyle='arc3,rad=0.3',\n arrowstyle='simple', alpha=0.7, mutation_scale=12.0)\n", (7645, 7834), False, 'from matplotlib.patches import FancyArrowPatch\n'), ((8451, 8496), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/algo.pgf"""'], {'format': '"""pgf"""'}), "('figures/algo.pgf', format='pgf')\n", (8462, 8496), True, 'import matplotlib.pyplot as plt\n'), ((8501, 8512), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8510, 8512), True, 'import matplotlib.pyplot as plt\n'), ((8545, 8562), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (8559, 8562), True, 'import numpy as np\n'), ((2771, 2796), 'copy.deepcopy', 'copy.deepcopy', (['prior_list'], {}), '(prior_list)\n', (2784, 2796), False, 'import copy\n'), ((3152, 3185), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_list'], {}), '(data_list)\n', (3174, 3185), True, 'import pandas as pd\n'), ((3452, 3542), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'ax': 'axis', 'data': 'post', 'x': '"""A"""', 'y': '"""B"""', 'levels': '(10)', 'fill': '(True)', 'color': 'color_post'}), "(ax=axis, data=post, x='A', y='B', levels=10, fill=True, color=\n color_post)\n", (3463, 3542), True, 'import seaborn as sns\n'), ((8659, 8676), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (8670, 8676), False, 'import pickle\n')]
|
'''
Feature Engineering and model training
'''
import pickle
import pandas as pd
import numpy as np
from sklearn.decomposition import NMF
from sklearn.impute import KNNImputer
links = pd.DataFrame(pd.read_csv('links.csv'))
movies_ = pd.DataFrame(pd.read_csv('movies.csv'))
ratings = pd.DataFrame(pd.read_csv('ratings.csv'))
tags = pd.DataFrame(pd.read_csv('tags.csv'))
# Take columns from movies into links
links['title'] = movies_['title']
links['genres'] = movies_['genres']
# Set Indexes
links.set_index('movieId', inplace=True)
ratings.set_index(['movieId', 'userId'], inplace=True)
tags.set_index(['movieId', 'userId'], inplace=True)
# Merge links, ratings and tags into "df"
links_ratings = pd.merge(left=links, right=ratings, left_index=True, right_index=True)
df = pd.merge(left=links_ratings, right=tags, how='left', left_on=['movieId', 'userId'], right_on=['movieId', 'userId'])
df.rename(columns={'timestamp_x': 'timestamp_rating', 'timestamp_y': 'timestamp_tag'}, inplace=True)
### Non-Negative Matrix Factorization ###
ratings_ = pd.DataFrame(pd.read_csv('ratings.csv'))
users = range(1, 611)
# user_movie_ratings_matrix
R = ratings_.pivot(index='userId', columns='movieId', values='rating')
### K-Nearest-Neighbor (KNN) ###
imputer = KNNImputer()
Rtrans = imputer.fit_transform(R)
### Non-Negative Matrix Factorization (NMF) ###
model = NMF(n_components=100)
model.fit(Rtrans)
# movie-genre matrix
Q = pd.DataFrame(model.components_, columns=R.columns.to_list(), index=range(model.n_components))
# user-genre matrix
P = pd.DataFrame(model.transform(Rtrans), columns=range(model.n_components), index=users)
# Reconstructed matrix
Rhat = pd.DataFrame(np.dot(P, Q))
# Reconstruction error
model.reconstruction_err_
### Save the model ###
binary = pickle.dumps(model)
open('nmf_model.bin', 'wb').write(binary)
|
[
"sklearn.decomposition.NMF",
"pandas.read_csv",
"pandas.merge",
"sklearn.impute.KNNImputer",
"numpy.dot",
"pickle.dumps"
] |
[((701, 771), 'pandas.merge', 'pd.merge', ([], {'left': 'links', 'right': 'ratings', 'left_index': '(True)', 'right_index': '(True)'}), '(left=links, right=ratings, left_index=True, right_index=True)\n', (709, 771), True, 'import pandas as pd\n'), ((777, 896), 'pandas.merge', 'pd.merge', ([], {'left': 'links_ratings', 'right': 'tags', 'how': '"""left"""', 'left_on': "['movieId', 'userId']", 'right_on': "['movieId', 'userId']"}), "(left=links_ratings, right=tags, how='left', left_on=['movieId',\n 'userId'], right_on=['movieId', 'userId'])\n", (785, 896), True, 'import pandas as pd\n'), ((1256, 1268), 'sklearn.impute.KNNImputer', 'KNNImputer', ([], {}), '()\n', (1266, 1268), False, 'from sklearn.impute import KNNImputer\n'), ((1360, 1381), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': '(100)'}), '(n_components=100)\n', (1363, 1381), False, 'from sklearn.decomposition import NMF\n'), ((1772, 1791), 'pickle.dumps', 'pickle.dumps', (['model'], {}), '(model)\n', (1784, 1791), False, 'import pickle\n'), ((198, 222), 'pandas.read_csv', 'pd.read_csv', (['"""links.csv"""'], {}), "('links.csv')\n", (209, 222), True, 'import pandas as pd\n'), ((247, 272), 'pandas.read_csv', 'pd.read_csv', (['"""movies.csv"""'], {}), "('movies.csv')\n", (258, 272), True, 'import pandas as pd\n'), ((297, 323), 'pandas.read_csv', 'pd.read_csv', (['"""ratings.csv"""'], {}), "('ratings.csv')\n", (308, 323), True, 'import pandas as pd\n'), ((345, 368), 'pandas.read_csv', 'pd.read_csv', (['"""tags.csv"""'], {}), "('tags.csv')\n", (356, 368), True, 'import pandas as pd\n'), ((1062, 1088), 'pandas.read_csv', 'pd.read_csv', (['"""ratings.csv"""'], {}), "('ratings.csv')\n", (1073, 1088), True, 'import pandas as pd\n'), ((1675, 1687), 'numpy.dot', 'np.dot', (['P', 'Q'], {}), '(P, Q)\n', (1681, 1687), True, 'import numpy as np\n')]
|
#SPDX-License-Identifier: MIT
import pandas as pd
import sqlalchemy as s
import numpy as np
import re
class GHTorrent(object):
"""Uses GHTorrent and other GitHub data sources and returns dataframes with interesting GitHub indicators"""
def __init__(self, dbstr):
"""
Connect to GHTorrent
:param dbstr: The [database string](http://docs.sqlalchemy.org/en/latest/core/engines.html) to connect to the GHTorrent database
"""
self.DB_STR = dbstr
self.db = s.create_engine(dbstr)
try:
self.userid('howderek')
except Exception as e:
print("Could not connect to database.\nError: " + str(e))
def __single_table_count_by_date(self, table, repo_col='project_id', user_col='author_id', group_by="week"):
"""
Generates query string to count occurances of rows per date for a given table.
External input must never be sent to this function, it is for internal use only.
:param table: The table in GHTorrent to generate the string for
:param repo_col: The column in that table with the project ids
:param user_col: The column in that table with the user ids
:param group_by: Default week; Options raw, day, week, month, year; Selects period of time to be grouped by
:return: Query string
"""
if group_by == "raw":
return """
SELECT date(created_at) AS "date", {2} AS "user_id"
FROM {0}
WHERE {1} = :repoid
""".format(table, repo_col, user_col)
if group_by == "day":
return """
SELECT date(created_at) AS "date", COUNT(*) AS "{0}"
FROM {0}
WHERE {1} = :repoid
GROUP BY DATE(created_at)""".format(table, repo_col)
if group_by == "week":
return """
SELECT date(created_at) AS "date", COUNT(*) AS "{0}"
FROM {0}
WHERE {1} = :repoid
GROUP BY YEARWEEK(created_at)""".format(table, repo_col)
if group_by == "month":
return """
SELECT date(created_at) AS "date", COUNT(*) AS "{0}"
FROM {0}
WHERE {1} = :repoid
GROUP BY MONTH(created_at), YEAR(created_at)""".format(table, repo_col)
if group_by == "year":
return """
SELECT date(created_at) AS "date", COUNT(*) AS "{0}"
FROM {0}
WHERE {1} = :repoid
GROUP BY YEAR(created_at)""".format(table, repo_col)
def repoid(self, owner_or_repoid, repo=None):
"""
Returns a repository's ID as it appears in the GHTorrent projects table
github.com/[owner]/[project]
:param owner: The username of a project's owner
:param repo: The name of the repository
:return: The repository's ID as it appears in the GHTorrent projects table
"""
repoid = 0
if repo is None:
repoid = owner_or_repoid
else:
reposql = s.sql.text('SELECT projects.id FROM projects INNER JOIN users ON projects.owner_id = users.id WHERE projects.name = :repo AND users.login = :repoowner')
result = self.db.execute(reposql, repo=repo, repoowner=owner_or_repoid)
for row in result:
repoid = row[0]
return repoid
def userid(self, username):
"""
Returns the userid given a username
:param username: GitHub username to be matched against the login table in GHTorrent
:return: The id from the users table in GHTorrent
"""
reposql = s.sql.text('SELECT users.id FROM users WHERE users.login = :username')
userid = 0
result = self.db.execute(reposql, username=username)
for row in result:
userid = row[0]
return userid
# Basic timeseries queries
def stargazers(self, owner, repo=None, group_by="week"):
"""
Timeseries of when people starred a repo
        :param owner: The name of the project owner or the id of the project in the projects table. Use repoid() to get this.
:param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with stargazers/day
"""
repoid = self.repoid(owner, repo)
stargazersSQL = s.sql.text(self.__single_table_count_by_date('watchers', 'repo_id', 'user_id', group_by=group_by))
df = pd.read_sql(stargazersSQL, self.db, params={"repoid": str(repoid)})
df.drop(df.index[:1], inplace=True)
return df
def commits(self, owner, repo=None, group_by="week"):
"""
Timeseries of all the commits on a repo
        :param owner: The name of the project owner or the id of the project in the projects table. Use repoid() to get this.
:param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with commits/day
"""
repoid = self.repoid(owner, repo)
commitsSQL = s.sql.text(self.__single_table_count_by_date('commits', group_by=group_by))
return pd.read_sql(commitsSQL, self.db, params={"repoid": str(repoid)})
def forks(self, owner, repo=None, group_by="week"):
"""
Timeseries of when a repo's forks were created
        :param owner: The name of the project owner or the id of the project in the projects table. Use repoid() to get this.
:param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with forks/day
"""
repoid = self.repoid(owner, repo)
forksSQL = s.sql.text(self.__single_table_count_by_date('projects', 'forked_from', 'owner_id', group_by=group_by))
return pd.read_sql(forksSQL, self.db, params={"repoid": str(repoid)}).drop(0)
def issues(self, owner, repo=None, group_by="week"):
"""
        Timeseries of when issues were created for a repo
        :param owner: The name of the project owner or the id of the project in the projects table. Use repoid() to get this.
:param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with issues/day
"""
repoid = self.repoid(owner, repo)
issuesSQL = s.sql.text(self.__single_table_count_by_date('issues', 'repo_id', 'reporter_id', group_by=group_by))
return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)})
def issues_with_close(self, owner, repo=None):
"""
How long on average each week it takes to close an issue
        :param owner: The name of the project owner or the id of the project in the projects table. Use repoid() to get this.
        :param repo: The name of the repo. Unneeded if repository id was passed as owner.
        :return: DataFrame with each issue's id, creation date, and the number of days it took to close
"""
repoid = self.repoid(owner, repo)
issuesSQL = s.sql.text("""
SELECT issues.id as "id",
issues.created_at as "date",
DATEDIFF(closed.created_at, issues.created_at) AS "days_to_close"
FROM issues
JOIN
(SELECT * FROM issue_events
WHERE issue_events.action = "closed") closed
ON issues.id = closed.issue_id
WHERE issues.repo_id = :repoid""")
return pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)})
def pulls(self, owner, repo=None):
"""
        Timeseries of pull request creation, along with their associated activity
        :param owner: The name of the project owner or the id of the project in the projects table. Use repoid() to get this.
:param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with pull requests by day
"""
repoid = self.repoid(owner, repo)
pullsSQL = s.sql.text("""
SELECT date(pull_request_history.created_at) AS "date",
(COUNT(pull_requests.id)) AS "pull_requests",
(SELECT COUNT(*) FROM pull_request_comments
WHERE pull_request_comments.pull_request_id = pull_request_history.pull_request_id) AS "comments"
FROM pull_request_history
INNER JOIN pull_requests
ON pull_request_history.pull_request_id = pull_requests.id
WHERE pull_requests.head_repo_id = :repoid
AND pull_request_history.action = "merged"
GROUP BY WEEK(pull_request_history.created_at)
""")
return pd.read_sql(pullsSQL, self.db, params={"repoid": str(repoid)})
def contributors(self, owner, repo=None):
"""
All the contributors to a project and the counts of their contributions
        :param owner: The name of the project owner or the id of the project in the projects table. Use repoid() to get this.
        :param repo: The name of the repo. Unneeded if repository id was passed as owner.
        :return: DataFrame with each user's id, login, and their contribution counts by type
"""
repoid = self.repoid(owner, repo)
contributorsSQL = s.sql.text("""
SELECT * FROM
(
SELECT users.id as "user_id",
users.login as "login",
users.location as "location",
com.count as "commits",
pulls.count as "pull_requests",
iss.count as "issues",
comcoms.count as "commit_comments",
pullscoms.count as "pull_request_comments",
isscoms.count as "issue_comments",
com.count + pulls.count + iss.count + comcoms.count + pullscoms.count + isscoms.count as "total"
FROM users
LEFT JOIN (SELECT committer_id AS id, COUNT(*) AS count FROM commits INNER JOIN project_commits ON project_commits.commit_id = commits.id WHERE project_commits.project_id = :repoid GROUP BY commits.committer_id) AS com
ON com.id = users.id
LEFT JOIN (SELECT pull_request_history.actor_id AS id, COUNT(*) AS count FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = 'merged' GROUP BY pull_request_history.actor_id) AS pulls
ON pulls.id = users.id
LEFT JOIN (SELECT reporter_id AS id, COUNT(*) AS count FROM issues WHERE issues.repo_id = :repoid GROUP BY issues.reporter_id) AS iss
ON iss.id = users.id
LEFT JOIN (SELECT commit_comments.user_id AS id, COUNT(*) AS count FROM commit_comments JOIN project_commits ON project_commits.commit_id = commit_comments.commit_id WHERE project_commits.project_id = :repoid GROUP BY commit_comments.user_id) AS comcoms
ON comcoms.id = users.id
LEFT JOIN (SELECT pull_request_comments.user_id AS id, COUNT(*) AS count FROM pull_request_comments JOIN pull_requests ON pull_request_comments.pull_request_id = pull_requests.id WHERE pull_requests.base_repo_id = :repoid GROUP BY pull_request_comments.user_id) AS pullscoms
ON pullscoms.id = users.id
LEFT JOIN (SELECT issue_comments.user_id AS id, COUNT(*) AS count FROM issue_comments JOIN issues ON issue_comments.issue_id = issues.id WHERE issues.repo_id = :repoid GROUP BY issue_comments.user_id) AS isscoms
ON isscoms.id = users.id
GROUP BY users.id
ORDER BY com.count DESC
) user_activity
WHERE commits IS NOT NULL
OR pull_requests IS NOT NULL
OR issues IS NOT NULL
OR commit_comments IS NOT NULL
OR pull_request_comments IS NOT NULL
OR issue_comments IS NOT NULL;
""")
return pd.read_sql(contributorsSQL, self.db, index_col=['user_id'], params={"repoid": str(repoid)})
def contributions(self, owner, repo=None, userid=None):
"""
Timeseries of all the contributions to a project, optionally limited to a specific user
        :param owner: The name of the project owner or the id of the project in the projects table
:param repo: The name of the repo. Unneeded if repository id was passed as owner.
:param userid: The id of user if you want to limit the contributions to a specific user.
:return: DataFrame with all of the contributions seperated by day.
"""
repoid = self.repoid(owner, repo)
rawContributionsSQL = """
SELECT DATE(coms.created_at) as "date",
coms.count as "commits",
pulls.count as "pull_requests",
iss.count as "issues",
comcoms.count as "commit_comments",
pullscoms.count as "pull_request_comments",
isscoms.count as "issue_comments",
coms.count + pulls.count + iss.count + comcoms.count + pullscoms.count + isscoms.count as "total"
FROM (SELECT created_at AS created_at, COUNT(*) AS count FROM commits INNER JOIN project_commits ON project_commits.commit_id = commits.id WHERE project_commits.project_id = :repoid[[ AND commits.author_id = :userid]] GROUP BY DATE(created_at)) coms
LEFT JOIN (SELECT pull_request_history.created_at AS created_at, COUNT(*) AS count FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = 'merged'[[ AND pull_request_history.actor_id = :userid]] GROUP BY DATE(created_at)) AS pulls
ON DATE(pulls.created_at) = DATE(coms.created_at)
LEFT JOIN (SELECT issues.created_at AS created_at, COUNT(*) AS count FROM issues WHERE issues.repo_id = :repoid[[ AND issues.reporter_id = :userid]] GROUP BY DATE(created_at)) AS iss
ON DATE(iss.created_at) = DATE(coms.created_at)
LEFT JOIN (SELECT commit_comments.created_at AS created_at, COUNT(*) AS count FROM commit_comments JOIN project_commits ON project_commits.commit_id = commit_comments.commit_id WHERE project_commits.project_id = :repoid[[ AND commit_comments.user_id = :userid]] GROUP BY DATE(commit_comments.created_at)) AS comcoms
ON DATE(comcoms.created_at) = DATE(coms.created_at)
LEFT JOIN (SELECT pull_request_comments.created_at AS created_at, COUNT(*) AS count FROM pull_request_comments JOIN pull_requests ON pull_request_comments.pull_request_id = pull_requests.id WHERE pull_requests.base_repo_id = :repoid[[ AND pull_request_comments.user_id = :userid]] GROUP BY DATE(pull_request_comments.created_at)) AS pullscoms
ON DATE(pullscoms.created_at) = DATE(coms.created_at)
LEFT JOIN (SELECT issue_comments.created_at AS created_at, COUNT(*) AS count FROM issue_comments JOIN issues ON issue_comments.issue_id = issues.id WHERE issues.repo_id = :repoid[[ AND issue_comments.user_id = :userid]] GROUP BY DATE(issue_comments.created_at)) AS isscoms
ON DATE(isscoms.created_at) = DATE(coms.created_at)
GROUP BY YEARWEEK(coms.created_at)
ORDER BY DATE(coms.created_at)
"""
if (userid is not None and len(userid) > 0):
rawContributionsSQL = rawContributionsSQL.replace('[[', '')
rawContributionsSQL = rawContributionsSQL.replace(']]', '')
parameterized = s.sql.text(rawContributionsSQL)
return pd.read_sql(parameterized, self.db, params={"repoid": str(repoid), "userid": str(userid)})
else:
rawContributionsSQL = re.sub(r'\[\[.+?\]\]', '', rawContributionsSQL)
parameterized = s.sql.text(rawContributionsSQL)
return pd.read_sql(parameterized, self.db, params={"repoid": str(repoid)})
def committer_locations(self, owner, repo=None):
"""
Return committers and their locations
@todo: Group by country code instead of users, needs the new schema
        :param owner: The name of the project owner or the id of the project in the projects table.
:param repo: The name of the repo.
:return: DataFrame with users and locations sorted by commtis
"""
repoid = self.repoid(owner, repo)
rawContributionsSQL = s.sql.text("""
SELECT users.login, users.location, COUNT(*) AS "commits"
FROM commits
JOIN project_commits
ON commits.id = project_commits.commit_id
JOIN users
ON users.id = commits.author_id
WHERE project_commits.project_id = :repoid
GROUP BY users.id
ORDER BY commits DESC
""")
return pd.read_sql(rawContributionsSQL, self.db, params={"repoid": str(repoid)})
def issue_response_time(self, owner, repo=None):
"""
        How long it takes for issues to be responded to by people who have commits associated with the project
        :param owner: The name of the project owner or the id of the project in the projects table.
        :param repo: The name of the repo. Unneeded if repository id was passed as owner.
        :return: DataFrame with the issue's id, the date it was opened, and the date it was first responded to
"""
repoid = self.repoid(owner, repo)
issuesSQL = s.sql.text("""
SELECT issues.created_at AS "created_at",
MIN(issue_comments.created_at) AS "responded_at"
FROM issues
JOIN issue_comments
ON issue_comments.issue_id = issues.id
WHERE issue_comments.user_id IN
(SELECT users.id
FROM users
JOIN commits
WHERE commits.author_id = users.id
AND commits.project_id = :repoid)
AND issues.repo_id = :repoid
GROUP BY issues.id
""")
df = pd.read_sql(issuesSQL, self.db, params={"repoid": str(repoid)})
df['created_at'] = pd.to_datetime(df['created_at'])
df['responded_at'] = pd.to_datetime(df['responded_at'])
df['hours_between'] = np.floor((df['responded_at'] - df['created_at']) / np.timedelta64(1, 'h'))
df = df['hours_between'].value_counts().sort_index().reset_index().rename(columns={'index': 'hours_between', 'hours_between': 'count'})
df = df[df['hours_between'] < 48]
return df
def pull_acceptance_rate(self, owner, repo=None):
"""
        Timeseries of the pull request acceptance rate (number of pull requests merged on a date divided by the number of pull requests opened on that date)
        :param owner: The name of the project owner or the id of the project in the projects table.
:param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with the pull acceptance rate and the dates
"""
repoid = self.repoid(owner, repo)
pullAcceptanceSQL = s.sql.text("""
SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate"
FROM
(SELECT COUNT(DISTINCT pull_request_id) AS num_approved, DATE(pull_request_history.created_at) AS accepted_on
FROM pull_request_history
JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id
WHERE action = 'merged' AND pull_requests.base_repo_id = :repoid
GROUP BY accepted_on) accepted
JOIN
(SELECT count(distinct pull_request_id) AS num_open, DATE(pull_request_history.created_at) AS date_created
FROM pull_request_history
JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id
WHERE action = 'opened'
AND pull_requests.base_repo_id = :repoid
GROUP BY date_created) opened
ON opened.date_created = accepted.accepted_on
""")
return pd.read_sql(pullAcceptanceSQL, self.db, params={"repoid": str(repoid)})
def classify_contributors(self, owner, repo=None):
"""
Classify everyone who has interacted with a repo into
- user
- tester
- rejected_contributor
- contributor
- major_contributor
- maintainer
        :param owner: The name of the project owner or the id of the project in the projects table.
:param repo: The name of the repo. Unneeded if repository id was passed as owner.
:return: DataFrame with the login and role of contributors
"""
repoid = self.repoid(owner, repo)
contributors = self.contributors(repoid, repo=None)
sums = contributors.sum()
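        # classify() compares each contributor's activity against the project-wide
        # totals; the thresholds below are heuristics, and later rules override
        # earlier ones (a maintainer also satisfies the contributor checks).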
def classify(row):
role = 'user'
ratio = row / sums
if (ratio['issue_comments'] > 0.05):
role = 'tester'
if (row['pull_requests'] >= 1 and row['commits'] == 0):
role = 'rejected_contributor'
if (row['pull_requests'] >= 1 and row['commits'] >= 1):
role = 'contributor'
if (ratio['pull_requests'] > 0.10 or ratio['commits'] > 0.01):
role = 'major_contributor'
if (ratio['commits'] > 0.02 or ratio['pull_request_comments'] > 0.15):
role = 'maintainer'
return pd.Series({'login': row['login'], 'role': role})
roles = contributors.apply(classify, axis=1)
return roles
def community_age(self, owner, repo=None):
"""
        Information helpful in determining a community's age
For now, returns the date of the first of each type of action (fork, pull request, etc.)
"""
repoid = self.repoid(owner, repo)
communityAgeSQL = s.sql.text("""
SELECT DATE(proj.created_at) AS "project",
DATE(commits.created_at) AS "commit",
DATE(frk.created_at) AS "fork",
DATE(iss.created_at) AS "issue",
DATE(pr.created_at) AS "pull_request"
FROM commits
LEFT JOIN (SELECT forked_from_id AS "repo_id", created_at AS "created_at" FROM forks WHERE forks.forked_from_id = :repoid ORDER BY created_at DESC LIMIT 1) AS frk
ON frk.repo_id = commits.project_id
LEFT JOIN (SELECT repo_id AS "repo_id", created_at AS "created_at" FROM issues WHERE issues.repo_id = :repoid ORDER BY created_at DESC LIMIT 1) AS iss
ON iss.repo_id = commits.project_id
LEFT JOIN (SELECT pull_request_history.created_at AS "created_at", pull_requests.base_repo_id AS "repo_id" FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = 'merged' ORDER BY pull_request_history.created_at DESC LIMIT 1) AS pr
ON pr.repo_id = commits.project_id
LEFT JOIN (SELECT projects.id AS "repo_id", created_at AS "created_at" FROM projects WHERE projects.id = :repoid) AS proj
ON proj.repo_id = commits.project_id
WHERE commits.project_id = :repoid
ORDER BY commits.created_at DESC
LIMIT 1
""")
return pd.read_sql(communityAgeSQL, self.db, params={"repoid": str(repoid)})
def unique_committers(self, owner, repo=None):
repoid = self.repoid(owner, repo)
uniqueCommittersSQL = s.sql.text("""
SELECT unique_committers.created_at AS "date", MAX(@number_of_committers:=@number_of_committers+1) total_unique_committers
FROM (
SELECT author_id, MIN(DATE(created_at)) created_at
FROM commits
WHERE project_id = :repoid
GROUP BY author_id
ORDER BY created_at ASC) AS unique_committers,
(SELECT @number_of_committers:= 0) AS number_of_committers
GROUP BY DATE(unique_committers.created_at)
""")
return pd.read_sql(uniqueCommittersSQL, self.db, params={"repoid": str(repoid)})
def ghtorrent_range(self):
ghtorrentRangeSQL = s.sql.text("""
SELECT MIN(date(created_at)) AS "min_date", MAX(date(created_at)) AS "max_date"
FROM commits
""")
return pd.read_sql(ghtorrentRangeSQL, self.db)
|
[
"sqlalchemy.sql.text",
"numpy.timedelta64",
"pandas.to_datetime",
"pandas.Series",
"pandas.read_sql",
"sqlalchemy.create_engine",
"re.sub"
] |
[((510, 532), 'sqlalchemy.create_engine', 's.create_engine', (['dbstr'], {}), '(dbstr)\n', (525, 532), True, 'import sqlalchemy as s\n'), ((3721, 3791), 'sqlalchemy.sql.text', 's.sql.text', (['"""SELECT users.id FROM users WHERE users.login = :username"""'], {}), "('SELECT users.id FROM users WHERE users.login = :username')\n", (3731, 3791), True, 'import sqlalchemy as s\n'), ((7226, 7660), 'sqlalchemy.sql.text', 's.sql.text', (['"""\n SELECT issues.id as "id",\n issues.created_at as "date",\n DATEDIFF(closed.created_at, issues.created_at) AS "days_to_close"\n FROM issues\n\n JOIN\n (SELECT * FROM issue_events\n WHERE issue_events.action = "closed") closed\n ON issues.id = closed.issue_id\n\n WHERE issues.repo_id = :repoid"""'], {}), '(\n """\n SELECT issues.id as "id",\n issues.created_at as "date",\n DATEDIFF(closed.created_at, issues.created_at) AS "days_to_close"\n FROM issues\n\n JOIN\n (SELECT * FROM issue_events\n WHERE issue_events.action = "closed") closed\n ON issues.id = closed.issue_id\n\n WHERE issues.repo_id = :repoid"""\n )\n', (7236, 7660), True, 'import sqlalchemy as s\n'), ((8245, 8889), 'sqlalchemy.sql.text', 's.sql.text', (['"""\n SELECT date(pull_request_history.created_at) AS "date",\n (COUNT(pull_requests.id)) AS "pull_requests",\n (SELECT COUNT(*) FROM pull_request_comments\n WHERE pull_request_comments.pull_request_id = pull_request_history.pull_request_id) AS "comments"\n FROM pull_request_history\n INNER JOIN pull_requests\n ON pull_request_history.pull_request_id = pull_requests.id\n WHERE pull_requests.head_repo_id = :repoid\n AND pull_request_history.action = "merged"\n GROUP BY WEEK(pull_request_history.created_at)\n """'], {}), '(\n """\n SELECT date(pull_request_history.created_at) AS "date",\n (COUNT(pull_requests.id)) AS "pull_requests",\n (SELECT COUNT(*) FROM pull_request_comments\n WHERE pull_request_comments.pull_request_id = pull_request_history.pull_request_id) AS "comments"\n FROM pull_request_history\n INNER JOIN pull_requests\n ON pull_request_history.pull_request_id = pull_requests.id\n WHERE pull_requests.head_repo_id = :repoid\n AND pull_request_history.action = "merged"\n GROUP BY WEEK(pull_request_history.created_at)\n """\n )\n', (8255, 8889), True, 'import sqlalchemy as s\n'), ((9518, 12368), 'sqlalchemy.sql.text', 's.sql.text', (['"""\n SELECT * FROM\n\n (\n SELECT users.id as "user_id",\n users.login as "login",\n users.location as "location",\n com.count as "commits",\n pulls.count as "pull_requests",\n iss.count as "issues",\n comcoms.count as "commit_comments",\n pullscoms.count as "pull_request_comments",\n isscoms.count as "issue_comments",\n com.count + pulls.count + iss.count + comcoms.count + pullscoms.count + isscoms.count as "total"\n\n FROM users\n\n LEFT JOIN (SELECT committer_id AS id, COUNT(*) AS count FROM commits INNER JOIN project_commits ON project_commits.commit_id = commits.id WHERE project_commits.project_id = :repoid GROUP BY commits.committer_id) AS com\n ON com.id = users.id\n\n LEFT JOIN (SELECT pull_request_history.actor_id AS id, COUNT(*) AS count FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = \'merged\' GROUP BY pull_request_history.actor_id) AS pulls\n ON pulls.id = users.id\n\n LEFT JOIN (SELECT reporter_id AS id, COUNT(*) AS count FROM issues WHERE issues.repo_id = :repoid GROUP BY issues.reporter_id) AS iss\n ON iss.id = users.id\n\n LEFT JOIN (SELECT commit_comments.user_id 
AS id, COUNT(*) AS count FROM commit_comments JOIN project_commits ON project_commits.commit_id = commit_comments.commit_id WHERE project_commits.project_id = :repoid GROUP BY commit_comments.user_id) AS comcoms\n ON comcoms.id = users.id\n\n LEFT JOIN (SELECT pull_request_comments.user_id AS id, COUNT(*) AS count FROM pull_request_comments JOIN pull_requests ON pull_request_comments.pull_request_id = pull_requests.id WHERE pull_requests.base_repo_id = :repoid GROUP BY pull_request_comments.user_id) AS pullscoms\n ON pullscoms.id = users.id\n\n LEFT JOIN (SELECT issue_comments.user_id AS id, COUNT(*) AS count FROM issue_comments JOIN issues ON issue_comments.issue_id = issues.id WHERE issues.repo_id = :repoid GROUP BY issue_comments.user_id) AS isscoms\n ON isscoms.id = users.id\n\n GROUP BY users.id\n ORDER BY com.count DESC\n ) user_activity\n\n WHERE commits IS NOT NULL\n OR pull_requests IS NOT NULL\n OR issues IS NOT NULL\n OR commit_comments IS NOT NULL\n OR pull_request_comments IS NOT NULL\n OR issue_comments IS NOT NULL;\n """'], {}), '(\n """\n SELECT * FROM\n\n (\n SELECT users.id as "user_id",\n users.login as "login",\n users.location as "location",\n com.count as "commits",\n pulls.count as "pull_requests",\n iss.count as "issues",\n comcoms.count as "commit_comments",\n pullscoms.count as "pull_request_comments",\n isscoms.count as "issue_comments",\n com.count + pulls.count + iss.count + comcoms.count + pullscoms.count + isscoms.count as "total"\n\n FROM users\n\n LEFT JOIN (SELECT committer_id AS id, COUNT(*) AS count FROM commits INNER JOIN project_commits ON project_commits.commit_id = commits.id WHERE project_commits.project_id = :repoid GROUP BY commits.committer_id) AS com\n ON com.id = users.id\n\n LEFT JOIN (SELECT pull_request_history.actor_id AS id, COUNT(*) AS count FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = \'merged\' GROUP BY pull_request_history.actor_id) AS pulls\n ON pulls.id = users.id\n\n LEFT JOIN (SELECT reporter_id AS id, COUNT(*) AS count FROM issues WHERE issues.repo_id = :repoid GROUP BY issues.reporter_id) AS iss\n ON iss.id = users.id\n\n LEFT JOIN (SELECT commit_comments.user_id AS id, COUNT(*) AS count FROM commit_comments JOIN project_commits ON project_commits.commit_id = commit_comments.commit_id WHERE project_commits.project_id = :repoid GROUP BY commit_comments.user_id) AS comcoms\n ON comcoms.id = users.id\n\n LEFT JOIN (SELECT pull_request_comments.user_id AS id, COUNT(*) AS count FROM pull_request_comments JOIN pull_requests ON pull_request_comments.pull_request_id = pull_requests.id WHERE pull_requests.base_repo_id = :repoid GROUP BY pull_request_comments.user_id) AS pullscoms\n ON pullscoms.id = users.id\n\n LEFT JOIN (SELECT issue_comments.user_id AS id, COUNT(*) AS count FROM issue_comments JOIN issues ON issue_comments.issue_id = issues.id WHERE issues.repo_id = :repoid GROUP BY issue_comments.user_id) AS isscoms\n ON isscoms.id = users.id\n\n GROUP BY users.id\n ORDER BY com.count DESC\n ) user_activity\n\n WHERE commits IS NOT NULL\n OR pull_requests IS NOT NULL\n OR issues IS NOT NULL\n OR commit_comments IS NOT NULL\n OR pull_request_comments IS NOT NULL\n OR issue_comments IS NOT NULL;\n """\n )\n', (9528, 12368), True, 'import sqlalchemy as s\n'), ((17019, 17424), 'sqlalchemy.sql.text', 's.sql.text', (['"""\n SELECT users.login, users.location, COUNT(*) AS "commits"\n FROM commits\n JOIN 
project_commits\n ON commits.id = project_commits.commit_id\n JOIN users\n ON users.id = commits.author_id\n WHERE project_commits.project_id = :repoid\n GROUP BY users.id\n ORDER BY commits DESC\n """'], {}), '(\n """\n SELECT users.login, users.location, COUNT(*) AS "commits"\n FROM commits\n JOIN project_commits\n ON commits.id = project_commits.commit_id\n JOIN users\n ON users.id = commits.author_id\n WHERE project_commits.project_id = :repoid\n GROUP BY users.id\n ORDER BY commits DESC\n """\n )\n', (17029, 17424), True, 'import sqlalchemy as s\n'), ((18110, 18697), 'sqlalchemy.sql.text', 's.sql.text', (['"""\n SELECT issues.created_at AS "created_at",\n MIN(issue_comments.created_at) AS "responded_at"\n FROM issues\n JOIN issue_comments\n ON issue_comments.issue_id = issues.id\n WHERE issue_comments.user_id IN\n (SELECT users.id\n FROM users\n JOIN commits\n WHERE commits.author_id = users.id\n AND commits.project_id = :repoid)\n AND issues.repo_id = :repoid\n GROUP BY issues.id\n """'], {}), '(\n """\n SELECT issues.created_at AS "created_at",\n MIN(issue_comments.created_at) AS "responded_at"\n FROM issues\n JOIN issue_comments\n ON issue_comments.issue_id = issues.id\n WHERE issue_comments.user_id IN\n (SELECT users.id\n FROM users\n JOIN commits\n WHERE commits.author_id = users.id\n AND commits.project_id = :repoid)\n AND issues.repo_id = :repoid\n GROUP BY issues.id\n """\n )\n', (18120, 18697), True, 'import sqlalchemy as s\n'), ((18792, 18824), 'pandas.to_datetime', 'pd.to_datetime', (["df['created_at']"], {}), "(df['created_at'])\n", (18806, 18824), True, 'import pandas as pd\n'), ((18854, 18888), 'pandas.to_datetime', 'pd.to_datetime', (["df['responded_at']"], {}), "(df['responded_at'])\n", (18868, 18888), True, 'import pandas as pd\n'), ((19786, 20762), 'sqlalchemy.sql.text', 's.sql.text', (['"""\n SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate"\n FROM\n (SELECT COUNT(DISTINCT pull_request_id) AS num_approved, DATE(pull_request_history.created_at) AS accepted_on\n FROM pull_request_history\n JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id\n WHERE action = \'merged\' AND pull_requests.base_repo_id = :repoid\n GROUP BY accepted_on) accepted\n JOIN\n (SELECT count(distinct pull_request_id) AS num_open, DATE(pull_request_history.created_at) AS date_created\n FROM pull_request_history\n JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id\n WHERE action = \'opened\'\n AND pull_requests.base_repo_id = :repoid\n GROUP BY date_created) opened\n ON opened.date_created = accepted.accepted_on\n """'], {}), '(\n """\n SELECT DATE(date_created) AS "date", CAST(num_approved AS DECIMAL)/CAST(num_open AS DECIMAL) AS "rate"\n FROM\n (SELECT COUNT(DISTINCT pull_request_id) AS num_approved, DATE(pull_request_history.created_at) AS accepted_on\n FROM pull_request_history\n JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id\n WHERE action = \'merged\' AND pull_requests.base_repo_id = :repoid\n GROUP BY accepted_on) accepted\n JOIN\n (SELECT count(distinct pull_request_id) AS num_open, DATE(pull_request_history.created_at) AS date_created\n FROM pull_request_history\n JOIN pull_requests ON pull_request_history.pull_request_id = pull_requests.id\n WHERE action = \'opened\'\n AND pull_requests.base_repo_id = :repoid\n GROUP BY date_created) opened\n ON opened.date_created = accepted.accepted_on\n """\n )\n', (19796, 20762), True, 'import sqlalchemy as s\n'), 
((22626, 24044), 'sqlalchemy.sql.text', 's.sql.text', (['"""\n SELECT DATE(proj.created_at) AS "project",\n DATE(commits.created_at) AS "commit",\n DATE(frk.created_at) AS "fork",\n DATE(iss.created_at) AS "issue",\n DATE(pr.created_at) AS "pull_request"\n\n FROM commits\n\n LEFT JOIN (SELECT forked_from_id AS "repo_id", created_at AS "created_at" FROM forks WHERE forks.forked_from_id = :repoid ORDER BY created_at DESC LIMIT 1) AS frk\n ON frk.repo_id = commits.project_id\n\n LEFT JOIN (SELECT repo_id AS "repo_id", created_at AS "created_at" FROM issues WHERE issues.repo_id = :repoid ORDER BY created_at DESC LIMIT 1) AS iss\n ON iss.repo_id = commits.project_id\n\n LEFT JOIN (SELECT pull_request_history.created_at AS "created_at", pull_requests.base_repo_id AS "repo_id" FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = \'merged\' ORDER BY pull_request_history.created_at DESC LIMIT 1) AS pr\n ON pr.repo_id = commits.project_id\n\n LEFT JOIN (SELECT projects.id AS "repo_id", created_at AS "created_at" FROM projects WHERE projects.id = :repoid) AS proj\n ON proj.repo_id = commits.project_id\n\n WHERE commits.project_id = :repoid\n ORDER BY commits.created_at DESC\n LIMIT 1\n """'], {}), '(\n """\n SELECT DATE(proj.created_at) AS "project",\n DATE(commits.created_at) AS "commit",\n DATE(frk.created_at) AS "fork",\n DATE(iss.created_at) AS "issue",\n DATE(pr.created_at) AS "pull_request"\n\n FROM commits\n\n LEFT JOIN (SELECT forked_from_id AS "repo_id", created_at AS "created_at" FROM forks WHERE forks.forked_from_id = :repoid ORDER BY created_at DESC LIMIT 1) AS frk\n ON frk.repo_id = commits.project_id\n\n LEFT JOIN (SELECT repo_id AS "repo_id", created_at AS "created_at" FROM issues WHERE issues.repo_id = :repoid ORDER BY created_at DESC LIMIT 1) AS iss\n ON iss.repo_id = commits.project_id\n\n LEFT JOIN (SELECT pull_request_history.created_at AS "created_at", pull_requests.base_repo_id AS "repo_id" FROM pull_request_history JOIN pull_requests ON pull_requests.id = pull_request_history.pull_request_id WHERE pull_requests.base_repo_id = :repoid AND pull_request_history.action = \'merged\' ORDER BY pull_request_history.created_at DESC LIMIT 1) AS pr\n ON pr.repo_id = commits.project_id\n\n LEFT JOIN (SELECT projects.id AS "repo_id", created_at AS "created_at" FROM projects WHERE projects.id = :repoid) AS proj\n ON proj.repo_id = commits.project_id\n\n WHERE commits.project_id = :repoid\n ORDER BY commits.created_at DESC\n LIMIT 1\n """\n )\n', (22636, 24044), True, 'import sqlalchemy as s\n'), ((24245, 24764), 'sqlalchemy.sql.text', 's.sql.text', (['"""\n SELECT unique_committers.created_at AS "date", MAX(@number_of_committers:=@number_of_committers+1) total_unique_committers\n FROM (\n SELECT author_id, MIN(DATE(created_at)) created_at\n FROM commits\n WHERE project_id = :repoid\n GROUP BY author_id\n ORDER BY created_at ASC) AS unique_committers,\n (SELECT @number_of_committers:= 0) AS number_of_committers\n GROUP BY DATE(unique_committers.created_at)\n """'], {}), '(\n """\n SELECT unique_committers.created_at AS "date", MAX(@number_of_committers:=@number_of_committers+1) total_unique_committers\n FROM (\n SELECT author_id, MIN(DATE(created_at)) created_at\n FROM commits\n WHERE project_id = :repoid\n GROUP BY author_id\n ORDER BY created_at ASC) AS unique_committers,\n (SELECT @number_of_committers:= 0) AS number_of_committers\n GROUP BY 
DATE(unique_committers.created_at)\n """\n )\n', (24255, 24764), True, 'import sqlalchemy as s\n'), ((24904, 25051), 'sqlalchemy.sql.text', 's.sql.text', (['"""\n SELECT MIN(date(created_at)) AS "min_date", MAX(date(created_at)) AS "max_date" \n FROM commits\n """'], {}), '(\n """\n SELECT MIN(date(created_at)) AS "min_date", MAX(date(created_at)) AS "max_date" \n FROM commits\n """\n )\n', (24914, 25051), True, 'import sqlalchemy as s\n'), ((25057, 25096), 'pandas.read_sql', 'pd.read_sql', (['ghtorrentRangeSQL', 'self.db'], {}), '(ghtorrentRangeSQL, self.db)\n', (25068, 25096), True, 'import pandas as pd\n'), ((3129, 3291), 'sqlalchemy.sql.text', 's.sql.text', (['"""SELECT projects.id FROM projects INNER JOIN users ON projects.owner_id = users.id WHERE projects.name = :repo AND users.login = :repoowner"""'], {}), "(\n 'SELECT projects.id FROM projects INNER JOIN users ON projects.owner_id = users.id WHERE projects.name = :repo AND users.login = :repoowner'\n )\n", (3139, 3291), True, 'import sqlalchemy as s\n'), ((16110, 16141), 'sqlalchemy.sql.text', 's.sql.text', (['rawContributionsSQL'], {}), '(rawContributionsSQL)\n', (16120, 16141), True, 'import sqlalchemy as s\n'), ((16300, 16350), 're.sub', 're.sub', (['"""\\\\[\\\\[.+?\\\\]\\\\]"""', '""""""', 'rawContributionsSQL'], {}), "('\\\\[\\\\[.+?\\\\]\\\\]', '', rawContributionsSQL)\n", (16306, 16350), False, 'import re\n'), ((16376, 16407), 'sqlalchemy.sql.text', 's.sql.text', (['rawContributionsSQL'], {}), '(rawContributionsSQL)\n', (16386, 16407), True, 'import sqlalchemy as s\n'), ((22202, 22250), 'pandas.Series', 'pd.Series', (["{'login': row['login'], 'role': role}"], {}), "({'login': row['login'], 'role': role})\n", (22211, 22250), True, 'import pandas as pd\n'), ((18970, 18992), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""h"""'], {}), "(1, 'h')\n", (18984, 18992), True, 'import numpy as np\n')]
|
import numpy as np
import math
import fatpack
# import rainflow
import matplotlib.pyplot as plt
import pandas as pd
import h5py
# import seaborn as sns
from scipy.signal import savgol_filter
import scipy.stats as stats
def Goodman_method_correction(M_a,M_m,M_max):
    """Goodman mean-stress correction: convert each rainflow (amplitude, mean)
    pair into an equivalent zero-mean amplitude, taking the ultimate moment
    M_u as 1.5 times the observed maximum moment."""
    M_u = 1.5*M_max
    M_ar = M_a/(1-M_m/M_u)
    return M_ar
caselist = ["NREL-m-2","NREL-m","NREL-m-1"]
yaw_angle = [-20,0,20]
f = [None] * len(yaw_angle)
m_f = [None] * len(yaw_angle)
m_e = [None] * len(yaw_angle)
N = [None] * len(yaw_angle)
S = [None] * len(yaw_angle)
ranges_corrected = [None] * len(yaw_angle)
DEL = np.zeros(len(yaw_angle))
ix_0 = len(yaw_angle)//2
m = 10
Neq = 1000
start = 0
end = 30000
bins_num = 101
bins_max = 10
bins = np.linspace(0, bins_max, bins_num)
bin_width = bins_max/(bins_num-1)
bins_fine = np.linspace(0, bins_max, 501)
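# m is the assumed Woehler (S-N curve) exponent and Neq the number of equivalent
# cycles, so the damage equivalent load below is DEL = (sum(N_i * S_i**m) / Neq)**(1/m)
# over the binned, Goodman-corrected rainflow ranges.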
plt.figure(figsize=(14, 8),dpi=100)
plt.rcParams.update({'font.size': 22})
for ix,name in enumerate(caselist):
print(ix)
f[ix] = h5py.File('../job/'+name+'/output/'+name+'_force.h5','r')
time = np.array(f[ix].get('time'))[start:end]
m_f[ix] = np.array(f[ix].get('moment_flap')[:,0,0,0])/1e6
m_e[ix] = np.array(f[ix].get('moment_edge')[:,0,0,0])/1e6
rev, rev_ix = fatpack.find_reversals_racetrack_filtered(m_f[ix], h=0.1, k=256)
ranges,means = fatpack.find_rainflow_ranges(rev, k=256, return_means=True)
ranges_corrected[ix] = Goodman_method_correction(ranges,means,np.max(m_f[ix]))
N[ix], S[ix] = fatpack.find_range_count(ranges_corrected[ix],bins)
DEL[ix] = (np.sum(N[ix]*S[ix]**m)/Neq)**(1/m)
print(DEL[ix])
# print(ranges_corrected[0])
DEL_test = DEL[ix_0]  # baseline DEL: middle entry of yaw_angle (0 degree yaw)
for ii in range(3):
for ix,name in enumerate(caselist):
print(ix)
f[ix] = h5py.File('../job/'+name+'/output/'+name+'_force.h5','r')
time = np.array(f[ix].get('time'))[start:end]
m_f[ix] = np.array(f[ix].get('moment_flap')[:,0,0,ii])/1e6
m_e[ix] = np.array(f[ix].get('moment_edge')[:,0,0,ii])/1e6
rev, rev_ix = fatpack.find_reversals_racetrack_filtered(m_f[ix], h=1, k=256)
ranges,means = fatpack.find_rainflow_ranges(rev, k=256, return_means=True)
ranges_corrected[ix] = Goodman_method_correction(ranges,means,np.max(m_f[ix]))
N[ix], S[ix] = fatpack.find_range_count(ranges_corrected[ix], bins)
DEL[ix] = (np.sum(N[ix]*S[ix]**m)/Neq)**(1/m)
plt.plot(yaw_angle,DEL/DEL_test,'o-',label="WT"+str(ii+1))
plt.legend()
plt.xlabel('Yaw angle (degree)')
plt.ylabel('$DEL/DEL_{baseline}$')
plt.savefig('plot/DEL.png')
# df = pd.read_csv('power_3wt.csv')
# print(df)
# plt.figure(figsize=(14, 8),dpi=100)
# plt.plot(df['yaw'],df['wt1']/4173025,'o-',label='WT1')
# plt.plot(df['yaw'],df['wt2']/4173025,'o-',label='WT2')
# plt.plot(df['yaw'],df['wt3']/4173025,'o-',label='WT3')
# plt.legend()
# plt.xlabel('Yaw angle (degree)')
# plt.ylabel('$P/P_{baseline}$')
# plt.savefig('plot/power.png')
|
[
"h5py.File",
"fatpack.find_reversals_racetrack_filtered",
"numpy.sum",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcParams.update",
"fatpack.find_range_count",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"fatpack.find_rainflow_ranges",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((720, 754), 'numpy.linspace', 'np.linspace', (['(0)', 'bins_max', 'bins_num'], {}), '(0, bins_max, bins_num)\n', (731, 754), True, 'import numpy as np\n'), ((801, 830), 'numpy.linspace', 'np.linspace', (['(0)', 'bins_max', '(501)'], {}), '(0, bins_max, 501)\n', (812, 830), True, 'import numpy as np\n'), ((833, 869), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 8)', 'dpi': '(100)'}), '(figsize=(14, 8), dpi=100)\n', (843, 869), True, 'import matplotlib.pyplot as plt\n'), ((869, 907), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 22}"], {}), "({'font.size': 22})\n", (888, 907), True, 'import matplotlib.pyplot as plt\n'), ((2431, 2443), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2441, 2443), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2476), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Yaw angle (degree)"""'], {}), "('Yaw angle (degree)')\n", (2454, 2476), True, 'import matplotlib.pyplot as plt\n'), ((2477, 2511), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$DEL/DEL_{baseline}$"""'], {}), "('$DEL/DEL_{baseline}$')\n", (2487, 2511), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2539), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot/DEL.png"""'], {}), "('plot/DEL.png')\n", (2523, 2539), True, 'import matplotlib.pyplot as plt\n'), ((971, 1037), 'h5py.File', 'h5py.File', (["('../job/' + name + '/output/' + name + '_force.h5')", '"""r"""'], {}), "('../job/' + name + '/output/' + name + '_force.h5', 'r')\n", (980, 1037), False, 'import h5py\n'), ((1221, 1285), 'fatpack.find_reversals_racetrack_filtered', 'fatpack.find_reversals_racetrack_filtered', (['m_f[ix]'], {'h': '(0.1)', 'k': '(256)'}), '(m_f[ix], h=0.1, k=256)\n', (1262, 1285), False, 'import fatpack\n'), ((1305, 1364), 'fatpack.find_rainflow_ranges', 'fatpack.find_rainflow_ranges', (['rev'], {'k': '(256)', 'return_means': '(True)'}), '(rev, k=256, return_means=True)\n', (1333, 1364), False, 'import fatpack\n'), ((1467, 1519), 'fatpack.find_range_count', 'fatpack.find_range_count', (['ranges_corrected[ix]', 'bins'], {}), '(ranges_corrected[ix], bins)\n', (1491, 1519), False, 'import fatpack\n'), ((1431, 1446), 'numpy.max', 'np.max', (['m_f[ix]'], {}), '(m_f[ix])\n', (1437, 1446), True, 'import numpy as np\n'), ((1734, 1800), 'h5py.File', 'h5py.File', (["('../job/' + name + '/output/' + name + '_force.h5')", '"""r"""'], {}), "('../job/' + name + '/output/' + name + '_force.h5', 'r')\n", (1743, 1800), False, 'import h5py\n'), ((2002, 2064), 'fatpack.find_reversals_racetrack_filtered', 'fatpack.find_reversals_racetrack_filtered', (['m_f[ix]'], {'h': '(1)', 'k': '(256)'}), '(m_f[ix], h=1, k=256)\n', (2043, 2064), False, 'import fatpack\n'), ((2088, 2147), 'fatpack.find_rainflow_ranges', 'fatpack.find_rainflow_ranges', (['rev'], {'k': '(256)', 'return_means': '(True)'}), '(rev, k=256, return_means=True)\n', (2116, 2147), False, 'import fatpack\n'), ((2258, 2310), 'fatpack.find_range_count', 'fatpack.find_range_count', (['ranges_corrected[ix]', 'bins'], {}), '(ranges_corrected[ix], bins)\n', (2282, 2310), False, 'import fatpack\n'), ((1534, 1560), 'numpy.sum', 'np.sum', (['(N[ix] * S[ix] ** m)'], {}), '(N[ix] * S[ix] ** m)\n', (1540, 1560), True, 'import numpy as np\n'), ((2218, 2233), 'numpy.max', 'np.max', (['m_f[ix]'], {}), '(m_f[ix])\n', (2224, 2233), True, 'import numpy as np\n'), ((2330, 2356), 'numpy.sum', 'np.sum', (['(N[ix] * S[ix] ** m)'], {}), '(N[ix] * S[ix] ** m)\n', (2336, 2356), True, 'import numpy as np\n')]
|
import numpy as np
from gridgeo.ugrid import ugrid
def _make_grid(coords):
if coords.ndim != 3:
raise ValueError(f"Expected 3 dimension array, got {coords.ndim}.")
M, N, L = coords.shape
polygons = np.concatenate(
(
coords[0:-1, 0:-1],
coords[0:-1, 1:],
coords[1:, 1:],
coords[1:, 0:-1],
),
axis=L,
)
polygons = polygons.reshape(((M - 1) * (N - 1), 4, L))
return [p for p in polygons if not np.isnan(p).any()]
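# _make_grid stitches the four corner points of every grid cell into a quadrilateral
# polygon from an (M, N, 2) array of node coordinates and drops cells whose
# coordinates contain NaN.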
def _filled_masked(arr):
if hasattr(arr, "filled"):
return arr.filled(fill_value=np.NaN)
else:
return arr
class CFVariable(object):
def __init__(self, nc, **kwargs):
"""
FIXME: this should be done when slicing a CFVariable in pocean-core.
This class is only a temporary workaround until something better is created.
"""
self._nc = nc
variables = self._nc.get_variables_by_attributes(**kwargs)
if len(variables) > 1:
raise ValueError(
f"Found more than 1 variable with criteria {kwargs}"
)
elif not variables:
raise ValueError(
f"Could not find any variables with criteria {kwargs}"
)
else:
self._variable = variables[0]
self._coords = self._variable.coordinates.split()
def _filter_coords(self, variables):
valid_coords = []
for var in variables:
if var.name in self._coords:
valid_coords.append(var)
if len(valid_coords) != 1:
raise ValueError(f"Expected a single coord, got '{valid_coords}'.")
return valid_coords[0]
def axis(self, name):
return getattr(self, "{}_axis".format(name.lower()))()
def t_axis(self):
tvars = list(
set(
self._nc.get_variables_by_attributes(
axis=lambda x: x and str(x).lower() == "t"
)
+ self._nc.get_variables_by_attributes(
standard_name=lambda x: str(x)
in [
"time"
] # We don't want coords `forecast_reference_time`.
)
+ self._nc.get_variables_by_attributes(
_CoordinateAxisType=lambda x: str(x).lower() == "time"
)
)
)
# _CoordinateAxisType: Time
return self._filter_coords(tvars)
def crs(self):
crs = getattr(self._variable, "grid_mapping", None)
if crs:
crs = self._nc[crs]
return crs
def x_axis(self):
xnames = ["longitude", "grid_longitude", "projection_x_coordinate"]
xunits = [
"degrees_east",
"degree_east",
"degree_E",
"degrees_E",
"degreeE",
"degreesE",
]
xvars = list(
set(
self._nc.get_variables_by_attributes(
axis=lambda x: x and str(x).lower() == "x"
)
+ self._nc.get_variables_by_attributes(
standard_name=lambda x: x and str(x).lower() in xnames
)
+ self._nc.get_variables_by_attributes(
units=lambda x: x and str(x).lower() in xunits
)
)
)
return self._filter_coords(xvars)
def y_axis(self):
ynames = ["latitude", "grid_latitude", "projection_y_coordinate"]
yunits = [
"degrees_north",
"degree_north",
"degree_N",
"degrees_N",
"degreeN",
"degreesN",
]
yvars = list(
set(
self._nc.get_variables_by_attributes(
axis=lambda x: x and str(x).lower() == "y"
)
+ self._nc.get_variables_by_attributes(
standard_name=lambda x: x and str(x).lower() in ynames
)
+ self._nc.get_variables_by_attributes(
units=lambda x: x and str(x).lower() in yunits
)
)
)
return self._filter_coords(yvars)
def z_axis(self):
znames = [
"atmosphere_ln_pressure_coordinate",
"atmosphere_sigma_coordinate",
"atmosphere_hybrid_sigma_pressure_coordinate",
"atmosphere_hybrid_height_coordinate",
"atmosphere_sleve_coordinate",
"ocean_sigma_coordinate",
"ocean_s_coordinate",
"ocean_s_coordinate_g1",
"ocean_s_coordinate_g2",
"ocean_sigma_z_coordinate",
"ocean_double_sigma_coordinate",
]
zvars = list(
set(
self._nc.get_variables_by_attributes(
axis=lambda x: x and str(x).lower() == "z"
)
+ self._nc.get_variables_by_attributes(
positive=lambda x: x and str(x).lower() in ["up", "down"]
)
+ self._nc.get_variables_by_attributes(
standard_name=lambda x: x and str(x).lower() in znames
)
)
)
return self._filter_coords(zvars)
def topology(self):
vnames = ["grid_topology", "mesh_topology"]
topologies = self._nc.get_variables_by_attributes(
cf_role=lambda v: v in vnames
)
if not topologies:
if self.x_axis().ndim == 1 and self.y_axis().ndim == 1:
return "unknown_1d"
elif self.x_axis().ndim == 2 and self.y_axis().ndim == 2:
return "unknown_2d"
else:
raise ValueError(
f"Could not identify the topology for {self._nc}."
)
if topologies and len(topologies) > 1:
raise ValueError(
f"Expected 1 topology variable, got {len(topologies)}."
)
mesh = topologies[0]
dims = getattr(mesh, "topology_dimension", None)
cf_role = getattr(mesh, "cf_role", None)
if cf_role == "mesh_topology" and dims in (1, 2):
return "ugrid"
if cf_role == "grid_topology" and dims == 2:
return "sgrid"
def polygons(self):
if self.topology() == "ugrid":
grid = ugrid(self._nc)
node_x = grid["nodes"]["x"]
node_y = grid["nodes"]["y"]
faces = grid["faces"]
return [(list(zip(node_x[k], node_y[k]))) for k in faces]
if self.topology() == "sgrid":
x, y = self.x_axis()[:], self.y_axis()[:]
coords = np.concatenate([x[..., None], y[..., None]], axis=2)
return _make_grid(coords)
if self.topology() == "unknown_1d":
x, y = self.x_axis()[:], self.y_axis()[:]
# Some non-compliant grids, like NYHOPS, may have missing_value/fill_value.
x = _filled_masked(x)
y = _filled_masked(y)
if hasattr(y, "filled"):
y = y.filled(fill_value=np.NaN)
x, y = np.meshgrid(x, y)
coords = np.stack([x, y], axis=2)
return _make_grid(coords)
if self.topology() == "unknown_2d":
x, y = self.x_axis()[:], self.y_axis()[:]
# Some non-compliant grids, like NYHOPS, may have missing_value/fill_value.
x = _filled_masked(x)
y = _filled_masked(y)
coords = np.concatenate([x[..., None], y[..., None]], axis=2)
return _make_grid(coords)
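    # Illustrative usage (file name and attribute value are hypothetical):
    #   nc = netCDF4.Dataset("model_output.nc")
    #   var = CFVariable(nc, standard_name="sea_water_temperature")
    #   cells = var.polygons()  # list of cell polygons as sequences of (x, y) vertices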
# Replication of the `netCDF4.Variable` object via composition.
def __getitem__(self, key):
return self._variable.__getitem__(key)
def __repr__(self):
return self._variable.__repr__()
@property
def units(self):
return self._variable.units
@property
def standard_name(self):
return self._variable.standard_name
@property
def long_name(self):
return self._variable.long_name
@property
def coordinates(self):
return self._variable.coordinates
@property
def ndim(self):
return self._variable.ndim
@property
def size(self):
return self._variable.size
@property
def shape(self):
return self._variable.shape
@property
def scale(self):
return self._variable.scale
@property
def datatype(self):
return self._variable.datatype
@property
def dimensions(self):
return self._variable.dimensions
@property
def dtype(self):
return self._variable.dtype
@property
def field(self):
return self._variable.field
@property
def name(self):
return self._variable.name
@property
def mask(self):
return self._variable.mask
@property
def _FillValue(self):
return self._variable._FillValue
@property
def _ChunkSizes(self):
return self._variable._ChunkSizes
@property
def chartostring(self):
return self._variable.chartostring
def chunking(self):
return self._variable.chunking()
def endian(self):
return self._variable.endian()
def filters(self):
return self._variable.filters()
def group(self):
return self._variable.group()
def ncattrs(self):
return self._variable.ncattrs()
|
[
"numpy.stack",
"numpy.meshgrid",
"numpy.isnan",
"gridgeo.ugrid.ugrid",
"numpy.concatenate"
] |
[((221, 321), 'numpy.concatenate', 'np.concatenate', (['(coords[0:-1, 0:-1], coords[0:-1, 1:], coords[1:, 1:], coords[1:, 0:-1])'], {'axis': 'L'}), '((coords[0:-1, 0:-1], coords[0:-1, 1:], coords[1:, 1:],\n coords[1:, 0:-1]), axis=L)\n', (235, 321), True, 'import numpy as np\n'), ((6461, 6476), 'gridgeo.ugrid.ugrid', 'ugrid', (['self._nc'], {}), '(self._nc)\n', (6466, 6476), False, 'from gridgeo.ugrid import ugrid\n'), ((6776, 6828), 'numpy.concatenate', 'np.concatenate', (['[x[..., None], y[..., None]]'], {'axis': '(2)'}), '([x[..., None], y[..., None]], axis=2)\n', (6790, 6828), True, 'import numpy as np\n'), ((7226, 7243), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (7237, 7243), True, 'import numpy as np\n'), ((7265, 7289), 'numpy.stack', 'np.stack', (['[x, y]'], {'axis': '(2)'}), '([x, y], axis=2)\n', (7273, 7289), True, 'import numpy as np\n'), ((7604, 7656), 'numpy.concatenate', 'np.concatenate', (['[x[..., None], y[..., None]]'], {'axis': '(2)'}), '([x[..., None], y[..., None]], axis=2)\n', (7618, 7656), True, 'import numpy as np\n'), ((499, 510), 'numpy.isnan', 'np.isnan', (['p'], {}), '(p)\n', (507, 510), True, 'import numpy as np\n')]
|
import numpy as np
def get_PF_Results():
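    # Nested lookup of reference power-flow results: the outer keys select the test
    # configuration, then the load type ('delta', 'wye', 'delta_wye', 'bal_wye') and
    # the transformer vector group ('Yyn', 'YNyn', 'Dyn', 'Yzn'); each array lists
    # values for BusTr_HV, Tr_LV and Load, one row of three per phase.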
results=\
{
10:
{
0:
{
'delta' :
{
'Yyn': np.array
([
#10,0,deltaYyn
#BusTr_HV,Tr_LV,Load
1.0000001787261197, 0.9990664471050634, 0.9408623912831601,
0.9999997973033823, 0.9989329879720452, 0.9398981202882926,
1.000000023970535, 0.9990124767159095, 0.9422153531204793,
] )
,
'YNyn': np.array
([
#10,0,deltaYNyn
#BusTr_HV,Tr_LV,Load
1.0000001786899793, 0.9990638105447855, 0.9408586320432043,
0.9999997971517767, 0.9989338020819162, 0.9398997093459485,
1.000000024158281, 0.9990142941344189, 0.9422174830541402,
] )
,
'Dyn': np.array
([
#10,0,deltaDyn
#BusTr_HV,Tr_LV,Load
1.000000178603741, 0.9990638106892, 0.9408586322473715,
0.9999997971832201, 0.9989338020666364, 0.9398997093074486,
1.000000024213076, 0.9990142940055439, 0.9422174828921106,
] )
,
'Yzn': np.array
([
#10,0,deltaYzn
#BusTr_HV,Tr_LV,Load
1.000000178603741, 0.9990638106892, 0.9408586322473715,
0.9999997971832201, 0.9989338020666364, 0.9398997093074486,
1.000000024213076, 0.9990142940055439, 0.9422174828921106,
] )
,
},
'wye' :
{
'Yyn': np.array
([
#10,0,wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999998021362442, 0.9915031010358111, 0.9206318374527404,
0.9999997791045989, 1.0143417780460269, 0.9616365638634155,
1.000000418759289, 0.9913387390190033, 0.9408558778822637,
] )
,
'YNyn': np.array
([
#10,0,wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999997083766274, 0.9988968962217385, 0.9287452455114519,
1.0000001672319114, 0.999061839981782, 0.9452915718541725,
1.0000001243918462, 0.9990504923797096, 0.9488965582258678,
] )
,
'Dyn': np.array
([
#10,0,wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999599731432, 0.9988963012384348, 0.9287445940341739,
0.999999734429128, 0.9990625733649781, 0.9452923634430362,
1.000000305597812, 0.9990503538577492, 0.9488964199625295,
] )
,
'Yzn': np.array
([
#10,0,wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999599731432, 0.9988963012384348, 0.9287445940341739,
0.999999734429128, 0.9990625733649781, 0.9452923634430362,
1.000000305597812, 0.9990503538577492, 0.9488964199625295,
] )
,
},
'delta_wye' :
{
'Yyn': np.array
([
#10,0,delta_wyeYyn
#BusTr_HV,Tr_LV,Load
1.000000289039923, 0.9945259444558469, 0.9241479442057374,
0.9999996598061066, 1.0028660964609941, 0.9332827547884484,
1.0000000511540714, 0.9989227003917809, 0.9366758414321353,
] )
,
'YNyn': np.array
([
#10,0,delta_wyeYNyn
#BusTr_HV,Tr_LV,Load
1.0000001633660651, 0.9988186334488024, 0.9284513283443013,
0.9999997731436624, 0.9986857571039884, 0.9290168825920521,
1.0000000634904662, 0.9987917974558278, 0.9366076053493121,
] )
,
'Dyn': np.array
([
#10,0,delta_wyeDyn
#BusTr_HV,Tr_LV,Load
1.0000002947774138, 0.9988183812973129, 0.928451074375663,
0.9999996601592913, 0.9986859152711799, 0.9290170457925304,
1.0000000450633972, 0.9987918914643369, 0.936607696605823,
] )
,
'Yzn': np.array
([
#10,0,delta_wyeYzn
#BusTr_HV,Tr_LV,Load
1.0000002947774138, 0.9988183812973129, 0.928451074375663,
0.9999996601592913, 0.9986859152711799, 0.9290170457925304,
1.0000000450633972, 0.9987918914643369, 0.936607696605823,
] )
,
},
'bal_wye' :
{
'Yyn': np.array
([
#10,0,bal_wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999999999999879, 0.9990668908275987, 0.9446728357045939,
0.9999999999999739, 0.9990668910254652, 0.9446728363197381,
1.0000000000000384, 0.9990668908667012, 0.9446728362625954,
] )
,
'YNyn': np.array
([
#10,0,bal_wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999999999999863, 0.9990668909016067, 0.9446728357836535,
0.9999999999999772, 0.9990668908990621, 0.9446728361848189,
1.0000000000000362, 0.9990668909190944, 0.9446728363184529,
] )
,
'Dyn': np.array
([
#10,0,bal_wyeDyn
#BusTr_HV,Tr_LV,Load
0.999999999999989, 0.999066890901618, 0.9446728357836652,
0.9999999999999737, 0.999066890899081, 0.9446728361848393,
1.0000000000000375, 0.999066890919066, 0.9446728363184226,
] )
,
'Yzn': np.array
([
#10,0,bal_wyeYzn
#BusTr_HV,Tr_LV,Load
0.999999999999989, 0.999066890901618, 0.9446728357836652,
0.9999999999999737, 0.999066890899081, 0.9446728361848393,
1.0000000000000375, 0.999066890919066, 0.9446728363184226,
] )
,
},
},
1:
{
'delta' :
{
'Yyn': np.array
([
#10,1,deltaYyn
#BusTr_HV,Tr_LV,Load
1.0000001795040512, 1.0240495841864894, 0.9674397511496959,
0.9999997971910463, 1.0239111614639989, 0.9664923222986317,
1.0000000233049395, 1.0239935208058917, 0.9687543048259518,
] )
,
'YNyn': np.array
([
#10,1,deltaYNyn
#BusTr_HV,Tr_LV,Load
1.0000001782704175, 1.0240459468337655, 0.9674352916726019,
0.9999997977852046, 1.0239130527637306, 0.9664952324047731,
1.0000000239444145, 1.023995255504894, 0.9687558295327158,
] )
,
'Dyn': np.array
([
#10,1,deltaDyn
#BusTr_HV,Tr_LV,Load
1.0000001782214243, 1.024045946940332, 0.967435291834159,
0.9999997978066542, 1.0239130527420286, 0.9664952323430777,
1.0000000239719584, 1.023995255420507, 0.9687558294364838,
] )
,
'Yzn': np.array
([
#10,1,deltaYzn
#BusTr_HV,Tr_LV,Load
1.0000001782214243, 1.024045946940332, 0.967435291834159,
0.9999997978066542, 1.0239130527420286, 0.9664952323430777,
1.0000000239719584, 1.023995255420507, 0.9687558294364838,
] )
,
},
'wye' :
{
'Yyn': np.array
([
#10,1,wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999998049723338, 1.0163471727161444, 0.9474851372085454,
0.9999997835047069, 1.0396033478524176, 0.9883119194148919,
1.0000004115230865, 1.016177862041642, 0.9670415224711911,
] )
,
'YNyn': np.array
([
#10,1,wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999997111904564, 1.023876123903735, 0.9557104532156954,
1.000000169840967, 1.024045000904823, 0.97172789408756,
1.0000001189689527, 1.024030547850082, 0.9752090807560196,
] )
,
'Dyn': np.array
([
#10,1,wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999610844935, 1.0238755180281829, 0.9557097928361534,
0.9999997396431541, 1.0240457481759326, 0.9717286975282872,
1.0000002992724317, 1.0240304063318828, 0.975208939465858,
] )
,
'Yzn': np.array
([
#10,1,wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999610844935, 1.0238755180281829, 0.9557097928361534,
0.9999997396431541, 1.0240457481759326, 0.9717286975282872,
1.0000002992724317, 1.0240304063318828, 0.975208939465858,
] )
,
},
'delta_wye' :
{
'Yyn': np.array
([
#10,1,delta_wyeYyn
#BusTr_HV,Tr_LV,Load
1.0000002896605282, 1.0194026014413138, 0.9509830141499932,
0.9999996606572187, 1.0279455302463374, 0.9603073239465667,
1.0000000496823542, 1.0238970684816717, 0.9633884768515291,
] )
,
'YNyn': np.array
([
#10,1,delta_wyeYNyn
#BusTr_HV,Tr_LV,Load
1.0000001631049464, 1.0237965435008547, 0.9553922424619002,
0.9999997741736003, 1.0236607923322103, 0.9559358029296258,
1.000000062721646, 1.0237688359303385, 0.9633200580357987,
] )
,
'Dyn': np.array
([
#10,1,delta_wyeDyn
#BusTr_HV,Tr_LV,Load
1.0000002940160242, 1.023796285978077, 0.9553919829548445,
0.9999996614657936, 1.0236609541452617, 0.9559359697011912,
1.000000044518284, 1.0237689316654306, 0.9633201512377196,
] )
,
'Yzn': np.array
([
#10,1,delta_wyeYzn
#BusTr_HV,Tr_LV,Load
1.0000002940160242, 1.023796285978077, 0.9553919829548445,
0.9999996614657936, 1.0236609541452617, 0.9559359697011912,
1.000000044518284, 1.0237689316654306, 0.9633201512377196,
] )
,
},
'bal_wye' :
{
'Yyn': np.array
([
#10,1,bal_wyeYyn
#BusTr_HV,Tr_LV,Load
0.99999999999999, 1.02404859308445, 0.971134029249497,
0.9999999999999845, 1.0240485931685195, 0.9711340295967834,
1.0000000000000258, 1.0240485931044616, 0.9711340295607079,
] )
,
'YNyn': np.array
([
#10,1,bal_wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999999999999892, 1.0240485931151249, 0.9711340292823146,
0.9999999999999865, 1.024048593114567, 0.9711340295398108,
1.0000000000000244, 1.0240485931277552, 0.9711340295848808,
] )
,
'Dyn': np.array
([
#10,1,bal_wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999999999902, 1.024048593115119, 0.9711340292823075,
0.9999999999999848, 1.0240485931145844, 0.9711340295398292,
1.0000000000000249, 1.024048593127728, 0.9711340295848522,
] )
,
'Yzn': np.array
([
#10,1,bal_wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999999999902, 1.024048593115119, 0.9711340292823075,
0.9999999999999848, 1.0240485931145844, 0.9711340295398292,
1.0000000000000249, 1.024048593127728, 0.9711340295848522,
] )
,
},
},
},
11:
{
0:
{
'delta' :
{
'Yyn': np.array
([
#11,0,deltaYyn
#BusTr_HV,Tr_LV,Load
1.0000001770832512, 1.0991666419999009, 1.046863039382953,
0.9999997998271506, 1.0990478952608114, 1.0459974904307656,
1.0000000230896342, 1.0991196058562567, 1.0480820977965253,
] )
,
'YNyn': np.array
([
#11,0,deltaYNyn
#BusTr_HV,Tr_LV,Load
1.000000177064337, 1.0991653032170863, 1.0468611006390927,
0.9999997997417357, 1.0990483460592901, 1.0459983357170173,
1.0000000231939636, 1.0991204912844936, 1.0480831713683516,
] )
,
'Dyn': np.array
([
#11,0,deltaDyn
#BusTr_HV,Tr_LV,Load
1.0000001770170086, 1.099165303280019, 1.046861100729514,
0.9999997997589116, 1.0990483460550085, 1.0459983357036897,
1.0000000232241157, 1.0991204912259542, 1.0480831712929268,
] )
,
'Yzn': np.array
([
#11,0,deltaYzn
#BusTr_HV,Tr_LV,Load
1.0000001770170086, 1.099165303280019, 1.046861100729514,
0.9999997997589116, 1.0990483460550085, 1.0459983357036897,
1.0000000232241157, 1.0991204912259542, 1.0480831712929268,
] )
,
},
'wye' :
{
'Yyn': np.array
([
#11,0,wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999998409135958, 1.0924753274233265, 1.0291805067306592,
0.9999997887228856, 1.112638254093763, 1.0649872145063082,
1.0000003703636224, 1.0923417509837368, 1.0468846408299153,
] )
,
'YNyn': np.array
([
#11,0,wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999997198861459, 1.0990179190476412, 1.0362148303868974,
1.0000001764446427, 1.0991669773561135, 1.0507765134998273,
1.0000001036695618, 1.0991473807202723, 1.0539233691792418,
] )
,
'Dyn': np.array
([
#11,0,wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999645965844, 1.0990174387140366, 1.036214314982853,
0.9999997540341666, 1.0991675482923782, 1.0507771199594842,
1.0000002813693196, 1.0991472900387962, 1.0539232794875342,
] )
,
'Yzn': np.array
([
#11,0,wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999645965844, 1.0990174387140366, 1.036214314982853,
0.9999997540341666, 1.0991675482923782, 1.0507771199594842,
1.0000002813693196, 1.0991472900387962, 1.0539232794875342,
] )
,
},
'delta_wye' :
{
'Yyn': np.array
([
#11,0,delta_wyeYyn
#BusTr_HV,Tr_LV,Load
1.0000002867915057, 1.09511471406464, 1.0320045668742739,
0.9999996655448716, 1.102582851029247, 1.0401766570762196,
1.0000000476637207, 1.0990187740288424, 1.0431968194073924,
] )
,
'YNyn': np.array
([
#11,0,delta_wyeYNyn
#BusTr_HV,Tr_LV,Load
1.0000001623852481, 1.0989490480618516, 1.0358488170212126,
0.9999997776678232, 1.098829878782537, 1.0363599386677118,
1.0000000599471168, 1.0989238972185933, 1.0431472226133363,
] )
,
'Dyn': np.array
([
#11,0,delta_wyeDyn
#BusTr_HV,Tr_LV,Load
1.000000291479138, 1.0989488469146447, 1.0358486145520418,
0.9999996659434413, 1.0988300000349813, 1.0363600632236267,
1.0000000425775202, 1.098923977128452, 1.0431473008280179,
] )
,
'Yzn': np.array
([
#11,0,delta_wyeYzn
#BusTr_HV,Tr_LV,Load
1.000000291479138, 1.0989488469146447, 1.0358486145520418,
0.9999996659434413, 1.0988300000349813, 1.0363600632236267,
1.0000000425775202, 1.098923977128452, 1.0431473008280179,
] )
,
},
'bal_wye' :
{
'Yyn': np.array
([
#11,0,bal_wyeYyn
#BusTr_HV,Tr_LV,Load
0.999999999999994, 1.0991663222840553, 1.0502483483014522,
0.999999999999986, 1.0991663223629755, 1.0502483485683893,
1.00000000000002, 1.0991663223022374, 1.0502483485566558,
] )
,
'YNyn': np.array
([
#11,0,bal_wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999999999999934, 1.0991663223142185, 1.050248348333234,
0.9999999999999878, 1.0991663223125718, 1.0502483485153113,
1.000000000000019, 1.0991663223224817, 1.0502483485779557,
] )
,
'Dyn': np.array
([
#11,0,bal_wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.099166322314217, 1.0502483483332314,
0.999999999999986, 1.0991663223125883, 1.050248348515329,
1.0000000000000195, 1.099166322322463, 1.0502483485779364,
] )
,
'Yzn': np.array
([
#11,0,bal_wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.099166322314217, 1.0502483483332314,
0.999999999999986, 1.0991663223125883, 1.050248348515329,
1.0000000000000195, 1.099166322322463, 1.0502483485779364,
] )
,
},
},
1:
{
'delta' :
{
'Yyn': np.array
([
#11,1,deltaYyn
#BusTr_HV,Tr_LV,Load
1.000000177759738, 1.1266508599188314, 1.075749945733859,
0.9999997996753168, 1.1265276819882335, 1.0748995015125222,
1.0000000225649812, 1.1266018378562361, 1.076934372664356,
] )
,
'YNyn': np.array
([
#11,1,deltaYNyn
#BusTr_HV,Tr_LV,Load
1.000000176730594, 1.1266486259211201, 1.0757473443700512,
0.9999998002521623, 1.1265290107226675, 1.0749013345769867,
1.0000000230172796, 1.1266027366684568, 1.0769351304583261,
] )
,
'Dyn': np.array
([
#11,1,deltaDyn
#BusTr_HV,Tr_LV,Load
1.0000001767039686, 1.1266486259729462, 1.0757473444450258,
0.9999998002646232, 1.1265290107113315, 1.0749013345478544,
1.0000000230314439, 1.126602736628164, 1.0769351304141572,
] )
,
'Yzn': np.array
([
#11,1,deltaYzn
#BusTr_HV,Tr_LV,Load
1.0000001767039686, 1.1266486259729462, 1.0757473444450258,
0.9999998002646232, 1.1265290107113315, 1.0749013345478544,
1.0000000230314439, 1.126602736628164, 1.0769351304141572,
] )
,
},
'wye' :
{
'Yyn': np.array
([
#11,1,wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999998425139852, 1.1198215550651343, 1.0582701679876008,
0.999999792808548, 1.1404037383383383, 1.0940119347447643,
1.000000364677568, 1.119678656475928, 1.0754147798091545,
] )
,
'YNyn': np.array
([
#11,1,wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999997220234313, 1.1264984365036237, 1.065423794124721,
1.0000001785338588, 1.126651120595415, 1.0795452055229118,
1.0000000994430542, 1.126629015453866, 1.0825891788506536,
] )
,
'Dyn': np.array
([
#11,1,wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999654333293, 1.1264979466596041, 1.0654232703853377,
0.9999997580954444, 1.1266517031402583, 1.079545822405393,
1.0000002764712945, 1.1266289226736226, 1.0825890870214312,
] )
,
'Yzn': np.array
([
#11,1,wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999654333293, 1.1264979466596041, 1.0654232703853377,
0.9999997580954444, 1.1266517031402583, 1.079545822405393,
1.0000002764712945, 1.1266289226736226, 1.0825890870214312,
] )
,
},
'delta_wye' :
{
'Yyn': np.array
([
#11,1,delta_wyeYyn
#BusTr_HV,Tr_LV,Load
1.0000002872593454, 1.122503013135439, 1.061107915739188,
0.9999996662661563, 1.1301536319129346, 1.069448792307849,
1.0000000464745962, 1.1264944198323028, 1.0721922685731713,
] )
,
'YNyn': np.array
([
#11,1,delta_wyeYNyn
#BusTr_HV,Tr_LV,Load
1.0000001621739123, 1.126428316031026, 1.0650458103409908,
0.9999997785161929, 1.1263065012425137, 1.0655375147447366,
1.0000000593100822, 1.12640238251751, 1.0721435619381965,
] )
,
'Dyn': np.array
([
#11,1,delta_wyeDyn
#BusTr_HV,Tr_LV,Load
1.0000002908474748, 1.1264281104824707, 1.0650456033928053,
0.9999996670234566, 1.1263066253385652, 1.065537642082384,
1.0000000421291677, 1.126402463985756, 1.0721436418376473,
] )
,
'Yzn': np.array
([
#11,1,delta_wyeYzn
#BusTr_HV,Tr_LV,Load
1.0000002908474748, 1.1264281104824707, 1.0650456033928053,
0.9999996670234566, 1.1263066253385652, 1.065537642082384,
1.0000000421291677, 1.126402463985756, 1.0721436418376473,
] )
,
},
'bal_wye' :
{
'Yyn': np.array
([
#11,1,bal_wyeYyn
#BusTr_HV,Tr_LV,Load
0.9999999999999946, 1.126649305937712, 1.0790357881145098,
0.9999999999999919, 1.1266493059651883, 1.0790357882640247,
1.0000000000000135, 1.1266493059449603, 1.0790357882526134,
] )
,
'YNyn': np.array
([
#11,1,bal_wyeYNyn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.126649305947411, 1.079035788124742,
0.9999999999999928, 1.126649305946962, 1.0790357882450081,
1.000000000000013, 1.1266493059535365, 1.079035788261449,
] )
,
'Dyn': np.array
([
#11,1,bal_wyeDyn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.1266493059473897, 1.0790357881247188,
0.9999999999999922, 1.1266493059469642, 1.079035788245011,
1.0000000000000133, 1.1266493059535063, 1.0790357882614174,
] )
,
'Yzn': np.array
([
#11,1,bal_wyeYzn
#BusTr_HV,Tr_LV,Load
0.9999999999999944, 1.1266493059473897, 1.0790357881247188,
0.9999999999999922, 1.1266493059469642, 1.079035788245011,
1.0000000000000133, 1.1266493059535063, 1.0790357882614174,
] )
,
},
},
},
}
return results
|
[
"numpy.array"
] |
[((116, 315), 'numpy.array', 'np.array', (['[1.0000001787261197, 0.9990664471050634, 0.9408623912831601, \n 0.9999997973033823, 0.9989329879720452, 0.9398981202882926, \n 1.000000023970535, 0.9990124767159095, 0.9422153531204793]'], {}), '([1.0000001787261197, 0.9990664471050634, 0.9408623912831601, \n 0.9999997973033823, 0.9989329879720452, 0.9398981202882926, \n 1.000000023970535, 0.9990124767159095, 0.9422153531204793])\n', (124, 315), True, 'import numpy as np\n'), ((424, 623), 'numpy.array', 'np.array', (['[1.0000001786899793, 0.9990638105447855, 0.9408586320432043, \n 0.9999997971517767, 0.9989338020819162, 0.9398997093459485, \n 1.000000024158281, 0.9990142941344189, 0.9422174830541402]'], {}), '([1.0000001786899793, 0.9990638105447855, 0.9408586320432043, \n 0.9999997971517767, 0.9989338020819162, 0.9398997093459485, \n 1.000000024158281, 0.9990142941344189, 0.9422174830541402])\n', (432, 623), True, 'import numpy as np\n'), ((732, 927), 'numpy.array', 'np.array', (['[1.000000178603741, 0.9990638106892, 0.9408586322473715, 0.9999997971832201,\n 0.9989338020666364, 0.9398997093074486, 1.000000024213076, \n 0.9990142940055439, 0.9422174828921106]'], {}), '([1.000000178603741, 0.9990638106892, 0.9408586322473715, \n 0.9999997971832201, 0.9989338020666364, 0.9398997093074486, \n 1.000000024213076, 0.9990142940055439, 0.9422174828921106])\n', (740, 927), True, 'import numpy as np\n'), ((1035, 1230), 'numpy.array', 'np.array', (['[1.000000178603741, 0.9990638106892, 0.9408586322473715, 0.9999997971832201,\n 0.9989338020666364, 0.9398997093074486, 1.000000024213076, \n 0.9990142940055439, 0.9422174828921106]'], {}), '([1.000000178603741, 0.9990638106892, 0.9408586322473715, \n 0.9999997971832201, 0.9989338020666364, 0.9398997093074486, \n 1.000000024213076, 0.9990142940055439, 0.9422174828921106])\n', (1043, 1230), True, 'import numpy as np\n'), ((1365, 1564), 'numpy.array', 'np.array', (['[0.9999998021362442, 0.9915031010358111, 0.9206318374527404, \n 0.9999997791045989, 1.0143417780460269, 0.9616365638634155, \n 1.000000418759289, 0.9913387390190033, 0.9408558778822637]'], {}), '([0.9999998021362442, 0.9915031010358111, 0.9206318374527404, \n 0.9999997791045989, 1.0143417780460269, 0.9616365638634155, \n 1.000000418759289, 0.9913387390190033, 0.9408558778822637])\n', (1373, 1564), True, 'import numpy as np\n'), ((1671, 1870), 'numpy.array', 'np.array', (['[0.9999997083766274, 0.9988968962217385, 0.9287452455114519, \n 1.0000001672319114, 0.999061839981782, 0.9452915718541725, \n 1.0000001243918462, 0.9990504923797096, 0.9488965582258678]'], {}), '([0.9999997083766274, 0.9988968962217385, 0.9287452455114519, \n 1.0000001672319114, 0.999061839981782, 0.9452915718541725, \n 1.0000001243918462, 0.9990504923797096, 0.9488965582258678])\n', (1679, 1870), True, 'import numpy as np\n'), ((1977, 2175), 'numpy.array', 'np.array', (['[0.9999999599731432, 0.9988963012384348, 0.9287445940341739, \n 0.999999734429128, 0.9990625733649781, 0.9452923634430362, \n 1.000000305597812, 0.9990503538577492, 0.9488964199625295]'], {}), '([0.9999999599731432, 0.9988963012384348, 0.9287445940341739, \n 0.999999734429128, 0.9990625733649781, 0.9452923634430362, \n 1.000000305597812, 0.9990503538577492, 0.9488964199625295])\n', (1985, 2175), True, 'import numpy as np\n'), ((2281, 2479), 'numpy.array', 'np.array', (['[0.9999999599731432, 0.9988963012384348, 0.9287445940341739, \n 0.999999734429128, 0.9990625733649781, 0.9452923634430362, \n 1.000000305597812, 0.9990503538577492, 0.9488964199625295]'], {}), 
'([0.9999999599731432, 0.9988963012384348, 0.9287445940341739, \n 0.999999734429128, 0.9990625733649781, 0.9452923634430362, \n 1.000000305597812, 0.9990503538577492, 0.9488964199625295])\n', (2289, 2479), True, 'import numpy as np\n'), ((2618, 2817), 'numpy.array', 'np.array', (['[1.000000289039923, 0.9945259444558469, 0.9241479442057374, \n 0.9999996598061066, 1.0028660964609941, 0.9332827547884484, \n 1.0000000511540714, 0.9989227003917809, 0.9366758414321353]'], {}), '([1.000000289039923, 0.9945259444558469, 0.9241479442057374, \n 0.9999996598061066, 1.0028660964609941, 0.9332827547884484, \n 1.0000000511540714, 0.9989227003917809, 0.9366758414321353])\n', (2626, 2817), True, 'import numpy as np\n'), ((2930, 3130), 'numpy.array', 'np.array', (['[1.0000001633660651, 0.9988186334488024, 0.9284513283443013, \n 0.9999997731436624, 0.9986857571039884, 0.9290168825920521, \n 1.0000000634904662, 0.9987917974558278, 0.9366076053493121]'], {}), '([1.0000001633660651, 0.9988186334488024, 0.9284513283443013, \n 0.9999997731436624, 0.9986857571039884, 0.9290168825920521, \n 1.0000000634904662, 0.9987917974558278, 0.9366076053493121])\n', (2938, 3130), True, 'import numpy as np\n'), ((3243, 3441), 'numpy.array', 'np.array', (['[1.0000002947774138, 0.9988183812973129, 0.928451074375663, \n 0.9999996601592913, 0.9986859152711799, 0.9290170457925304, \n 1.0000000450633972, 0.9987918914643369, 0.936607696605823]'], {}), '([1.0000002947774138, 0.9988183812973129, 0.928451074375663, \n 0.9999996601592913, 0.9986859152711799, 0.9290170457925304, \n 1.0000000450633972, 0.9987918914643369, 0.936607696605823])\n', (3251, 3441), True, 'import numpy as np\n'), ((3553, 3751), 'numpy.array', 'np.array', (['[1.0000002947774138, 0.9988183812973129, 0.928451074375663, \n 0.9999996601592913, 0.9986859152711799, 0.9290170457925304, \n 1.0000000450633972, 0.9987918914643369, 0.936607696605823]'], {}), '([1.0000002947774138, 0.9988183812973129, 0.928451074375663, \n 0.9999996601592913, 0.9986859152711799, 0.9290170457925304, \n 1.0000000450633972, 0.9987918914643369, 0.936607696605823])\n', (3561, 3751), True, 'import numpy as np\n'), ((3894, 4094), 'numpy.array', 'np.array', (['[0.9999999999999879, 0.9990668908275987, 0.9446728357045939, \n 0.9999999999999739, 0.9990668910254652, 0.9446728363197381, \n 1.0000000000000384, 0.9990668908667012, 0.9446728362625954]'], {}), '([0.9999999999999879, 0.9990668908275987, 0.9446728357045939, \n 0.9999999999999739, 0.9990668910254652, 0.9446728363197381, \n 1.0000000000000384, 0.9990668908667012, 0.9446728362625954])\n', (3902, 4094), True, 'import numpy as np\n'), ((4205, 4405), 'numpy.array', 'np.array', (['[0.9999999999999863, 0.9990668909016067, 0.9446728357836535, \n 0.9999999999999772, 0.9990668908990621, 0.9446728361848189, \n 1.0000000000000362, 0.9990668909190944, 0.9446728363184529]'], {}), '([0.9999999999999863, 0.9990668909016067, 0.9446728357836535, \n 0.9999999999999772, 0.9990668908990621, 0.9446728361848189, \n 1.0000000000000362, 0.9990668909190944, 0.9446728363184529])\n', (4213, 4405), True, 'import numpy as np\n'), ((4516, 4712), 'numpy.array', 'np.array', (['[0.999999999999989, 0.999066890901618, 0.9446728357836652, \n 0.9999999999999737, 0.999066890899081, 0.9446728361848393, \n 1.0000000000000375, 0.999066890919066, 0.9446728363184226]'], {}), '([0.999999999999989, 0.999066890901618, 0.9446728357836652, \n 0.9999999999999737, 0.999066890899081, 0.9446728361848393, \n 1.0000000000000375, 0.999066890919066, 0.9446728363184226])\n', (4524, 4712), True, 'import 
numpy as np\n'), ((4822, 5018), 'numpy.array', 'np.array', (['[0.999999999999989, 0.999066890901618, 0.9446728357836652, \n 0.9999999999999737, 0.999066890899081, 0.9446728361848393, \n 1.0000000000000375, 0.999066890919066, 0.9446728363184226]'], {}), '([0.999999999999989, 0.999066890901618, 0.9446728357836652, \n 0.9999999999999737, 0.999066890899081, 0.9446728361848393, \n 1.0000000000000375, 0.999066890919066, 0.9446728363184226])\n', (4830, 5018), True, 'import numpy as np\n'), ((5176, 5376), 'numpy.array', 'np.array', (['[1.0000001795040512, 1.0240495841864894, 0.9674397511496959, \n 0.9999997971910463, 1.0239111614639989, 0.9664923222986317, \n 1.0000000233049395, 1.0239935208058917, 0.9687543048259518]'], {}), '([1.0000001795040512, 1.0240495841864894, 0.9674397511496959, \n 0.9999997971910463, 1.0239111614639989, 0.9664923222986317, \n 1.0000000233049395, 1.0239935208058917, 0.9687543048259518])\n', (5184, 5376), True, 'import numpy as np\n'), ((5485, 5684), 'numpy.array', 'np.array', (['[1.0000001782704175, 1.0240459468337655, 0.9674352916726019, \n 0.9999997977852046, 1.0239130527637306, 0.9664952324047731, \n 1.0000000239444145, 1.023995255504894, 0.9687558295327158]'], {}), '([1.0000001782704175, 1.0240459468337655, 0.9674352916726019, \n 0.9999997977852046, 1.0239130527637306, 0.9664952324047731, \n 1.0000000239444145, 1.023995255504894, 0.9687558295327158])\n', (5493, 5684), True, 'import numpy as np\n'), ((5793, 5990), 'numpy.array', 'np.array', (['[1.0000001782214243, 1.024045946940332, 0.967435291834159, \n 0.9999997978066542, 1.0239130527420286, 0.9664952323430777, \n 1.0000000239719584, 1.023995255420507, 0.9687558294364838]'], {}), '([1.0000001782214243, 1.024045946940332, 0.967435291834159, \n 0.9999997978066542, 1.0239130527420286, 0.9664952323430777, \n 1.0000000239719584, 1.023995255420507, 0.9687558294364838])\n', (5801, 5990), True, 'import numpy as np\n'), ((6098, 6295), 'numpy.array', 'np.array', (['[1.0000001782214243, 1.024045946940332, 0.967435291834159, \n 0.9999997978066542, 1.0239130527420286, 0.9664952323430777, \n 1.0000000239719584, 1.023995255420507, 0.9687558294364838]'], {}), '([1.0000001782214243, 1.024045946940332, 0.967435291834159, \n 0.9999997978066542, 1.0239130527420286, 0.9664952323430777, \n 1.0000000239719584, 1.023995255420507, 0.9687558294364838])\n', (6106, 6295), True, 'import numpy as np\n'), ((6430, 6629), 'numpy.array', 'np.array', (['[0.9999998049723338, 1.0163471727161444, 0.9474851372085454, \n 0.9999997835047069, 1.0396033478524176, 0.9883119194148919, \n 1.0000004115230865, 1.016177862041642, 0.9670415224711911]'], {}), '([0.9999998049723338, 1.0163471727161444, 0.9474851372085454, \n 0.9999997835047069, 1.0396033478524176, 0.9883119194148919, \n 1.0000004115230865, 1.016177862041642, 0.9670415224711911])\n', (6438, 6629), True, 'import numpy as np\n'), ((6736, 6930), 'numpy.array', 'np.array', (['[0.9999997111904564, 1.023876123903735, 0.9557104532156954, \n 1.000000169840967, 1.024045000904823, 0.97172789408756, \n 1.0000001189689527, 1.024030547850082, 0.9752090807560196]'], {}), '([0.9999997111904564, 1.023876123903735, 0.9557104532156954, \n 1.000000169840967, 1.024045000904823, 0.97172789408756, \n 1.0000001189689527, 1.024030547850082, 0.9752090807560196])\n', (6744, 6930), True, 'import numpy as np\n'), ((7037, 7236), 'numpy.array', 'np.array', (['[0.9999999610844935, 1.0238755180281829, 0.9557097928361534, \n 0.9999997396431541, 1.0240457481759326, 0.9717286975282872, \n 1.0000002992724317, 1.0240304063318828, 
0.975208939465858]'], {}), '([0.9999999610844935, 1.0238755180281829, 0.9557097928361534, \n 0.9999997396431541, 1.0240457481759326, 0.9717286975282872, \n 1.0000002992724317, 1.0240304063318828, 0.975208939465858])\n', (7045, 7236), True, 'import numpy as np\n'), ((7342, 7541), 'numpy.array', 'np.array', (['[0.9999999610844935, 1.0238755180281829, 0.9557097928361534, \n 0.9999997396431541, 1.0240457481759326, 0.9717286975282872, \n 1.0000002992724317, 1.0240304063318828, 0.975208939465858]'], {}), '([0.9999999610844935, 1.0238755180281829, 0.9557097928361534, \n 0.9999997396431541, 1.0240457481759326, 0.9717286975282872, \n 1.0000002992724317, 1.0240304063318828, 0.975208939465858])\n', (7350, 7541), True, 'import numpy as np\n'), ((7680, 7880), 'numpy.array', 'np.array', (['[1.0000002896605282, 1.0194026014413138, 0.9509830141499932, \n 0.9999996606572187, 1.0279455302463374, 0.9603073239465667, \n 1.0000000496823542, 1.0238970684816717, 0.9633884768515291]'], {}), '([1.0000002896605282, 1.0194026014413138, 0.9509830141499932, \n 0.9999996606572187, 1.0279455302463374, 0.9603073239465667, \n 1.0000000496823542, 1.0238970684816717, 0.9633884768515291])\n', (7688, 7880), True, 'import numpy as np\n'), ((7993, 8192), 'numpy.array', 'np.array', (['[1.0000001631049464, 1.0237965435008547, 0.9553922424619002, \n 0.9999997741736003, 1.0236607923322103, 0.9559358029296258, \n 1.000000062721646, 1.0237688359303385, 0.9633200580357987]'], {}), '([1.0000001631049464, 1.0237965435008547, 0.9553922424619002, \n 0.9999997741736003, 1.0236607923322103, 0.9559358029296258, \n 1.000000062721646, 1.0237688359303385, 0.9633200580357987])\n', (8001, 8192), True, 'import numpy as np\n'), ((8305, 8503), 'numpy.array', 'np.array', (['[1.0000002940160242, 1.023796285978077, 0.9553919829548445, \n 0.9999996614657936, 1.0236609541452617, 0.9559359697011912, \n 1.000000044518284, 1.0237689316654306, 0.9633201512377196]'], {}), '([1.0000002940160242, 1.023796285978077, 0.9553919829548445, \n 0.9999996614657936, 1.0236609541452617, 0.9559359697011912, \n 1.000000044518284, 1.0237689316654306, 0.9633201512377196])\n', (8313, 8503), True, 'import numpy as np\n'), ((8615, 8813), 'numpy.array', 'np.array', (['[1.0000002940160242, 1.023796285978077, 0.9553919829548445, \n 0.9999996614657936, 1.0236609541452617, 0.9559359697011912, \n 1.000000044518284, 1.0237689316654306, 0.9633201512377196]'], {}), '([1.0000002940160242, 1.023796285978077, 0.9553919829548445, \n 0.9999996614657936, 1.0236609541452617, 0.9559359697011912, \n 1.000000044518284, 1.0237689316654306, 0.9633201512377196])\n', (8623, 8813), True, 'import numpy as np\n'), ((8956, 9151), 'numpy.array', 'np.array', (['[0.99999999999999, 1.02404859308445, 0.971134029249497, 0.9999999999999845,\n 1.0240485931685195, 0.9711340295967834, 1.0000000000000258, \n 1.0240485931044616, 0.9711340295607079]'], {}), '([0.99999999999999, 1.02404859308445, 0.971134029249497, \n 0.9999999999999845, 1.0240485931685195, 0.9711340295967834, \n 1.0000000000000258, 1.0240485931044616, 0.9711340295607079])\n', (8964, 9151), True, 'import numpy as np\n'), ((9262, 9461), 'numpy.array', 'np.array', (['[0.9999999999999892, 1.0240485931151249, 0.9711340292823146, \n 0.9999999999999865, 1.024048593114567, 0.9711340295398108, \n 1.0000000000000244, 1.0240485931277552, 0.9711340295848808]'], {}), '([0.9999999999999892, 1.0240485931151249, 0.9711340292823146, \n 0.9999999999999865, 1.024048593114567, 0.9711340295398108, \n 1.0000000000000244, 1.0240485931277552, 0.9711340295848808])\n', (9270, 
9461), True, 'import numpy as np\n'), ((9572, 9770), 'numpy.array', 'np.array', (['[0.9999999999999902, 1.024048593115119, 0.9711340292823075, \n 0.9999999999999848, 1.0240485931145844, 0.9711340295398292, \n 1.0000000000000249, 1.024048593127728, 0.9711340295848522]'], {}), '([0.9999999999999902, 1.024048593115119, 0.9711340292823075, \n 0.9999999999999848, 1.0240485931145844, 0.9711340295398292, \n 1.0000000000000249, 1.024048593127728, 0.9711340295848522])\n', (9580, 9770), True, 'import numpy as np\n'), ((9880, 10078), 'numpy.array', 'np.array', (['[0.9999999999999902, 1.024048593115119, 0.9711340292823075, \n 0.9999999999999848, 1.0240485931145844, 0.9711340295398292, \n 1.0000000000000249, 1.024048593127728, 0.9711340295848522]'], {}), '([0.9999999999999902, 1.024048593115119, 0.9711340292823075, \n 0.9999999999999848, 1.0240485931145844, 0.9711340295398292, \n 1.0000000000000249, 1.024048593127728, 0.9711340295848522])\n', (9888, 10078), True, 'import numpy as np\n'), ((10253, 10452), 'numpy.array', 'np.array', (['[1.0000001770832512, 1.0991666419999009, 1.046863039382953, \n 0.9999997998271506, 1.0990478952608114, 1.0459974904307656, \n 1.0000000230896342, 1.0991196058562567, 1.0480820977965253]'], {}), '([1.0000001770832512, 1.0991666419999009, 1.046863039382953, \n 0.9999997998271506, 1.0990478952608114, 1.0459974904307656, \n 1.0000000230896342, 1.0991196058562567, 1.0480820977965253])\n', (10261, 10452), True, 'import numpy as np\n'), ((10561, 10760), 'numpy.array', 'np.array', (['[1.000000177064337, 1.0991653032170863, 1.0468611006390927, \n 0.9999997997417357, 1.0990483460592901, 1.0459983357170173, \n 1.0000000231939636, 1.0991204912844936, 1.0480831713683516]'], {}), '([1.000000177064337, 1.0991653032170863, 1.0468611006390927, \n 0.9999997997417357, 1.0990483460592901, 1.0459983357170173, \n 1.0000000231939636, 1.0991204912844936, 1.0480831713683516])\n', (10569, 10760), True, 'import numpy as np\n'), ((10869, 11067), 'numpy.array', 'np.array', (['[1.0000001770170086, 1.099165303280019, 1.046861100729514, \n 0.9999997997589116, 1.0990483460550085, 1.0459983357036897, \n 1.0000000232241157, 1.0991204912259542, 1.0480831712929268]'], {}), '([1.0000001770170086, 1.099165303280019, 1.046861100729514, \n 0.9999997997589116, 1.0990483460550085, 1.0459983357036897, \n 1.0000000232241157, 1.0991204912259542, 1.0480831712929268])\n', (10877, 11067), True, 'import numpy as np\n'), ((11175, 11373), 'numpy.array', 'np.array', (['[1.0000001770170086, 1.099165303280019, 1.046861100729514, \n 0.9999997997589116, 1.0990483460550085, 1.0459983357036897, \n 1.0000000232241157, 1.0991204912259542, 1.0480831712929268]'], {}), '([1.0000001770170086, 1.099165303280019, 1.046861100729514, \n 0.9999997997589116, 1.0990483460550085, 1.0459983357036897, \n 1.0000000232241157, 1.0991204912259542, 1.0480831712929268])\n', (11183, 11373), True, 'import numpy as np\n'), ((11508, 11707), 'numpy.array', 'np.array', (['[0.9999998409135958, 1.0924753274233265, 1.0291805067306592, \n 0.9999997887228856, 1.112638254093763, 1.0649872145063082, \n 1.0000003703636224, 1.0923417509837368, 1.0468846408299153]'], {}), '([0.9999998409135958, 1.0924753274233265, 1.0291805067306592, \n 0.9999997887228856, 1.112638254093763, 1.0649872145063082, \n 1.0000003703636224, 1.0923417509837368, 1.0468846408299153])\n', (11516, 11707), True, 'import numpy as np\n'), ((11814, 12014), 'numpy.array', 'np.array', (['[0.9999997198861459, 1.0990179190476412, 1.0362148303868974, \n 1.0000001764446427, 1.0991669773561135, 
1.0507765134998273, \n 1.0000001036695618, 1.0991473807202723, 1.0539233691792418]'], {}), '([0.9999997198861459, 1.0990179190476412, 1.0362148303868974, \n 1.0000001764446427, 1.0991669773561135, 1.0507765134998273, \n 1.0000001036695618, 1.0991473807202723, 1.0539233691792418])\n', (11822, 12014), True, 'import numpy as np\n'), ((12121, 12320), 'numpy.array', 'np.array', (['[0.9999999645965844, 1.0990174387140366, 1.036214314982853, \n 0.9999997540341666, 1.0991675482923782, 1.0507771199594842, \n 1.0000002813693196, 1.0991472900387962, 1.0539232794875342]'], {}), '([0.9999999645965844, 1.0990174387140366, 1.036214314982853, \n 0.9999997540341666, 1.0991675482923782, 1.0507771199594842, \n 1.0000002813693196, 1.0991472900387962, 1.0539232794875342])\n', (12129, 12320), True, 'import numpy as np\n'), ((12426, 12625), 'numpy.array', 'np.array', (['[0.9999999645965844, 1.0990174387140366, 1.036214314982853, \n 0.9999997540341666, 1.0991675482923782, 1.0507771199594842, \n 1.0000002813693196, 1.0991472900387962, 1.0539232794875342]'], {}), '([0.9999999645965844, 1.0990174387140366, 1.036214314982853, \n 0.9999997540341666, 1.0991675482923782, 1.0507771199594842, \n 1.0000002813693196, 1.0991472900387962, 1.0539232794875342])\n', (12434, 12625), True, 'import numpy as np\n'), ((12764, 12961), 'numpy.array', 'np.array', (['[1.0000002867915057, 1.09511471406464, 1.0320045668742739, \n 0.9999996655448716, 1.102582851029247, 1.0401766570762196, \n 1.0000000476637207, 1.0990187740288424, 1.0431968194073924]'], {}), '([1.0000002867915057, 1.09511471406464, 1.0320045668742739, \n 0.9999996655448716, 1.102582851029247, 1.0401766570762196, \n 1.0000000476637207, 1.0990187740288424, 1.0431968194073924])\n', (12772, 12961), True, 'import numpy as np\n'), ((13074, 13273), 'numpy.array', 'np.array', (['[1.0000001623852481, 1.0989490480618516, 1.0358488170212126, \n 0.9999997776678232, 1.098829878782537, 1.0363599386677118, \n 1.0000000599471168, 1.0989238972185933, 1.0431472226133363]'], {}), '([1.0000001623852481, 1.0989490480618516, 1.0358488170212126, \n 0.9999997776678232, 1.098829878782537, 1.0363599386677118, \n 1.0000000599471168, 1.0989238972185933, 1.0431472226133363])\n', (13082, 13273), True, 'import numpy as np\n'), ((13386, 13584), 'numpy.array', 'np.array', (['[1.000000291479138, 1.0989488469146447, 1.0358486145520418, \n 0.9999996659434413, 1.0988300000349813, 1.0363600632236267, \n 1.0000000425775202, 1.098923977128452, 1.0431473008280179]'], {}), '([1.000000291479138, 1.0989488469146447, 1.0358486145520418, \n 0.9999996659434413, 1.0988300000349813, 1.0363600632236267, \n 1.0000000425775202, 1.098923977128452, 1.0431473008280179])\n', (13394, 13584), True, 'import numpy as np\n'), ((13696, 13894), 'numpy.array', 'np.array', (['[1.000000291479138, 1.0989488469146447, 1.0358486145520418, \n 0.9999996659434413, 1.0988300000349813, 1.0363600632236267, \n 1.0000000425775202, 1.098923977128452, 1.0431473008280179]'], {}), '([1.000000291479138, 1.0989488469146447, 1.0358486145520418, \n 0.9999996659434413, 1.0988300000349813, 1.0363600632236267, \n 1.0000000425775202, 1.098923977128452, 1.0431473008280179])\n', (13704, 13894), True, 'import numpy as np\n'), ((14037, 14233), 'numpy.array', 'np.array', (['[0.999999999999994, 1.0991663222840553, 1.0502483483014522, \n 0.999999999999986, 1.0991663223629755, 1.0502483485683893, \n 1.00000000000002, 1.0991663223022374, 1.0502483485566558]'], {}), '([0.999999999999994, 1.0991663222840553, 1.0502483483014522, \n 0.999999999999986, 1.0991663223629755, 
1.0502483485683893, \n 1.00000000000002, 1.0991663223022374, 1.0502483485566558])\n', (14045, 14233), True, 'import numpy as np\n'), ((14344, 14542), 'numpy.array', 'np.array', (['[0.9999999999999934, 1.0991663223142185, 1.050248348333234, \n 0.9999999999999878, 1.0991663223125718, 1.0502483485153113, \n 1.000000000000019, 1.0991663223224817, 1.0502483485779557]'], {}), '([0.9999999999999934, 1.0991663223142185, 1.050248348333234, \n 0.9999999999999878, 1.0991663223125718, 1.0502483485153113, \n 1.000000000000019, 1.0991663223224817, 1.0502483485779557])\n', (14352, 14542), True, 'import numpy as np\n'), ((14653, 14849), 'numpy.array', 'np.array', (['[0.9999999999999944, 1.099166322314217, 1.0502483483332314, \n 0.999999999999986, 1.0991663223125883, 1.050248348515329, \n 1.0000000000000195, 1.099166322322463, 1.0502483485779364]'], {}), '([0.9999999999999944, 1.099166322314217, 1.0502483483332314, \n 0.999999999999986, 1.0991663223125883, 1.050248348515329, \n 1.0000000000000195, 1.099166322322463, 1.0502483485779364])\n', (14661, 14849), True, 'import numpy as np\n'), ((14959, 15155), 'numpy.array', 'np.array', (['[0.9999999999999944, 1.099166322314217, 1.0502483483332314, \n 0.999999999999986, 1.0991663223125883, 1.050248348515329, \n 1.0000000000000195, 1.099166322322463, 1.0502483485779364]'], {}), '([0.9999999999999944, 1.099166322314217, 1.0502483483332314, \n 0.999999999999986, 1.0991663223125883, 1.050248348515329, \n 1.0000000000000195, 1.099166322322463, 1.0502483485779364])\n', (14967, 15155), True, 'import numpy as np\n'), ((15313, 15510), 'numpy.array', 'np.array', (['[1.000000177759738, 1.1266508599188314, 1.075749945733859, \n 0.9999997996753168, 1.1265276819882335, 1.0748995015125222, \n 1.0000000225649812, 1.1266018378562361, 1.076934372664356]'], {}), '([1.000000177759738, 1.1266508599188314, 1.075749945733859, \n 0.9999997996753168, 1.1265276819882335, 1.0748995015125222, \n 1.0000000225649812, 1.1266018378562361, 1.076934372664356])\n', (15321, 15510), True, 'import numpy as np\n'), ((15619, 15818), 'numpy.array', 'np.array', (['[1.000000176730594, 1.1266486259211201, 1.0757473443700512, \n 0.9999998002521623, 1.1265290107226675, 1.0749013345769867, \n 1.0000000230172796, 1.1266027366684568, 1.0769351304583261]'], {}), '([1.000000176730594, 1.1266486259211201, 1.0757473443700512, \n 0.9999998002521623, 1.1265290107226675, 1.0749013345769867, \n 1.0000000230172796, 1.1266027366684568, 1.0769351304583261])\n', (15627, 15818), True, 'import numpy as np\n'), ((15927, 16126), 'numpy.array', 'np.array', (['[1.0000001767039686, 1.1266486259729462, 1.0757473444450258, \n 0.9999998002646232, 1.1265290107113315, 1.0749013345478544, \n 1.0000000230314439, 1.126602736628164, 1.0769351304141572]'], {}), '([1.0000001767039686, 1.1266486259729462, 1.0757473444450258, \n 0.9999998002646232, 1.1265290107113315, 1.0749013345478544, \n 1.0000000230314439, 1.126602736628164, 1.0769351304141572])\n', (15935, 16126), True, 'import numpy as np\n'), ((16234, 16433), 'numpy.array', 'np.array', (['[1.0000001767039686, 1.1266486259729462, 1.0757473444450258, \n 0.9999998002646232, 1.1265290107113315, 1.0749013345478544, \n 1.0000000230314439, 1.126602736628164, 1.0769351304141572]'], {}), '([1.0000001767039686, 1.1266486259729462, 1.0757473444450258, \n 0.9999998002646232, 1.1265290107113315, 1.0749013345478544, \n 1.0000000230314439, 1.126602736628164, 1.0769351304141572])\n', (16242, 16433), True, 'import numpy as np\n'), ((16568, 16765), 'numpy.array', 'np.array', (['[0.9999998425139852, 
1.1198215550651343, 1.0582701679876008, \n 0.999999792808548, 1.1404037383383383, 1.0940119347447643, \n 1.000000364677568, 1.119678656475928, 1.0754147798091545]'], {}), '([0.9999998425139852, 1.1198215550651343, 1.0582701679876008, \n 0.999999792808548, 1.1404037383383383, 1.0940119347447643, \n 1.000000364677568, 1.119678656475928, 1.0754147798091545])\n', (16576, 16765), True, 'import numpy as np\n'), ((16872, 17069), 'numpy.array', 'np.array', (['[0.9999997220234313, 1.1264984365036237, 1.065423794124721, \n 1.0000001785338588, 1.126651120595415, 1.0795452055229118, \n 1.0000000994430542, 1.126629015453866, 1.0825891788506536]'], {}), '([0.9999997220234313, 1.1264984365036237, 1.065423794124721, \n 1.0000001785338588, 1.126651120595415, 1.0795452055229118, \n 1.0000000994430542, 1.126629015453866, 1.0825891788506536])\n', (16880, 17069), True, 'import numpy as np\n'), ((17176, 17375), 'numpy.array', 'np.array', (['[0.9999999654333293, 1.1264979466596041, 1.0654232703853377, \n 0.9999997580954444, 1.1266517031402583, 1.079545822405393, \n 1.0000002764712945, 1.1266289226736226, 1.0825890870214312]'], {}), '([0.9999999654333293, 1.1264979466596041, 1.0654232703853377, \n 0.9999997580954444, 1.1266517031402583, 1.079545822405393, \n 1.0000002764712945, 1.1266289226736226, 1.0825890870214312])\n', (17184, 17375), True, 'import numpy as np\n'), ((17481, 17680), 'numpy.array', 'np.array', (['[0.9999999654333293, 1.1264979466596041, 1.0654232703853377, \n 0.9999997580954444, 1.1266517031402583, 1.079545822405393, \n 1.0000002764712945, 1.1266289226736226, 1.0825890870214312]'], {}), '([0.9999999654333293, 1.1264979466596041, 1.0654232703853377, \n 0.9999997580954444, 1.1266517031402583, 1.079545822405393, \n 1.0000002764712945, 1.1266289226736226, 1.0825890870214312])\n', (17489, 17680), True, 'import numpy as np\n'), ((17819, 18016), 'numpy.array', 'np.array', (['[1.0000002872593454, 1.122503013135439, 1.061107915739188, \n 0.9999996662661563, 1.1301536319129346, 1.069448792307849, \n 1.0000000464745962, 1.1264944198323028, 1.0721922685731713]'], {}), '([1.0000002872593454, 1.122503013135439, 1.061107915739188, \n 0.9999996662661563, 1.1301536319129346, 1.069448792307849, \n 1.0000000464745962, 1.1264944198323028, 1.0721922685731713])\n', (17827, 18016), True, 'import numpy as np\n'), ((18129, 18326), 'numpy.array', 'np.array', (['[1.0000001621739123, 1.126428316031026, 1.0650458103409908, \n 0.9999997785161929, 1.1263065012425137, 1.0655375147447366, \n 1.0000000593100822, 1.12640238251751, 1.0721435619381965]'], {}), '([1.0000001621739123, 1.126428316031026, 1.0650458103409908, \n 0.9999997785161929, 1.1263065012425137, 1.0655375147447366, \n 1.0000000593100822, 1.12640238251751, 1.0721435619381965])\n', (18137, 18326), True, 'import numpy as np\n'), ((18439, 18637), 'numpy.array', 'np.array', (['[1.0000002908474748, 1.1264281104824707, 1.0650456033928053, \n 0.9999996670234566, 1.1263066253385652, 1.065537642082384, \n 1.0000000421291677, 1.126402463985756, 1.0721436418376473]'], {}), '([1.0000002908474748, 1.1264281104824707, 1.0650456033928053, \n 0.9999996670234566, 1.1263066253385652, 1.065537642082384, \n 1.0000000421291677, 1.126402463985756, 1.0721436418376473])\n', (18447, 18637), True, 'import numpy as np\n'), ((18749, 18947), 'numpy.array', 'np.array', (['[1.0000002908474748, 1.1264281104824707, 1.0650456033928053, \n 0.9999996670234566, 1.1263066253385652, 1.065537642082384, \n 1.0000000421291677, 1.126402463985756, 1.0721436418376473]'], {}), '([1.0000002908474748, 
1.1264281104824707, 1.0650456033928053, \n 0.9999996670234566, 1.1263066253385652, 1.065537642082384, \n 1.0000000421291677, 1.126402463985756, 1.0721436418376473])\n', (18757, 18947), True, 'import numpy as np\n'), ((19090, 19289), 'numpy.array', 'np.array', (['[0.9999999999999946, 1.126649305937712, 1.0790357881145098, \n 0.9999999999999919, 1.1266493059651883, 1.0790357882640247, \n 1.0000000000000135, 1.1266493059449603, 1.0790357882526134]'], {}), '([0.9999999999999946, 1.126649305937712, 1.0790357881145098, \n 0.9999999999999919, 1.1266493059651883, 1.0790357882640247, \n 1.0000000000000135, 1.1266493059449603, 1.0790357882526134])\n', (19098, 19289), True, 'import numpy as np\n'), ((19400, 19595), 'numpy.array', 'np.array', (['[0.9999999999999944, 1.126649305947411, 1.079035788124742, \n 0.9999999999999928, 1.126649305946962, 1.0790357882450081, \n 1.000000000000013, 1.1266493059535365, 1.079035788261449]'], {}), '([0.9999999999999944, 1.126649305947411, 1.079035788124742, \n 0.9999999999999928, 1.126649305946962, 1.0790357882450081, \n 1.000000000000013, 1.1266493059535365, 1.079035788261449])\n', (19408, 19595), True, 'import numpy as np\n'), ((19706, 19905), 'numpy.array', 'np.array', (['[0.9999999999999944, 1.1266493059473897, 1.0790357881247188, \n 0.9999999999999922, 1.1266493059469642, 1.079035788245011, \n 1.0000000000000133, 1.1266493059535063, 1.0790357882614174]'], {}), '([0.9999999999999944, 1.1266493059473897, 1.0790357881247188, \n 0.9999999999999922, 1.1266493059469642, 1.079035788245011, \n 1.0000000000000133, 1.1266493059535063, 1.0790357882614174])\n', (19714, 19905), True, 'import numpy as np\n'), ((20015, 20214), 'numpy.array', 'np.array', (['[0.9999999999999944, 1.1266493059473897, 1.0790357881247188, \n 0.9999999999999922, 1.1266493059469642, 1.079035788245011, \n 1.0000000000000133, 1.1266493059535063, 1.0790357882614174]'], {}), '([0.9999999999999944, 1.1266493059473897, 1.0790357881247188, \n 0.9999999999999922, 1.1266493059469642, 1.079035788245011, \n 1.0000000000000133, 1.1266493059535063, 1.0790357882614174])\n', (20023, 20214), True, 'import numpy as np\n')]
|
"""Clustering-related mathematical functions.
"""
from typing import Optional
import numpy as np
import py4research.math.random as r
def kmeans(x: np.ndarray,
n_clusters: Optional[int] = 1,
max_iterations: Optional[int] = 100,
tol: Optional[float] = 1e-4) -> np.ndarray:
"""Performs the K-Means clustering over the input data.
Args:
x: Input array with a shape equal to (n_samples, n_variables, n_dimensions).
n_clusters: Number of clusters.
max_iterations: Maximum number of clustering iterations.
tol: Tolerance value to stop the clustering.
Returns:
(np.ndarray): Assigned cluster per input sample.
"""
# Gathers the corresponding dimensions
n_samples, n_variables, n_dimensions = x.shape[0], x.shape[1], x.shape[2]
# Creates an array of centroids and labels
centroids = np.zeros((n_clusters, n_variables, n_dimensions))
labels = np.zeros(n_samples)
for i in range(n_clusters):
# Chooses a random sample to compose the centroid
idx = r.generate_integer_random_number(0, n_samples)
centroids[i] = x[idx]
for _ in range(max_iterations):
# Calculates the euclidean distance between samples and each centroid
dists = np.squeeze(np.array([np.linalg.norm(x - c, axis=1) for c in centroids]))
        # Assigns each sample to the cluster whose centroid is at minimum distance
updated_labels = np.squeeze(np.array(np.argmin(dists, axis=0)))
# Calculates the difference ratio between old and new labels
ratio = np.sum(labels != updated_labels) / n_samples
if ratio <= tol:
break
# Updates the old labels with the new ones
labels = updated_labels
for i in range(n_clusters):
        # Gathers the samples that belong to the current centroid
        centroid_samples = x[labels == i]
        # If any samples belong to the centroid
if centroid_samples.shape[0] > 0:
# Updates the centroid position
centroids[i] = np.mean(centroid_samples, axis=0)
return labels
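# Usage sketch for the function above: the helper below is purely illustrative, and the
# array shape and cluster count are arbitrary example values, assuming data laid out as
# (n_samples, n_variables, n_dimensions) as the docstring describes.
def _kmeans_example():
    # 30 samples, each holding a (4, 1) block of variables, grouped into 3 clusters
    synthetic = np.random.rand(30, 4, 1)
    # One cluster index is returned per sample
    return kmeans(synthetic, n_clusters=3, max_iterations=50, tol=1e-4)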
|
[
"numpy.sum",
"py4research.math.random.generate_integer_random_number",
"numpy.zeros",
"numpy.argmin",
"numpy.mean",
"numpy.linalg.norm"
] |
[((889, 938), 'numpy.zeros', 'np.zeros', (['(n_clusters, n_variables, n_dimensions)'], {}), '((n_clusters, n_variables, n_dimensions))\n', (897, 938), True, 'import numpy as np\n'), ((952, 971), 'numpy.zeros', 'np.zeros', (['n_samples'], {}), '(n_samples)\n', (960, 971), True, 'import numpy as np\n'), ((1077, 1123), 'py4research.math.random.generate_integer_random_number', 'r.generate_integer_random_number', (['(0)', 'n_samples'], {}), '(0, n_samples)\n', (1109, 1123), True, 'import py4research.math.random as r\n'), ((1596, 1628), 'numpy.sum', 'np.sum', (['(labels != updated_labels)'], {}), '(labels != updated_labels)\n', (1602, 1628), True, 'import numpy as np\n'), ((1483, 1507), 'numpy.argmin', 'np.argmin', (['dists'], {'axis': '(0)'}), '(dists, axis=0)\n', (1492, 1507), True, 'import numpy as np\n'), ((2109, 2142), 'numpy.mean', 'np.mean', (['centroid_samples'], {'axis': '(0)'}), '(centroid_samples, axis=0)\n', (2116, 2142), True, 'import numpy as np\n'), ((1306, 1335), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - c)'], {'axis': '(1)'}), '(x - c, axis=1)\n', (1320, 1335), True, 'import numpy as np\n')]
|
import numpy
import torch
from allennlp.modules.span_extractors import SpanExtractor, SelfAttentiveSpanExtractor
from allennlp.common.params import Params
class TestSelfAttentiveSpanExtractor:
def test_locally_normalised_span_extractor_can_build_from_params(self):
params = Params(
{
"type": "self_attentive",
"input_dim": 7,
"num_width_embeddings": 5,
"span_width_embedding_dim": 3,
}
)
extractor = SpanExtractor.from_params(params)
assert isinstance(extractor, SelfAttentiveSpanExtractor)
assert extractor.get_output_dim() == 10 # input_dim + span_width_embedding_dim
def test_attention_is_normalised_correctly(self):
input_dim = 7
sequence_tensor = torch.randn([2, 5, input_dim])
extractor = SelfAttentiveSpanExtractor(input_dim=input_dim)
assert extractor.get_output_dim() == input_dim
assert extractor.get_input_dim() == input_dim
# In order to test the attention, we'll make the weight which computes the logits
# zero, so the attention distribution is uniform over the sentence. This lets
# us check that the computed spans are just the averages of their representations.
extractor._global_attention._module.weight.data.fill_(0.0)
extractor._global_attention._module.bias.data.fill_(0.0)
indices = torch.LongTensor(
[[[1, 3], [2, 4]], [[0, 2], [3, 4]]]
) # smaller span tests masking.
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 2, input_dim]
# First element in the batch.
batch_element = 0
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 1:4, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 2:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now the second element in the batch.
batch_element = 1
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 0:3, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 3:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now test the case in which we have some masked spans in our indices.
indices_mask = torch.tensor([[True, True], [True, False]])
span_representations = extractor(sequence_tensor, indices, span_indices_mask=indices_mask)
# First element in the batch.
batch_element = 0
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 1:4, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span.
mean_embeddings = sequence_tensor[batch_element, 2:5, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), mean_embeddings.data.numpy())
# Now the second element in the batch.
batch_element = 1
spans = span_representations[batch_element]
# First span.
mean_embeddings = sequence_tensor[batch_element, 0:3, :].mean(0)
numpy.testing.assert_array_almost_equal(spans[0].data.numpy(), mean_embeddings.data.numpy())
# Second span was masked, so should be completely zero.
numpy.testing.assert_array_almost_equal(spans[1].data.numpy(), numpy.zeros([input_dim]))
def test_widths_are_embedded_correctly(self):
input_dim = 7
max_span_width = 5
span_width_embedding_dim = 3
output_dim = input_dim + span_width_embedding_dim
extractor = SelfAttentiveSpanExtractor(
input_dim=input_dim,
num_width_embeddings=max_span_width,
span_width_embedding_dim=span_width_embedding_dim,
)
assert extractor.get_output_dim() == output_dim
assert extractor.get_input_dim() == input_dim
sequence_tensor = torch.randn([2, max_span_width, input_dim])
indices = torch.LongTensor(
[[[1, 3], [0, 4], [0, 0]], [[0, 2], [1, 4], [2, 2]]]
) # smaller span tests masking.
span_representations = extractor(sequence_tensor, indices)
assert list(span_representations.size()) == [2, 3, output_dim]
width_embeddings = extractor._span_width_embedding.weight.data.numpy()
widths_minus_one = indices[..., 1] - indices[..., 0]
for element in range(indices.size(0)):
for span in range(indices.size(1)):
width = widths_minus_one[element, span].item()
width_embedding = span_representations[element, span, input_dim:]
numpy.testing.assert_array_almost_equal(
width_embedding.data.numpy(), width_embeddings[width]
)
|
 |
[
"torch.LongTensor",
"allennlp.modules.span_extractors.SelfAttentiveSpanExtractor",
"numpy.zeros",
"torch.randn",
"allennlp.modules.span_extractors.SpanExtractor.from_params",
"torch.tensor",
"allennlp.common.params.Params"
] |
[((289, 401), 'allennlp.common.params.Params', 'Params', (["{'type': 'self_attentive', 'input_dim': 7, 'num_width_embeddings': 5,\n 'span_width_embedding_dim': 3}"], {}), "({'type': 'self_attentive', 'input_dim': 7, 'num_width_embeddings': 5,\n 'span_width_embedding_dim': 3})\n", (295, 401), False, 'from allennlp.common.params import Params\n'), ((519, 552), 'allennlp.modules.span_extractors.SpanExtractor.from_params', 'SpanExtractor.from_params', (['params'], {}), '(params)\n', (544, 552), False, 'from allennlp.modules.span_extractors import SpanExtractor, SelfAttentiveSpanExtractor\n'), ((809, 839), 'torch.randn', 'torch.randn', (['[2, 5, input_dim]'], {}), '([2, 5, input_dim])\n', (820, 839), False, 'import torch\n'), ((860, 907), 'allennlp.modules.span_extractors.SelfAttentiveSpanExtractor', 'SelfAttentiveSpanExtractor', ([], {'input_dim': 'input_dim'}), '(input_dim=input_dim)\n', (886, 907), False, 'from allennlp.modules.span_extractors import SpanExtractor, SelfAttentiveSpanExtractor\n'), ((1436, 1490), 'torch.LongTensor', 'torch.LongTensor', (['[[[1, 3], [2, 4]], [[0, 2], [3, 4]]]'], {}), '([[[1, 3], [2, 4]], [[0, 2], [3, 4]]])\n', (1452, 1490), False, 'import torch\n'), ((2812, 2855), 'torch.tensor', 'torch.tensor', (['[[True, True], [True, False]]'], {}), '([[True, True], [True, False]])\n', (2824, 2855), False, 'import torch\n'), ((4162, 4302), 'allennlp.modules.span_extractors.SelfAttentiveSpanExtractor', 'SelfAttentiveSpanExtractor', ([], {'input_dim': 'input_dim', 'num_width_embeddings': 'max_span_width', 'span_width_embedding_dim': 'span_width_embedding_dim'}), '(input_dim=input_dim, num_width_embeddings=\n max_span_width, span_width_embedding_dim=span_width_embedding_dim)\n', (4188, 4302), False, 'from allennlp.modules.span_extractors import SpanExtractor, SelfAttentiveSpanExtractor\n'), ((4482, 4525), 'torch.randn', 'torch.randn', (['[2, max_span_width, input_dim]'], {}), '([2, max_span_width, input_dim])\n', (4493, 4525), False, 'import torch\n'), ((4544, 4614), 'torch.LongTensor', 'torch.LongTensor', (['[[[1, 3], [0, 4], [0, 0]], [[0, 2], [1, 4], [2, 2]]]'], {}), '([[[1, 3], [0, 4], [0, 0]], [[0, 2], [1, 4], [2, 2]]])\n', (4560, 4614), False, 'import torch\n'), ((3921, 3945), 'numpy.zeros', 'numpy.zeros', (['[input_dim]'], {}), '([input_dim])\n', (3932, 3945), False, 'import numpy\n')]
|
#!/usr/bin/env python3
""" MTCNN Face detection plugin """
from __future__ import absolute_import, division, print_function
import cv2
from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU
import numpy as np
from lib.model.session import KSession
from ._base import Detector, logger
class Detect(Detector):
""" MTCNN detector for face recognition """
def __init__(self, **kwargs):
git_model_id = 2
model_filename = ["mtcnn_det_v2.1.h5", "mtcnn_det_v2.2.h5", "mtcnn_det_v2.3.h5"]
super().__init__(git_model_id=git_model_id, model_filename=model_filename, **kwargs)
self.name = "MTCNN"
self.input_size = 640
self.vram = 320
self.vram_warnings = 64 # Will run at this with warnings
self.vram_per_batch = 32
self.batchsize = self.config["batch-size"]
self.kwargs = self.validate_kwargs()
self.color_format = "RGB"
def validate_kwargs(self):
""" Validate that config options are correct. If not reset to default """
valid = True
threshold = [self.config["threshold_1"],
self.config["threshold_2"],
self.config["threshold_3"]]
kwargs = {"minsize": self.config["minsize"],
"threshold": threshold,
"factor": self.config["scalefactor"]}
if kwargs["minsize"] < 10:
valid = False
elif not all(0.0 < threshold <= 1.0 for threshold in kwargs['threshold']):
valid = False
elif not 0.0 < kwargs['factor'] < 1.0:
valid = False
if not valid:
kwargs = {"minsize": 20, # minimum size of face
"threshold": [0.6, 0.7, 0.7], # three steps threshold
"factor": 0.709} # scale factor
logger.warning("Invalid MTCNN options in config. Running with defaults")
logger.debug("Using mtcnn kwargs: %s", kwargs)
return kwargs
def init_model(self):
""" Initialize S3FD Model"""
self.model = MTCNN(self.model_path, self.config["allow_growth"], **self.kwargs)
def process_input(self, batch):
""" Compile the detection image(s) for prediction """
batch["feed"] = (batch["image"] - 127.5) / 127.5
return batch
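    # Worked example of the scaling above, using illustrative pixel values: a uint8
    # intensity in [0, 255] is mapped into [-1.0, 1.0] before being fed to the
    # networks, e.g. (0 - 127.5) / 127.5 = -1.0, (127.5 - 127.5) / 127.5 = 0.0 and
    # (255 - 127.5) / 127.5 = 1.0.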
def predict(self, batch):
""" Run model to get predictions """
prediction, points = self.model.detect_faces(batch["feed"])
logger.trace("filename: %s, prediction: %s, mtcnn_points: %s",
batch["filename"], prediction, points)
batch["prediction"], batch["mtcnn_points"] = prediction, points
return batch
def process_output(self, batch):
""" Post process the detected faces """
return batch
# MTCNN Detector
# Code adapted from: https://github.com/xiangrufan/keras-mtcnn
#
# Keras implementation of the face detection / alignment algorithm
# found at
# https://github.com/kpzhang93/MTCNN_face_detection_alignment
#
# MIT License
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class PNet(KSession):
""" Keras PNet model for MTCNN """
def __init__(self, model_path, allow_growth):
super().__init__("MTCNN-PNet", model_path, allow_growth=allow_growth)
self.define_model(self.model_definition)
self.load_model_weights()
@staticmethod
def model_definition():
""" Keras PNetwork for MTCNN """
input_ = Input(shape=(None, None, 3))
var_x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input_)
var_x = PReLU(shared_axes=[1, 2], name='PReLU1')(var_x)
var_x = MaxPool2D(pool_size=2)(var_x)
var_x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
var_x = PReLU(shared_axes=[1, 2], name='PReLU2')(var_x)
var_x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(var_x)
var_x = PReLU(shared_axes=[1, 2], name='PReLU3')(var_x)
classifier = Conv2D(2, (1, 1), activation='softmax', name='conv4-1')(var_x)
bbox_regress = Conv2D(4, (1, 1), name='conv4-2')(var_x)
return [input_], [classifier, bbox_regress]
class RNet(KSession):
""" Keras RNet model for MTCNN """
def __init__(self, model_path, allow_growth):
super().__init__("MTCNN-RNet", model_path, allow_growth=allow_growth)
self.define_model(self.model_definition)
self.load_model_weights()
@staticmethod
def model_definition():
""" Keras RNetwork for MTCNN """
input_ = Input(shape=(24, 24, 3))
var_x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input_)
var_x = PReLU(shared_axes=[1, 2], name='prelu1')(var_x)
var_x = MaxPool2D(pool_size=3, strides=2, padding='same')(var_x)
var_x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
var_x = PReLU(shared_axes=[1, 2], name='prelu2')(var_x)
var_x = MaxPool2D(pool_size=3, strides=2)(var_x)
var_x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(var_x)
var_x = PReLU(shared_axes=[1, 2], name='prelu3')(var_x)
var_x = Permute((3, 2, 1))(var_x)
var_x = Flatten()(var_x)
var_x = Dense(128, name='conv4')(var_x)
var_x = PReLU(name='prelu4')(var_x)
classifier = Dense(2, activation='softmax', name='conv5-1')(var_x)
bbox_regress = Dense(4, name='conv5-2')(var_x)
return [input_], [classifier, bbox_regress]
class ONet(KSession):
""" Keras ONet model for MTCNN """
def __init__(self, model_path, allow_growth):
super().__init__("MTCNN-ONet", model_path, allow_growth=allow_growth)
self.define_model(self.model_definition)
self.load_model_weights()
@staticmethod
def model_definition():
""" Keras ONetwork for MTCNN """
input_ = Input(shape=(48, 48, 3))
var_x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input_)
var_x = PReLU(shared_axes=[1, 2], name='prelu1')(var_x)
var_x = MaxPool2D(pool_size=3, strides=2, padding='same')(var_x)
var_x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(var_x)
var_x = PReLU(shared_axes=[1, 2], name='prelu2')(var_x)
var_x = MaxPool2D(pool_size=3, strides=2)(var_x)
var_x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(var_x)
var_x = PReLU(shared_axes=[1, 2], name='prelu3')(var_x)
var_x = MaxPool2D(pool_size=2)(var_x)
var_x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(var_x)
var_x = PReLU(shared_axes=[1, 2], name='prelu4')(var_x)
var_x = Permute((3, 2, 1))(var_x)
var_x = Flatten()(var_x)
var_x = Dense(256, name='conv5')(var_x)
var_x = PReLU(name='prelu5')(var_x)
classifier = Dense(2, activation='softmax', name='conv6-1')(var_x)
bbox_regress = Dense(4, name='conv6-2')(var_x)
landmark_regress = Dense(10, name='conv6-3')(var_x)
return [input_], [classifier, bbox_regress, landmark_regress]
class MTCNN():
""" MTCNN Detector for face alignment """
# TODO Batching for rnet and onet
def __init__(self, model_path, allow_growth, minsize, threshold, factor):
"""
        minsize: minimum size of a face to be detected
        threshold: [th1, th2, th3], the classification thresholds for the three stages
        factor: the factor used to create a scaling pyramid of face sizes to
                detect in the image.
        pnet, rnet, onet: the three stage networks (PNet, RNet, ONet)
"""
logger.debug("Initializing: %s: (model_path: '%s', allow_growth: %s, minsize: %s, "
"threshold: %s, factor: %s)", self.__class__.__name__, model_path,
allow_growth, minsize, threshold, factor)
self.minsize = minsize
self.threshold = threshold
self.factor = factor
self.pnet = PNet(model_path[0], allow_growth)
self.rnet = RNet(model_path[1], allow_growth)
self.onet = ONet(model_path[2], allow_growth)
self._pnet_scales = None
logger.debug("Initialized: %s", self.__class__.__name__)
def detect_faces(self, batch):
"""Detects faces in an image, and returns bounding boxes and points for them.
batch: input batch
"""
origin_h, origin_w = batch.shape[1:3]
rectangles = self.detect_pnet(batch, origin_h, origin_w)
rectangles = self.detect_rnet(batch, rectangles, origin_h, origin_w)
rectangles = self.detect_onet(batch, rectangles, origin_h, origin_w)
ret_boxes = list()
ret_points = list()
for rects in rectangles:
if rects:
total_boxes = np.array([result[:5] for result in rects])
points = np.array([result[5:] for result in rects]).T
else:
total_boxes = np.empty((0, 9))
points = np.empty(0)
ret_boxes.append(total_boxes)
ret_points.append(points)
return ret_boxes, ret_points
def detect_pnet(self, images, height, width):
# pylint: disable=too-many-locals
""" first stage - fast proposal network (pnet) to obtain face candidates """
if self._pnet_scales is None:
self._pnet_scales = calculate_scales(height, width, self.minsize, self.factor)
rectangles = [[] for _ in range(images.shape[0])]
batch_items = images.shape[0]
for scale in self._pnet_scales:
rwidth, rheight = int(width * scale), int(height * scale)
batch = np.empty((batch_items, rheight, rwidth, 3), dtype="float32")
for idx in range(batch_items):
batch[idx, ...] = cv2.resize(images[idx, ...], (rwidth, rheight))
output = self.pnet.predict(batch)
cls_prob = output[0][..., 1]
roi = output[1]
out_h, out_w = cls_prob.shape[1:3]
out_side = max(out_h, out_w)
cls_prob = np.swapaxes(cls_prob, 1, 2)
roi = np.swapaxes(roi, 1, 3)
for idx in range(batch_items):
# first index 0 = class score, 1 = one hot repr
rectangle = detect_face_12net(cls_prob[idx, ...],
roi[idx, ...],
out_side,
1 / scale,
width,
height,
self.threshold[0])
rectangles[idx].extend(rectangle)
return [nms(x, 0.7, 'iou') for x in rectangles]
def detect_rnet(self, images, rectangle_batch, height, width):
""" second stage - refinement of face candidates with rnet """
ret = []
# TODO: batching
for idx, rectangles in enumerate(rectangle_batch):
if not rectangles:
ret.append(list())
continue
image = images[idx]
crop_number = 0
predict_24_batch = []
for rect in rectangles:
crop_img = image[int(rect[1]):int(rect[3]), int(rect[0]):int(rect[2])]
scale_img = cv2.resize(crop_img, (24, 24))
predict_24_batch.append(scale_img)
crop_number += 1
predict_24_batch = np.array(predict_24_batch)
output = self.rnet.predict(predict_24_batch, batch_size=128)
cls_prob = output[0]
cls_prob = np.array(cls_prob)
roi_prob = output[1]
roi_prob = np.array(roi_prob)
ret.append(filter_face_24net(
cls_prob, roi_prob, rectangles, width, height, self.threshold[1]
))
return ret
def detect_onet(self, images, rectangle_batch, height, width):
""" third stage - further refinement and facial landmarks positions with onet """
ret = list()
# TODO: batching
for idx, rectangles in enumerate(rectangle_batch):
if not rectangles:
ret.append(list())
continue
image = images[idx]
crop_number = 0
predict_batch = []
for rect in rectangles:
crop_img = image[int(rect[1]):int(rect[3]), int(rect[0]):int(rect[2])]
scale_img = cv2.resize(crop_img, (48, 48))
predict_batch.append(scale_img)
crop_number += 1
predict_batch = np.array(predict_batch)
output = self.onet.predict(predict_batch, batch_size=128)
cls_prob = output[0]
roi_prob = output[1]
pts_prob = output[2] # index
ret.append(filter_face_48net(
cls_prob,
roi_prob,
pts_prob,
rectangles,
width,
height,
self.threshold[2]
))
return ret
def detect_face_12net(cls_prob, roi, out_side, scale, width, height, threshold):
# pylint: disable=too-many-locals, too-many-arguments
""" Detect face position and calibrate bounding box on 12net feature map(matrix version)
Input:
cls_prob : softmax feature map for face classify
roi : feature map for regression
out_side : feature map's largest size
scale : current input image scale in multi-scales
width : image's origin width
height : image's origin height
threshold: 0.6 can have 99% recall rate
"""
in_side = 2*out_side+11
stride = 0
if out_side != 1:
stride = float(in_side-12)/(out_side-1)
(var_x, var_y) = np.where(cls_prob >= threshold)
boundingbox = np.array([var_x, var_y]).T
bb1 = np.fix((stride * (boundingbox) + 0) * scale)
bb2 = np.fix((stride * (boundingbox) + 11) * scale)
boundingbox = np.concatenate((bb1, bb2), axis=1)
dx_1 = roi[0][var_x, var_y]
dx_2 = roi[1][var_x, var_y]
dx3 = roi[2][var_x, var_y]
dx4 = roi[3][var_x, var_y]
score = np.array([cls_prob[var_x, var_y]]).T
offset = np.array([dx_1, dx_2, dx3, dx4]).T
boundingbox = boundingbox + offset*12.0*scale
rectangles = np.concatenate((boundingbox, score), axis=1)
rectangles = rect2square(rectangles)
pick = []
for rect in rectangles:
x_1 = int(max(0, rect[0]))
y_1 = int(max(0, rect[1]))
x_2 = int(min(width, rect[2]))
y_2 = int(min(height, rect[3]))
sc_ = rect[4]
if x_2 > x_1 and y_2 > y_1:
pick.append([x_1, y_1, x_2, y_2, sc_])
return nms(pick, 0.3, "iou")
def filter_face_24net(cls_prob, roi, rectangles, width, height, threshold):
# pylint: disable=too-many-locals, too-many-arguments
""" Filter face position and calibrate bounding box on 12net's output
Input:
cls_prob : softmax feature map for face classify
roi_prob : feature map for regression
rectangles: 12net's predict
width : image's origin width
height : image's origin height
threshold : 0.6 can have 97% recall rate
Output:
rectangles: possible face positions
"""
prob = cls_prob[:, 1]
pick = np.where(prob >= threshold)
rectangles = np.array(rectangles)
x_1 = rectangles[pick, 0]
y_1 = rectangles[pick, 1]
x_2 = rectangles[pick, 2]
y_2 = rectangles[pick, 3]
sc_ = np.array([prob[pick]]).T
dx_1 = roi[pick, 0]
dx_2 = roi[pick, 1]
dx3 = roi[pick, 2]
dx4 = roi[pick, 3]
r_width = x_2-x_1
r_height = y_2-y_1
x_1 = np.array([(x_1 + dx_1 * r_width)[0]]).T
y_1 = np.array([(y_1 + dx_2 * r_height)[0]]).T
x_2 = np.array([(x_2 + dx3 * r_width)[0]]).T
y_2 = np.array([(y_2 + dx4 * r_height)[0]]).T
rectangles = np.concatenate((x_1, y_1, x_2, y_2, sc_), axis=1)
rectangles = rect2square(rectangles)
pick = []
for rect in rectangles:
x_1 = int(max(0, rect[0]))
y_1 = int(max(0, rect[1]))
x_2 = int(min(width, rect[2]))
y_2 = int(min(height, rect[3]))
sc_ = rect[4]
if x_2 > x_1 and y_2 > y_1:
pick.append([x_1, y_1, x_2, y_2, sc_])
return nms(pick, 0.3, 'iou')
def filter_face_48net(cls_prob, roi, pts, rectangles, width, height, threshold):
# pylint: disable=too-many-locals, too-many-arguments
""" Filter face position and calibrate bounding box on 12net's output
Input:
cls_prob : cls_prob[1] is face possibility
roi : roi offset
pts : 5 landmark
        rectangles: 12net's predict, rectangles[i][0:4] is the position, rectangles[i][4] is score
width : image's origin width
height : image's origin height
threshold : 0.7 can have 94% recall rate on CelebA-database
Output:
rectangles: face positions and landmarks
"""
prob = cls_prob[:, 1]
pick = np.where(prob >= threshold)
rectangles = np.array(rectangles)
x_1 = rectangles[pick, 0]
y_1 = rectangles[pick, 1]
x_2 = rectangles[pick, 2]
y_2 = rectangles[pick, 3]
sc_ = np.array([prob[pick]]).T
dx_1 = roi[pick, 0]
dx_2 = roi[pick, 1]
dx3 = roi[pick, 2]
dx4 = roi[pick, 3]
r_width = x_2-x_1
r_height = y_2-y_1
pts0 = np.array([(r_width * pts[pick, 0] + x_1)[0]]).T
pts1 = np.array([(r_height * pts[pick, 5] + y_1)[0]]).T
pts2 = np.array([(r_width * pts[pick, 1] + x_1)[0]]).T
pts3 = np.array([(r_height * pts[pick, 6] + y_1)[0]]).T
pts4 = np.array([(r_width * pts[pick, 2] + x_1)[0]]).T
pts5 = np.array([(r_height * pts[pick, 7] + y_1)[0]]).T
pts6 = np.array([(r_width * pts[pick, 3] + x_1)[0]]).T
pts7 = np.array([(r_height * pts[pick, 8] + y_1)[0]]).T
pts8 = np.array([(r_width * pts[pick, 4] + x_1)[0]]).T
pts9 = np.array([(r_height * pts[pick, 9] + y_1)[0]]).T
x_1 = np.array([(x_1 + dx_1 * r_width)[0]]).T
y_1 = np.array([(y_1 + dx_2 * r_height)[0]]).T
x_2 = np.array([(x_2 + dx3 * r_width)[0]]).T
y_2 = np.array([(y_2 + dx4 * r_height)[0]]).T
rectangles = np.concatenate((x_1, y_1, x_2, y_2, sc_,
pts0, pts1, pts2, pts3, pts4, pts5, pts6, pts7, pts8, pts9),
axis=1)
pick = []
for rect in rectangles:
x_1 = int(max(0, rect[0]))
y_1 = int(max(0, rect[1]))
x_2 = int(min(width, rect[2]))
y_2 = int(min(height, rect[3]))
if x_2 > x_1 and y_2 > y_1:
pick.append([x_1, y_1, x_2, y_2,
rect[4], rect[5], rect[6], rect[7], rect[8], rect[9],
rect[10], rect[11], rect[12], rect[13], rect[14]])
return nms(pick, 0.3, 'iom')
def nms(rectangles, threshold, method):
# pylint:disable=too-many-locals
""" apply NMS(non-maximum suppression) on ROIs in same scale(matrix version)
Input:
        rectangles: rectangles[i][0:4] is the position, rectangles[i][4] is score
Output:
rectangles: same as input
"""
if not rectangles:
return rectangles
boxes = np.array(rectangles)
x_1 = boxes[:, 0]
y_1 = boxes[:, 1]
x_2 = boxes[:, 2]
y_2 = boxes[:, 3]
var_s = boxes[:, 4]
area = np.multiply(x_2-x_1+1, y_2-y_1+1)
s_sort = np.array(var_s.argsort())
pick = []
while len(s_sort) > 0:
# s_sort[-1] have highest prob score, s_sort[0:-1]->others
xx_1 = np.maximum(x_1[s_sort[-1]], x_1[s_sort[0:-1]])
yy_1 = np.maximum(y_1[s_sort[-1]], y_1[s_sort[0:-1]])
xx_2 = np.minimum(x_2[s_sort[-1]], x_2[s_sort[0:-1]])
yy_2 = np.minimum(y_2[s_sort[-1]], y_2[s_sort[0:-1]])
width = np.maximum(0.0, xx_2 - xx_1 + 1)
height = np.maximum(0.0, yy_2 - yy_1 + 1)
inter = width * height
if method == 'iom':
var_o = inter / np.minimum(area[s_sort[-1]], area[s_sort[0:-1]])
else:
var_o = inter / (area[s_sort[-1]] + area[s_sort[0:-1]] - inter)
pick.append(s_sort[-1])
s_sort = s_sort[np.where(var_o <= threshold)[0]]
result_rectangle = boxes[pick].tolist()
return result_rectangle
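# Worked example for nms() with illustrative boxes (the helper is purely illustrative):
# the first two rectangles overlap heavily (IoU roughly 0.7 with the +1 pixel
# convention used above), so with an IoU threshold of 0.5 the lower-scoring one is
# suppressed and two rectangles survive.
def _nms_example():
    boxes = [[0, 0, 10, 10, 0.9],    # kept, highest score of the overlapping pair
             [1, 1, 11, 11, 0.8],    # suppressed, IoU with the first box exceeds 0.5
             [50, 50, 60, 60, 0.7]]  # kept, no overlap with the others
    return nms(boxes, 0.5, 'iou')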
def calculate_scales(height, width, minsize, factor):
""" Calculate multi-scale
Input:
height: Original image height
width: Original image width
minsize: Minimum size for a face to be accepted
factor: Scaling factor
Output:
scales : Multi-scale
"""
factor_count = 0
minl = np.amin([height, width])
var_m = 12.0 / minsize
minl = minl * var_m
# create scale pyramid
scales = []
while minl >= 12:
scales += [var_m * np.power(factor, factor_count)]
minl = minl * factor
factor_count += 1
logger.trace(scales)
return scales
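# Worked example for calculate_scales() with illustrative numbers (the helper is purely
# illustrative): for a 240x320 image, minsize=20 and factor=0.709, the first scale is
# 12 / 20 = 0.6 and each further scale is the previous one multiplied by 0.709,
# stopping once the tracked shorter side would fall below 12 pixels (eight scales here).
def _scales_example():
    return calculate_scales(240, 320, 20, 0.709)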
def rect2square(rectangles):
""" change rectangles into squares (matrix version)
Input:
        rectangles: rectangles[i][0:4] is the position, rectangles[i][4] is score
Output:
squares: same as input
"""
width = rectangles[:, 2] - rectangles[:, 0]
height = rectangles[:, 3] - rectangles[:, 1]
length = np.maximum(width, height).T
rectangles[:, 0] = rectangles[:, 0] + width * 0.5 - length * 0.5
rectangles[:, 1] = rectangles[:, 1] + height * 0.5 - length * 0.5
rectangles[:, 2:4] = rectangles[:, 0:2] + np.repeat([length], 2, axis=0).T
return rectangles
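# Worked example for rect2square() with illustrative numbers (the helper is purely
# illustrative): a 20x10 rectangle with top-left corner at (0, 0) is grown into a
# 20x20 square around the same centre, so x stays [0, 20] while y expands from
# [0, 10] to [-5, 15].
def _rect2square_example():
    rect = np.array([[0.0, 0.0, 20.0, 10.0, 0.99]])
    return rect2square(rect)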
|
[
"numpy.maximum",
"numpy.amin",
"numpy.empty",
"keras.layers.MaxPool2D",
"keras.layers.Input",
"numpy.multiply",
"numpy.power",
"keras.layers.Flatten",
"numpy.swapaxes",
"keras.layers.Permute",
"cv2.resize",
"numpy.repeat",
"numpy.minimum",
"numpy.fix",
"keras.layers.PReLU",
"keras.layers.Conv2D",
"numpy.concatenate",
"numpy.where",
"numpy.array",
"keras.layers.Dense"
] |
[((14833, 14864), 'numpy.where', 'np.where', (['(cls_prob >= threshold)'], {}), '(cls_prob >= threshold)\n', (14841, 14864), True, 'import numpy as np\n'), ((14920, 14962), 'numpy.fix', 'np.fix', (['((stride * boundingbox + 0) * scale)'], {}), '((stride * boundingbox + 0) * scale)\n', (14926, 14962), True, 'import numpy as np\n'), ((14975, 15018), 'numpy.fix', 'np.fix', (['((stride * boundingbox + 11) * scale)'], {}), '((stride * boundingbox + 11) * scale)\n', (14981, 15018), True, 'import numpy as np\n'), ((15039, 15073), 'numpy.concatenate', 'np.concatenate', (['(bb1, bb2)'], {'axis': '(1)'}), '((bb1, bb2), axis=1)\n', (15053, 15073), True, 'import numpy as np\n'), ((15364, 15408), 'numpy.concatenate', 'np.concatenate', (['(boundingbox, score)'], {'axis': '(1)'}), '((boundingbox, score), axis=1)\n', (15378, 15408), True, 'import numpy as np\n'), ((16378, 16405), 'numpy.where', 'np.where', (['(prob >= threshold)'], {}), '(prob >= threshold)\n', (16386, 16405), True, 'import numpy as np\n'), ((16423, 16443), 'numpy.array', 'np.array', (['rectangles'], {}), '(rectangles)\n', (16431, 16443), True, 'import numpy as np\n'), ((16955, 17004), 'numpy.concatenate', 'np.concatenate', (['(x_1, y_1, x_2, y_2, sc_)'], {'axis': '(1)'}), '((x_1, y_1, x_2, y_2, sc_), axis=1)\n', (16969, 17004), True, 'import numpy as np\n'), ((18075, 18102), 'numpy.where', 'np.where', (['(prob >= threshold)'], {}), '(prob >= threshold)\n', (18083, 18102), True, 'import numpy as np\n'), ((18120, 18140), 'numpy.array', 'np.array', (['rectangles'], {}), '(rectangles)\n', (18128, 18140), True, 'import numpy as np\n'), ((19247, 19360), 'numpy.concatenate', 'np.concatenate', (['(x_1, y_1, x_2, y_2, sc_, pts0, pts1, pts2, pts3, pts4, pts5, pts6, pts7,\n pts8, pts9)'], {'axis': '(1)'}), '((x_1, y_1, x_2, y_2, sc_, pts0, pts1, pts2, pts3, pts4, pts5,\n pts6, pts7, pts8, pts9), axis=1)\n', (19261, 19360), True, 'import numpy as np\n'), ((20250, 20270), 'numpy.array', 'np.array', (['rectangles'], {}), '(rectangles)\n', (20258, 20270), True, 'import numpy as np\n'), ((20394, 20435), 'numpy.multiply', 'np.multiply', (['(x_2 - x_1 + 1)', '(y_2 - y_1 + 1)'], {}), '(x_2 - x_1 + 1, y_2 - y_1 + 1)\n', (20405, 20435), True, 'import numpy as np\n'), ((21677, 21701), 'numpy.amin', 'np.amin', (['[height, width]'], {}), '([height, width])\n', (21684, 21701), True, 'import numpy as np\n'), ((4519, 4547), 'keras.layers.Input', 'Input', ([], {'shape': '(None, None, 3)'}), '(shape=(None, None, 3))\n', (4524, 4547), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((5618, 5642), 'keras.layers.Input', 'Input', ([], {'shape': '(24, 24, 3)'}), '(shape=(24, 24, 3))\n', (5623, 5642), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6948, 6972), 'keras.layers.Input', 'Input', ([], {'shape': '(48, 48, 3)'}), '(shape=(48, 48, 3))\n', (6953, 6972), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((14883, 14907), 'numpy.array', 'np.array', (['[var_x, var_y]'], {}), '([var_x, var_y])\n', (14891, 14907), True, 'import numpy as np\n'), ((15212, 15246), 'numpy.array', 'np.array', (['[cls_prob[var_x, var_y]]'], {}), '([cls_prob[var_x, var_y]])\n', (15220, 15246), True, 'import numpy as np\n'), ((15262, 15294), 'numpy.array', 'np.array', (['[dx_1, dx_2, dx3, dx4]'], {}), '([dx_1, dx_2, dx3, dx4])\n', (15270, 15294), True, 'import numpy as np\n'), ((16574, 16596), 'numpy.array', 'np.array', (['[prob[pick]]'], {}), 
'([prob[pick]])\n', (16582, 16596), True, 'import numpy as np\n'), ((16748, 16785), 'numpy.array', 'np.array', (['[(x_1 + dx_1 * r_width)[0]]'], {}), '([(x_1 + dx_1 * r_width)[0]])\n', (16756, 16785), True, 'import numpy as np\n'), ((16798, 16836), 'numpy.array', 'np.array', (['[(y_1 + dx_2 * r_height)[0]]'], {}), '([(y_1 + dx_2 * r_height)[0]])\n', (16806, 16836), True, 'import numpy as np\n'), ((16849, 16885), 'numpy.array', 'np.array', (['[(x_2 + dx3 * r_width)[0]]'], {}), '([(x_2 + dx3 * r_width)[0]])\n', (16857, 16885), True, 'import numpy as np\n'), ((16898, 16935), 'numpy.array', 'np.array', (['[(y_2 + dx4 * r_height)[0]]'], {}), '([(y_2 + dx4 * r_height)[0]])\n', (16906, 16935), True, 'import numpy as np\n'), ((18271, 18293), 'numpy.array', 'np.array', (['[prob[pick]]'], {}), '([prob[pick]])\n', (18279, 18293), True, 'import numpy as np\n'), ((18446, 18491), 'numpy.array', 'np.array', (['[(r_width * pts[pick, 0] + x_1)[0]]'], {}), '([(r_width * pts[pick, 0] + x_1)[0]])\n', (18454, 18491), True, 'import numpy as np\n'), ((18505, 18551), 'numpy.array', 'np.array', (['[(r_height * pts[pick, 5] + y_1)[0]]'], {}), '([(r_height * pts[pick, 5] + y_1)[0]])\n', (18513, 18551), True, 'import numpy as np\n'), ((18565, 18610), 'numpy.array', 'np.array', (['[(r_width * pts[pick, 1] + x_1)[0]]'], {}), '([(r_width * pts[pick, 1] + x_1)[0]])\n', (18573, 18610), True, 'import numpy as np\n'), ((18624, 18670), 'numpy.array', 'np.array', (['[(r_height * pts[pick, 6] + y_1)[0]]'], {}), '([(r_height * pts[pick, 6] + y_1)[0]])\n', (18632, 18670), True, 'import numpy as np\n'), ((18684, 18729), 'numpy.array', 'np.array', (['[(r_width * pts[pick, 2] + x_1)[0]]'], {}), '([(r_width * pts[pick, 2] + x_1)[0]])\n', (18692, 18729), True, 'import numpy as np\n'), ((18743, 18789), 'numpy.array', 'np.array', (['[(r_height * pts[pick, 7] + y_1)[0]]'], {}), '([(r_height * pts[pick, 7] + y_1)[0]])\n', (18751, 18789), True, 'import numpy as np\n'), ((18803, 18848), 'numpy.array', 'np.array', (['[(r_width * pts[pick, 3] + x_1)[0]]'], {}), '([(r_width * pts[pick, 3] + x_1)[0]])\n', (18811, 18848), True, 'import numpy as np\n'), ((18862, 18908), 'numpy.array', 'np.array', (['[(r_height * pts[pick, 8] + y_1)[0]]'], {}), '([(r_height * pts[pick, 8] + y_1)[0]])\n', (18870, 18908), True, 'import numpy as np\n'), ((18922, 18967), 'numpy.array', 'np.array', (['[(r_width * pts[pick, 4] + x_1)[0]]'], {}), '([(r_width * pts[pick, 4] + x_1)[0]])\n', (18930, 18967), True, 'import numpy as np\n'), ((18981, 19027), 'numpy.array', 'np.array', (['[(r_height * pts[pick, 9] + y_1)[0]]'], {}), '([(r_height * pts[pick, 9] + y_1)[0]])\n', (18989, 19027), True, 'import numpy as np\n'), ((19040, 19077), 'numpy.array', 'np.array', (['[(x_1 + dx_1 * r_width)[0]]'], {}), '([(x_1 + dx_1 * r_width)[0]])\n', (19048, 19077), True, 'import numpy as np\n'), ((19090, 19128), 'numpy.array', 'np.array', (['[(y_1 + dx_2 * r_height)[0]]'], {}), '([(y_1 + dx_2 * r_height)[0]])\n', (19098, 19128), True, 'import numpy as np\n'), ((19141, 19177), 'numpy.array', 'np.array', (['[(x_2 + dx3 * r_width)[0]]'], {}), '([(x_2 + dx3 * r_width)[0]])\n', (19149, 19177), True, 'import numpy as np\n'), ((19190, 19227), 'numpy.array', 'np.array', (['[(y_2 + dx4 * r_height)[0]]'], {}), '([(y_2 + dx4 * r_height)[0]])\n', (19198, 19227), True, 'import numpy as np\n'), ((20590, 20636), 'numpy.maximum', 'np.maximum', (['x_1[s_sort[-1]]', 'x_1[s_sort[0:-1]]'], {}), '(x_1[s_sort[-1]], x_1[s_sort[0:-1]])\n', (20600, 20636), True, 'import numpy as np\n'), ((20652, 20698), 
'numpy.maximum', 'np.maximum', (['y_1[s_sort[-1]]', 'y_1[s_sort[0:-1]]'], {}), '(y_1[s_sort[-1]], y_1[s_sort[0:-1]])\n', (20662, 20698), True, 'import numpy as np\n'), ((20714, 20760), 'numpy.minimum', 'np.minimum', (['x_2[s_sort[-1]]', 'x_2[s_sort[0:-1]]'], {}), '(x_2[s_sort[-1]], x_2[s_sort[0:-1]])\n', (20724, 20760), True, 'import numpy as np\n'), ((20776, 20822), 'numpy.minimum', 'np.minimum', (['y_2[s_sort[-1]]', 'y_2[s_sort[0:-1]]'], {}), '(y_2[s_sort[-1]], y_2[s_sort[0:-1]])\n', (20786, 20822), True, 'import numpy as np\n'), ((20839, 20871), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx_2 - xx_1 + 1)'], {}), '(0.0, xx_2 - xx_1 + 1)\n', (20849, 20871), True, 'import numpy as np\n'), ((20889, 20921), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy_2 - yy_1 + 1)'], {}), '(0.0, yy_2 - yy_1 + 1)\n', (20899, 20921), True, 'import numpy as np\n'), ((22316, 22341), 'numpy.maximum', 'np.maximum', (['width', 'height'], {}), '(width, height)\n', (22326, 22341), True, 'import numpy as np\n'), ((4564, 4624), 'keras.layers.Conv2D', 'Conv2D', (['(10)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""conv1"""'}), "(10, (3, 3), strides=1, padding='valid', name='conv1')\n", (4570, 4624), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((4649, 4689), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""PReLU1"""'}), "(shared_axes=[1, 2], name='PReLU1')\n", (4654, 4689), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((4713, 4735), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (4722, 4735), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((4759, 4819), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""conv2"""'}), "(16, (3, 3), strides=1, padding='valid', name='conv2')\n", (4765, 4819), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((4843, 4883), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""PReLU2"""'}), "(shared_axes=[1, 2], name='PReLU2')\n", (4848, 4883), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((4907, 4967), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""conv3"""'}), "(32, (3, 3), strides=1, padding='valid', name='conv3')\n", (4913, 4967), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((4991, 5031), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""PReLU3"""'}), "(shared_axes=[1, 2], name='PReLU3')\n", (4996, 5031), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((5060, 5115), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(1, 1)'], {'activation': '"""softmax"""', 'name': '"""conv4-1"""'}), "(2, (1, 1), activation='softmax', name='conv4-1')\n", (5066, 5115), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((5146, 5179), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(1, 1)'], {'name': '"""conv4-2"""'}), "(4, (1, 1), name='conv4-2')\n", (5152, 5179), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((5659, 5719), 'keras.layers.Conv2D', 'Conv2D', (['(28)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': 
'"""conv1"""'}), "(28, (3, 3), strides=1, padding='valid', name='conv1')\n", (5665, 5719), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((5744, 5784), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""prelu1"""'}), "(shared_axes=[1, 2], name='prelu1')\n", (5749, 5784), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((5808, 5857), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=3, strides=2, padding='same')\n", (5817, 5857), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((5882, 5942), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""conv2"""'}), "(48, (3, 3), strides=1, padding='valid', name='conv2')\n", (5888, 5942), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((5966, 6006), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""prelu2"""'}), "(shared_axes=[1, 2], name='prelu2')\n", (5971, 6006), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6030, 6063), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(3)', 'strides': '(2)'}), '(pool_size=3, strides=2)\n', (6039, 6063), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6088, 6148), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(2, 2)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""conv3"""'}), "(64, (2, 2), strides=1, padding='valid', name='conv3')\n", (6094, 6148), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6172, 6212), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""prelu3"""'}), "(shared_axes=[1, 2], name='prelu3')\n", (6177, 6212), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6236, 6254), 'keras.layers.Permute', 'Permute', (['(3, 2, 1)'], {}), '((3, 2, 1))\n', (6243, 6254), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6278, 6287), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6285, 6287), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6311, 6335), 'keras.layers.Dense', 'Dense', (['(128)'], {'name': '"""conv4"""'}), "(128, name='conv4')\n", (6316, 6335), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6359, 6379), 'keras.layers.PReLU', 'PReLU', ([], {'name': '"""prelu4"""'}), "(name='prelu4')\n", (6364, 6379), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6408, 6454), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""conv5-1"""'}), "(2, activation='softmax', name='conv5-1')\n", (6413, 6454), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6485, 6509), 'keras.layers.Dense', 'Dense', (['(4)'], {'name': '"""conv5-2"""'}), "(4, name='conv5-2')\n", (6490, 6509), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((6989, 7049), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""conv1"""'}), "(32, (3, 3), strides=1, padding='valid', 
name='conv1')\n", (6995, 7049), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7074, 7114), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""prelu1"""'}), "(shared_axes=[1, 2], name='prelu1')\n", (7079, 7114), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7138, 7187), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=3, strides=2, padding='same')\n", (7147, 7187), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7211, 7271), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""conv2"""'}), "(64, (3, 3), strides=1, padding='valid', name='conv2')\n", (7217, 7271), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7295, 7335), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""prelu2"""'}), "(shared_axes=[1, 2], name='prelu2')\n", (7300, 7335), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7359, 7392), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(3)', 'strides': '(2)'}), '(pool_size=3, strides=2)\n', (7368, 7392), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7416, 7476), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""conv3"""'}), "(64, (3, 3), strides=1, padding='valid', name='conv3')\n", (7422, 7476), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7500, 7540), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""prelu3"""'}), "(shared_axes=[1, 2], name='prelu3')\n", (7505, 7540), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7564, 7586), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (7573, 7586), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7610, 7671), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(2, 2)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""conv4"""'}), "(128, (2, 2), strides=1, padding='valid', name='conv4')\n", (7616, 7671), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7695, 7735), 'keras.layers.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]', 'name': '"""prelu4"""'}), "(shared_axes=[1, 2], name='prelu4')\n", (7700, 7735), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7759, 7777), 'keras.layers.Permute', 'Permute', (['(3, 2, 1)'], {}), '((3, 2, 1))\n', (7766, 7777), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7801, 7810), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7808, 7810), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7834, 7858), 'keras.layers.Dense', 'Dense', (['(256)'], {'name': '"""conv5"""'}), "(256, name='conv5')\n", (7839, 7858), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7882, 7902), 'keras.layers.PReLU', 'PReLU', ([], {'name': '"""prelu5"""'}), "(name='prelu5')\n", (7887, 7902), False, 'from keras.layers import Conv2D, Dense, 
Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((7932, 7978), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""conv6-1"""'}), "(2, activation='softmax', name='conv6-1')\n", (7937, 7978), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((8009, 8033), 'keras.layers.Dense', 'Dense', (['(4)'], {'name': '"""conv6-2"""'}), "(4, name='conv6-2')\n", (8014, 8033), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((8068, 8093), 'keras.layers.Dense', 'Dense', (['(10)'], {'name': '"""conv6-3"""'}), "(10, name='conv6-3')\n", (8073, 8093), False, 'from keras.layers import Conv2D, Dense, Flatten, Input, MaxPool2D, Permute, PReLU\n'), ((10671, 10731), 'numpy.empty', 'np.empty', (['(batch_items, rheight, rwidth, 3)'], {'dtype': '"""float32"""'}), "((batch_items, rheight, rwidth, 3), dtype='float32')\n", (10679, 10731), True, 'import numpy as np\n'), ((11083, 11110), 'numpy.swapaxes', 'np.swapaxes', (['cls_prob', '(1)', '(2)'], {}), '(cls_prob, 1, 2)\n', (11094, 11110), True, 'import numpy as np\n'), ((11129, 11151), 'numpy.swapaxes', 'np.swapaxes', (['roi', '(1)', '(3)'], {}), '(roi, 1, 3)\n', (11140, 11151), True, 'import numpy as np\n'), ((12499, 12525), 'numpy.array', 'np.array', (['predict_24_batch'], {}), '(predict_24_batch)\n', (12507, 12525), True, 'import numpy as np\n'), ((12655, 12673), 'numpy.array', 'np.array', (['cls_prob'], {}), '(cls_prob)\n', (12663, 12673), True, 'import numpy as np\n'), ((12730, 12748), 'numpy.array', 'np.array', (['roi_prob'], {}), '(roi_prob)\n', (12738, 12748), True, 'import numpy as np\n'), ((13642, 13665), 'numpy.array', 'np.array', (['predict_batch'], {}), '(predict_batch)\n', (13650, 13665), True, 'import numpy as np\n'), ((22529, 22559), 'numpy.repeat', 'np.repeat', (['[length]', '(2)'], {'axis': '(0)'}), '([length], 2, axis=0)\n', (22538, 22559), True, 'import numpy as np\n'), ((9806, 9848), 'numpy.array', 'np.array', (['[result[:5] for result in rects]'], {}), '([result[:5] for result in rects])\n', (9814, 9848), True, 'import numpy as np\n'), ((9967, 9983), 'numpy.empty', 'np.empty', (['(0, 9)'], {}), '((0, 9))\n', (9975, 9983), True, 'import numpy as np\n'), ((10009, 10020), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (10017, 10020), True, 'import numpy as np\n'), ((10809, 10856), 'cv2.resize', 'cv2.resize', (['images[idx, ...]', '(rwidth, rheight)'], {}), '(images[idx, ...], (rwidth, rheight))\n', (10819, 10856), False, 'import cv2\n'), ((12353, 12383), 'cv2.resize', 'cv2.resize', (['crop_img', '(24, 24)'], {}), '(crop_img, (24, 24))\n', (12363, 12383), False, 'import cv2\n'), ((13502, 13532), 'cv2.resize', 'cv2.resize', (['crop_img', '(48, 48)'], {}), '(crop_img, (48, 48))\n', (13512, 13532), False, 'import cv2\n'), ((21009, 21057), 'numpy.minimum', 'np.minimum', (['area[s_sort[-1]]', 'area[s_sort[0:-1]]'], {}), '(area[s_sort[-1]], area[s_sort[0:-1]])\n', (21019, 21057), True, 'import numpy as np\n'), ((21204, 21232), 'numpy.where', 'np.where', (['(var_o <= threshold)'], {}), '(var_o <= threshold)\n', (21212, 21232), True, 'import numpy as np\n'), ((21845, 21875), 'numpy.power', 'np.power', (['factor', 'factor_count'], {}), '(factor, factor_count)\n', (21853, 21875), True, 'import numpy as np\n'), ((9874, 9916), 'numpy.array', 'np.array', (['[result[5:] for result in rects]'], {}), '([result[5:] for result in rects])\n', (9882, 9916), True, 'import numpy as np\n')]
|
"""
fftshift on OCLArrays
as of now, only supports even dimensions (as ifftshift == fftshift then ;)
kernels adapted from
<NAME>.
cufftShift: high performance CUDA-accelerated FFT-shift library.
Proc High Performance Computing Symposium.
2014.
<EMAIL>
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
from gputools import OCLArray, OCLProgram
from ._abspath import abspath
DTYPE_KERNEL_NAMES = {np.float32:"fftshift_1_f",
np.complex64:"fftshift_1_c"}
def fftshift(arr_obj, axes = None, res_g = None, return_buffer = False):
"""
gpu version of fftshift for numpy arrays or OCLArrays
Parameters
----------
arr_obj: numpy array or OCLArray (float32/complex64)
the array to be fftshifted
axes: list or None
the axes over which to shift (like np.fft.fftshift)
if None, all axes are taken
res_g:
if given, fills it with the result (has to be same shape and dtype as arr_obj)
else internally creates a new one
Returns
-------
if return_buffer, returns the result as (well :) OCLArray
else returns the result as numpy array
"""
if axes is None:
axes = list(range(arr_obj.ndim))
if isinstance(arr_obj, OCLArray):
if not arr_obj.dtype.type in DTYPE_KERNEL_NAMES:
raise NotImplementedError("only works for float32 or complex64")
elif isinstance(arr_obj, np.ndarray):
if np.iscomplexobj(arr_obj):
arr_obj = OCLArray.from_array(arr_obj.astype(np.complex64,copy = False))
else:
arr_obj = OCLArray.from_array(arr_obj.astype(np.float32,copy = False))
else:
raise ValueError("unknown type (%s)"%(type(arr_obj)))
if not np.all([arr_obj.shape[a]%2==0 for a in axes]):
raise NotImplementedError("only works on axes of even dimensions")
if res_g is None:
res_g = OCLArray.empty_like(arr_obj)
# iterate over all axes
# FIXME: this is still rather inefficient
in_g = arr_obj
for ax in axes:
_fftshift_single(in_g, res_g, ax)
in_g = res_g
if return_buffer:
return res_g
else:
return res_g.get()
def _fftshift_single(d_g, res_g, ax = 0):
"""
basic fftshift of an OCLArray
    shape(d_g) = [N_0, N_1, ..., N, ..., N_{k-1}, N_k]
               = [N1, N, N2]
    then we can address each element in the flat buffer by
index = i + N2*j + N2*N*k
where i = 1 .. N2
j = 1 .. N
k = 1 .. N1
and the swap of elements is performed on the index j
"""
dtype_kernel_name = {np.float32:"fftshift_1_f",
np.complex64:"fftshift_1_c"
}
N = d_g.shape[ax]
N1 = 1 if ax==0 else np.prod(d_g.shape[:ax])
N2 = 1 if ax == len(d_g.shape)-1 else np.prod(d_g.shape[ax+1:])
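    # e.g. for d_g.shape == (4, 6, 8) and ax == 1: N1 = 4, N = 6, N2 = 8, and the kernel
    # swaps the two halves along the shifted axis for each of the N1*N2 (i, k) pairs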
dtype = d_g.dtype.type
prog = OCLProgram(abspath("kernels/fftshift.cl"))
prog.run_kernel(dtype_kernel_name[dtype],(N2,N//2,N1),None,
d_g.data, res_g.data,
np.int32(N),
np.int32(N2))
return res_g
#
# def _fftshift_core(d_g, res_g, axes = 1):
# """
# basic fftshift of a OCLArray
# """
#
# dtype_kernel_name = {np.float32:"fftshift_1_f",
# np.complex64:"fftshift_1_c"
# }
#
# N = d_g.shape[axes]
# N_pref = 1 if axes==0 else np.prod(d_g.shape[:axes])
# N_post = 1 if axes == len(d_g.shape)-1 else np.prod(d_g.shape[axes+1:])
#
# if axes == 0:
# stride1 = d_g.shape[1]
# stride2 = 1
# if axes == 1:
# stride1 = 1
# stride2 = d_g.shape[1]
#
# # stride1 = N_post
# # stride2 = N_pref
# # #offset = N_pref
# offset = 0
#
#
# print "strides: ", stride1, stride2
#
# print N_pref, N, N_post
#
# dtype = d_g.dtype.type
#
# prog = OCLProgram(abspath("kernels/fftshift.cl"))
# prog.run_kernel(dtype_kernel_name[dtype],(N/2,N_pref*N_post),None,
# d_g.data, res_g.data,
# np.int32(N),
# np.int32(stride1),
# np.int32(stride2),
# np.int32(offset))
#
#
# return res_g
#
# def fftshift1(d_g):
# """
# 1d fftshift inplace
#
# see
#
# """
#
# N, = d_g.shape
#
# dtype_kernel_name = {np.float32:"fftshift_1_f",
# np.complex64:"fftshift_1_c"
# }
# dtype = d_g.dtype.type
#
# if not isinstance(d_g, OCLArray):
# raise ValueError("only works on OCLArrays")
#
# if not dtype in dtype_kernel_name.keys():
# raise NotImplementedError("only works for float32 or complex64")
#
# if not N%2==0:
# raise NotImplementedError("only works on even length arryas")
#
# prog = OCLProgram(abspath("kernels/fftshift.cl"))
# prog.run_kernel(dtype_kernel_name[dtype],(N/2,),None,
# d_g.data, d_g.data, np.int32(N))
#
# return d_g
#
# def fftshift2(d_g):
# """
# 2d fftshift inplace
# """
#
#
# Ny, Nx = d_g.shape
#
# dtype_kernel_name = {np.float32:"fftshift_2_f",
# np.complex64:"fftshift_2_c"
# }
# dtype = d_g.dtype.type
#
# if not isinstance(d_g, OCLArray):
# raise ValueError("only works on OCLArrays")
#
# if not dtype in dtype_kernel_name.keys():
# raise NotImplementedError("only works for float32 or complex64")
#
# if not np.all([n%2==0 for n in d_g.shape]):
# raise NotImplementedError("only works on even length arryas")
#
# prog = OCLProgram(abspath("kernels/fftshift.cl"))
# prog.run_kernel(dtype_kernel_name[dtype],(Nx,Ny,),None,
# d_g.data, d_g.data,
# np.int32(Nx), np.int32(Ny))
# return d_g
if __name__ == '__main__':
Nx, Ny, Nz = (256,)*3
d = np.linspace(0,1,Nx*Ny*Nz).reshape(Nz, Ny,Nx).astype(np.float32)
d[Nz//2-30:Nz//2+30,Ny//2-20:Ny//2+20,Nx//2-20:Nx//2+20] = 2.
d_g = OCLArray.from_array(d)
out_g = OCLArray.empty_like(d)
out = fftshift(d, axes= (0,1,2))
|
[
"numpy.iscomplexobj",
"gputools.OCLArray.empty_like",
"numpy.prod",
"numpy.int32",
"numpy.linspace",
"gputools.OCLArray.from_array",
"numpy.all"
] |
[((6082, 6104), 'gputools.OCLArray.from_array', 'OCLArray.from_array', (['d'], {}), '(d)\n', (6101, 6104), False, 'from gputools import OCLArray, OCLProgram\n'), ((6117, 6139), 'gputools.OCLArray.empty_like', 'OCLArray.empty_like', (['d'], {}), '(d)\n', (6136, 6139), False, 'from gputools import OCLArray, OCLProgram\n'), ((1787, 1838), 'numpy.all', 'np.all', (['[(arr_obj.shape[a] % 2 == 0) for a in axes]'], {}), '([(arr_obj.shape[a] % 2 == 0) for a in axes])\n', (1793, 1838), True, 'import numpy as np\n'), ((1948, 1976), 'gputools.OCLArray.empty_like', 'OCLArray.empty_like', (['arr_obj'], {}), '(arr_obj)\n', (1967, 1976), False, 'from gputools import OCLArray, OCLProgram\n'), ((2793, 2816), 'numpy.prod', 'np.prod', (['d_g.shape[:ax]'], {}), '(d_g.shape[:ax])\n', (2800, 2816), True, 'import numpy as np\n'), ((2859, 2886), 'numpy.prod', 'np.prod', (['d_g.shape[ax + 1:]'], {}), '(d_g.shape[ax + 1:])\n', (2866, 2886), True, 'import numpy as np\n'), ((3094, 3105), 'numpy.int32', 'np.int32', (['N'], {}), '(N)\n', (3102, 3105), True, 'import numpy as np\n'), ((3127, 3139), 'numpy.int32', 'np.int32', (['N2'], {}), '(N2)\n', (3135, 3139), True, 'import numpy as np\n'), ((1495, 1519), 'numpy.iscomplexobj', 'np.iscomplexobj', (['arr_obj'], {}), '(arr_obj)\n', (1510, 1519), True, 'import numpy as np\n'), ((5940, 5971), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(Nx * Ny * Nz)'], {}), '(0, 1, Nx * Ny * Nz)\n', (5951, 5971), True, 'import numpy as np\n')]
|
# File for the p4p Project
import numpy as np
__version__ = "0.0.1"
# initial values
g = -9.8
def abs_value(vector):
"""
Calculates the length of a vector
Args:
        vector (np.array) the vector whose length is computed
Returns:
Length (float) Length of the Vector
"""
return np.sqrt(np.sum(np.square(vector)))
class Tether:
"""
Tether object to fix the springs to
Attr:
        self._position is the position of the point if given, (0, 0) otherwise
"""
def __init__(self, position=np.zeros(2)):
self._position = position
self._initpos = position
self._velocity = 0
self._initvel = 0
self.mass = 0
def __str__(self):
return str(self._position)
@property
def velocity(self):
return self._initvel
@property
def position(self):
return self._initpos
@position.setter
def position(self, position):
self._position = position
@velocity.setter
def velocity(self, velocity):
self._velocity = velocity
def acceleration(self, spring, position0, position1):
return 0
class MassPoint:
"""
Masspoint is a point with a mass that is fixed to a spring.
"""
def __init__(self, mass, position, velocity):
"""
Construct a MassPoint
Attr:
self._mass = Mass of the MassPoint
self._position(np.array) = Position of the MassPoint
self._velocity(np.array) = Velocity of the MassPoint
"""
self._mass = mass
self._position = position
self._velocity = velocity
def __str__(self):
        return f"{self._mass} at {self._position} with speed {self._velocity}"
# setters and getters for MassPoint
@property
def mass(self):
return self._mass
@mass.setter
def mass(self, value):
self._mass = value
@property
def position(self):
return self._position
@position.setter
def position(self, value):
self._position = value
@property
def velocity(self):
return self._velocity
@velocity.setter
def velocity(self, value):
self._velocity = value
def acceleration(self, spring, position0, position1):
"""
gives acceleration for 1 point at 1 spring
Args:
spring (Spring) Spring for which to calculate the effect on the mass point
position0 (np.arrays) Position of the spring
position1 (np.arrays) Position of the spring
Returns:
dv (np.array): acceleration
"""
if abs_value(position0 - position1) == 0:
dv = 0
else:
dv = spring.stiffness * (abs_value(position0 - position1) - spring.rest_length) / self.mass * (
position1 - position0) / (abs_value(position0 - position1))
return dv
class Spring:
"""
    A Spring that can be used to connect masses to other masses or a mass to a Tether
"""
def __init__(self, stiffness=0, rest_length=0):
"""Initialize a Spring with a stiffness and a rest length. Both connectors have to be specified
Arguments:
stiffness (float): spring stiffness
rest_length (float): rest length used to calculate the length of the spring
Attr:
self._stiffness (float) = stiffness of the spring
self._rest_length (float)= rest_length of the spring
"""
self._stiffness = stiffness # variable
self._rest_length = rest_length # variable
def __str__(self):
        return (
            f"This spring boings with a stiffness of {self.stiffness}. The rest length of the spring is {self.rest_length}")
# setters and getters
@property
def stiffness(self):
return self._stiffness
@stiffness.setter
def stiffness(self, value):
self._stiffness = value
@property
def rest_length(self):
return self._rest_length
@rest_length.setter
def rest_length(self, value):
self._rest_length = value
class SpringMassSystem:
"""
sets up a SpringMassSystem to be calculated and animated. Springs, Tethers and Masses need to be created beforehand!
"""
def __init__(self, points, k_matrix, time_step=.001, gravity=np.array([0, g])):
"""
Initializes a SpringMassSystem that will be calculated
Arguments:
            points (tuple): All Masses and Tethers of the MassSystem
            k_matrix (np.array): matrix of spring stiffnesses; entry (i, j) is the stiffness of the spring connecting points i and j (0 means no spring)
            time_step (float): the time step to integrate over. Default value is 0.001; make sure not to make it too small.
            gravity (np.array): The gravity of the MassSystem. Pointing down = negative y component. Default value is np.array([0, -9.8]).
Attr:
self._points (tuple): All Masses and Tethers of the system
self._gravity (float)= gravity that is acting on the MassSystem
self._time_step (float)= time step that is used to integrate
self._springs (tuple) = springs of the MassSystem
"""
self._points = points
self._gravity = gravity
self._time_step = time_step
# finds the count of springs (relevant later)
self.spring_count = 0
for i in range(k_matrix.shape[0]):
for j in range(k_matrix.shape[1]):
if k_matrix[i, j] != 0:
self.spring_count += 1
self.spring_count = self.spring_count / 2
self.spring_matrix = [['null' for j in range(len(self.points))] for i in range(
len(self.points))] # make a matrix filled with springs where i,j indicate connection
for i in range(len(self.points)):
for j in range(len(self.points)):
                # the spring length computed this way may not be fully general, but it should fit simple systems
springlength = abs_value(self.points[i].position - self.points[j].position)
self.spring_matrix[i][j] = Spring(k_matrix[i][j], springlength)
self.spring_matrix = np.array(self.spring_matrix)
def __str__(self):
"""
A nice text representation of the SpringMassSystem
"""
return (
f"This systems points {self.points} are connected with {self.springs} and the gravity is {self.gravity}")
@property
def gravity(self):
return self._gravity
@property
def time_step(self):
return self._time_step
@property
def springs(self):
return self.spring_matrix
@property
def points(self):
return self._points
def equation_of_motion(self, point, velocity, position):
"""
Returns the equation of motion for the point
        Arguments: point (int): index of the mass point whose acceleration is summed up over all
                   other points (with the new_dv() function)
velocity: (np.array) Array containing the velocities
position: (np.array) Array containing the positions
"""
dv = self.new_dv(point, position)
dx = velocity[point, :]
return dv, dx
def new_dv(self, point, position):
"""
Calculates the new dv with the spring matrix for point "point"
Arguments:
            point (int): index of the mass point
position: (np.array)
"""
dv = self.gravity
for i in range(len(self.points)):
if i != point:
dv = dv + self.points[point].acceleration(self.spring_matrix[point, i], position[point, :],
position[i, :])
return dv
def change(self):
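        """
        Advances the system by one time step with a classical Runge-Kutta (RK4) step:
        k_1 ... k_4 hold the (dv, dx) evaluations, Tethers are kept fixed, and the
        position and velocity of every point are updated in place.
        """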
new_vel = np.zeros((len(self.points), len(self.points[0].position)))
new_pos = np.zeros((len(self.points), len(self.points[0].position)))
k_1 = np.zeros((len(self.points), 2, len(self.points[0].position)))
k_2 = np.zeros((len(self.points), 2, len(self.points[0].position)))
k_3 = np.zeros((len(self.points), 2, len(self.points[0].position)))
k_4 = np.zeros((len(self.points), 2, len(self.points[0].position)))
for i in range(len(self.points)):
new_vel[i, :] = self.points[i].velocity # gets initial position into vel and pos (for the first step)
new_pos[i, :] = self.points[i].position
for i in range(len(self.points)):
k_1[i, :, :] = self.equation_of_motion(i, new_vel, new_pos)
for i in range(len(self.points)):
k_2[i, :, :] = self.equation_of_motion(i, new_vel + (self.time_step / 2) * k_1[:, 0, :],
new_pos + (self.time_step / 2) * k_1[:, 1, :])
for i in range(len(self.points)):
k_3[i, :, :] = self.equation_of_motion(i, new_vel + (self.time_step / 2) * k_2[:, 0, :],
new_pos + (self.time_step / 2) * k_2[:, 1, :])
for i in range(len(self.points)):
k_4[i, :, :] = self.equation_of_motion(i, new_vel + self.time_step * k_3[:, 0, :],
new_pos + self.time_step * k_3[:, 1, :])
for i in range(len(self.points)):
if isinstance(self.points[i], Tether):
new_vel[i, :] = 0
new_pos[i, :] = self.points[i].position
else:
# new_vel[i,:] = self.points[i].velocity + dv*self.time_step
new_vel[i, :] = self.points[i].velocity + self.time_step / 6 * (
k_1[i, 0, :] + 2 * k_2[i, 0, :] + 2 * k_3[i, 0, :] + k_4[i, 0, :])
new_pos[i, :] = self.points[i].position + self.time_step / 6 * (
k_1[i, 1, :] + 2 * k_2[i, 1, :] + 2 * k_3[i, 1, :] + k_4[i, 1, :])
self.points[i].position = new_pos[i, :]
self.points[i].velocity = new_vel[i, :]
def simulate(self, t):
"""
Runs the simulation,
Args:
t: (float) the total time the simulation should run for.
Returns:
            result, velocity: (np.array, np.array) the position history and the velocity history of all points.
"""
steps_taken = int(np.round(t / self.time_step))
result = np.zeros((len(self.points[0].position), steps_taken + 1,
len(self.points))) # contains [2 dimensions for space, the steps taken, and the points]
velocity = np.zeros((len(self.points[1].position), steps_taken + 1, len(self.points)))
for i in range(len(self.points)):
result[:, 0, i] = self.points[i].position
velocity[:, 0, i] = self.points[i].velocity
for i in range(steps_taken):
self.change()
for j in range(len(self.points)):
result[:, i + 1, j] = self.points[j].position
velocity[:, i + 1, j] = self.points[j].velocity
return result, velocity
def energy_step(self, step, result_matrix, velocity_matrix):
energy_mass_at_step = 0
energy_spring_at_step = 0
steps_taken = result_matrix.shape[1]
for i in range(len(self.points)):
v_squared = abs_value(velocity_matrix[:, step, i]) ** 2
energy_mass_at_step = energy_mass_at_step + self.points[i].mass * -self.gravity[1] * (
result_matrix[1, step, i]) + 0.5 * self.points[i].mass * v_squared
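        # the 0.25 factor (rather than 0.5) presumably compensates for each spring being
        # counted twice in the double loop below, once as (i, j) and once as (j, i)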
for i in range(self.spring_matrix.shape[0]):
for j in range(self.spring_matrix.shape[1]):
spring_stretch = abs_value(result_matrix[:, step, i] - result_matrix[:, step, j])
energy_spring_at_step = energy_spring_at_step + 0.25 * self.spring_matrix[i, j].stiffness * (
self.spring_matrix[i, j].rest_length - spring_stretch) ** 2
return energy_spring_at_step + energy_mass_at_step
def energy(self, result_matrix, velocity_matrix):
steps_taken = result_matrix.shape[1]
energy = np.zeros(steps_taken)
for i in range(steps_taken):
energy[i] = self.energy_step(i, result_matrix, velocity_matrix)
return energy
def kin_energy_step(self, step, velocity_matrix):
kin_energy_at_step = 0
for i in range(len(self.points)):
v_squared = abs_value(velocity_matrix[:, step, i]) ** 2
kin_energy_at_step = kin_energy_at_step + 1 / 2 * self.points[i].mass * v_squared
return kin_energy_at_step
def kin_energy(self, result_matrix, velocity_matrix):
steps_taken = result_matrix.shape[1]
kin_energy = np.zeros(steps_taken)
for i in range(steps_taken):
kin_energy[i] = self.kin_energy_step(i, velocity_matrix)
return kin_energy
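
# Minimal usage sketch: one MassPoint hanging from a Tether by a single spring.
# The numbers below (mass, stiffness, simulated time) are illustrative only.
if __name__ == '__main__':
    tether = Tether(position=np.array([0.0, 0.0]))
    mass = MassPoint(mass=1.0, position=np.array([0.0, -1.0]), velocity=np.array([0.0, 0.0]))
    # entry (i, j) of k_matrix is the stiffness of the spring between points i and j
    k_matrix = np.array([[0.0, 10.0], [10.0, 0.0]])
    system = SpringMassSystem(points=(tether, mass), k_matrix=k_matrix, time_step=0.001)
    result, velocity = system.simulate(0.1)
    print(result.shape, velocity.shape)  # (2, steps + 1, number of points)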
|
[
"numpy.array",
"numpy.round",
"numpy.square",
"numpy.zeros"
] |
[((553, 564), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (561, 564), True, 'import numpy as np\n'), ((4368, 4384), 'numpy.array', 'np.array', (['[0, g]'], {}), '([0, g])\n', (4376, 4384), True, 'import numpy as np\n'), ((6299, 6327), 'numpy.array', 'np.array', (['self.spring_matrix'], {}), '(self.spring_matrix)\n', (6307, 6327), True, 'import numpy as np\n'), ((12279, 12300), 'numpy.zeros', 'np.zeros', (['steps_taken'], {}), '(steps_taken)\n', (12287, 12300), True, 'import numpy as np\n'), ((12886, 12907), 'numpy.zeros', 'np.zeros', (['steps_taken'], {}), '(steps_taken)\n', (12894, 12907), True, 'import numpy as np\n'), ((341, 358), 'numpy.square', 'np.square', (['vector'], {}), '(vector)\n', (350, 358), True, 'import numpy as np\n'), ((10497, 10525), 'numpy.round', 'np.round', (['(t / self.time_step)'], {}), '(t / self.time_step)\n', (10505, 10525), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data
import numpy as np
import os,time
import model
import h5py
import itertools
import utility
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import argparse
import pickle
import librosa
class Dataset_4(Data.Dataset):
def __init__(self, data_tensor, target_tensor1, target_tensor2, target_tensor3):
assert data_tensor.size(0) == target_tensor1.size(0)
self.data_tensor = data_tensor
self.target_tensor1 = target_tensor1
self.target_tensor2 = target_tensor2
self.target_tensor3 = target_tensor3
def __getitem__(self, index):
return self.data_tensor[index], self.target_tensor1[index], self.target_tensor2[index], self.target_tensor3[index]
def __len__(self):
return self.data_tensor.size(0)
class Dataset_3(Data.Dataset):
def __init__(self, data_tensor, target_tensor1, target_tensor2):
assert data_tensor.size(0) == target_tensor1.size(0)
self.data_tensor = data_tensor
self.target_tensor1 = target_tensor1
self.target_tensor2 = target_tensor2
def __getitem__(self, index):
return self.data_tensor[index], self.target_tensor1[index], self.target_tensor2[index]
def __len__(self):
return self.data_tensor.size(0)
class Dataset_2(Data.Dataset):
def __init__(self, data_tensor, target_tensor):
assert data_tensor.size(0) == target_tensor.size(0)
self.data_tensor = data_tensor
self.target_tensor = target_tensor
def __getitem__(self, index):
return self.data_tensor[index], self.target_tensor[index]
def __len__(self):
return self.data_tensor.size(0)
class Dataset_1(Data.Dataset):
def __init__(self, data_tensor):
self.data_tensor = data_tensor
def __getitem__(self, index):
return self.data_tensor[index]
def __len__(self):
return self.data_tensor.size(0)
def main(classes_num=20, gid=0, random_state=0, \
bs=100, learn_rate=0.0001, \
val_num=1, stop_num=20,
origin=True, vocal=True, remix=True,
CRNN_model=True, CRNNx2_model=False,
debug=False):
start_time = time.time()
save_folder = '../save/'+str(random_state)+'/'
if origin and vocal and remix:
save_folder = save_folder + '/all/'
elif origin:
save_folder = save_folder + '/ori/'
elif vocal:
save_folder = save_folder + '/voc/'
elif remix:
save_folder = save_folder + '/remix/'
if not os.path.exists(save_folder+'/model/'):
os.makedirs(save_folder+'/model/')
if not os.path.exists(save_folder+'/result/'):
os.makedirs(save_folder+'/result/')
epoch_num = 10000
print('Loading pretrain model ...')
# Classifier = model.CRNN2D_elu(224,classes_num)
# Classifier.float()
# Classifier.cuda()
# Classifier.train()
if CRNN_model:
Classifier = model.CRNN2D_elu(224,classes_num)
Classifier.float()
Classifier.cuda()
Classifier.train()
elif CRNNx2_model:
Classifier = model.CRNN2D_elu2(288,classes_num)
Classifier.float()
Classifier.cuda()
Classifier.train()
print('Loading training data ...')
artist_folder=f'/home/bill317996/189/homes/kevinco27/dataset/artist20_mix'
song_folder=f'/home/bill317996/189/homes/kevinco27/ICASSP2020_meledy_extraction/music-artist-classification-crnn/song_data_mix'
voc_folder=f'/home/bill317996/189/homes/kevinco27/ICASSP2020_meledy_extraction/music-artist-classification-crnn/song_data_open_unmix_vocal_2'
bgm_folder = f'/home/bill317996/189/homes/kevinco27/ICASSP2020_meledy_extraction/music-artist-classification-crnn/song_data_open_unmix_kala'
# random_states = [0,21,42]
if debug:
Y_train, X_train, S_train, V_train, B_train,\
Y_test, X_test, S_test, V_test, B_test,\
Y_val, X_val, S_val, V_val, B_val = \
np.zeros(11437, dtype=int), np.zeros((11437, 128, 157)), np.zeros(11437), np.zeros((11437, 128, 157)), np.zeros((11437, 128, 157)), \
np.zeros(11437, dtype=int), np.zeros((11437, 128, 157)), np.zeros(11437), np.zeros((11437, 128, 157)), np.zeros((11437, 128, 157)), \
np.zeros(11437, dtype=int), np.zeros((11437, 128, 157)), np.zeros(11437), np.zeros((11437, 128, 157)), np.zeros((11437, 128, 157))
Y_train[0] = 1
Y_val[0] = 1
Y_test[0] = 1
else:
Y_train, X_train, S_train, V_train, B_train,\
Y_test, X_test, S_test, V_test, B_test,\
Y_val, X_val, S_val, V_val, B_val = \
utility.load_dataset_album_split_da(song_folder_name=song_folder,
artist_folder=artist_folder,
voc_song_folder=voc_folder,
bgm_song_folder=bgm_folder,
nb_classes=classes_num,
random_state=random_state)
if not debug:
print("Loaded and split dataset. Slicing songs...")
slice_length = 157
# Create slices out of the songs
X_train, Y_train, S_train, V_train, B_train = utility.slice_songs_da(X_train, Y_train, S_train, V_train, B_train,
length=slice_length)
X_val, Y_val, S_val, V_val, B_val = utility.slice_songs_da(X_val, Y_val, S_val, V_val, B_val,
length=slice_length)
X_test, Y_test, S_test, V_test, B_test = utility.slice_songs_da(X_test, Y_test, S_test, V_test, B_test,
length=slice_length)
print("Training set label counts:", np.unique(Y_train, return_counts=True))
# # Encode the target vectors into one-hot encoded vectors
Y_train, le, enc = utility.encode_labels(Y_train)
Y_test, le, enc = utility.encode_labels(Y_test, le, enc)
Y_val, le, enc = utility.encode_labels(Y_val, le, enc)
Y_train = Y_train[:,0]
Y_test = Y_test[:,0]
Y_val = Y_val[:,0]
print(X_train.shape, Y_train.shape, S_train.shape, V_train.shape, B_train.shape)
print(X_val.shape, Y_val.shape, S_val.shape, V_val.shape, B_val.shape)
print(X_test.shape, Y_test.shape, S_test.shape, V_test.shape, B_test.shape)
#####################################
# numpy to tensor to data_loader
# train
X_train = torch.from_numpy(X_train).float()
Y_train = torch.from_numpy(Y_train).long()
V_train = torch.from_numpy(V_train).float()
B_train = torch.from_numpy(B_train).float()
if origin:
original_set = Dataset_2(data_tensor=X_train, target_tensor=Y_train)
original_loader = Data.DataLoader(dataset=original_set, batch_size=bs, shuffle=True)
if vocal or remix:
vocal_set = Dataset_2(data_tensor=V_train, target_tensor=Y_train)
vocal_loader = Data.DataLoader(dataset=vocal_set, batch_size=bs, shuffle=True)
if remix:
bgm_set = Dataset_1(data_tensor=B_train)
bgm_loader = Data.DataLoader(dataset=bgm_set, batch_size=bs, shuffle=True)
# val
if vocal and not origin:
X_val = torch.from_numpy(V_val).float()
Y_val = torch.from_numpy(Y_val).long()
else:
X_val = torch.from_numpy(X_val).float()
Y_val = torch.from_numpy(Y_val).long()
val_set = Dataset_2(data_tensor=X_val, target_tensor=Y_val)
val_loader = Data.DataLoader(dataset=val_set, batch_size=bs, shuffle=False)
# Test
X_test = torch.from_numpy(X_test).float()
Y_test = torch.from_numpy(Y_test).long()
V_test = torch.from_numpy(V_test).float()
test_o_set = Dataset_4(data_tensor=X_test, target_tensor1=Y_test, target_tensor2=S_test, target_tensor3=V_test)
test_o_loader = Data.DataLoader(dataset=test_o_set, batch_size=bs, shuffle=False)
# test_v_set = Dataset_3(data_tensor=V_test, target_tensor1=Y_test, target_tensor2=S_test)
# test_v_loader = Data.DataLoader(dataset=test_v_set, batch_size=bs, shuffle=False)
#####################################
best_epoch = 0
best_F1 = 0
CELoss = nn.CrossEntropyLoss()
opt = optim.Adam(Classifier.parameters(),lr=learn_rate)
print('Start training ...')
# start_time = time.time()
early_stop_flag = False
for epoch in range(epoch_num):
if early_stop_flag:
print('rs: ', random_state)
print('Origin: ', origin, ' | Vocal: ', vocal, ' | Remix: ', remix)
print('CRNN: ', CRNN_model, ' | CRNNx2: ', CRNNx2_model)
print(' best_epoch: ', best_epoch, ' | best_val_F1: %.2f'% best_F1)
print(' Test original | frame level: %.2f'% test_F1_frame_o, ' | songs level: %.2f'% test_F1_songs_o)
if vocal:
print(' Test vocal | frame level: %.2f'% test_F1_frame_v, ' | songs level: %.2f'% test_F1_songs_v)
break
if stop_num:
if epoch - best_epoch >= stop_num:
early_stop_flag = True
print('Early Stop!')
all_loss = 0
Classifier.train()
if origin:
for step, (batch_x, batch_y) in enumerate(original_loader):
opt.zero_grad()
batch_x = batch_x.cuda()
batch_y = batch_y.cuda()
batch_h = torch.randn(1, batch_x.size(0), 32).cuda()
pred_y, emb = Classifier(batch_x, batch_h)
loss = CELoss(pred_y, batch_y)
loss.backward()
opt.step()
all_loss += loss
if vocal:
for step, (batch_x, batch_y) in enumerate(vocal_loader):
opt.zero_grad()
batch_x = batch_x.cuda()
batch_y = batch_y.cuda()
batch_h = torch.randn(1, batch_x.size(0), 32).cuda()
pred_y, emb = Classifier(batch_x, batch_h)
loss = CELoss(pred_y, batch_y)
loss.backward()
opt.step()
all_loss += loss
if remix:
for step, ((batch_x, batch_y), batch_b) in enumerate(zip(vocal_loader,bgm_loader)):
opt.zero_grad()
batch_x = batch_x.cuda()
batch_y = batch_y.cuda()
batch_h = torch.randn(1, batch_x.size(0), 32).cuda()
batch_b = batch_b.cuda()
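                # remix augmentation: undo the dB scaling (10**(x/10)), add vocal and
                # backing track in the power domain, then convert the mix back to dB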
batch_x = 10.0*torch.log10((10.0**(batch_x/10.0)) + (10.0**(batch_b/10.0)))
pred_y, emb = Classifier(batch_x, batch_h)
loss = CELoss(pred_y, batch_y)
loss.backward()
opt.step()
all_loss += loss
print('epoch: ', epoch, ' | Loss: %.4f'% all_loss, ' | time: %.2f'% (time.time()-start_time), '(s)')
start_time = time.time()
if epoch % val_num == 0:
Classifier.eval()
frame_true = []
frame_pred = []
for step, (batch_x, batch_y) in enumerate(val_loader):
batch_x = batch_x.cuda()
batch_y = batch_y.cuda()
batch_h = torch.randn(1, batch_x.size(0), 32).cuda()
pred_y, emb = Classifier(batch_x, batch_h)
pred_y = pred_y.detach().cpu().numpy()
batch_y = batch_y.detach().cpu().numpy()
for i in range(len(pred_y)):
frame_true.append(batch_y[i])
frame_pred.append(np.argmax(pred_y[i]) )
val_F1 = f1_score(frame_true, frame_pred, average='weighted')
print(' val F1: %.2f'% val_F1)
if best_F1 < val_F1:
best_F1 = val_F1
best_epoch = epoch
print(' best_epoch: ', best_epoch, ' | best_val_F1: %.2f'% best_F1)
torch.save({'Classifier_state_dict': Classifier.state_dict()
}, save_folder+'/model/CRNN2D_elu_model_state_dict')
frame_true = []
frame_pred = []
songs_true = []
songs_pred = []
songs_list = []
songs_vote_dict = {}
songs_true_dict = {}
emb_list = []
for step, (batch_x, batch_y, batch_song, batch_v) in enumerate(test_o_loader):
batch_x = batch_x.cuda()
batch_y = batch_y.cuda()
batch_h = torch.randn(1, batch_x.size(0), 32).cuda()
pred_y, emb = Classifier(batch_x, batch_h)
pred_y = pred_y.detach().cpu().numpy()
batch_y = batch_y.detach().cpu().numpy()
emb = emb.detach().cpu().numpy()
batch_v = batch_v.detach().cpu().numpy()
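                    # song-level predictions are made by majority vote: every frame adds a
                    # one-hot vote for its predicted class to the song it belongs to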
for i in range(len(pred_y)):
frame_true.append(batch_y[i])
frame_pred.append(np.argmax(pred_y[i]))
emb_list.append(emb[i])
onehot = np.zeros(20)
onehot[np.argmax(pred_y[i])] += 1
if batch_song[i] not in songs_list:
songs_list.append(batch_song[i])
songs_true_dict[batch_song[i]] = batch_y[i]
songs_vote_dict[batch_song[i]] = onehot
else:
songs_vote_dict[batch_song[i]] += onehot
for song in songs_list:
songs_true.append(songs_true_dict[song])
songs_pred.append(np.argmax(songs_vote_dict[song]))
np.savez(save_folder+'/result/ori_result.npz', \
pred=np.array(frame_pred), true=np.array(frame_true), emb=np.array(emb_list))
test_F1_frame_o = f1_score(frame_true, frame_pred, average='weighted')
test_F1_songs_o = f1_score(songs_true, songs_pred, average='weighted')
print(' Test original | frame level: %.2f'% test_F1_frame_o, ' | songs level: %.2f'% test_F1_songs_o)
if vocal:
frame_true = []
frame_pred = []
songs_true = []
songs_pred = []
songs_list = []
songs_vote_dict = {}
songs_true_dict = {}
for step, (batch_x, batch_y, batch_song, batch_v) in enumerate(test_o_loader):
batch_x = batch_v.cuda()
batch_y = batch_y.cuda()
batch_h = torch.randn(1, batch_x.size(0), 32).cuda()
pred_y, emb = Classifier(batch_x, batch_h)
pred_y = pred_y.detach().cpu().numpy()
batch_y = batch_y.detach().cpu().numpy()
for i in range(len(pred_y)):
frame_true.append(batch_y[i])
frame_pred.append(np.argmax(pred_y[i]))
onehot = np.zeros(20)
onehot[np.argmax(pred_y[i])] += 1
if batch_song[i] not in songs_list:
songs_list.append(batch_song[i])
songs_true_dict[batch_song[i]] = batch_y[i]
songs_vote_dict[batch_song[i]] = onehot
else:
songs_vote_dict[batch_song[i]] += onehot
for song in songs_list:
songs_true.append(songs_true_dict[song])
songs_pred.append(np.argmax(songs_vote_dict[song]))
test_F1_frame_v = f1_score(frame_true, frame_pred, average='weighted')
test_F1_songs_v = f1_score(songs_true, songs_pred, average='weighted')
print(' Test vocal | frame level: %.2f'% test_F1_frame_v, ' | songs level: %.2f'% test_F1_songs_v)
def parser():
p = argparse.ArgumentParser()
p.add_argument('-class', '--classes_num', type=int, default=20)
p.add_argument('-gid', '--gpu_index', type=int, default=0)
p.add_argument('-bs', '--batch_size', type=int, default=100)
p.add_argument('-lr', '--learn_rate', type=float, default=0.0001)
p.add_argument('-val', '--val_num', type=int, default=1)
p.add_argument('-stop', '--stop_num', type=int, default=20)
p.add_argument('-rs', '--random_state', type=int, default=0)
p.add_argument('--origin', dest='origin', action='store_true')
p.add_argument('--vocal', dest='vocal', action='store_true')
p.add_argument('--remix', dest='remix', action='store_true')
p.add_argument('--all', dest='all', action='store_true')
p.add_argument('--CRNNx2', dest='CRNNx2', action='store_true')
p.add_argument('--debug', dest='debug', action='store_true')
return p.parse_args()
if __name__ == '__main__':
args = parser()
classes_num = args.classes_num
gid = args.gpu_index
bs = args.batch_size
learn_rate = args.learn_rate
val_num = args.val_num
stop_num = args.stop_num
random_state = args.random_state
origin = args.origin
vocal = args.vocal
remix = args.remix
if args.all:
origin = True
vocal = True
remix = True
    if args.CRNNx2:
CRNNx2_model = True
CRNN_model = False
else:
CRNN_model = True
CRNNx2_model = False
debug = args.debug
print('Singers classification with CRNN2D')
print('Update in 20191016: artist20 ')
print('=======================')
print('classes_num', classes_num)
print('gpu_index: ', gid, ' | random_state: ', random_state)
print('bs: ',bs, ' | lr: %.5f'% learn_rate)
print('val_num: ', val_num, ' | stop_num: ', stop_num)
print('Origin: ', origin, ' | Vocal: ', vocal, ' | Remix: ', remix)
print('CRNN: ', CRNN_model, ' | CRNNx2: ', CRNNx2_model)
print('debug: ', debug)
print('=======================')
with torch.cuda.device(gid):
main(classes_num=classes_num, gid=gid, random_state=random_state, \
bs=bs, learn_rate=learn_rate, \
val_num=val_num, stop_num=stop_num,
origin=origin, vocal=vocal, remix=remix,
CRNN_model=CRNN_model, CRNNx2_model=CRNNx2_model,
debug=debug
)
|
[
"argparse.ArgumentParser",
"numpy.argmax",
"utility.encode_labels",
"model.CRNN2D_elu",
"utility.load_dataset_album_split_da",
"sklearn.metrics.f1_score",
"numpy.unique",
"model.CRNN2D_elu2",
"torch.utils.data.DataLoader",
"os.path.exists",
"utility.slice_songs_da",
"torch.cuda.device",
"torch.from_numpy",
"os.makedirs",
"torch.nn.CrossEntropyLoss",
"numpy.zeros",
"time.time",
"numpy.array",
"torch.log10"
] |
[((2384, 2395), 'time.time', 'time.time', ([], {}), '()\n', (2393, 2395), False, 'import os, time\n'), ((7877, 7939), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'val_set', 'batch_size': 'bs', 'shuffle': '(False)'}), '(dataset=val_set, batch_size=bs, shuffle=False)\n', (7892, 7939), True, 'import torch.utils.data as Data\n'), ((8236, 8301), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'test_o_set', 'batch_size': 'bs', 'shuffle': '(False)'}), '(dataset=test_o_set, batch_size=bs, shuffle=False)\n', (8251, 8301), True, 'import torch.utils.data as Data\n'), ((8589, 8610), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8608, 8610), True, 'import torch.nn as nn\n'), ((17222, 17247), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17245, 17247), False, 'import argparse\n'), ((2736, 2775), 'os.path.exists', 'os.path.exists', (["(save_folder + '/model/')"], {}), "(save_folder + '/model/')\n", (2750, 2775), False, 'import os, time\n'), ((2784, 2820), 'os.makedirs', 'os.makedirs', (["(save_folder + '/model/')"], {}), "(save_folder + '/model/')\n", (2795, 2820), False, 'import os, time\n'), ((2831, 2871), 'os.path.exists', 'os.path.exists', (["(save_folder + '/result/')"], {}), "(save_folder + '/result/')\n", (2845, 2871), False, 'import os, time\n'), ((2880, 2917), 'os.makedirs', 'os.makedirs', (["(save_folder + '/result/')"], {}), "(save_folder + '/result/')\n", (2891, 2917), False, 'import os, time\n'), ((3161, 3195), 'model.CRNN2D_elu', 'model.CRNN2D_elu', (['(224)', 'classes_num'], {}), '(224, classes_num)\n', (3177, 3195), False, 'import model\n'), ((4880, 5094), 'utility.load_dataset_album_split_da', 'utility.load_dataset_album_split_da', ([], {'song_folder_name': 'song_folder', 'artist_folder': 'artist_folder', 'voc_song_folder': 'voc_folder', 'bgm_song_folder': 'bgm_folder', 'nb_classes': 'classes_num', 'random_state': 'random_state'}), '(song_folder_name=song_folder,\n artist_folder=artist_folder, voc_song_folder=voc_folder,\n bgm_song_folder=bgm_folder, nb_classes=classes_num, random_state=\n random_state)\n', (4915, 5094), False, 'import utility\n'), ((5523, 5616), 'utility.slice_songs_da', 'utility.slice_songs_da', (['X_train', 'Y_train', 'S_train', 'V_train', 'B_train'], {'length': 'slice_length'}), '(X_train, Y_train, S_train, V_train, B_train, length=\n slice_length)\n', (5545, 5616), False, 'import utility\n'), ((5714, 5792), 'utility.slice_songs_da', 'utility.slice_songs_da', (['X_val', 'Y_val', 'S_val', 'V_val', 'B_val'], {'length': 'slice_length'}), '(X_val, Y_val, S_val, V_val, B_val, length=slice_length)\n', (5736, 5792), False, 'import utility\n'), ((5894, 5982), 'utility.slice_songs_da', 'utility.slice_songs_da', (['X_test', 'Y_test', 'S_test', 'V_test', 'B_test'], {'length': 'slice_length'}), '(X_test, Y_test, S_test, V_test, B_test, length=\n slice_length)\n', (5916, 5982), False, 'import utility\n'), ((6225, 6255), 'utility.encode_labels', 'utility.encode_labels', (['Y_train'], {}), '(Y_train)\n', (6246, 6255), False, 'import utility\n'), ((6283, 6321), 'utility.encode_labels', 'utility.encode_labels', (['Y_test', 'le', 'enc'], {}), '(Y_test, le, enc)\n', (6304, 6321), False, 'import utility\n'), ((6348, 6385), 'utility.encode_labels', 'utility.encode_labels', (['Y_val', 'le', 'enc'], {}), '(Y_val, le, enc)\n', (6369, 6385), False, 'import utility\n'), ((7141, 7207), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'original_set', 'batch_size': 'bs', 'shuffle': 
'(True)'}), '(dataset=original_set, batch_size=bs, shuffle=True)\n', (7156, 7207), True, 'import torch.utils.data as Data\n'), ((7331, 7394), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'vocal_set', 'batch_size': 'bs', 'shuffle': '(True)'}), '(dataset=vocal_set, batch_size=bs, shuffle=True)\n', (7346, 7394), True, 'import torch.utils.data as Data\n'), ((7482, 7543), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'bgm_set', 'batch_size': 'bs', 'shuffle': '(True)'}), '(dataset=bgm_set, batch_size=bs, shuffle=True)\n', (7497, 7543), True, 'import torch.utils.data as Data\n'), ((11522, 11533), 'time.time', 'time.time', ([], {}), '()\n', (11531, 11533), False, 'import os, time\n'), ((19326, 19348), 'torch.cuda.device', 'torch.cuda.device', (['gid'], {}), '(gid)\n', (19343, 19348), False, 'import torch\n'), ((3324, 3359), 'model.CRNN2D_elu2', 'model.CRNN2D_elu2', (['(288)', 'classes_num'], {}), '(288, classes_num)\n', (3341, 3359), False, 'import model\n'), ((4215, 4241), 'numpy.zeros', 'np.zeros', (['(11437)'], {'dtype': 'int'}), '(11437, dtype=int)\n', (4223, 4241), True, 'import numpy as np\n'), ((4243, 4270), 'numpy.zeros', 'np.zeros', (['(11437, 128, 157)'], {}), '((11437, 128, 157))\n', (4251, 4270), True, 'import numpy as np\n'), ((4272, 4287), 'numpy.zeros', 'np.zeros', (['(11437)'], {}), '(11437)\n', (4280, 4287), True, 'import numpy as np\n'), ((4289, 4316), 'numpy.zeros', 'np.zeros', (['(11437, 128, 157)'], {}), '((11437, 128, 157))\n', (4297, 4316), True, 'import numpy as np\n'), ((4318, 4345), 'numpy.zeros', 'np.zeros', (['(11437, 128, 157)'], {}), '((11437, 128, 157))\n', (4326, 4345), True, 'import numpy as np\n'), ((4358, 4384), 'numpy.zeros', 'np.zeros', (['(11437)'], {'dtype': 'int'}), '(11437, dtype=int)\n', (4366, 4384), True, 'import numpy as np\n'), ((4386, 4413), 'numpy.zeros', 'np.zeros', (['(11437, 128, 157)'], {}), '((11437, 128, 157))\n', (4394, 4413), True, 'import numpy as np\n'), ((4415, 4430), 'numpy.zeros', 'np.zeros', (['(11437)'], {}), '(11437)\n', (4423, 4430), True, 'import numpy as np\n'), ((4432, 4459), 'numpy.zeros', 'np.zeros', (['(11437, 128, 157)'], {}), '((11437, 128, 157))\n', (4440, 4459), True, 'import numpy as np\n'), ((4461, 4488), 'numpy.zeros', 'np.zeros', (['(11437, 128, 157)'], {}), '((11437, 128, 157))\n', (4469, 4488), True, 'import numpy as np\n'), ((4501, 4527), 'numpy.zeros', 'np.zeros', (['(11437)'], {'dtype': 'int'}), '(11437, dtype=int)\n', (4509, 4527), True, 'import numpy as np\n'), ((4529, 4556), 'numpy.zeros', 'np.zeros', (['(11437, 128, 157)'], {}), '((11437, 128, 157))\n', (4537, 4556), True, 'import numpy as np\n'), ((4558, 4573), 'numpy.zeros', 'np.zeros', (['(11437)'], {}), '(11437)\n', (4566, 4573), True, 'import numpy as np\n'), ((4575, 4602), 'numpy.zeros', 'np.zeros', (['(11437, 128, 157)'], {}), '((11437, 128, 157))\n', (4583, 4602), True, 'import numpy as np\n'), ((4604, 4631), 'numpy.zeros', 'np.zeros', (['(11437, 128, 157)'], {}), '((11437, 128, 157))\n', (4612, 4631), True, 'import numpy as np\n'), ((6079, 6117), 'numpy.unique', 'np.unique', (['Y_train'], {'return_counts': '(True)'}), '(Y_train, return_counts=True)\n', (6088, 6117), True, 'import numpy as np\n'), ((6838, 6863), 'torch.from_numpy', 'torch.from_numpy', (['X_train'], {}), '(X_train)\n', (6854, 6863), False, 'import torch\n'), ((6887, 6912), 'torch.from_numpy', 'torch.from_numpy', (['Y_train'], {}), '(Y_train)\n', (6903, 6912), False, 'import torch\n'), ((6935, 6960), 'torch.from_numpy', 'torch.from_numpy', 
(['V_train'], {}), '(V_train)\n', (6951, 6960), False, 'import torch\n'), ((6984, 7009), 'torch.from_numpy', 'torch.from_numpy', (['B_train'], {}), '(B_train)\n', (7000, 7009), False, 'import torch\n'), ((7970, 7994), 'torch.from_numpy', 'torch.from_numpy', (['X_test'], {}), '(X_test)\n', (7986, 7994), False, 'import torch\n'), ((8017, 8041), 'torch.from_numpy', 'torch.from_numpy', (['Y_test'], {}), '(Y_test)\n', (8033, 8041), False, 'import torch\n'), ((8063, 8087), 'torch.from_numpy', 'torch.from_numpy', (['V_test'], {}), '(V_test)\n', (8079, 8087), False, 'import torch\n'), ((12313, 12365), 'sklearn.metrics.f1_score', 'f1_score', (['frame_true', 'frame_pred'], {'average': '"""weighted"""'}), "(frame_true, frame_pred, average='weighted')\n", (12321, 12365), False, 'from sklearn.metrics import f1_score\n'), ((7604, 7627), 'torch.from_numpy', 'torch.from_numpy', (['V_val'], {}), '(V_val)\n', (7620, 7627), False, 'import torch\n'), ((7653, 7676), 'torch.from_numpy', 'torch.from_numpy', (['Y_val'], {}), '(Y_val)\n', (7669, 7676), False, 'import torch\n'), ((7712, 7735), 'torch.from_numpy', 'torch.from_numpy', (['X_val'], {}), '(X_val)\n', (7728, 7735), False, 'import torch\n'), ((7761, 7784), 'torch.from_numpy', 'torch.from_numpy', (['Y_val'], {}), '(Y_val)\n', (7777, 7784), False, 'import torch\n'), ((14850, 14902), 'sklearn.metrics.f1_score', 'f1_score', (['frame_true', 'frame_pred'], {'average': '"""weighted"""'}), "(frame_true, frame_pred, average='weighted')\n", (14858, 14902), False, 'from sklearn.metrics import f1_score\n'), ((14938, 14990), 'sklearn.metrics.f1_score', 'f1_score', (['songs_true', 'songs_pred'], {'average': '"""weighted"""'}), "(songs_true, songs_pred, average='weighted')\n", (14946, 14990), False, 'from sklearn.metrics import f1_score\n'), ((11100, 11164), 'torch.log10', 'torch.log10', (['(10.0 ** (batch_x / 10.0) + 10.0 ** (batch_b / 10.0))'], {}), '(10.0 ** (batch_x / 10.0) + 10.0 ** (batch_b / 10.0))\n', (11111, 11164), False, 'import torch\n'), ((11468, 11479), 'time.time', 'time.time', ([], {}), '()\n', (11477, 11479), False, 'import os, time\n'), ((16919, 16971), 'sklearn.metrics.f1_score', 'f1_score', (['frame_true', 'frame_pred'], {'average': '"""weighted"""'}), "(frame_true, frame_pred, average='weighted')\n", (16927, 16971), False, 'from sklearn.metrics import f1_score\n'), ((17011, 17063), 'sklearn.metrics.f1_score', 'f1_score', (['songs_true', 'songs_pred'], {'average': '"""weighted"""'}), "(songs_true, songs_pred, average='weighted')\n", (17019, 17063), False, 'from sklearn.metrics import f1_score\n'), ((12250, 12270), 'numpy.argmax', 'np.argmax', (['pred_y[i]'], {}), '(pred_y[i])\n', (12259, 12270), True, 'import numpy as np\n'), ((14004, 14016), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (14012, 14016), True, 'import numpy as np\n'), ((14590, 14622), 'numpy.argmax', 'np.argmax', (['songs_vote_dict[song]'], {}), '(songs_vote_dict[song])\n', (14599, 14622), True, 'import numpy as np\n'), ((14718, 14738), 'numpy.array', 'np.array', (['frame_pred'], {}), '(frame_pred)\n', (14726, 14738), True, 'import numpy as np\n'), ((14745, 14765), 'numpy.array', 'np.array', (['frame_true'], {}), '(frame_true)\n', (14753, 14765), True, 'import numpy as np\n'), ((14771, 14789), 'numpy.array', 'np.array', (['emb_list'], {}), '(emb_list)\n', (14779, 14789), True, 'import numpy as np\n'), ((13843, 13863), 'numpy.argmax', 'np.argmax', (['pred_y[i]'], {}), '(pred_y[i])\n', (13852, 13863), True, 'import numpy as np\n'), ((14049, 14069), 'numpy.argmax', 'np.argmax', 
(['pred_y[i]'], {}), '(pred_y[i])\n', (14058, 14069), True, 'import numpy as np\n'), ((16168, 16180), 'numpy.zeros', 'np.zeros', (['(20)'], {}), '(20)\n', (16176, 16180), True, 'import numpy as np\n'), ((16820, 16852), 'numpy.argmax', 'np.argmax', (['songs_vote_dict[song]'], {}), '(songs_vote_dict[song])\n', (16829, 16852), True, 'import numpy as np\n'), ((16106, 16126), 'numpy.argmax', 'np.argmax', (['pred_y[i]'], {}), '(pred_y[i])\n', (16115, 16126), True, 'import numpy as np\n'), ((16217, 16237), 'numpy.argmax', 'np.argmax', (['pred_y[i]'], {}), '(pred_y[i])\n', (16226, 16237), True, 'import numpy as np\n')]
|
import numpy as np
import scipy.special as special
import scipy.optimize as optimization
import matplotlib.pyplot as plt
# this is a list of definitions that can be used to predict noise in KIDs
# right now it just contains the necessary requirements for predicting G-R noise in TiN
# I should expand it to include some of the definitions in Jason's code
# this is adapted from some IDL code I got from <NAME>
#Written by Jordan on 12/12/2016
#Change log
# 1/6/2017 added nqp_min as a specified parameter for grnoise
def nqp(t,tc,v,nqp_min):
'''
if ~keyword_set(nqp_min) then nqp_min=400. ; per cubic micron: zero-Temp residual QP density
V is in cubic microns
N0=1.72e10 for aluminum
'''
N0=4.e10 # for TiN
N0 = N0/1.6e-19 # now microns^3 / Joule
#Delta = double (3.5 * 1.381e-23 * tc)
Delta = 1.74 * 1.381e-23 * tc #factor in delta is suspect but it would be canceled out by a factor in tc
Nqp = v*2*N0*np.sqrt(2*np.pi*Delta*1.381e-23*t)*np.exp(-1*Delta/(1.381e-23*t))+v*nqp_min
return Nqp
def deltaf_f(t, tc, nu, alpha, gamma):
'''
    From <NAME>, 2 February 2016
Calculate a model for the fractional frequency shift due to change in bath temperature
This is (alpha)*(gamma/2)*[sigma2(T)/sigma2(T=0)-1], where sigma2(T) is
from equation 2.92 in Gao thesis, and equation 20 in Gao+08 JLTP
nu is in MHz
if ~keyword_set(alpha) then alpha=1
if ~keyword_set(gamma) then gamma=1
'''
d_0 = 1.762*tc #factor of 1.762 is suspect
xi = 6.626e-34*nu*1.e6/(2.*1.38e-23*t)
model = -1.*alpha*gamma/2.*np.exp(-1.*d_0/t)*((2.*np.pi*t/d_0)**0.5 + 2.*np.exp(-1.*xi)*special.iv(0,xi))
return model
def deltaf_f_vec(t,tc,nu,alpha,gamma):
d_0 = 1.762*tc #factor of 1.762 is suspect
t = np.reshape(t,(t.shape[0],1,1))
xi = 6.626e-34*nu*1.e6/(2.*1.38e-23*t)
model = -1.*alpha*gamma/2.*np.exp(-1.*d_0/t)*((2.*np.pi*t/d_0)**0.5 + 2.*np.exp(-1.*xi)*special.iv(0,xi))
return model
def df_response(t,tc,f):
'''
calculate d (df/f) / dT via finite difference
f is in MHz
calls deltaf_f which computes frequency shift
'''
delta_t = t/100.
dff_dt = (deltaf_f(t+delta_t,tc,f,1,1) - deltaf_f(t-delta_t,tc,f,1,1))/(2*delta_t)
return dff_dt
def grnoise(t,tc,V,tau_qp,N0,f,nqp_min):
'''
this function calculates gr noise assuming constant tau
fuction below responsivity probably does a better job.
V (volume in cubic microns)
if ~keyword_set(N0) then N0=4.e10 ; microns^3 / eV
if ~keyword_set(tau_qp) then tau_qp = 5e-6 ; sec
if ~keyword_set(nqp_min) then nqp_min=400. ; QP per cubic micron at zero Temp
'''
N0 = N0/1.6e-19 # now microns^3 / Joule
#ef^2 = 4 beta^2 Nqp tau_qp
#beta = df_0 / d Nqp , so use (df_0/ dT) (dT / dNqp)
#Delta = double (3.5 * 1.381e-23 * tc)
Delta = 1.74*1.381e-23*tc
#Nqp = v * 2 * N0 * sqrt(2*!pi*delta * 1.381e-23 * T) *exp(-1*Delta / (1.381e-23 * T)) + v * nqp_min
    dNqp_dt = V*2.*N0*np.sqrt(2*np.pi*Delta*1.381e-23)*np.exp(-1.*Delta/(1.381e-23*t))*(1./(2.*np.sqrt(t)) + np.sqrt(t)*Delta/1.381e-23/t**2)  # analytic estimate; superseded by the finite-difference value computed below
delta_t = t/100.
dNqp_dt = (nqp(t+delta_t,tc,V,nqp_min)-nqp(t-delta_t,tc,V,nqp_min))/(2*delta_t)
beta = df_response(t,tc,f)/dNqp_dt
#assume a frequency of 100 MHz here, shouldn't matter.
ef2 = 4. * beta**2 * nqp(t,tc,V,nqp_min) * tau_qp
return ef2
def responsivity(temp,pabs,tc = 1.,N0 = 4.*10**10,nstar =100.,tau_max = 100.,eta_pb = 0.7,vol = 1.,fr = 100.,alpha_k = 1.,gamma_t = 1.,nu_opt = 250, n_gamma = 0.):
'''
    Special thanks to <NAME>, who made the original version of this function in IDL
'''
#Define various constants
k_B = 1.381*10**-23 #Boltzmann constant [J K^-1]
ev_joule = 1.6022*10**-19 #eV/J ratio [eV J^-1]
h_p = 6.626*10**-34 #Planck constant [J s]
#Compute the ratio of Delta_0/k_B [K]
d0_kB = 1.764*tc
# Compute n thermal
nth = 2.*N0*k_B/ev_joule*np.sqrt(2.*np.pi*temp*d0_kB)*np.exp(-1.*d0_kB/temp)
# Compute nqp
# This expression has a term of the form [sqrt(1 + eps) - 1], where eps is small when nth and pabs are small.
# When eps is small this term is not computed accurately, and here we explicitly linearize it.
#nqp = nstar*(-1. + sqrt(1. + 2.*nth/nstar + (nth/nstar)^2. + 2.*eta_pb*pabs*1.e-12*tau_max*1.e-6/(nstar*vol*d0_kB*k_B)))
eps = 2.*nth/nstar + (nth/nstar)**2. + 2.*eta_pb*pabs*1.*10**-12*tau_max*1.*10**-6/(nstar*vol*d0_kB*k_B)
term = np.sqrt(1. + eps) - 1.
indx = np.where(eps < 1*10**-8)
count = len(indx)
if (count != 0):
term[indx] = 0.5*eps[indx]
nqp = nstar*term
#Compute tau_qp
tau_qp = tau_max/(1. + nqp/nstar)
# Compute S1 and S2
xi = h_p*fr*1*10**6/(2.*k_B*temp)
s1 = (2./np.pi)*np.sqrt(2.*d0_kB/(np.pi*temp))*np.sinh(xi)*special.kv(0,xi)
s2 = 1. + np.sqrt(2.*d0_kB/(np.pi*temp))*np.exp(-1.*xi)*special.iv(0,xi)
#s2 = 3.
#Compute xr and Qi_inv
#Note that xr refers to the frequency shift from the nqp = 0 state
xr = -1.*alpha_k*gamma_t*s2*nqp/(4.*N0*d0_kB*k_B/ev_joule)
Qi_inv = -1.*xr*2.*s1/s2
#Compute the frequency and Qinv responsivity
r_x = -1.*alpha_k*gamma_t*s2/(4.*N0*d0_kB*k_B/ev_joule)*eta_pb*tau_qp*1*10**-6/(d0_kB*k_B*vol)
r_qinv = -1.*r_x*2.*s1/s2
#Compute Sxx_gr and Sxx_gr0
tau_th = tau_max/(1. + nth/nstar) #quasiparticle lifetime for a superconductor in thermal equilibrium at the specified temperature [microsec]
gamma_th = nth*vol/2.*(1./tau_max + 1./tau_th)*1*10**6 #quasiparticle generation rate due to thermal fluctuations ;[sec^-1]
gamma_r = nqp*vol/2.*(1./tau_max + 1./tau_qp)*1*10**6 #quasiparticle recombination rate ;[sec^-1]
sxx_gr = (alpha_k*gamma_t*s2/(4.*N0*d0_kB*k_B/ev_joule))**2.*4.*(tau_qp*1*10**-6)**2./vol**2.*(gamma_th + gamma_r)
sxx_gr0 = (alpha_k*gamma_t*s2/(4.*N0*d0_kB*k_B/ev_joule))**2.*4.*nqp/vol*tau_qp*1*10**-6
#Compute Sxx_gamma
sxx_gamma = (r_x)**2.*2.*h_p*nu_opt*1*10**9*pabs*1*10**-12*(1. + n_gamma)
#Define the output dictionary and return
dict = {'nth':nth,
'nqp':nqp,
'tau_qp':tau_qp,
's1':s1,
's2':s2,
'xr':xr,
'Qi_inv':Qi_inv,
'r_x':r_x,
'r_qinv':r_qinv,
'sxx_gr':sxx_gr,
'sxx_gr0':sxx_gr0,
'sxx_gamma':sxx_gamma}
return dict
def responsivity_help():
print("The input variables are temp,pabs,tc = 1.,N0 = 4.*10**10,nstar =100.,tau_max = 100.,eta_pb = 0.7,vol = 1.,fr = 100.,alpha_k = 1.,gamma_t = 1.,nu_opt = 250, n_gamma = 0.")
print("The output variables are nth, nqp, tau_qp, s1, s2, xr, Qi_inv, r_x, r_qinv, sxx_gr, sxx_gr0, sxx_gamma")
def f0dirshort(T, f00, Fdelta):
    '''
    function that computes the TLS frequency shift
    f00 is the center frequency in GHz, Fdelta is the product of the filling
    factor and the loss tangent F*delta_TLS; returns the frequency in GHz
    Taken from <NAME>'s Matlab code
    '''
    f01K = 20.8366
    ref0 = np.real(special.digamma(1/2 + 1/(2*np.pi*1j)*f00/f01K/T)) - np.log(f00/f01K/T/(2*np.pi))
    y = f00 + f00*Fdelta*1/np.pi*ref0
return y
def fit_tls(T,f,sigma = None,**keywords):
x0 = np.asarray((f[0],1e-5))
if sigma is not None:
print("using error")
print(sigma)
fit = optimization.curve_fit(f0dirshort, T, f,x0,sigma=sigma,absolute_sigma = True)
else:
fit = optimization.curve_fit(f0dirshort, T, f, x0)
return fit
def fit_tc_brute(t,df_over_f,nuref,tc_range = (0.5,1.5),alpha_range = (0,1), n_grid_points=100, error=None, plot = True,Verbose = False,**keywords):
    '''
    brute force fitter for fitting Tc and alpha assuming gamma = 1
    note that alpha and gamma are degenerate
    t is temperature in kelvin
    df_over_f is (f-f0)/f0
    '''
if error is None:
error = np.ones(len(t))
tc_values = np.linspace(tc_range[0], tc_range[1], n_grid_points)
alpha_values = np.linspace(alpha_range[0], alpha_range[1], n_grid_points)
evaluated_ranges = np.vstack((tc_values, alpha_values))
a, b= np.meshgrid(tc_values, alpha_values, indexing="ij") # always index ij
#evaluated = noise_profile_lor_vec(x[index_for_fitting], a, b, c, d)
evaluated = deltaf_f_vec(t,a,nuref,b,1)
data_values = np.reshape(df_over_f, (df_over_f.shape[0], 1, 1))
error = np.reshape(error, (error.shape[0], 1, 1))
#print(evaluated.shape)
# print(data_values.shape)
# print(error.shape)
    sum_dev = np.sum(((evaluated - data_values) ** 2 / error ** 2),
                      axis=0)  # chi-squared of the model grid against the data, summed over temperature points
min_index = np.where(sum_dev == np.min(sum_dev))
if Verbose:
print("grid values at minimum are")
print(min_index)
index1 = min_index[0][0]
index2 = min_index[1][0]
fit_values = np.asarray((tc_values[index1], alpha_values[index2]))
fit_values_names = ('tc', 'alpha')
fit_result = deltaf_f(t, tc_values[index1], nuref,alpha_values[index2],1)
if plot:
extent = [evaluated_ranges[1,0],evaluated_ranges[1,n_grid_points-1],evaluated_ranges[0,0],evaluated_ranges[0,n_grid_points-1]]
aspect = (evaluated_ranges[1,n_grid_points-1]-evaluated_ranges[1,0])/(evaluated_ranges[0,n_grid_points-1]-evaluated_ranges[0,0])
plt.figure()
plt.imshow(np.log10(sum_dev-np.min(sum_dev)),extent =extent,aspect = aspect,origin = 'lower', cmap = 'jet')
plt.xlabel("alpha")
plt.ylabel("Tc")
plt.colorbar(label = 'Log10(sum residuals squared)')
fit_dict = {'fit_values': fit_values, 'fit_values_names': fit_values_names, 'sum_dev': sum_dev,
'fit_result': fit_result,'evaluated_ranges': evaluated_ranges} #'marginalized_2d':marginalized_2d,'marginalized_1d':marginalized_1d,
return fit_dict
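# A minimal usage sketch (not part of the original module): it runs only when this
# file is executed directly and uses illustrative, made-up detector parameters to
# exercise responsivity(), deltaf_f() and fit_tc_brute() defined above.
if __name__ == '__main__':
    # hypothetical TiN resonator: Tc = 1.4 K, 100 MHz readout, 50 cubic-micron volume
    temps = np.linspace(0.1, 0.4, 20)
    result = responsivity(temps, pabs=1.0, tc=1.4, vol=50., fr=100.)
    print('nqp at lowest temperature:', result['nqp'][0])
    # fit Tc and alpha to the model's own fractional frequency shift as a self-consistency check
    df_f = deltaf_f(temps, 1.4, 100., 0.8, 1.)
    fit = fit_tc_brute(temps, df_f, nuref=100., tc_range=(1.0, 2.0),
                       alpha_range=(0.5, 1.0), plot=False)
    print('recovered (tc, alpha):', fit['fit_values'])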
|
[
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.exp",
"scipy.special.kv",
"numpy.meshgrid",
"matplotlib.pyplot.colorbar",
"numpy.reshape",
"numpy.linspace",
"numpy.asarray",
"scipy.optimize.curve_fit",
"scipy.special.digamma",
"numpy.min",
"matplotlib.pyplot.ylabel",
"numpy.vstack",
"numpy.log",
"scipy.special.iv",
"numpy.where",
"matplotlib.pyplot.xlabel",
"numpy.sinh",
"numpy.sqrt"
] |
[((1777, 1810), 'numpy.reshape', 'np.reshape', (['t', '(t.shape[0], 1, 1)'], {}), '(t, (t.shape[0], 1, 1))\n', (1787, 1810), True, 'import numpy as np\n'), ((4509, 4537), 'numpy.where', 'np.where', (['(eps < 1 * 10 ** -8)'], {}), '(eps < 1 * 10 ** -8)\n', (4517, 4537), True, 'import numpy as np\n'), ((7207, 7232), 'numpy.asarray', 'np.asarray', (['(f[0], 1e-05)'], {}), '((f[0], 1e-05))\n', (7217, 7232), True, 'import numpy as np\n'), ((7886, 7938), 'numpy.linspace', 'np.linspace', (['tc_range[0]', 'tc_range[1]', 'n_grid_points'], {}), '(tc_range[0], tc_range[1], n_grid_points)\n', (7897, 7938), True, 'import numpy as np\n'), ((7958, 8016), 'numpy.linspace', 'np.linspace', (['alpha_range[0]', 'alpha_range[1]', 'n_grid_points'], {}), '(alpha_range[0], alpha_range[1], n_grid_points)\n', (7969, 8016), True, 'import numpy as np\n'), ((8040, 8076), 'numpy.vstack', 'np.vstack', (['(tc_values, alpha_values)'], {}), '((tc_values, alpha_values))\n', (8049, 8076), True, 'import numpy as np\n'), ((8088, 8139), 'numpy.meshgrid', 'np.meshgrid', (['tc_values', 'alpha_values'], {'indexing': '"""ij"""'}), "(tc_values, alpha_values, indexing='ij')\n", (8099, 8139), True, 'import numpy as np\n'), ((8295, 8344), 'numpy.reshape', 'np.reshape', (['df_over_f', '(df_over_f.shape[0], 1, 1)'], {}), '(df_over_f, (df_over_f.shape[0], 1, 1))\n', (8305, 8344), True, 'import numpy as np\n'), ((8357, 8398), 'numpy.reshape', 'np.reshape', (['error', '(error.shape[0], 1, 1)'], {}), '(error, (error.shape[0], 1, 1))\n', (8367, 8398), True, 'import numpy as np\n'), ((8497, 8556), 'numpy.sum', 'np.sum', (['((evaluated - data_values) ** 2 / error ** 2)'], {'axis': '(0)'}), '((evaluated - data_values) ** 2 / error ** 2, axis=0)\n', (8503, 8556), True, 'import numpy as np\n'), ((8857, 8910), 'numpy.asarray', 'np.asarray', (['(tc_values[index1], alpha_values[index2])'], {}), '((tc_values[index1], alpha_values[index2]))\n', (8867, 8910), True, 'import numpy as np\n'), ((3974, 4001), 'numpy.exp', 'np.exp', (['(-1.0 * d0_kB / temp)'], {}), '(-1.0 * d0_kB / temp)\n', (3980, 4001), True, 'import numpy as np\n'), ((4475, 4493), 'numpy.sqrt', 'np.sqrt', (['(1.0 + eps)'], {}), '(1.0 + eps)\n', (4482, 4493), True, 'import numpy as np\n'), ((4822, 4839), 'scipy.special.kv', 'special.kv', (['(0)', 'xi'], {}), '(0, xi)\n', (4832, 4839), True, 'import scipy.special as special\n'), ((7072, 7108), 'numpy.log', 'np.log', (['(f00 / f01K / T / (2 * np.pi))'], {}), '(f00 / f01K / T / (2 * np.pi))\n', (7078, 7108), True, 'import numpy as np\n'), ((7321, 7399), 'scipy.optimize.curve_fit', 'optimization.curve_fit', (['f0dirshort', 'T', 'f', 'x0'], {'sigma': 'sigma', 'absolute_sigma': '(True)'}), '(f0dirshort, T, f, x0, sigma=sigma, absolute_sigma=True)\n', (7343, 7399), True, 'import scipy.optimize as optimization\n'), ((7423, 7467), 'scipy.optimize.curve_fit', 'optimization.curve_fit', (['f0dirshort', 'T', 'f', 'x0'], {}), '(f0dirshort, T, f, x0)\n', (7445, 7467), True, 'import scipy.optimize as optimization\n'), ((9323, 9335), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9333, 9335), True, 'import matplotlib.pyplot as plt\n'), ((9460, 9479), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""alpha"""'], {}), "('alpha')\n", (9470, 9479), True, 'import matplotlib.pyplot as plt\n'), ((9488, 9504), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Tc"""'], {}), "('Tc')\n", (9498, 9504), True, 'import matplotlib.pyplot as plt\n'), ((9513, 9563), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""Log10(sum residuals squared)"""'}), 
"(label='Log10(sum residuals squared)')\n", (9525, 9563), True, 'import matplotlib.pyplot as plt\n'), ((986, 1022), 'numpy.exp', 'np.exp', (['(-1 * Delta / (1.381e-23 * t))'], {}), '(-1 * Delta / (1.381e-23 * t))\n', (992, 1022), True, 'import numpy as np\n'), ((1586, 1608), 'numpy.exp', 'np.exp', (['(-1.0 * d_0 / t)'], {}), '(-1.0 * d_0 / t)\n', (1592, 1608), True, 'import numpy as np\n'), ((1882, 1904), 'numpy.exp', 'np.exp', (['(-1.0 * d_0 / t)'], {}), '(-1.0 * d_0 / t)\n', (1888, 1904), True, 'import numpy as np\n'), ((3032, 3070), 'numpy.exp', 'np.exp', (['(-1.0 * Delta / (1.381e-23 * t))'], {}), '(-1.0 * Delta / (1.381e-23 * t))\n', (3038, 3070), True, 'import numpy as np\n'), ((3945, 3980), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi * temp * d0_kB)'], {}), '(2.0 * np.pi * temp * d0_kB)\n', (3952, 3980), True, 'import numpy as np\n'), ((4810, 4821), 'numpy.sinh', 'np.sinh', (['xi'], {}), '(xi)\n', (4817, 4821), True, 'import numpy as np\n'), ((4899, 4916), 'scipy.special.iv', 'special.iv', (['(0)', 'xi'], {}), '(0, xi)\n', (4909, 4916), True, 'import scipy.special as special\n'), ((7022, 7086), 'scipy.special.digamma', 'special.digamma', (['(1 / 2 + 1 / (2 * np.pi * 1.0j) * f00 / f01K / T)'], {}), '(1 / 2 + 1 / (2 * np.pi * 1.0j) * f00 / f01K / T)\n', (7037, 7086), True, 'import scipy.special as special\n'), ((8680, 8695), 'numpy.min', 'np.min', (['sum_dev'], {}), '(sum_dev)\n', (8686, 8695), True, 'import numpy as np\n'), ((951, 993), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * Delta * 1.381e-23 * t)'], {}), '(2 * np.pi * Delta * 1.381e-23 * t)\n', (958, 993), True, 'import numpy as np\n'), ((1647, 1664), 'scipy.special.iv', 'special.iv', (['(0)', 'xi'], {}), '(0, xi)\n', (1657, 1664), True, 'import scipy.special as special\n'), ((1943, 1960), 'scipy.special.iv', 'special.iv', (['(0)', 'xi'], {}), '(0, xi)\n', (1953, 1960), True, 'import scipy.special as special\n'), ((2999, 3037), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * Delta * 1.318e-23)'], {}), '(2 * np.pi * Delta * 1.318e-23)\n', (3006, 3037), True, 'import numpy as np\n'), ((4779, 4816), 'numpy.sqrt', 'np.sqrt', (['(2.0 * d0_kB / (np.pi * temp))'], {}), '(2.0 * d0_kB / (np.pi * temp))\n', (4786, 4816), True, 'import numpy as np\n'), ((4853, 4890), 'numpy.sqrt', 'np.sqrt', (['(2.0 * d0_kB / (np.pi * temp))'], {}), '(2.0 * d0_kB / (np.pi * temp))\n', (4860, 4890), True, 'import numpy as np\n'), ((4884, 4901), 'numpy.exp', 'np.exp', (['(-1.0 * xi)'], {}), '(-1.0 * xi)\n', (4890, 4901), True, 'import numpy as np\n'), ((1632, 1649), 'numpy.exp', 'np.exp', (['(-1.0 * xi)'], {}), '(-1.0 * xi)\n', (1638, 1649), True, 'import numpy as np\n'), ((1928, 1945), 'numpy.exp', 'np.exp', (['(-1.0 * xi)'], {}), '(-1.0 * xi)\n', (1934, 1945), True, 'import numpy as np\n'), ((3072, 3082), 'numpy.sqrt', 'np.sqrt', (['t'], {}), '(t)\n', (3079, 3082), True, 'import numpy as np\n'), ((9372, 9387), 'numpy.min', 'np.min', (['sum_dev'], {}), '(sum_dev)\n', (9378, 9387), True, 'import numpy as np\n'), ((3086, 3096), 'numpy.sqrt', 'np.sqrt', (['t'], {}), '(t)\n', (3093, 3096), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# @Project : curve_fit
# @Time : 2019-05-27 14:51
# @Author : <NAME>
# @Email : <EMAIL>
# @File : continuous.py
import pickle
import numpy as np
from patsy import dmatrix
import statsmodels.api as sm
class Continuous:
def __init__(self, k=3):
self.model = None
self.all = None
self.k = k
    def _transform(self, x):
        # Build a natural cubic regression spline basis (patsy "cr") with k degrees
        # of freedom. All x values seen so far are accumulated so the spline basis
        # stays consistent between fit() and predict(); only the rows belonging to
        # the current batch are returned.
        n = x.shape[0]
        if self.all is None:
            self.all = np.array(x)
        else:
            self.all = np.concatenate([self.all, x])
        transformed_x = dmatrix("cr(x,df={})".format(self.k), {"x": self.all}, return_type='dataframe')[-n:]
        return transformed_x
def fit(self, x, y):
x = np.array(x)
y = np.array(y)
assert x.ndim == 1
assert x.shape == y.shape
trans_x = self._transform(x)
self.model = sm.GLM(y, trans_x).fit()
return self.model
def predict(self, x):
x = np.array(x)
trans_x = self._transform(x)
pred = self.model.predict(trans_x)
return pred
def save(self, dir):
with open(dir, "wb") as fw:
pickle.dump(self.model, fw)
def load(self, dir):
with open(dir, "rb") as fr:
self.model = pickle.load(fr)
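# A minimal usage sketch (not part of the original module): it fits the spline
# wrapper defined above to synthetic, made-up data and prints a few predictions.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x = np.linspace(0, 10, 200)
    y = np.sin(x) + rng.normal(scale=0.1, size=x.shape)
    spline = Continuous(k=5)
    spline.fit(x, y)
    print(spline.predict([0.5, 2.5, 7.5]))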
|
[
"statsmodels.api.GLM",
"pickle.dump",
"pickle.load",
"numpy.array",
"numpy.concatenate"
] |
[((713, 724), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (721, 724), True, 'import numpy as np\n'), ((737, 748), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (745, 748), True, 'import numpy as np\n'), ((958, 969), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (966, 969), True, 'import numpy as np\n'), ((458, 469), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (466, 469), True, 'import numpy as np\n'), ((507, 536), 'numpy.concatenate', 'np.concatenate', (['[self.all, x]'], {}), '([self.all, x])\n', (521, 536), True, 'import numpy as np\n'), ((1144, 1171), 'pickle.dump', 'pickle.dump', (['self.model', 'fw'], {}), '(self.model, fw)\n', (1155, 1171), False, 'import pickle\n'), ((1259, 1274), 'pickle.load', 'pickle.load', (['fr'], {}), '(fr)\n', (1270, 1274), False, 'import pickle\n'), ((868, 886), 'statsmodels.api.GLM', 'sm.GLM', (['y', 'trans_x'], {}), '(y, trans_x)\n', (874, 886), True, 'import statsmodels.api as sm\n')]
|
import time
import datetime
import os
import sys
import numpy as np
use_cntk = True
if use_cntk:
try:
base_directory = os.path.split(sys.executable)[0]
os.environ['PATH'] += ';' + base_directory
import cntk
os.environ['KERAS_BACKEND'] = 'cntk'
except ImportError:
print('CNTK not installed')
else:
os.environ['KERAS_BACKEND'] = 'tensorflow'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import keras
def learning_word_embeddings_with_the_embedding_layer():
# Number of words to consider as features
max_features = 10000
# Cut texts after this number of words
# (among top max_features most common words)
maxlen = 20
# Load the data as lists of integers.
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=max_features)
# This turns our lists of integers
# into a 2D integer tensor of shape `(samples, maxlen)`
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=maxlen)
model = keras.models.Sequential()
# We specify the maximum input length to our Embedding layer
# so we can later flatten the embedded inputs
model.add(keras.layers.Embedding(max_features, 8, input_length=maxlen))
# After the Embedding layer,
# our activations have shape `(samples, maxlen, 8)`.
# We flatten the 3D tensor of embeddings
# into a 2D tensor of shape `(samples, maxlen * 8)`
model.add(keras.layers.Flatten())
# We add the classifier on top
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
model.summary()
history = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_split=0.2)
def learning_word_embeddings_with_the_embedding_layer_cntk():
x_train, y_train, x_test, y_test = load_from_files()
max_features = 10000
maxlen = 20
embedding_dim = 8
x = cntk.input_variable(shape=(maxlen,), dtype=np.float32)
y = cntk.input_variable(shape=(1,), dtype=np.float32)
model = cntk.one_hot(x, num_classes=max_features, sparse_output=True)
model = cntk.layers.Embedding(embedding_dim)(model)
model = cntk.layers.Dense(1, activation=cntk.sigmoid)(model)
loss_function = cntk.binary_cross_entropy(model.output, y)
round_predictions = cntk.round(model.output)
equal_elements = cntk.equal(round_predictions, y)
accuracy_function = cntk.reduce_mean(equal_elements, axis=0)
max_epochs = 30
batch_size = 32
learner = cntk.adam(model.parameters, cntk.learning_parameter_schedule_per_sample(0.0001), cntk.learning_parameter_schedule_per_sample(0.99))
progress_printer = cntk.logging.ProgressPrinter(tag='Training', num_epochs=max_epochs)
trainer = cntk.Trainer(model, (loss_function, accuracy_function), [learner], progress_printer)
evaluator = cntk.Evaluator(accuracy_function)
cntk_train(x, y, x_train, y_train, max_epochs, batch_size, trainer, evaluator)
def cntk_train(x, y, x_train, y_train, max_epochs, batch_size, trainer, evaluator):
N = len(x_train)
y_train = np.expand_dims(y_train, axis=1)
train_features = x_train[:int(N*0.8)]
train_labels = y_train[:int(N*0.8)]
validation_features = x_train[int(N*0.8):]
validation_labels = y_train[int(N*0.8):]
for current_epoch in range(max_epochs):
epoch_start_time = time.time()
train_indices = np.random.permutation(train_features.shape[0])
pos = 0
epoch_training_error = 0
num_batches = 0
while pos < len(train_indices):
pos_end = min(pos + batch_size, len(train_indices))
x_train_minibatch = train_features[train_indices[pos:pos_end]]
y_train_minibatch = train_labels[train_indices[pos:pos_end]]
trainer.train_minibatch({x: x_train_minibatch, y: y_train_minibatch})
epoch_training_error += trainer.previous_minibatch_evaluation_average
num_batches += 1
pos = pos_end
epoch_training_error /= num_batches
epoch_validation_error = 0
num_batches = 0
pos = 0
while pos < len(validation_features):
pos_end = min(pos + batch_size, len(validation_features))
x_train_minibatch = validation_features[pos:pos_end]
y_train_minibatch = validation_labels[pos:pos_end]
previous_minibatch_evaluation_average = evaluator.test_minibatch({x: x_train_minibatch, y: y_train_minibatch})
epoch_validation_error += previous_minibatch_evaluation_average
num_batches += 1
pos = pos_end
epoch_validation_error /= num_batches
print('Epoch Elapsed Time: {0}, training_accuracy={1:.3f}, evaluation_accuracy={2:.3f}'.format(
datetime.timedelta(seconds=time.time() - epoch_start_time),
epoch_training_error, epoch_validation_error))
def save_to_files(x_train, y_train, x_test, y_test):
x_train = np.ascontiguousarray(x_train.astype(np.float32))
y_train = np.ascontiguousarray(y_train.astype(np.float32))
x_test = np.ascontiguousarray(x_test.astype(np.float32))
y_test = np.ascontiguousarray(y_test.astype(np.float32))
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
x_train.tofile('x_train_imdb.bin')
y_train.tofile('y_train_imdb.bin')
x_test.tofile('x_test_imdb.bin')
y_test.tofile('y_test_imdb.bin')
def load_from_files(x_shape=(25000, 20), y_shape=(25000,)):
print('Loading .bin files')
x_train = np.fromfile('x_train_imdb.bin', dtype=np.float32)
y_train = np.fromfile('y_train_imdb.bin', dtype=np.float32)
x_test = np.fromfile('x_test_imdb.bin', dtype=np.float32)
y_test = np.fromfile('y_test_imdb.bin', dtype=np.float32)
x_train = np.reshape(x_train, newshape=x_shape)
y_train = np.reshape(y_train, newshape=y_shape)
x_test = np.reshape(x_test, newshape=x_shape)
y_test = np.reshape(y_test, newshape=y_shape)
return x_train, y_train, x_test, y_test
class Constants:
maxlen = 100 # We will cut reviews after 100 words
training_samples = 200 # We will be training on 200 samples
validation_samples = 10000 # We will be validating on 10000 samples
max_words = 10000 # We will only consider the top 10,000 words in the dataset
embedding_dim = 100
imdb_dir = 'C:\\Users\\anastasios\\Downloads\\aclImdb'
def load_texts_labels(path):
import tqdm
labels = []
texts = []
for label_type in ['neg', 'pos']:
dir_name = os.path.join(path, label_type)
print('\nLoading ', dir_name, '\n', flush=True)
for fname in tqdm.tqdm(os.listdir(dir_name)):
if fname[-4:] == '.txt':
f = open(os.path.join(dir_name, fname), encoding='utf8')
texts.append(f.read())
f.close()
if label_type == 'neg':
labels.append(0)
else:
labels.append(1)
return texts, labels
def tokenize_alImdb():
import keras.preprocessing.text
train_dir = os.path.join(Constants.imdb_dir, 'train')
texts, labels = load_texts_labels(train_dir)
tokenizer = keras.preprocessing.text.Tokenizer(num_words=Constants.max_words)
print('\n\nRunning tokenizer...', end='', flush=True)
tokenizer.fit_on_texts(texts)
return tokenizer, texts, labels
def from_raw_text_to_word_embeddings():
import numpy as np
import keras.preprocessing.sequence
tokenizer, texts, labels = tokenize_alImdb()
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = keras.preprocessing.sequence.pad_sequences(sequences, maxlen=Constants.maxlen)
data = np.asarray(data, dtype=np.float32)
labels = np.asarray(labels, dtype=np.float32)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# Split the data into a training set and a validation set
# But first, shuffle the data, since we started from data
    # where samples are ordered (all negative first, then all positive).
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
x_train = data[:Constants.training_samples]
y_train = labels[:Constants.training_samples]
x_val = data[Constants.training_samples: Constants.training_samples + Constants.validation_samples]
y_val = labels[Constants.training_samples: Constants.training_samples + Constants.validation_samples]
return tokenizer, x_train, y_train, x_val, y_val
def preprocess_embeddings():
import numpy as np
import tqdm
glove_dir = 'C:\\Users\\anastasios\\Downloads\\glove.6B'
embeddings_index = {}
glove_path = os.path.join(glove_dir, 'glove.6B.100d.txt')
f = open(glove_path, encoding='utf8')
print('Processing ', glove_path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
return embeddings_index
def build_model():
model = keras.models.Sequential()
model.add(keras.layers.Embedding(Constants.max_words, Constants.embedding_dim, input_length=Constants.maxlen))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(32, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.summary()
return model
def use_glove_word_embeddings_cntk(preload_weights=False):
tokenizer, x_train, y_train, x_val, y_val = from_raw_text_to_word_embeddings()
x = cntk.input_variable(shape=(Constants.maxlen,), dtype=np.float32)
y = cntk.input_variable(shape=(1,), dtype=np.float32)
model = cntk.one_hot(x, num_classes=Constants.max_words, sparse_output=True)
if preload_weights is True:
embedding_matrix = compute_embedding_matrix(tokenizer)
assert (Constants.embedding_dim == embedding_matrix.shape[0]) or (Constants.embedding_dim == embedding_matrix.shape[1])
model = cntk.layers.Embedding(weights=embedding_matrix)(model)
else:
model = cntk.layers.Embedding(Constants.embedding_dim)(model)
model = cntk.layers.Dense(32, activation=cntk.relu)(model)
model = cntk.layers.Dense(1, activation=cntk.sigmoid)(model)
loss_function = cntk.binary_cross_entropy(model.output, y)
round_predictions = cntk.round(model.output)
equal_elements = cntk.equal(round_predictions, y)
accuracy_function = cntk.reduce_mean(equal_elements, axis=0)
max_epochs = 10
batch_size = 32
learner = cntk.adam(model.parameters, cntk.learning_parameter_schedule_per_sample(0.0001), cntk.learning_parameter_schedule_per_sample(0.99))
progress_printer = cntk.logging.ProgressPrinter(tag='Training', num_epochs=max_epochs)
trainer = cntk.Trainer(model, (loss_function, accuracy_function), [learner], progress_printer)
evaluator = cntk.Evaluator(accuracy_function)
cntk_train(x, y, x_train, y_train, max_epochs, batch_size, trainer, evaluator)
def compute_embedding_matrix(tokenizer):
embeddings_index = preprocess_embeddings()
embedding_matrix = np.zeros((Constants.max_words, Constants.embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if i < Constants.max_words:
if embedding_vector is not None:
# Words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
return embedding_matrix
def use_glove_word_embeddings(preload_weights=True):
tokenizer, x_train, y_train, x_val, y_val = from_raw_text_to_word_embeddings()
model = build_model()
if preload_weights:
embedding_matrix = compute_embedding_matrix(tokenizer)
model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(x_train, y_train,
epochs=10,
batch_size=32,
validation_data=(x_val, y_val))
model.save_weights('pre_trained_glove_model.h5')
plot_results(history)
def plot_results(history):
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
def evaluate_on_test_data():
import numpy as np
test_dir = os.path.join(Constants.imdb_dir, 'test')
tokenizer, _, _ = tokenize_alImdb()
texts, labels = load_texts_labels(test_dir)
sequences = tokenizer.texts_to_sequences(texts)
x_test = keras.preprocessing.sequence.pad_sequences(sequences, maxlen=Constants.maxlen)
y_test = np.asarray(labels)
model = build_model()
model.load_weights('pre_trained_glove_model.h5')
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
print(model.evaluate(x_test, y_test))
if __name__ == '__main__':
learning_word_embeddings_with_the_embedding_layer()
# learning_word_embeddings_with_the_embedding_layer_cntk()
use_glove_word_embeddings(preload_weights=True)
# use_glove_word_embeddings_cntk(preload_weights=True)
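# Note: the CNTK paths above read the IMDB data from the .bin files produced by
# save_to_files(). A one-time preparation step, assuming the Keras IMDB download
# is available, could look like this (illustrative only):
#
#   (x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=10000)
#   x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=20)
#   x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=20)
#   save_to_files(x_train, y_train, x_test, y_test)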
|
[
"matplotlib.pyplot.title",
"keras.preprocessing.sequence.pad_sequences",
"cntk.layers.Embedding",
"matplotlib.pyplot.figure",
"numpy.arange",
"cntk.equal",
"cntk.layers.Dense",
"os.path.join",
"cntk.binary_cross_entropy",
"cntk.Evaluator",
"cntk.logging.ProgressPrinter",
"keras.layers.Flatten",
"keras.preprocessing.text.Tokenizer",
"numpy.reshape",
"keras.datasets.imdb.load_data",
"numpy.random.shuffle",
"cntk.reduce_mean",
"cntk.one_hot",
"matplotlib.pyplot.show",
"numpy.asarray",
"matplotlib.pyplot.legend",
"cntk.Trainer",
"numpy.random.permutation",
"cntk.round",
"os.listdir",
"matplotlib.pyplot.plot",
"numpy.fromfile",
"numpy.zeros",
"numpy.expand_dims",
"time.time",
"cntk.input_variable",
"keras.layers.Dense",
"keras.layers.Embedding",
"keras.models.Sequential",
"cntk.learning_parameter_schedule_per_sample",
"os.path.split"
] |
[((777, 830), 'keras.datasets.imdb.load_data', 'keras.datasets.imdb.load_data', ([], {'num_words': 'max_features'}), '(num_words=max_features)\n', (806, 830), False, 'import keras\n'), ((945, 1011), 'keras.preprocessing.sequence.pad_sequences', 'keras.preprocessing.sequence.pad_sequences', (['x_train'], {'maxlen': 'maxlen'}), '(x_train, maxlen=maxlen)\n', (987, 1011), False, 'import keras\n'), ((1025, 1090), 'keras.preprocessing.sequence.pad_sequences', 'keras.preprocessing.sequence.pad_sequences', (['x_test'], {'maxlen': 'maxlen'}), '(x_test, maxlen=maxlen)\n', (1067, 1090), False, 'import keras\n'), ((1104, 1129), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (1127, 1129), False, 'import keras\n'), ((2035, 2089), 'cntk.input_variable', 'cntk.input_variable', ([], {'shape': '(maxlen,)', 'dtype': 'np.float32'}), '(shape=(maxlen,), dtype=np.float32)\n', (2054, 2089), False, 'import cntk\n'), ((2098, 2147), 'cntk.input_variable', 'cntk.input_variable', ([], {'shape': '(1,)', 'dtype': 'np.float32'}), '(shape=(1,), dtype=np.float32)\n', (2117, 2147), False, 'import cntk\n'), ((2160, 2221), 'cntk.one_hot', 'cntk.one_hot', (['x'], {'num_classes': 'max_features', 'sparse_output': '(True)'}), '(x, num_classes=max_features, sparse_output=True)\n', (2172, 2221), False, 'import cntk\n'), ((2363, 2405), 'cntk.binary_cross_entropy', 'cntk.binary_cross_entropy', (['model.output', 'y'], {}), '(model.output, y)\n', (2388, 2405), False, 'import cntk\n'), ((2430, 2454), 'cntk.round', 'cntk.round', (['model.output'], {}), '(model.output)\n', (2440, 2454), False, 'import cntk\n'), ((2476, 2508), 'cntk.equal', 'cntk.equal', (['round_predictions', 'y'], {}), '(round_predictions, y)\n', (2486, 2508), False, 'import cntk\n'), ((2533, 2573), 'cntk.reduce_mean', 'cntk.reduce_mean', (['equal_elements'], {'axis': '(0)'}), '(equal_elements, axis=0)\n', (2549, 2573), False, 'import cntk\n'), ((2784, 2851), 'cntk.logging.ProgressPrinter', 'cntk.logging.ProgressPrinter', ([], {'tag': '"""Training"""', 'num_epochs': 'max_epochs'}), "(tag='Training', num_epochs=max_epochs)\n", (2812, 2851), False, 'import cntk\n'), ((2866, 2954), 'cntk.Trainer', 'cntk.Trainer', (['model', '(loss_function, accuracy_function)', '[learner]', 'progress_printer'], {}), '(model, (loss_function, accuracy_function), [learner],\n progress_printer)\n', (2878, 2954), False, 'import cntk\n'), ((2967, 3000), 'cntk.Evaluator', 'cntk.Evaluator', (['accuracy_function'], {}), '(accuracy_function)\n', (2981, 3000), False, 'import cntk\n'), ((3206, 3237), 'numpy.expand_dims', 'np.expand_dims', (['y_train'], {'axis': '(1)'}), '(y_train, axis=1)\n', (3220, 3237), True, 'import numpy as np\n'), ((5642, 5691), 'numpy.fromfile', 'np.fromfile', (['"""x_train_imdb.bin"""'], {'dtype': 'np.float32'}), "('x_train_imdb.bin', dtype=np.float32)\n", (5653, 5691), True, 'import numpy as np\n'), ((5706, 5755), 'numpy.fromfile', 'np.fromfile', (['"""y_train_imdb.bin"""'], {'dtype': 'np.float32'}), "('y_train_imdb.bin', dtype=np.float32)\n", (5717, 5755), True, 'import numpy as np\n'), ((5769, 5817), 'numpy.fromfile', 'np.fromfile', (['"""x_test_imdb.bin"""'], {'dtype': 'np.float32'}), "('x_test_imdb.bin', dtype=np.float32)\n", (5780, 5817), True, 'import numpy as np\n'), ((5831, 5879), 'numpy.fromfile', 'np.fromfile', (['"""y_test_imdb.bin"""'], {'dtype': 'np.float32'}), "('y_test_imdb.bin', dtype=np.float32)\n", (5842, 5879), True, 'import numpy as np\n'), ((5894, 5931), 'numpy.reshape', 'np.reshape', (['x_train'], {'newshape': 'x_shape'}), 
'(x_train, newshape=x_shape)\n', (5904, 5931), True, 'import numpy as np\n'), ((5946, 5983), 'numpy.reshape', 'np.reshape', (['y_train'], {'newshape': 'y_shape'}), '(y_train, newshape=y_shape)\n', (5956, 5983), True, 'import numpy as np\n'), ((5997, 6033), 'numpy.reshape', 'np.reshape', (['x_test'], {'newshape': 'x_shape'}), '(x_test, newshape=x_shape)\n', (6007, 6033), True, 'import numpy as np\n'), ((6047, 6083), 'numpy.reshape', 'np.reshape', (['y_test'], {'newshape': 'y_shape'}), '(y_test, newshape=y_shape)\n', (6057, 6083), True, 'import numpy as np\n'), ((7197, 7238), 'os.path.join', 'os.path.join', (['Constants.imdb_dir', '"""train"""'], {}), "(Constants.imdb_dir, 'train')\n", (7209, 7238), False, 'import os\n'), ((7305, 7370), 'keras.preprocessing.text.Tokenizer', 'keras.preprocessing.text.Tokenizer', ([], {'num_words': 'Constants.max_words'}), '(num_words=Constants.max_words)\n', (7339, 7370), False, 'import keras\n'), ((7813, 7891), 'keras.preprocessing.sequence.pad_sequences', 'keras.preprocessing.sequence.pad_sequences', (['sequences'], {'maxlen': 'Constants.maxlen'}), '(sequences, maxlen=Constants.maxlen)\n', (7855, 7891), False, 'import keras\n'), ((7904, 7938), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (7914, 7938), True, 'import numpy as np\n'), ((7952, 7988), 'numpy.asarray', 'np.asarray', (['labels'], {'dtype': 'np.float32'}), '(labels, dtype=np.float32)\n', (7962, 7988), True, 'import numpy as np\n'), ((8297, 8321), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (8306, 8321), True, 'import numpy as np\n'), ((8326, 8352), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (8343, 8352), True, 'import numpy as np\n'), ((8945, 8989), 'os.path.join', 'os.path.join', (['glove_dir', '"""glove.6B.100d.txt"""'], {}), "(glove_dir, 'glove.6B.100d.txt')\n", (8957, 8989), False, 'import os\n'), ((9374, 9399), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (9397, 9399), False, 'import keras\n'), ((9859, 9923), 'cntk.input_variable', 'cntk.input_variable', ([], {'shape': '(Constants.maxlen,)', 'dtype': 'np.float32'}), '(shape=(Constants.maxlen,), dtype=np.float32)\n', (9878, 9923), False, 'import cntk\n'), ((9932, 9981), 'cntk.input_variable', 'cntk.input_variable', ([], {'shape': '(1,)', 'dtype': 'np.float32'}), '(shape=(1,), dtype=np.float32)\n', (9951, 9981), False, 'import cntk\n'), ((9994, 10062), 'cntk.one_hot', 'cntk.one_hot', (['x'], {'num_classes': 'Constants.max_words', 'sparse_output': '(True)'}), '(x, num_classes=Constants.max_words, sparse_output=True)\n', (10006, 10062), False, 'import cntk\n'), ((10585, 10627), 'cntk.binary_cross_entropy', 'cntk.binary_cross_entropy', (['model.output', 'y'], {}), '(model.output, y)\n', (10610, 10627), False, 'import cntk\n'), ((10652, 10676), 'cntk.round', 'cntk.round', (['model.output'], {}), '(model.output)\n', (10662, 10676), False, 'import cntk\n'), ((10698, 10730), 'cntk.equal', 'cntk.equal', (['round_predictions', 'y'], {}), '(round_predictions, y)\n', (10708, 10730), False, 'import cntk\n'), ((10755, 10795), 'cntk.reduce_mean', 'cntk.reduce_mean', (['equal_elements'], {'axis': '(0)'}), '(equal_elements, axis=0)\n', (10771, 10795), False, 'import cntk\n'), ((11006, 11073), 'cntk.logging.ProgressPrinter', 'cntk.logging.ProgressPrinter', ([], {'tag': '"""Training"""', 'num_epochs': 'max_epochs'}), "(tag='Training', num_epochs=max_epochs)\n", (11034, 11073), False, 'import cntk\n'), ((11088, 11176), 
'cntk.Trainer', 'cntk.Trainer', (['model', '(loss_function, accuracy_function)', '[learner]', 'progress_printer'], {}), '(model, (loss_function, accuracy_function), [learner],\n progress_printer)\n', (11100, 11176), False, 'import cntk\n'), ((11189, 11222), 'cntk.Evaluator', 'cntk.Evaluator', (['accuracy_function'], {}), '(accuracy_function)\n', (11203, 11222), False, 'import cntk\n'), ((11420, 11476), 'numpy.zeros', 'np.zeros', (['(Constants.max_words, Constants.embedding_dim)'], {}), '((Constants.max_words, Constants.embedding_dim))\n', (11428, 11476), True, 'import numpy as np\n'), ((12799, 12848), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc', '"""bo"""'], {'label': '"""Training acc"""'}), "(epochs, acc, 'bo', label='Training acc')\n", (12807, 12848), True, 'import matplotlib.pyplot as plt\n'), ((12853, 12907), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_acc', '"""b"""'], {'label': '"""Validation acc"""'}), "(epochs, val_acc, 'b', label='Validation acc')\n", (12861, 12907), True, 'import matplotlib.pyplot as plt\n'), ((12912, 12957), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation accuracy"""'], {}), "('Training and validation accuracy')\n", (12921, 12957), True, 'import matplotlib.pyplot as plt\n'), ((12962, 12974), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12972, 12974), True, 'import matplotlib.pyplot as plt\n'), ((12980, 12992), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12990, 12992), True, 'import matplotlib.pyplot as plt\n'), ((12998, 13049), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss', '"""bo"""'], {'label': '"""Training loss"""'}), "(epochs, loss, 'bo', label='Training loss')\n", (13006, 13049), True, 'import matplotlib.pyplot as plt\n'), ((13054, 13110), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_loss', '"""b"""'], {'label': '"""Validation loss"""'}), "(epochs, val_loss, 'b', label='Validation loss')\n", (13062, 13110), True, 'import matplotlib.pyplot as plt\n'), ((13115, 13156), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation loss"""'], {}), "('Training and validation loss')\n", (13124, 13156), True, 'import matplotlib.pyplot as plt\n'), ((13161, 13173), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13171, 13173), True, 'import matplotlib.pyplot as plt\n'), ((13179, 13189), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13187, 13189), True, 'import matplotlib.pyplot as plt\n'), ((13259, 13299), 'os.path.join', 'os.path.join', (['Constants.imdb_dir', '"""test"""'], {}), "(Constants.imdb_dir, 'test')\n", (13271, 13299), False, 'import os\n'), ((13454, 13532), 'keras.preprocessing.sequence.pad_sequences', 'keras.preprocessing.sequence.pad_sequences', (['sequences'], {'maxlen': 'Constants.maxlen'}), '(sequences, maxlen=Constants.maxlen)\n', (13496, 13532), False, 'import keras\n'), ((13546, 13564), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (13556, 13564), True, 'import numpy as np\n'), ((1259, 1319), 'keras.layers.Embedding', 'keras.layers.Embedding', (['max_features', '(8)'], {'input_length': 'maxlen'}), '(max_features, 8, input_length=maxlen)\n', (1281, 1319), False, 'import keras\n'), ((1527, 1549), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (1547, 1549), False, 'import keras\n'), ((1601, 1644), 'keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1619, 1644), False, 'import keras\n'), ((2234, 2270), 
'cntk.layers.Embedding', 'cntk.layers.Embedding', (['embedding_dim'], {}), '(embedding_dim)\n', (2255, 2270), False, 'import cntk\n'), ((2290, 2335), 'cntk.layers.Dense', 'cntk.layers.Dense', (['(1)'], {'activation': 'cntk.sigmoid'}), '(1, activation=cntk.sigmoid)\n', (2307, 2335), False, 'import cntk\n'), ((2657, 2708), 'cntk.learning_parameter_schedule_per_sample', 'cntk.learning_parameter_schedule_per_sample', (['(0.0001)'], {}), '(0.0001)\n', (2700, 2708), False, 'import cntk\n'), ((2710, 2759), 'cntk.learning_parameter_schedule_per_sample', 'cntk.learning_parameter_schedule_per_sample', (['(0.99)'], {}), '(0.99)\n', (2753, 2759), False, 'import cntk\n'), ((3484, 3495), 'time.time', 'time.time', ([], {}), '()\n', (3493, 3495), False, 'import time\n'), ((3520, 3566), 'numpy.random.permutation', 'np.random.permutation', (['train_features.shape[0]'], {}), '(train_features.shape[0])\n', (3541, 3566), True, 'import numpy as np\n'), ((6643, 6673), 'os.path.join', 'os.path.join', (['path', 'label_type'], {}), '(path, label_type)\n', (6655, 6673), False, 'import os\n'), ((9159, 9198), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (9169, 9198), True, 'import numpy as np\n'), ((9414, 9517), 'keras.layers.Embedding', 'keras.layers.Embedding', (['Constants.max_words', 'Constants.embedding_dim'], {'input_length': 'Constants.maxlen'}), '(Constants.max_words, Constants.embedding_dim,\n input_length=Constants.maxlen)\n', (9436, 9517), False, 'import keras\n'), ((9529, 9551), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (9549, 9551), False, 'import keras\n'), ((9567, 9608), 'keras.layers.Dense', 'keras.layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (9585, 9608), False, 'import keras\n'), ((9624, 9667), 'keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (9642, 9667), False, 'import keras\n'), ((10449, 10492), 'cntk.layers.Dense', 'cntk.layers.Dense', (['(32)'], {'activation': 'cntk.relu'}), '(32, activation=cntk.relu)\n', (10466, 10492), False, 'import cntk\n'), ((10512, 10557), 'cntk.layers.Dense', 'cntk.layers.Dense', (['(1)'], {'activation': 'cntk.sigmoid'}), '(1, activation=cntk.sigmoid)\n', (10529, 10557), False, 'import cntk\n'), ((10879, 10930), 'cntk.learning_parameter_schedule_per_sample', 'cntk.learning_parameter_schedule_per_sample', (['(0.0001)'], {}), '(0.0001)\n', (10922, 10930), False, 'import cntk\n'), ((10932, 10981), 'cntk.learning_parameter_schedule_per_sample', 'cntk.learning_parameter_schedule_per_sample', (['(0.99)'], {}), '(0.99)\n', (10975, 10981), False, 'import cntk\n'), ((132, 161), 'os.path.split', 'os.path.split', (['sys.executable'], {}), '(sys.executable)\n', (145, 161), False, 'import os\n'), ((6761, 6781), 'os.listdir', 'os.listdir', (['dir_name'], {}), '(dir_name)\n', (6771, 6781), False, 'import os\n'), ((10302, 10349), 'cntk.layers.Embedding', 'cntk.layers.Embedding', ([], {'weights': 'embedding_matrix'}), '(weights=embedding_matrix)\n', (10323, 10349), False, 'import cntk\n'), ((10383, 10429), 'cntk.layers.Embedding', 'cntk.layers.Embedding', (['Constants.embedding_dim'], {}), '(Constants.embedding_dim)\n', (10404, 10429), False, 'import cntk\n'), ((6846, 6875), 'os.path.join', 'os.path.join', (['dir_name', 'fname'], {}), '(dir_name, fname)\n', (6858, 6875), False, 'import os\n'), ((4919, 4930), 'time.time', 'time.time', ([], {}), '()\n', (4928, 4930), False, 'import time\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.