#!/usr/bin/python
from gearman.client import GearmanClient
client = GearmanClient(['localhost'])
URL = 'http://ifcb-data.whoi.edu/feed.json'
client.submit_job('spider', URL)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from evaluator.CodeBLEU.parser import DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php, DFG_javascript, DFG_csharp
from evaluator.CodeBLEU.parser import (remove_comments_and_docstrings,
tree_to_token_index,
index_to_code_token,
tree_to_variable_index)
from tree_sitter import Language, Parser
import pdb
parser_path = '/mnt/sda/ys/codeT5/CodeT5/evaluator/CodeBLEU/parser'
dfg_function = {
'python': DFG_python,
'java': DFG_java,
'ruby': DFG_ruby,
'go': DFG_go,
'php': DFG_php,
'javascript': DFG_javascript,
'c_sharp': DFG_csharp,
}
def calc_dataflow_match(references, candidate, lang):
return corpus_dataflow_match([references], [candidate], lang)
def corpus_dataflow_match(references, candidates, lang):
LANGUAGE = Language('{}/my-languages.so'.format(parser_path), lang)
parser = Parser()
parser.set_language(LANGUAGE)
parser = [parser, dfg_function[lang]]
match_count = 0
total_count = 0
for i in range(len(candidates)):
references_sample = references[i]
candidate = candidates[i]
for reference in references_sample:
try:
                candidate = remove_comments_and_docstrings(candidate, lang)
except:
pass
try:
                reference = remove_comments_and_docstrings(reference, lang)
except:
pass
cand_dfg = get_data_flow(candidate, parser)
ref_dfg = get_data_flow(reference, parser)
normalized_cand_dfg = normalize_dataflow(cand_dfg)
normalized_ref_dfg = normalize_dataflow(ref_dfg)
if len(normalized_ref_dfg) > 0:
total_count += len(normalized_ref_dfg)
for dataflow in normalized_ref_dfg:
if dataflow in normalized_cand_dfg:
match_count += 1
normalized_cand_dfg.remove(dataflow)
if total_count == 0:
print(
"WARNING: There is no reference data-flows extracted from the whole corpus, and the data-flow match score degenerates to 0. Please consider ignoring this score.")
return 0
score = match_count / total_count
return score
def get_data_flow(code, parser):
try:
tree = parser[0].parse(bytes(code, 'utf8'))
root_node = tree.root_node
tokens_index = tree_to_token_index(root_node)
code = code.split('\n')
code_tokens = [index_to_code_token(x, code) for x in tokens_index]
index_to_code = {}
for idx, (index, code) in enumerate(zip(tokens_index, code_tokens)):
index_to_code[index] = (idx, code)
try:
DFG, _ = parser[1](root_node, index_to_code, {})
except:
DFG = []
DFG = sorted(DFG, key=lambda x: x[1])
indexs = set()
for d in DFG:
if len(d[-1]) != 0:
indexs.add(d[1])
for x in d[-1]:
indexs.add(x)
new_DFG = []
for d in DFG:
if d[1] in indexs:
new_DFG.append(d)
codes = code_tokens
dfg = new_DFG
except:
codes = code.split()
dfg = []
# merge nodes
dic = {}
for d in dfg:
if d[1] not in dic:
dic[d[1]] = d
else:
dic[d[1]] = (d[0], d[1], d[2], list(set(dic[d[1]][3] + d[3])), list(set(dic[d[1]][4] + d[4])))
DFG = []
for d in dic:
DFG.append(dic[d])
dfg = DFG
return dfg
def normalize_dataflow_item(dataflow_item):
var_name = dataflow_item[0]
var_pos = dataflow_item[1]
relationship = dataflow_item[2]
par_vars_name_list = dataflow_item[3]
par_vars_pos_list = dataflow_item[4]
var_names = list(set(par_vars_name_list + [var_name]))
norm_names = {}
for i in range(len(var_names)):
norm_names[var_names[i]] = 'var_' + str(i)
norm_var_name = norm_names[var_name]
relationship = dataflow_item[2]
norm_par_vars_name_list = [norm_names[x] for x in par_vars_name_list]
return (norm_var_name, relationship, norm_par_vars_name_list)
def normalize_dataflow(dataflow):
var_dict = {}
i = 0
normalized_dataflow = []
for item in dataflow:
var_name = item[0]
relationship = item[2]
par_vars_name_list = item[3]
for name in par_vars_name_list:
if name not in var_dict:
var_dict[name] = 'var_' + str(i)
i += 1
if var_name not in var_dict:
var_dict[var_name] = 'var_' + str(i)
i += 1
normalized_dataflow.append((var_dict[var_name], relationship, [var_dict[x] for x in par_vars_name_list]))
return normalized_dataflow
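# Minimal usage sketch (assumption: the tree-sitter bundle 'my-languages.so' has been
# built under parser_path; the snippets below are illustrative only, not from any corpus):
#   refs = [["def add(a, b):\n    return a + b"]]
#   hyps = ["def add(x, y):\n    return x + y"]
#   print(corpus_dataflow_match(refs, hyps, 'python'))  # score in [0, 1]; matching data-flow graphs give 1.0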
|
# import pandas as p
# file = "weather_data.csv"
# df = p.read_csv(file)
# monday = df[df.day == "Wednesday"]
# print(int(monday.temp) * 9/5 + 32)
# x = (59 - 32) * 5/9
# print(x)
|
import re
from metaflow.plugins.aws.eks.kubernetes import generate_rfc1123_name
rfc1123 = re.compile(r'^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?$')
def test_job_name_sanitizer():
# Basic name
assert rfc1123.match(generate_rfc1123_name('HelloFlow', '1', 'end', '321', '1'))
# Step name ends with _
assert rfc1123.match(generate_rfc1123_name('HelloFlow', '1', '_end', '321', '1'))
# Step name starts and ends with _
assert rfc1123.match(generate_rfc1123_name('HelloFlow', '1', '_end_', '321', '1'))
# Flow name ends with _
assert rfc1123.match(generate_rfc1123_name('HelloFlow_', '1', 'end', '321', '1'))
# Same flow name, different case must produce different job names
assert generate_rfc1123_name('Helloflow', '1', 'end', '321', '1') != generate_rfc1123_name('HelloFlow', '1', 'end', '321', '1')
# Very long step name should be fine
assert rfc1123.match(generate_rfc1123_name('Helloflow', '1', 'end'*50, '321', '1'))
# Very long run id should be fine too
assert rfc1123.match(generate_rfc1123_name('Helloflow', '1'*100, 'end', '321', '1'))
|
import os
import argparse
import time
from tqdm import tqdm
import numpy as np
np.random.seed(0)
import torch
import torch.nn as nn
torch.manual_seed(0)
import torchvision
import torchvision.transforms as transforms
from torchvision.utils import save_image, make_grid
from model import Model
from config import diffusion_config
def _map_gpu(gpu):
if gpu == 'cuda':
return lambda x: x.cuda()
else:
return lambda x: x.to(torch.device('cuda:'+gpu))
def rescale(X, batch=True):
if not batch:
return (X - X.min()) / (X.max() - X.min())
else:
for i in range(X.shape[0]):
X[i] = rescale(X[i], batch=False)
return X
def std_normal(size):
return map_gpu(torch.normal(0, 1, size=size))
def print_size(net):
"""
Print the number of parameters of a network
"""
if net is not None and isinstance(net, torch.nn.Module):
module_parameters = filter(lambda p: p.requires_grad, net.parameters())
params = sum([np.prod(p.size()) for p in module_parameters])
print("{} Parameters: {:.6f}M".format(
net.__class__.__name__, params / 1e6), flush=True)
def calc_diffusion_hyperparams(T, beta_0, beta_T):
"""
Compute diffusion process hyperparameters
Parameters:
T (int): number of diffusion steps
beta_0 and beta_T (float): beta schedule start/end value,
where any beta_t in the middle is linearly interpolated
Returns:
a dictionary of diffusion hyperparameters including:
T (int), Beta/Alpha/Alpha_bar/Sigma (torch.tensor on cpu, shape=(T, ))
"""
Beta = torch.linspace(beta_0, beta_T, T)
Alpha = 1 - Beta
    Alpha_bar = Alpha + 0  # copy of Alpha; becomes the cumulative product below
    Beta_tilde = Beta + 0  # copy of Beta; becomes the posterior variance below
for t in range(1, T):
Alpha_bar[t] *= Alpha_bar[t-1]
Beta_tilde[t] *= (1-Alpha_bar[t-1]) / (1-Alpha_bar[t])
Sigma = torch.sqrt(Beta_tilde)
_dh = {}
_dh["T"], _dh["Beta"], _dh["Alpha"], _dh["Alpha_bar"], _dh["Sigma"] = T, Beta, Alpha, Alpha_bar, Sigma
diffusion_hyperparams = _dh
return diffusion_hyperparams
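# Sanity-check sketch (hypothetical DDPM-style values T=1000, beta_0=1e-4, beta_T=0.02):
#   dh = calc_diffusion_hyperparams(T=1000, beta_0=1e-4, beta_T=0.02)
#   # Alpha_bar is the running product of Alpha, so this should hold:
#   assert torch.allclose(dh["Alpha_bar"], torch.cumprod(1 - dh["Beta"], dim=0))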
def bisearch(f, domain, target, eps=1e-8):
"""
    find x such that f(x) is approximately target (f is assumed monotonically decreasing on the domain)
Parameters:
f (function): function
domain (tuple): x in (left, right)
target (float): target value
Returns:
x (float)
"""
#
sign = -1 if target < 0 else 1
left, right = domain
for _ in range(1000):
x = (left + right) / 2
if f(x) < target:
right = x
elif f(x) > (1 + sign * eps) * target:
left = x
else:
break
return x
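# Illustrative example: for a function that decreases over the domain,
#   bisearch(lambda x: 1.0 / x, domain=(0.1, 10.0), target=2.0)  # ~= 0.5
# the loop bisects until f(x) falls within the eps-band around target (or 1000 iterations pass).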
def get_VAR_noise(S, schedule='linear'):
"""
Compute VAR noise levels
Parameters:
    S (int): approximate diffusion process length
schedule (str): linear or quadratic
Returns:
np array of noise levels, size = (S, )
"""
target = np.prod(1 - np.linspace(diffusion_config["beta_0"], diffusion_config["beta_T"], diffusion_config["T"]))
if schedule == 'linear':
g = lambda x: np.linspace(diffusion_config["beta_0"], x, S)
domain = (diffusion_config["beta_0"], 0.99)
elif schedule == 'quadratic':
g = lambda x: np.array([diffusion_config["beta_0"] * (1+i*x) ** 2 for i in range(S)])
domain = (0.0, 0.95 / np.sqrt(diffusion_config["beta_0"]) / S)
else:
raise NotImplementedError
f = lambda x: np.prod(1 - g(x))
largest_var = bisearch(f, domain, target, eps=1e-4)
return g(largest_var)
def get_STEP_step(S, schedule='linear'):
"""
Compute STEP steps
Parameters:
    S (int): approximate diffusion process length
schedule (str): linear or quadratic
Returns:
np array of steps, size = (S, )
"""
if schedule == 'linear':
c = (diffusion_config["T"] - 1.0) / (S - 1.0)
list_tau = [np.floor(i * c) for i in range(S)]
elif schedule == 'quadratic':
list_tau = np.linspace(0, np.sqrt(diffusion_config["T"] * 0.8), S) ** 2
else:
raise NotImplementedError
return [int(s) for s in list_tau]
def _log_gamma(x):
# Gamma(x+1) ~= sqrt(2\pi x) * (x/e)^x (1 + 1 / 12x)
y = x - 1
return np.log(2 * np.pi * y) / 2 + y * (np.log(y) - 1) + np.log(1 + 1 / (12 * y))
def _log_cont_noise(t, beta_0, beta_T, T):
# We want log_cont_noise(t, beta_0, beta_T, T) ~= np.log(Alpha_bar[-1].numpy())
delta_beta = (beta_T - beta_0) / (T - 1)
_c = (1.0 - beta_0) / delta_beta
t_1 = t + 1
return t_1 * np.log(delta_beta) + _log_gamma(_c + 1) - _log_gamma(_c - t_1 + 1)
# Standard DDPM generation
def STD_sampling(net, size, diffusion_hyperparams):
"""
Perform the complete sampling step according to DDPM
Parameters:
net (torch network): the model
size (tuple): size of tensor to be generated,
        usually (batch size, channels, height, width)
diffusion_hyperparams (dict): dictionary of diffusion hyperparameters returned by calc_diffusion_hyperparams
note, the tensors need to be cuda tensors
Returns:
the generated images in torch.tensor, shape=size
"""
_dh = diffusion_hyperparams
T, Alpha, Alpha_bar, Beta = _dh["T"], _dh["Alpha"], _dh["Alpha_bar"], _dh["Beta"]
assert len(Alpha_bar) == T
assert len(size) == 4
Sigma = _dh["Sigma"]
x = std_normal(size)
with torch.no_grad():
for t in range(T-1, -1, -1):
diffusion_steps = t * map_gpu(torch.ones(size[0]))
epsilon_theta = net(x, diffusion_steps)
x = (x - (1-Alpha[t])/torch.sqrt(1-Alpha_bar[t]) * epsilon_theta) / torch.sqrt(Alpha[t])
if t > 0:
x = x + Sigma[t] * std_normal(size)
return x
# STEP
def STEP_sampling(net, size, diffusion_hyperparams, user_defined_steps, kappa):
"""
Perform the complete sampling step according to https://arxiv.org/pdf/2010.02502.pdf
official repo: https://github.com/ermongroup/ddim
Parameters:
net (torch network): the model
size (tuple): size of tensor to be generated,
        usually (batch size, channels, height, width)
diffusion_hyperparams (dict): dictionary of diffusion hyperparameters returned by calc_diffusion_hyperparams
note, the tensors need to be cuda tensors
user_defined_steps (int list): User defined steps (sorted)
    kappa (float): factor multiplied over sigma, between 0 and 1
Returns:
the generated images in torch.tensor, shape=size
"""
_dh = diffusion_hyperparams
T, Alpha, Alpha_bar, _ = _dh["T"], _dh["Alpha"], _dh["Alpha_bar"], _dh["Sigma"]
assert len(Alpha_bar) == T
assert len(size) == 4
assert 0.0 <= kappa <= 1.0
T_user = len(user_defined_steps)
user_defined_steps = sorted(list(user_defined_steps), reverse=True)
x = std_normal(size)
with torch.no_grad():
for i, tau in enumerate(user_defined_steps):
diffusion_steps = tau * map_gpu(torch.ones(size[0]))
epsilon_theta = net(x, diffusion_steps)
if i == T_user - 1: # the next step is to generate x_0
assert tau == 0
alpha_next = torch.tensor(1.0)
sigma = torch.tensor(0.0)
else:
alpha_next = Alpha_bar[user_defined_steps[i+1]]
sigma = kappa * torch.sqrt((1-alpha_next) / (1-Alpha_bar[tau]) * (1 - Alpha_bar[tau] / alpha_next))
x *= torch.sqrt(alpha_next / Alpha_bar[tau])
c = torch.sqrt(1 - alpha_next - sigma ** 2) - torch.sqrt(1 - Alpha_bar[tau]) * torch.sqrt(alpha_next / Alpha_bar[tau])
x += c * epsilon_theta + sigma * std_normal(size)
return x
# VAR
def _precompute_VAR_steps(diffusion_hyperparams, user_defined_eta):
_dh = diffusion_hyperparams
T, Alpha, Alpha_bar, Beta = _dh["T"], _dh["Alpha"], _dh["Alpha_bar"], _dh["Beta"]
assert len(Alpha_bar) == T
# compute diffusion hyperparameters for user defined noise
T_user = len(user_defined_eta)
Beta_tilde = map_gpu(torch.from_numpy(user_defined_eta)).to(torch.float32)
Gamma_bar = 1 - Beta_tilde
for t in range(1, T_user):
Gamma_bar[t] *= Gamma_bar[t-1]
assert Gamma_bar[0] <= Alpha_bar[0] and Gamma_bar[-1] >= Alpha_bar[-1]
continuous_steps = []
with torch.no_grad():
for t in range(T_user-1, -1, -1):
t_adapted = None
for i in range(T - 1):
if Alpha_bar[i] >= Gamma_bar[t] > Alpha_bar[i+1]:
t_adapted = bisearch(f=lambda _t: _log_cont_noise(_t, Beta[0].cpu().numpy(), Beta[-1].cpu().numpy(), T),
domain=(i-0.01, i+1.01),
target=np.log(Gamma_bar[t].cpu().numpy()))
break
if t_adapted is None:
t_adapted = T - 1
continuous_steps.append(t_adapted) # must be decreasing
return continuous_steps
def VAR_sampling(net, size, diffusion_hyperparams, user_defined_eta, kappa, continuous_steps):
"""
Perform the complete sampling step according to user defined variances
Parameters:
net (torch network): the model
size (tuple): size of tensor to be generated,
        usually (batch size, channels, height, width)
diffusion_hyperparams (dict): dictionary of diffusion hyperparameters returned by calc_diffusion_hyperparams
note, the tensors need to be cuda tensors
user_defined_eta (np.array): User defined noise
    kappa (float): factor multiplied over sigma, between 0 and 1
continuous_steps (list): continuous steps computed from user_defined_eta
Returns:
the generated images in torch.tensor, shape=size
"""
_dh = diffusion_hyperparams
T, Alpha, Alpha_bar, Beta = _dh["T"], _dh["Alpha"], _dh["Alpha_bar"], _dh["Beta"]
assert len(Alpha_bar) == T
assert len(size) == 4
assert 0.0 <= kappa <= 1.0
# compute diffusion hyperparameters for user defined noise
T_user = len(user_defined_eta)
Beta_tilde = map_gpu(torch.from_numpy(user_defined_eta)).to(torch.float32)
Gamma_bar = 1 - Beta_tilde
for t in range(1, T_user):
Gamma_bar[t] *= Gamma_bar[t-1]
assert Gamma_bar[0] <= Alpha_bar[0] and Gamma_bar[-1] >= Alpha_bar[-1]
# print('begin sampling, total number of reverse steps = %s' % T_user)
x = std_normal(size)
with torch.no_grad():
for i, tau in enumerate(continuous_steps):
diffusion_steps = tau * map_gpu(torch.ones(size[0]))
epsilon_theta = net(x, diffusion_steps)
if i == T_user - 1: # the next step is to generate x_0
assert abs(tau) < 0.1
alpha_next = torch.tensor(1.0)
sigma = torch.tensor(0.0)
else:
alpha_next = Gamma_bar[T_user-1-i - 1]
sigma = kappa * torch.sqrt((1-alpha_next) / (1-Gamma_bar[T_user-1-i]) * (1 - Gamma_bar[T_user-1-i] / alpha_next))
x *= torch.sqrt(alpha_next / Gamma_bar[T_user-1-i])
c = torch.sqrt(1 - alpha_next - sigma ** 2) - torch.sqrt(1 - Gamma_bar[T_user-1-i]) * torch.sqrt(alpha_next / Gamma_bar[T_user-1-i])
x += c * epsilon_theta + sigma * std_normal(size)
return x
def generate(output_name, model_path, model_config,
diffusion_config, approxdiff, generation_param,
n_generate, batchsize, n_exist):
"""
Parameters:
output_name (str): save generated images to this folder
model_path (str): checkpoint file
model_config (dic): dic of model config
diffusion_config (dic): dic of diffusion config
generation_param (dic): parameter: user defined variance or user defined steps
approxdiff (str): diffusion style: STD, STEP, VAR
n_generate (int): number of generated samples
    batchsize (int): batch size used for generation
n_exist (int): existing number of samples
Returns:
Generated images (tensor): (B, C, H, W) where C = 3
"""
if batchsize > n_generate:
batchsize = n_generate
assert n_generate % batchsize == 0
if 'generated' not in os.listdir():
os.mkdir('generated')
if output_name not in os.listdir('generated'):
os.mkdir(os.path.join('generated', output_name))
# map diffusion hyperparameters to gpu
diffusion_hyperparams = calc_diffusion_hyperparams(**diffusion_config)
for key in diffusion_hyperparams:
        if key != "T":
diffusion_hyperparams[key] = map_gpu(diffusion_hyperparams[key])
# predefine model
net = Model(**model_config)
print_size(net)
# load checkpoint
try:
checkpoint = torch.load(model_path, map_location='cpu')
net.load_state_dict(checkpoint)
net = map_gpu(net)
net.eval()
print('checkpoint successfully loaded')
except:
raise Exception('No valid model found')
# sampling
C, H, W = model_config["in_channels"], model_config["resolution"], model_config["resolution"]
for i in tqdm(range(n_exist // batchsize, n_generate // batchsize)):
if approxdiff == 'STD':
Xi = STD_sampling(net, (batchsize, C, H, W), diffusion_hyperparams)
elif approxdiff == 'STEP':
user_defined_steps = generation_param["user_defined_steps"]
Xi = STEP_sampling(net, (batchsize, C, H, W),
diffusion_hyperparams,
user_defined_steps,
kappa=generation_param["kappa"])
elif approxdiff == 'VAR':
user_defined_eta = generation_param["user_defined_eta"]
continuous_steps = _precompute_VAR_steps(diffusion_hyperparams, user_defined_eta)
Xi = VAR_sampling(net, (batchsize, C, H, W),
diffusion_hyperparams,
user_defined_eta,
kappa=generation_param["kappa"],
continuous_steps=continuous_steps)
# save image
for j, x in enumerate(rescale(Xi)):
index = i * batchsize + j
save_image(x, fp=os.path.join('generated', output_name, '{}.jpg'.format(index)))
save_image(make_grid(rescale(Xi)[:64]), fp=os.path.join('generated', '{}.jpg'.format(output_name)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# dataset and model
parser.add_argument('-name', '--name', type=str, choices=["cifar10", "lsun_bedroom", "lsun_church", "lsun_cat", "celeba64"],
help='Name of experiment')
parser.add_argument('-ema', '--ema', action='store_true', help='Whether use ema')
# fast generation parameters
parser.add_argument('-approxdiff', '--approxdiff', type=str, choices=['STD', 'STEP', 'VAR'], help='approximate diffusion process')
parser.add_argument('-kappa', '--kappa', type=float, default=1.0, help='factor to be multiplied to sigma')
parser.add_argument('-S', '--S', type=int, default=50, help='number of steps')
parser.add_argument('-schedule', '--schedule', type=str, choices=['linear', 'quadratic'], help='noise level schedules')
# generation util
parser.add_argument('-n', '--n_generate', type=int, help='Number of samples to generate')
parser.add_argument('-bs', '--batchsize', type=int, default=256, help='Batchsize of generation')
parser.add_argument('-gpu', '--gpu', type=str, default='cuda', choices=['cuda']+[str(i) for i in range(16)], help='gpu device')
args = parser.parse_args()
global map_gpu
map_gpu = _map_gpu(args.gpu)
from config import model_config_map
model_config = model_config_map[args.name]
kappa = args.kappa
if args.approxdiff == 'STD':
variance_schedule = '1000'
generation_param = {"kappa": kappa}
elif args.approxdiff == 'VAR': # user defined variance
user_defined_eta = get_VAR_noise(args.S, args.schedule)
generation_param = {"kappa": kappa,
"user_defined_eta": user_defined_eta}
variance_schedule = '{}{}'.format(args.S, args.schedule)
elif args.approxdiff == 'STEP': # user defined step
user_defined_steps = get_STEP_step(args.S, args.schedule)
generation_param = {"kappa": kappa,
"user_defined_steps": user_defined_steps}
variance_schedule = '{}{}'.format(args.S, args.schedule)
else:
raise NotImplementedError
output_name = '{}{}_{}{}_kappa{}'.format('ema_' if args.ema else '',
args.name,
args.approxdiff,
variance_schedule,
kappa)
n_exist = 0
if 'generated' in os.listdir() and output_name in os.listdir('generated'):
if len(os.listdir(os.path.join('generated', output_name))) == args.n_generate:
print('{} already finished'.format(output_name))
n_exist = args.n_generate
else:
n_exist = len(os.listdir(os.path.join('generated', output_name)))
if n_exist < args.n_generate:
if n_exist > 0:
print('{} already generated, resuming'.format(n_exist))
else:
print('start generating')
model_path = os.path.join('checkpoints',
'{}diffusion_{}_model'.format('ema_' if args.ema else '', args.name),
'model.ckpt')
generate(output_name, model_path, model_config,
diffusion_config, args.approxdiff, generation_param,
args.n_generate, args.batchsize, n_exist)
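# Example invocation (script and checkpoint names are hypothetical; flags as defined above):
#   python generate.py --name cifar10 --ema --approxdiff VAR -S 50 --schedule quadratic \
#       --kappa 1.0 -n 256 -bs 64 -gpu cuda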
|
#!/usr/bin/env python
from distutils.core import setup
#### Descriptions
setup(name= "PHOSforUS", version = '1.0.3',
description = "PHOSforUS horizontal information-based phosphorylation site predictor",
url = "https://github.com/bxlab/PHOSforUS",
author = "Min Hyung Cho", author_email = "[email protected]", license = "MIT",
packages = ['phosforus', 'phosforus/accessory_modules'],
package_dir = {'phosforus': 'phosforus', 'accessory_modules': 'phosforus/accessory_modules'},
package_data = {'phosforus': ['preset_indices/eScape_sorted_filled.csv', 'preset_indices/index_reselect.txt', 'preset_params/class_0/*.txt', 'preset_params/class_1/*.txt', 'preset_params/class_2/*.txt', 'preset_params/class_3/*.txt', 'preset_params/class_4/*.txt']},
keywords = ['phosphorylation', 'protein', 'predictor'])
|
"""TcEx JSON Update"""
# standard library
import os
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING: # pragma: no cover
from .tcex_json import TcexJson
class TcexJsonUpdate:
"""Update install.json file with current standards and schema."""
def __init__(self, tj: 'TcexJson') -> None: # pylint: disable=E0601
"""Initialize class properties."""
self.tj = tj
def multiple(self, template: Optional[str] = None) -> None:
"""Update the contents of the tcex.json file."""
# update app_name
self.update_package_app_name()
# update deprecated fields
# self.update_deprecated_fields()
# update package excludes
self.update_package_excludes()
        # update lib versions
self.update_lib_versions()
# update template
if template is not None:
self.tj.template = template
# write updated profile
self.tj.write()
def update_package_app_name(self) -> None:
"""Update the package app_name in the tcex.json file."""
if (
self.tj.model.package.app_name is None
or self.tj.model.package.app_name in self.tj.ij.app_prefixes.values()
):
# lower case name and replace prefix if already exists
_app_name = (
os.path.basename(os.getcwd()).lower().replace(self.tj.ij.app_prefix.lower(), '')
)
# replace spaces and dashes with underscores
_app_name = _app_name.replace(' ', '_').replace('-', '_').lower()
# title case app name
_app_name = '_'.join([a.title() for a in _app_name.split('_')])
# prepend appropriate App prefix (e.g., TCPB_-_)
_app_name = f'{self.tj.ij.app_prefix}{_app_name}'
# update App name
self.tj.model.package.app_name = _app_name
# def update_deprecated_fields(self) -> None:
# """Update deprecated fields in the tcex.json file."""
# deprecated_fields = ['profile_include_dirs']
# for d in deprecated_fields:
# setattr(self.tj.model, d, None)
def update_package_excludes(self) -> None:
"""Update the excludes values in the tcex.json file."""
for i in [
'.gitignore',
'.pre-commit-config.yaml',
'local-*',
'pyproject.toml',
'setup.cfg',
'tcex.json',
]:
if i not in self.tj.model.package.excludes:
# TODO: [low] pydantic doesn't seem to allow removing items from list???
self.tj.model.package.excludes.append(i)
def update_lib_versions(self) -> None:
"""Update the lib_versions array in the tcex.json file."""
if os.getenv('TCEX_LIB_VERSIONS') and not self.tj.model.lib_versions:
_lib_versions = []
for version in os.getenv('TCEX_LIB_VERSIONS').split(','):
_lib_versions.append(
{
'lib_dir': f'lib_${{env:{version}}}',
'python_executable': f'~/.pyenv/versions/${{env:{version}}}/bin/python',
}
)
self.tj.model.lib_versions = _lib_versions
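# Typical usage sketch (assumes `tj` is an already-loaded TcexJson instance; the
# template name below is hypothetical):
#   TcexJsonUpdate(tj).multiple()                       # apply all updates and write tcex.json
#   TcexJsonUpdate(tj).multiple(template='my_template')  # also switch the template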
|
"""
IPython demo to illustrate the use of numpy matrices.
To run, if you have ipydemo.py, do
import ipydemo; ipydemo.rundemo('matrices_ipydemo.py')
"""
import math
import numpy as np
import numpy.fft as fft
import matplotlib.pyplot as plt
def printTypeAndValue(x):
print type(x), "\n", x
N = 8
I = np.matrix(np.identity(N))
printTypeAndValue(I)
printTypeAndValue(I[0])
printTypeAndValue(np.asarray(I[0]))
printTypeAndValue(np.squeeze(np.asarray(I[0])))
printTypeAndValue(np.asarray(I[0])[0])
printTypeAndValue(np.asarray(I[0]).reshape(-1))
printTypeAndValue(np.reshape(np.asarray(I[0]), -1))
# <demo> --- stop ---
# <demo> --- stop ---
for r in I:
a = np.squeeze(np.asarray(r))
print "row", r, "row", a, "fft", fft.fft(a)
DFTMatrix = np.matrix([ fft.fft(np.squeeze(np.asarray(r))) for r in I ])
printTypeAndValue(DFTMatrix)
# <demo> --- stop ---
# <demo> --- stop ---
printTypeAndValue(DFTMatrix.T == DFTMatrix)
printTypeAndValue((DFTMatrix.T == DFTMatrix).all())
print "Is DFT Matrix exactly equal to its transpose?", ("Yes!" if (DFTMatrix.T == DFTMatrix).all() else "No!")
printTypeAndValue(DFTMatrix.T - DFTMatrix)
printTypeAndValue(np.around(DFTMatrix.T - DFTMatrix, 10))
print "Is DFT Matrix nearly equal to its transpose?", ("Yes!" if (abs(DFTMatrix.T - DFTMatrix) < 1e-10).all() else "No!")
# <demo> --- stop ---
# <demo> --- stop ---
A = np.matrix("1, 2; 3 4")
print A
print A.T
print A.I
print A * A.I
# <demo> --- stop ---
print "A * A = ", A * A
print "A**2 = ", A**2
# <demo> --- stop ---
print "np.multiply(A, A) = ", np.multiply(A, A)
# <demo> --- stop ---
# <demo> --- stop ---
x = np.array([1, 2])
xm = np.matrix(x).T
print x, xm, xm.shape
print A * xm
# <demo> --- stop ---
|
"""
This script obtains records from an S3 bucket (e.g., delivered there by Kinesis Firehose)
and writes them to a local file as defined by OUT_FILE. It will exit when no additional
keys have been read for WAIT_TIME.
"""
import os
import sys
import time
import boto3
from botocore import exceptions
from gzip import GzipFile
from io import BytesIO
from retry import retry
# Variables
OUT_FILE = os.getenv('OUT_FILE', '/output/test')
BUCKET_NAME = os.getenv('BUCKET_NAME', 'itests')
CREATE_BUCKET = bool(os.getenv('CREATE_BUCKET', '1'))
ENDPOINT_URL = os.getenv('ENDPOINT_URL', 'http://localstack:4572')
AWS_REGION = os.getenv('AWS_REGION', 'us-east-1')
EXPECTED_COUNT = int(os.getenv('EXPECTED_COUNT', '1')) # expect number of records (only used for logging)
INITIAL_WAIT_TIME = int(os.getenv('S3_POLLER_INITIAL_WAIT_TIME', '90')) # time to wait for initial list of keys
WAIT_TIME = int(os.getenv('S3_POLLER_WAIT_TIME', '10')) # incremental time to wait for new keys if none have been seen
MAP_KEYS_TO_OUTPUT_FILES = bool(os.getenv('S3_POLLER_MAP_KEYS_TO_OUTPUT_FILES', ''))  # whether to write each S3 key to its own output file (default: a single output file)
client = boto3.client('s3',
endpoint_url=ENDPOINT_URL,
region_name=AWS_REGION)
# Create a bucket
@retry(exceptions.EndpointConnectionError, tries=10, delay=.5)
def _create_bucket(name):
print("Trying to create bucket {}".format(name))
return client.create_bucket(
Bucket=name)
@retry(ValueError, tries=10, delay=.5)
def _get_all_s3_keys(bucket):
"""Get a list of all keys in an S3 bucket."""
keys = []
resp = client.list_objects(Bucket=bucket)
file_list = resp['Contents']
for s3_key in file_list:
keys.append(s3_key['Key'])
return keys
if CREATE_BUCKET:
# Create the bucket
print("Creating a bucket")
try:
_create_bucket(BUCKET_NAME)
except exceptions.EndpointConnectionError:
print("Unable to contact endpoint at {}".format(ENDPOINT_URL))
exit(1)
except exceptions.ClientError as e:
if e.response['Error']['Code'] != 'ResourceInUseException':
raise e
# get initial set of keys with a deadline of INITIAL_WAIT_TIME
all_keys = []
timeout_for_first_keys = time.time() + INITIAL_WAIT_TIME
while True:
if time.time() > timeout_for_first_keys:
print("No data received to poller. Exiting.")
exit(1)
print("Getting initial keys list...")
sys.stdout.flush()
try:
all_keys = _get_all_s3_keys(BUCKET_NAME)
break
except KeyError:
time.sleep(1)
pass
all_keys.sort()
key_i = 0
total = 0
print("Records expected: {}".format(EXPECTED_COUNT))
# Start the moving deadline and iterate over new keys
moving_deadline = time.time() + WAIT_TIME
while time.time() <= moving_deadline:
if key_i >= len(all_keys):
# our pointer is past the length of the keys we have seen, so we wait for more...
print("Waiting for more keys...")
sys.stdout.flush()
time.sleep(1)
remote_keys = _get_all_s3_keys(BUCKET_NAME)
if len(remote_keys) > len(all_keys):
# if there are new keys, update our all_keys list and process
all_keys = list(set(all_keys + remote_keys))
all_keys.sort()
# update deadline as if we had new keys
moving_deadline = time.time() + WAIT_TIME
else:
# else, look back around
continue
record_count = 0
# get object data
resp = client.get_object(
Bucket=BUCKET_NAME,
Key=all_keys[key_i],
)
bytestream = BytesIO(resp['Body'].read())
got_text = GzipFile(None, 'rb', fileobj=bytestream).read().decode('utf-8')
records = got_text.split('\n')
# filter out any empty lines
    records = list(filter(None, records))  # materialize so len() works below
sys.stdout.flush()
# By default we only create a single file no matter how many S3 keys we have
_file_num = 0
if MAP_KEYS_TO_OUTPUT_FILES:
_file_num = key_i
with open(OUT_FILE + "." + str(_file_num), "a") as fp:
for record in records:
fp.write(record)
fp.write('\n')
fp.flush()
record_count += len(records)
# update pointer in keys read
key_i += 1
total += record_count
print("total so far: {}".format(total))
if record_count == 0:
time.sleep(1)
sys.stdout.flush()
print("Records read {}".format(total))
sys.stdout.flush()
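# Example run (values are illustrative and the script name is hypothetical; unset
# variables fall back to the defaults above):
#   OUT_FILE=/output/test BUCKET_NAME=itests ENDPOINT_URL=http://localstack:4572 \
#   EXPECTED_COUNT=100 python s3_poller.py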
|
#!/usr/bin/python3
import requests
import argparse
import sys
import time
parser = argparse.ArgumentParser(
sys.argv[0],
description="Port scan a host for the top 1000 ports ",
)
parser.add_argument("--target", help="The SSRF target to be port scanned", default="127.0.0.1")
parser.add_argument("--cookie", help="Session cookie for the nagios user")
parser.add_argument("--nagios", help="URL of the nagios host")
parser.add_argument("-F", help="Scan top 100 ports", action="store_true")
parser.add_argument("-A", help="Scan all ports", action="store_true")
args = parser.parse_args()
if not args.cookie:
print("Gimme cookies! (authenticated session required, use --cookie session_cookie)")
exit(1)
if not args.nagios:
print("Specify the url of the nagios server with --nagios host_url")
exit(1)
top100ports = [80, 23, 443, 21, 22, 25, 3389, 110, 445, 139, 143, 53, 135, 3306, 8080, 1723, 111, 995, 993, 5900, 1025, 587, 8888, 199, 1720, 465, 548, 113, 81, 6001, 10000, 514, 5060, 179, 1026, 2000, 8443, 8000, 32768, 554, 26, 1433, 49152, 2001, 515, 8008, 49154, 1027, 5666, 646, 5000, 5631, 631, 49153, 8081, 2049, 88, 79, 5800, 106, 2121, 1110, 49155, 6000, 513, 990, 5357, 427, 49156, 543, 544, 5101, 144, 7, 389, 8009, 3128, 444, 9999, 5009, 7070, 5190, 3000, 5432, 1900, 3986, 13, 1029, 9, 5051, 6646, 49157, 1028, 873, 1755, 2717, 4899, 9100, 119, 37]
top1000ports = [1, 3, 4, 6, 7, 9, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 30, 32, 33, 37, 42, 43, 49, 53, 70, 79, 80, 81, 82, 83, 84, 85, 88, 89, 90, 99, 100, 106, 109, 110, 111, 113, 119, 125, 135, 139, 143, 144, 146, 161, 163, 179, 199, 211, 212, 222, 254, 255, 256, 259, 264, 280, 301, 306, 311, 340, 366, 389, 406, 407, 416, 417, 425, 427, 443, 444, 445, 458, 464, 465, 481, 497, 500, 512, 513, 514, 515, 524, 541, 543, 544, 545, 548, 554, 555, 563, 587, 593, 616, 617, 625, 631, 636, 646, 648, 666, 667, 668, 683, 687, 691, 700, 705, 711, 714, 720, 722, 726, 749, 765, 777, 783, 787, 800, 801, 808, 843, 873, 880, 888, 898, 900, 901, 902, 903, 911, 912, 981, 987, 990, 992, 993, 995, 999, 1000, 1001, 1002, 1007, 1009, 1010, 1011, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1102, 1104, 1105, 1106, 1107, 1108, 1110, 1111, 1112, 1113, 1114, 1117, 1119, 1121, 1122, 1123, 1124, 1126, 1130, 1131, 1132, 1137, 1138, 1141, 1145, 1147, 1148, 1149, 1151, 1152, 1154, 1163, 1164, 1165, 1166, 1169, 1174, 1175, 1183, 1185, 1186, 1187, 1192, 1198, 1199, 1201, 1213, 1216, 1217, 1218, 1233, 1234, 1236, 1244, 1247, 1248, 1259, 1271, 1272, 1277, 1287, 1296, 1300, 1301, 1309, 1310, 1311, 1322, 1328, 1334, 1352, 1417, 1433, 1434, 1443, 1455, 1461, 1494, 1500, 1501, 1503, 1521, 1524, 1533, 1556, 1580, 1583, 1594, 1600, 1641, 1658, 1666, 1687, 1688, 1700, 1717, 1718, 1719, 1720, 1721, 1723, 1755, 1761, 1782, 1783, 1801, 1805, 1812, 1839, 1840, 1862, 1863, 1864, 1875, 1900, 1914, 1935, 1947, 1971, 1972, 1974, 1984, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2013, 2020, 2021, 2022, 2030, 2033, 2034, 2035, 2038, 2040, 2041, 2042, 2043, 2045, 2046, 2047, 2048, 2049, 2065, 2068, 2099, 2100, 2103, 2105, 2106, 2107, 2111, 2119, 2121, 2126, 2135, 2144, 2160, 2161, 2170, 2179, 2190, 2191, 2196, 2200, 2222, 2251, 2260, 2288, 2301, 2323, 2366, 2381, 2382, 2383, 2393, 2394, 2399, 2401, 2492, 2500, 2522, 2525, 2557, 2601, 2602, 2604, 2605, 2607, 2608, 2638, 2701, 2702, 2710, 2717, 2718, 2725, 2800, 2809, 2811, 2869, 2875, 2909, 2910, 2920, 2967, 2968, 2998, 3000, 3001, 3003, 3005, 3006, 3007, 3011, 3013, 3017, 3030, 3031, 3052, 3071, 3077, 3128, 3168, 3211, 3221, 3260, 3261, 3268, 3269, 3283, 3300, 3301, 3306, 3322, 3323, 3324, 3325, 3333, 3351, 3367, 3369, 3370, 3371, 3372, 3389, 3390, 3404, 3476, 3493, 3517, 3527, 3546, 3551, 3580, 3659, 3689, 3690, 3703, 3737, 3766, 3784, 3800, 3801, 3809, 3814, 3826, 3827, 3828, 3851, 3869, 3871, 3878, 3880, 3889, 3905, 3914, 3918, 3920, 3945, 3971, 3986, 3995, 3998, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4045, 4111, 4125, 4126, 4129, 4224, 4242, 4279, 4321, 4343, 4443, 4444, 4445, 4446, 4449, 4550, 4567, 4662, 4848, 4899, 4900, 4998, 5000, 5001, 5002, 5003, 5004, 5009, 5030, 5033, 5050, 5051, 5054, 5060, 5061, 5080, 5087, 5100, 5101, 5102, 5120, 5190, 5200, 5214, 5221, 5222, 5225, 5226, 5269, 5280, 5298, 5357, 5405, 5414, 5431, 5432, 5440, 5500, 5510, 5544, 5550, 5555, 5560, 5566, 5631, 5633, 5666, 5678, 5679, 5718, 5730, 5800, 5801, 5802, 5810, 5811, 5815, 5822, 5825, 5850, 5859, 5862, 5877, 5900, 5901, 5902, 5903, 5904, 
5906, 5907, 5910, 5911, 5915, 5922, 5925, 5950, 5952, 5959, 5960, 5961, 5962, 5963, 5987, 5988, 5989, 5998, 5999, 6000, 6001, 6002, 6003, 6004, 6005, 6006, 6007, 6009, 6025, 6059, 6100, 6101, 6106, 6112, 6123, 6129, 6156, 6346, 6389, 6502, 6510, 6543, 6547, 6565, 6566, 6567, 6580, 6646, 6666, 6667, 6668, 6669, 6689, 6692, 6699, 6779, 6788, 6789, 6792, 6839, 6881, 6901, 6969, 7000, 7001, 7002, 7004, 7007, 7019, 7025, 7070, 7100, 7103, 7106, 7200, 7201, 7402, 7435, 7443, 7496, 7512, 7625, 7627, 7676, 7741, 7777, 7778, 7800, 7911, 7920, 7921, 7937, 7938, 7999, 8000, 8001, 8002, 8007, 8008, 8009, 8010, 8011, 8021, 8022, 8031, 8042, 8045, 8080, 8081, 8082, 8083, 8084, 8085, 8086, 8087, 8088, 8089, 8090, 8093, 8099, 8100, 8180, 8181, 8192, 8193, 8194, 8200, 8222, 8254, 8290, 8291, 8292, 8300, 8333, 8383, 8400, 8402, 8443, 8500, 8600, 8649, 8651, 8652, 8654, 8701, 8800, 8873, 8888, 8899, 8994, 9000, 9001, 9002, 9003, 9009, 9010, 9011, 9040, 9050, 9071, 9080, 9081, 9090, 9091, 9099, 9100, 9101, 9102, 9103, 9110, 9111, 9200, 9207, 9220, 9290, 9415, 9418, 9485, 9500, 9502, 9503, 9535, 9575, 9593, 9594, 9595, 9618, 9666, 9876, 9877, 9878, 9898, 9900, 9917, 9929, 9943, 9944, 9968, 9998, 9999, 10000, 10001, 10002, 10003, 10004, 10009, 10010, 10012, 10024, 10025, 10082, 10180, 10215, 10243, 10566, 10616, 10617, 10621, 10626, 10628, 10629, 10778, 11110, 11111, 11967, 12000, 12174, 12265, 12345, 13456, 13722, 13782, 13783, 14000, 14238, 14441, 14442, 15000, 15002, 15003, 15004, 15660, 15742, 16000, 16001, 16012, 16016, 16018, 16080, 16113, 16992, 16993, 17877, 17988, 18040, 18101, 18988, 19101, 19283, 19315, 19350, 19780, 19801, 19842, 20000, 20005, 20031, 20221, 20222, 20828, 21571, 22939, 23502, 24444, 24800, 25734, 25735, 26214, 27000, 27352, 27353, 27355, 27356, 27715, 28201, 30000, 30718, 30951, 31038, 31337, 32768, 32769, 32770, 32771, 32772, 32773, 32774, 32775, 32776, 32777, 32778, 32779, 32780, 32781, 32782, 32783, 32784, 32785, 33354, 33899, 34571, 34572, 34573, 35500, 38292, 40193, 40911, 41511, 42510, 44176, 44442, 44443, 44501, 45100, 48080, 49152, 49153, 49154, 49155, 49156, 49157, 49158, 49159, 49160, 49161, 49163, 49165, 49167, 49175, 49176, 49400, 49999, 50000, 50001, 50002, 50003, 50006, 50300, 50389, 50500, 50636, 50800, 51103, 51493, 52673, 52822, 52848, 52869, 54045, 54328, 55055, 55056, 55555, 55600, 56737, 56738, 57294, 57797, 58080, 60020, 60443, 61532, 61900, 62078, 63331, 64623, 64680, 65000, 65129, 65389]
ports = top1000ports
if args.F:
ports = top100ports
if args.A:
    ports = range(1, 65536)
host = ""
if (args.nagios.endswith("/")):
host = args.nagios
else:
host = args.nagios + "/"
print(f"Scanning {args.target}")
print("IP:Request Time")
for port in ports:
start = time.time()
r = requests.get(f"{args.nagios}/nagiosxi/includes/configwizards/hyperv/hyperv-ajax.php", params={ 'ip_address': f"{args.target}:{port}/?" }, headers={ 'cookie': f"nagiosxi={args.cookie}"})
end = time.time()
roundtrip = "{:.3f}".format(end - start)
print(f"{port}:{roundtrip}")
if (len(r.text) > 0):
print(r.text)
sys.stdout.flush()
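# Example invocation (hypothetical host and cookie values; flags as defined above):
#   ./portscan.py --nagios http://nagios.example --cookie <session_cookie> --target 127.0.0.1 -F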
|
from django import template
from badge.models import BadgeAward
register = template.Library()
@register.simple_tag
def badge_count(user):
"""
Returns badge count for a user, valid usage is::
{% badge_count user %}
or
{% badge_count user as badges %}
"""
return BadgeAward.objects.filter(user=user).count()
@register.simple_tag
def badges_for_user(user):
"""
Sets the badges for a given user to a context var. Usage:
{% badges_for_user user as badges %}
"""
return BadgeAward.objects.filter(user=user).order_by("-awarded_at")
@register.inclusion_tag('badge_detail.html')
def badge_detail(badge):
return {'badge': badge}
|
# This problem was recently asked by Microsoft:
# A unival tree is a tree where all the nodes have the same value.
# Given a binary tree, return the number of unival subtrees in the tree.
class Node(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def count_unival_subtrees(root):
# Fill this in.
count = [0]
countrec(root, count)
return count[0]
def countrec(root, count):
if root is None:
return True
left = countrec(root.left, count)
right = countrec(root.right, count)
if left == False or right == False:
return False
if root.left and root.val != root.left.val:
return False
if root.right and root.val != root.right.val:
return False
count[0] += 1
return True
a = Node(0)
a.left = Node(1)
a.right = Node(0)
a.right.left = Node(1)
a.right.right = Node(0)
a.right.left.left = Node(1)
a.right.left.right = Node(1)
print (count_unival_subtrees(a))
# 5
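# Why 5: the unival subtrees above are a.left, a.right.right, a.right.left.left,
# a.right.left.right, and a.right.left (its value and both of its children are 1).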
|
from PyQuantum.Tools.PlotBuilder2D import *
from PyQuantum.Tools.CSV import *
import numpy as np
x = []
y = []
path = 'oout'
# path = 'out_2'
count = 99
for i in range(count+1):
Fidelity_s2_list = list_from_csv(path+'/fidelity_s2' + str(i) + '.csv')
T_list = list_from_csv(path+'/T_list' + str(i) + '.csv')
cnt = list_from_csv(path+'/cnt' + str(i) + '.csv')
# print(T_list)
T_out = T_list[0]
for t in range(1, len(T_list)):
T_out += T_list[t] - T_list[t-1]
# T_avg = T_out
T_out /= cnt[0]
T_avg = sum([t for t in T_list]) / cnt[0]
# print(i, ': ', T_sum, ', ', cnt, sep='')
T_str = None
if max(T_list) >= 1e-3:
T_str = 'ms'
T_avg *= 1e3
T_out *= 1e3
# T_list = [i * 1e3 for i in T_list]
elif max(T_list) >= 1e-6:
T_str = 'mks'
T_avg *= 1e6
T_out *= 1e6
# T_list = [i * 1e6 for i in T_list]
elif max(T_list) >= 1e-9:
T_str = 'ns'
T_avg *= 1e9
T_out *= 1e9
# T_list = [i * 1e9 for i in T_list]
# print(Fidelity_s2_list[0], T_out)
y.append(T_avg)
x.append(T_out)
# x.append(Fidelity_s2_list[0])
# x.append(i)
# y.append(T_out)
# print(x)
# print(y)
# exit(0)
data = [go.Scatter(
x=x,
y=y,
mode='markers',
# x=T_list[1:],
# y=df(T_list, sink_list),
# name=w_0,
)]
make_plot({
'to_file': False,
'online': False,
'data': data,
'x_title': 'time, ' + str(T_str),
'y_title': 'sink',
'title': 'avg',
'html': 'avg.html',
})
|
import torch
from torch.nn import functional as F
class PGD:
"""
"""
@staticmethod
def attack(image, label, device, model, epsilon=0.3, steps=50, step_size = 2/255, no_kl=True):
model.zero_grad()
# random start
perturbation = torch.zeros_like(image).uniform_(-epsilon, epsilon)
perturbation.requires_grad = True
perturbed_image = image + perturbation
        perturbed_image = perturbed_image.to(device)
perturbed_image = torch.clamp(perturbed_image, 0, 1)
        perturbed_image = perturbed_image.detach().requires_grad_(True)
for _ in range(steps):
output = model(perturbed_image, no_kl)
loss = F.nll_loss(output, label)
#loss.backward()
grad = torch.autograd.grad(loss, perturbed_image, retain_graph=False, create_graph=False)[0]
sign_data_grad = grad.sign()
perturbed_image = perturbed_image + sign_data_grad * step_size
# clamp pertubation only to (-epsilon, epsilon)
perturbation = torch.clamp(perturbed_image - image, min=-epsilon, max=epsilon)
# clamp the whole image to (0, 1)
perturbed_image = torch.clamp(image + perturbation, min=0, max=1)
#perturbed_image.grad.zero_()
return perturbed_image
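# Minimal usage sketch (assumes `model(x, no_kl)` returns log-probabilities, as
# F.nll_loss above expects; epsilon/steps values below are illustrative):
#   adv_images = PGD.attack(images, labels, device, model,
#                           epsilon=8/255, steps=10, step_size=2/255)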
|
# Addition
tabb = [1+1]
tab = [2+1]
tab_2 = [3+1]
tab_3 = [4+1]
tab_4 = [5+1]
tab_5 = [6+1]
tab_6 = [7+1]
tab_7 = [8+1]
tab_8 = [9+1]
tab_9 = [10+1]
for y in tabb:
print("1+1 = {}".format(y))
for i in tab:
print("2+1 = {}".format(i))
for a in tab_2:
print("3+1 = {}".format(a))
for b in tab_3:
print("4+1 = {}".format(b))
for c in tab_4:
print("5+1 = {}".format(c))
for d in tab_5:
print("6+1 = {}".format(d))
for e in tab_6:
print("7+1 = {}".format(e))
for f in tab_7:
print("8+1 = {}".format(f))
for g in tab_8:
print("9+1 = {}".format(g))
for h in tab_9:
print("10+1 = {}".format(h))
print ("----------------------------------------------")
tab_1 = [1+2]
tab_2 = [2+2]
tab_3 = [3+2]
tab_4 = [4+2]
tab_5 = [5+2]
tab_6 = [6+2]
tab_7 = [7+2]
tab_8 = [8+2]
tab_9 = [9+2]
tab_10 = [10+2]
for y in tab_1:
print("1+2 = {}".format(y))
for i in tab_2:
print("2+2 = {}".format(i))
for a in tab_3:
print("3+2 = {}".format(a))
for b in tab_4:
print("4+2 = {}".format(b))
for c in tab_5:
print("5+2 = {}".format(c))
for d in tab_6:
print("6+2 = {}".format(d))
for e in tab_7:
print("7+2 = {}".format(e))
for f in tab_8:
print("8+2 = {}".format(f))
for g in tab_9:
print("9+2 = {}".format(g))
for h in tab_10:
print("10+2 = {}".format(h))
print ("----------------------------------------------")
tab_1 = [1+3]
tab_2 = [2+3]
tab_3 = [3+3]
tab_4 = [4+3]
tab_5 = [5+3]
tab_6 = [6+3]
tab_7 = [7+3]
tab_8 = [8+3]
tab_9 = [9+3]
tab_10 = [10+3]
for y in tab_1:
print("1+3 = {}".format(y))
for i in tab_2:
print("2+3 = {}".format(i))
for a in tab_3:
print("3+3 = {}".format(a))
for b in tab_4:
print("4+3 = {}".format(b))
for c in tab_5:
print("5+3 = {}".format(c))
for d in tab_6:
print("6+3 = {}".format(d))
for e in tab_7:
print("7+3 = {}".format(e))
for f in tab_8:
print("8+3 = {}".format(f))
for g in tab_9:
print("9+3 = {}".format(g))
for h in tab_10:
print("10+3 = {}".format(h))
print ("----------------------------------------------")
tab_1 = [1+4]
tab_2 = [2+4]
tab_3 = [3+4]
tab_4 = [4+4]
tab_5 = [5+4]
tab_6 = [6+4]
tab_7 = [7+4]
tab_8 = [8+4]
tab_9 = [9+4]
tab_10 = [10+4]
for y in tab_1:
print("1+4 = {}".format(y))
for i in tab_2:
print("2+4 = {}".format(i))
for a in tab_3:
print("3+4 = {}".format(a))
for b in tab_4:
print("4+4 = {}".format(b))
for c in tab_5:
print("5+4 = {}".format(c))
for d in tab_6:
print("6+4 = {}".format(d))
for e in tab_7:
print("7+4 = {}".format(e))
for f in tab_8:
print("8+4 = {}".format(f))
for g in tab_9:
print("9+4 = {}".format(g))
for h in tab_10:
print("10+4 = {}".format(h))
print ("----------------------------------------------")
# Subtraction
tabb = [1-1]
tab = [2-1]
tab_2 = [3-1]
tab_3 = [4-1]
tab_4 = [5-1]
tab_5 = [6-1]
tab_6 = [7-1]
tab_7 = [8-1]
tab_8 = [9-1]
tab_9 = [10-1]
for y in tabb:
print("1-1 = {}".format(y))
for i in tab:
print("2-1 = {}".format(i))
for a in tab_2:
print("3-1 = {}".format(a))
for b in tab_3:
print("4-1 = {}".format(b))
for c in tab_4:
print("5-1 = {}".format(c))
for d in tab_5:
print("6-1 = {}".format(d))
for e in tab_6:
print("7-1 = {}".format(e))
for f in tab_7:
print("8-1 = {}".format(f))
for g in tab_8:
print("9-1 = {}".format(g))
for h in tab_9:
print("10-1 = {}".format(h))
print ("----------------------------------------------")
tab_1 = [1-2]
tab_2 = [2-2]
tab_3 = [3-2]
tab_4 = [4-2]
tab_5 = [5-2]
tab_6 = [6-2]
tab_7 = [7-2]
tab_8 = [8-2]
tab_9 = [9-2]
tab_10 = [10-2]
for y in tab_1:
print("1-2 = {}".format(y))
for i in tab_2:
print("2-2 = {}".format(i))
for a in tab_3:
print("3-2 = {}".format(a))
for b in tab_4:
print("4-2 = {}".format(b))
for c in tab_5:
print("5-2 = {}".format(c))
for d in tab_6:
print("6-2 = {}".format(d))
for e in tab_7:
print("7-2 = {}".format(e))
for f in tab_8:
print("8-2 = {}".format(f))
for g in tab_9:
print("9-2 = {}".format(g))
for h in tab_10:
print("10-2 = {}".format(h))
print ("----------------------------------------------")
tab_1 = [1-3]
tab_2 = [2-3]
tab_3 = [3-3]
tab_4 = [4-3]
tab_5 = [5-3]
tab_6 = [6-3]
tab_7 = [7-3]
tab_8 = [8-3]
tab_9 = [9-3]
tab_10 = [10-3]
for y in tab_1:
print("1-3 = {}".format(y))
for i in tab_2:
print("2-3 = {}".format(i))
for a in tab_3:
print("3-3 = {}".format(a))
for b in tab_4:
print("4-3 = {}".format(b))
for c in tab_5:
print("5-3 = {}".format(c))
for d in tab_6:
print("6-3 = {}".format(d))
for e in tab_7:
print("7-3 = {}".format(e))
for f in tab_8:
print("8-3 = {}".format(f))
for g in tab_9:
print("9-3 = {}".format(g))
for h in tab_10:
print("10-3 = {}".format(h))
print ("----------------------------------------------")
tab_1 = [1-4]
tab_2 = [2-4]
tab_3 = [3-4]
tab_4 = [4-4]
tab_5 = [5-4]
tab_6 = [6-4]
tab_7 = [7-4]
tab_8 = [8-4]
tab_9 = [9-4]
tab_10 = [10-4]
for y in tab_1:
print("1-4 = {}".format(y))
for i in tab_2:
print("2-4 = {}".format(i))
for a in tab_3:
print("3-4 = {}".format(a))
for b in tab_4:
print("4-4 = {}".format(b))
for c in tab_5:
print("5-4 = {}".format(c))
for d in tab_6:
print("6-4 = {}".format(d))
for e in tab_7:
print("7-4 = {}".format(e))
for f in tab_8:
print("8-4 = {}".format(f))
for g in tab_9:
print("9-4 = {}".format(g))
for h in tab_10:
print("10-4 = {}".format(h))
print ("---------------------- Fim ------------------------")
|
import asyncio
import discord
import random
import json
import os
import io
import safygiphy
import requests
from discord.ext import commands
g = safygiphy.Giphy()
voz = True
if not discord.opus.is_loaded():
discord.opus.load_opus('libopus.so')
class VoiceEntry:
def __init__(self, message, player):
self.requester = message.author
self.channel = message.channel
self.player = player
def __str__(self):
fmt = '*{0.title}* uploaded by {0.uploader} and requested by {1.display_name}'
duration = self.player.duration
if duration:
fmt = fmt + ' [length: {0[0]}m {0[1]}s]'.format(divmod(duration, 60))
return fmt.format(self.player, self.requester)
class VoiceState:
def __init__(self, bot):
self.current = None
self.voice = None
self.bot = bot
self.play_next_song = asyncio.Event()
self.songs = asyncio.Queue()
self.skip_votes = set() # a set of user_ids that voted
self.audio_player = self.bot.loop.create_task(self.audio_player_task())
def is_playing(self):
if self.voice is None or self.current is None:
return False
player = self.current.player
return not player.is_done()
@property
def player(self):
return self.current.player
def skip(self):
self.skip_votes.clear()
if self.is_playing():
self.player.stop()
def toggle_next(self):
self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
async def audio_player_task(self):
while True:
self.play_next_song.clear()
self.current = await self.songs.get()
await self.bot.send_message(self.current.channel, 'Now playing ' + str(self.current))
self.current.player.start()
await self.play_next_song.wait()
class Music:
"""Voice related commands.
Works in multiple servers at once.
"""
def __init__(self, bot):
self.bot = bot
self.voice_states = {}
def get_voice_state(self, server):
state = self.voice_states.get(server.id)
if state is None:
state = VoiceState(self.bot)
self.voice_states[server.id] = state
return state
async def create_voice_client(self, channel):
voice = await self.bot.join_voice_channel(channel)
state = self.get_voice_state(channel.server)
state.voice = voice
def __unload(self):
for state in self.voice_states.values():
try:
state.audio_player.cancel()
if state.voice:
self.bot.loop.create_task(state.voice.disconnect())
except:
pass
@commands.command(pass_context=True, no_pm=True)
async def join(self, ctx, *, channel : discord.Channel):
"""Joins a voice channel."""
try:
await self.create_voice_client(channel)
except discord.ClientException:
await self.bot.say('Already in a voice channel...')
except discord.InvalidArgument:
await self.bot.say('This is not a voice channel...')
else:
await self.bot.say('Ready to play audio in ' + channel.name)
@commands.command(pass_context=True, no_pm=True)
async def summon(self, ctx):
"""Summons the bot to join your voice channel."""
summoned_channel = ctx.message.author.voice_channel
if summoned_channel is None:
await self.bot.say('You are not in a voice channel.')
return False
state = self.get_voice_state(ctx.message.server)
if state.voice is None:
state.voice = await self.bot.join_voice_channel(summoned_channel)
else:
await state.voice.move_to(summoned_channel)
return True
@commands.command(pass_context=True, no_pm=True)
async def play(self, ctx, *, song : str):
"""Plays a song.
If there is a song currently in the queue, then it is
queued until the next song is done playing.
This command automatically searches as well from YouTube.
The list of supported sites can be found here:
https://rg3.github.io/youtube-dl/supportedsites.html
"""
state = self.get_voice_state(ctx.message.server)
opts = {
'default_search': 'auto',
'quiet': True,
}
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next)
except Exception as e:
fmt = 'An error occurred while processing this request: ```py\n{}: {}\n```'
await self.bot.send_message(ctx.message.channel, fmt.format(type(e).__name__, e))
else:
player.volume = 0.6
entry = VoiceEntry(ctx.message, player)
await self.bot.say('Enqueued ' + str(entry))
await state.songs.put(entry)
@commands.command(pass_context=True, no_pm=True)
async def volume(self, ctx, value : int):
"""Sets the volume of the currently playing song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.volume = value / 100
await self.bot.say('Set the volume to {:.0%}'.format(player.volume))
@commands.command(pass_context=True, no_pm=True)
async def pause(self, ctx):
"""Pauses the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.pause()
@commands.command(pass_context=True, no_pm=True)
async def resume(self, ctx):
"""Resumes the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.resume()
@commands.command(pass_context=True, no_pm=True)
async def stop(self, ctx):
"""Stops playing audio and leaves the voice channel.
This also clears the queue.
"""
server = ctx.message.server
state = self.get_voice_state(server)
if state.is_playing():
player = state.player
player.stop()
try:
state.audio_player.cancel()
del self.voice_states[server.id]
await state.voice.disconnect()
except:
pass
@commands.command(pass_context=True, no_pm=True)
async def leave(self, ctx):
server = ctx.message.server
state = self.get_voice_state(server)
if not state.is_playing():
player = state.player
player.stop()
if not state.is_playing():
del self.voice_states[server.id]
await state.voice.disconnect()
@commands.command(pass_context=True, no_pm=True)
async def skip(self, ctx):
"""Vote to skip a song. The song requester can automatically skip.
3 skip votes are needed for the song to be skipped.
"""
state = self.get_voice_state(ctx.message.server)
if not state.is_playing():
await self.bot.say('Not playing any music right now...')
return
voter = ctx.message.author
if voter == state.current.requester:
await self.bot.say('Requester requested skipping song...')
state.skip()
elif voter.id not in state.skip_votes:
state.skip_votes.add(voter.id)
total_votes = len(state.skip_votes)
if total_votes >= 3:
await self.bot.say('Skip vote passed, skipping song...')
state.skip()
else:
await self.bot.say('Skip vote added, currently at [{}/3]'.format(total_votes))
else:
await self.bot.say('You have already voted to skip this song.')
@commands.command(pass_context=True, no_pm=True)
async def playing(self, ctx):
"""Shows info about the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.current is None:
await self.bot.say('Not playing anything.')
else:
skip_count = len(state.skip_votes)
await self.bot.say('Now playing {} [skips: {}/3]'.format(state.current, skip_count))
@commands.command(pass_context=True, no_pm=True)
async def move(self, ctx, member: discord.Member, channel: discord.Channel):
        await self.bot.move_member(member, channel)
@commands.command(pass_context=True, no_pm=True)
async def ohmaigad(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/ohmaigad.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def sotelo(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/sotelo.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def sotelo2(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/sotelo2.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def sostenlo(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/sostenlo.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def fonsi(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/fonsi.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def agusto(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/pacheco.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def pacheco(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/agusto.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def jalo(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/jalo.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def pacheco2(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/pacheco2.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def vaquero(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/vaquero.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def pinky(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/pinky.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def gag(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/gag.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def sotelo3(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/sotelo3.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def sotelo4(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/sotelo4.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def patito(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/nose.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def sotelo5(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/sotelo5.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def sotelovv(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/sotelovv.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def venga(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/venga.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
@commands.command(pass_context=True, no_pm=True)
async def sostenlo2(self, ctx):
global voz
server = ctx.message.server
state = self.get_voice_state(server)
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
if voz is True:
player = state.voice.create_ffmpeg_player('./Audio/sostenlo2.mp3')
player.start()
while not player.is_done():
voz = False
voz = True
except:
pass
else:
return
bot = commands.Bot(command_prefix=commands.when_mentioned_or('$'), description='A playlist example for discord.py')
bot.add_cog(Music(bot))
@bot.event
async def on_ready():
print('Logged in as:\n{0} (ID: {0.id})'.format(bot.user))
await bot.change_presence(game=discord.Game(name="Pollo"))
@bot.event
async def on_message(message):
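    # Plain-text commands handled outside the cog: saving and recalling phrases,
    # sending GIFs, and printing the help text.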
if message.content.startswith('!guardarfrase'):
if not os.path.isfile("./Frases/frase_file.pk1"):
frase_list = []
else:
with open("./Frases/frase_file.pk1" , "r") as frase_file:
frase_list = json.load(frase_file)
frase_list.append(message.content[13:])
with open("./Frases/frase_file.pk1" , "w") as frase_file:
json.dump(frase_list, frase_file)
elif message.content.startswith('!frase'):
with open("./Frases/frase_file.pk1" , "r") as frase_file:
frase_list = json.load(frase_file)
await bot.send_message(message.channel , random.choice(frase_list))
if message.content.startswith('!kiss'):
response = requests.get("https://media.giphy.com/media/fBS8d3MublSPmrb3Ys/giphy.gif", stream=True)
await bot.send_file(message.channel, io.BytesIO(response.raw.read()), filename='kiss.gif', content='Sotelo kiss Gif.')
if message.content.startswith('!sotelo'):
response = requests.get("https://media.giphy.com/media/C8975W8loq6omiX8QC/giphy.gif", stream=True)
await bot.send_file(message.channel, io.BytesIO(response.raw.read()), filename='ganzo.gif', content='Aahh aaah Soteloo! Gif.')
if message.content.startswith('!help'):
await bot.send_message(message.channel, 'Comandos:\n!help\n@Hector-Lagarda stop\nFrases:\n!frase\nGifs:\n!kiss\n!sotelo\nAudios:\n@Hector-Lagarda sotelo\n@Hector-Lagarda sotelo2\n@Hector-Lagarda sotelo3\n@Hector-Lagarda sotelo4\n@Hector-Lagarda sotelo5\n@Hector-Lagarda sostenlo\n@Hector-Lagarda sostenlo2\n@Hector-Lagarda fonsi\n@Hector-Lagarda ohmaigad\n@Hector-Lagarda pacheco\n@Hector-Lagarda agusto\n@Hector-Lagarda jalo\n@Hector-Lagarda pacheco2\n@Hector-Lagarda gag\n@Hector-Lagarda pinky\n@Hector-Lagarda vaquero\n@Hector-Lagarda venga\n@Hector-Lagarda patito\n@Hector-Lagarda sotelovv')
await bot.process_commands(message)
TOKEN = os.environ.get('TOKEN', None)
print(TOKEN)
bot.run(TOKEN)
|
from . import __version__ as app_version
app_name = "gwt"
app_title = "GWT"
app_publisher = "ERP Cloud Systems"
app_description = "Custom App"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "[email protected]"
app_license = "MIT"
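# Route standard DocType lifecycle events to handler functions in
# gwt.event_triggers for each of the transactional doctypes listed below.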
doc_events = {
"Quotation": {
"onload": "gwt.event_triggers.quot_onload",
"before_validate": "gwt.event_triggers.quot_before_validate",
"validate": "gwt.event_triggers.quot_validate",
"on_submit": "gwt.event_triggers.quot_on_submit",
"on_cancel": "gwt.event_triggers.quot_on_cancel",
"on_update_after_submit": "gwt.event_triggers.quot_on_update_after_submit",
"before_save": "gwt.event_triggers.quot_before_save",
"before_cancel": "gwt.event_triggers.quot_before_cancel",
"on_update": "gwt.event_triggers.quot_on_update",
},
"Sales Invoice": {
"onload": "gwt.event_triggers.siv_onload",
"before_validate": "gwt.event_triggers.siv_before_validate",
"validate": "gwt.event_triggers.siv_validate",
"on_submit": "gwt.event_triggers.siv_on_submit",
"on_cancel": "gwt.event_triggers.siv_on_cancel",
"on_update_after_submit": "gwt.event_triggers.siv_on_update_after_submit",
"before_save": "gwt.event_triggers.siv_before_save",
"before_cancel": "gwt.event_triggers.siv_before_cancel",
"on_update": "gwt.event_triggers.siv_on_update",
},
"Sales Order": {
"onload": "gwt.event_triggers.so_onload",
"before_validate": "gwt.event_triggers.so_before_validate",
"validate": "gwt.event_triggers.so_validate",
"on_submit": "gwt.event_triggers.so_on_submit",
"on_cancel": "gwt.event_triggers.so_on_cancel",
"on_update_after_submit": "gwt.event_triggers.so_on_update_after_submit",
"before_save": "gwt.event_triggers.so_before_save",
"before_cancel": "gwt.event_triggers.so_before_cancel",
"on_update": "gwt.event_triggers.so_on_update",
},
"Material Request": {
"onload": "gwt.event_triggers.mr_onload",
"before_validate": "gwt.event_triggers.mr_before_validate",
"validate": "gwt.event_triggers.mr_validate",
"on_submit": "gwt.event_triggers.mr_on_submit",
"on_cancel": "gwt.event_triggers.mr_on_cancel",
"on_update_after_submit": "gwt.event_triggers.mr_on_update_after_submit",
"before_save": "gwt.event_triggers.mr_before_save",
"before_cancel": "gwt.event_triggers.mr_before_cancel",
"on_update": "gwt.event_triggers.mr_on_update",
},
"Stock Entry": {
"onload": "gwt.event_triggers.ste_onload",
"before_validate": "gwt.event_triggers.ste_before_validate",
"validate": "gwt.event_triggers.ste_validate",
"on_submit": "gwt.event_triggers.ste_on_submit",
"on_cancel": "gwt.event_triggers.ste_on_cancel",
"on_update_after_submit": "gwt.event_triggers.ste_on_update_after_submit",
"before_save": "gwt.event_triggers.ste_before_save",
"before_cancel": "gwt.event_triggers.ste_before_cancel",
"on_update": "gwt.event_triggers.ste_on_update",
},
"Delivery Note": {
"onload": "gwt.event_triggers.dn_onload",
"before_validate": "gwt.event_triggers.dn_before_validate",
"validate": "gwt.event_triggers.dn_validate",
"on_submit": "gwt.event_triggers.dn_on_submit",
"on_cancel": "gwt.event_triggers.dn_on_cancel",
"on_update_after_submit": "gwt.event_triggers.dn_on_update_after_submit",
"before_save": "gwt.event_triggers.dn_before_save",
"before_cancel": "gwt.event_triggers.dn_before_cancel",
"on_update": "gwt.event_triggers.dn_on_update",
},
"Purchase Order": {
"onload": "gwt.event_triggers.po_onload",
"before_validate": "gwt.event_triggers.po_before_validate",
"validate": "gwt.event_triggers.po_validate",
"on_submit": "gwt.event_triggers.po_on_submit",
"on_cancel": "gwt.event_triggers.po_on_cancel",
"on_update_after_submit": "gwt.event_triggers.po_on_update_after_submit",
"before_save": "gwt.event_triggers.po_before_save",
"before_cancel": "gwt.event_triggers.po_before_cancel",
"on_update": "gwt.event_triggers.po_on_update",
},
"Purchase Receipt": {
"onload": "gwt.event_triggers.pr_onload",
"before_validate": "gwt.event_triggers.pr_before_validate",
"validate": "gwt.event_triggers.pr_validate",
"on_submit": "gwt.event_triggers.pr_on_submit",
"on_cancel": "gwt.event_triggers.pr_on_cancel",
"on_update_after_submit": "gwt.event_triggers.pr_on_update_after_submit",
"before_save": "gwt.event_triggers.pr_before_save",
"before_cancel": "gwt.event_triggers.pr_before_cancel",
"on_update": "gwt.event_triggers.pr_on_update",
},
"Purchase Invoice": {
"onload": "gwt.event_triggers.piv_onload",
"before_validate": "gwt.event_triggers.piv_before_validate",
"validate": "gwt.event_triggers.piv_validate",
"on_submit": "gwt.event_triggers.piv_on_submit",
"on_cancel": "gwt.event_triggers.piv_on_cancel",
"on_update_after_submit": "gwt.event_triggers.piv_on_update_after_submit",
"before_save": "gwt.event_triggers.piv_before_save",
"before_cancel": "gwt.event_triggers.piv_before_cancel",
"on_update": "gwt.event_triggers.piv_on_update",
},
"Payment Entry": {
"onload": "gwt.event_triggers.pe_onload",
"before_validate": "gwt.event_triggers.pe_before_validate",
"validate": "gwt.event_triggers.pe_validate",
"on_submit": "gwt.event_triggers.pe_on_submit",
"on_cancel": "gwt.event_triggers.pe_on_cancel",
"on_update_after_submit": "gwt.event_triggers.pe_on_update_after_submit",
"before_save": "gwt.event_triggers.pe_before_save",
"before_cancel": "gwt.event_triggers.pe_before_cancel",
"on_update": "gwt.event_triggers.pe_on_update",
},
"Blanket Order": {
"onload": "gwt.event_triggers.blank_onload",
"before_validate": "gwt.event_triggers.blank_before_validate",
"validate": "gwt.event_triggers.blank_validate",
"on_submit": "gwt.event_triggers.blank_on_submit",
"on_cancel": "gwt.event_triggers.blank_on_cancel",
"on_update_after_submit": "gwt.event_triggers.blank_on_update_after_submit",
"before_save": "gwt.event_triggers.blank_before_save",
"before_cancel": "gwt.event_triggers.blank_before_cancel",
"on_update": "gwt.event_triggers.blank_on_update",
},
"Expense Claim": {
"onload": "gwt.event_triggers.excl_onload",
"before_validate": "gwt.event_triggers.excl_before_validate",
"validate": "gwt.event_triggers.excl_validate",
"on_submit": "gwt.event_triggers.excl_on_submit",
"on_cancel": "gwt.event_triggers.excl_on_cancel",
"on_update_after_submit": "gwt.event_triggers.excl_on_update_after_submit",
"before_save": "gwt.event_triggers.excl_before_save",
"before_cancel": "gwt.event_triggers.excl_before_cancel",
"on_update": "gwt.event_triggers.excl_on_update",
},
"Employee Advance": {
"onload": "gwt.event_triggers.emad_onload",
"before_validate": "gwt.event_triggers.emad_before_validate",
"validate": "gwt.event_triggers.emad_validate",
"on_submit": "gwt.event_triggers.emad_on_submit",
"on_cancel": "gwt.event_triggers.emad_on_cancel",
"on_update_after_submit": "gwt.event_triggers.emad_on_update_after_submit",
"before_save": "gwt.event_triggers.emad_before_save",
"before_cancel": "gwt.event_triggers.emad_before_cancel",
"on_update": "gwt.event_triggers.emad_on_update",
},
"Loan": {
"onload": "gwt.event_triggers.loan_onload",
"before_validate": "gwt.event_triggers.loan_before_validate",
"validate": "gwt.event_triggers.loan_validate",
"on_submit": "gwt.event_triggers.loan_on_submit",
"on_cancel": "gwt.event_triggers.loan_on_cancel",
"on_update_after_submit": "gwt.event_triggers.loan_on_update_after_submit",
"before_save": "gwt.event_triggers.loan_before_save",
"before_cancel": "gwt.event_triggers.loan_before_cancel",
"on_update": "gwt.event_triggers.loan_on_update",
},
}
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/gwt/css/gwt.css"
# app_include_js = "/assets/gwt/js/gwt.js"
# include js, css files in header of web template
# web_include_css = "/assets/gwt/css/gwt.css"
# web_include_js = "/assets/gwt/js/gwt.js"
# include custom scss in every website theme (without file extension ".scss")
# website_theme_scss = "gwt/public/scss/website"
# include js, css files in header of web form
# webform_include_js = {"doctype": "public/js/doctype.js"}
# webform_include_css = {"doctype": "public/css/doctype.css"}
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "gwt.install.before_install"
# after_install = "gwt.install.after_install"
# Uninstallation
# ------------
# before_uninstall = "gwt.uninstall.before_uninstall"
# after_uninstall = "gwt.uninstall.after_uninstall"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "gwt.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# DocType Class
# ---------------
# Override standard doctype classes
# override_doctype_class = {
# "ToDo": "custom_app.overrides.CustomToDo"
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "gwt.tasks.all"
# ],
# "daily": [
# "gwt.tasks.daily"
# ],
# "hourly": [
# "gwt.tasks.hourly"
# ],
# "weekly": [
# "gwt.tasks.weekly"
# ]
# "monthly": [
# "gwt.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "gwt.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "gwt.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "gwt.task.get_dashboard_data"
# }
# exempt linked doctypes from being automatically cancelled
#
# auto_cancel_exempted_doctypes = ["Auto Repeat"]
# User Data Protection
# --------------------
user_data_fields = [
{
"doctype": "{doctype_1}",
"filter_by": "{filter_by}",
"redact_fields": ["{field_1}", "{field_2}"],
"partial": 1,
},
{
"doctype": "{doctype_2}",
"filter_by": "{filter_by}",
"partial": 1,
},
{
"doctype": "{doctype_3}",
"strict": False,
},
{
"doctype": "{doctype_4}"
}
]
# Authentication and authorization
# --------------------------------
# auth_hooks = [
# "gwt.auth.validate"
# ]
|
from numpy import vstack, hstack
import statsmodels.api as sm
import os
import sys
import re
class Regression():
def __init__(self):
self.statData = {}
self.totalPrice = {}
self.parameters = {}
self.resultSummary = {}
self.houseIndices = {}
self.getHouseIndex()
def getHouseIndex(self):
f = open('res/房價指數.txt', 'r')
line = f.readline()
cityName = line.strip().split('\t')
for i in range(1, 8, 1):
self.houseIndices.update({cityName[i]:{}})
while True:
line = f.readline()
if line == "":
break
tempList = line.strip().split('\t')
season = tempList[0]
for i in range(1, 8, 1):
self.houseIndices[cityName[i]].update({season:tempList[i]})
f.close()
def quantize(self, rawData):
self.statData.update({'total':{'total':[]}})
self.totalPrice.update({'total':{'total':[]}})
for city in rawData:
self.statData.update({city:{}})
self.totalPrice.update({city:{}})
self.statData[city].update({city:[]})
self.totalPrice[city].update({city:[]})
for block in rawData[city]:
self.statData[city].update({block:[]})
self.totalPrice[city].update({block:[]})
for rawRecord in rawData[city][block]:
X = self.quantizeForRec(rawRecord, city)
Y = [rawRecord['總價元']]
if len(X) > 0:
self.statData[city][block] = self.addArray(self.statData[city][block], X, vstack)
self.statData[city][city] = self.addArray(self.statData[city][city], X, vstack)
self.statData['total']['total'] = self.addArray(self.statData['total']['total'], X, vstack)
self.totalPrice[city][block] = self.addArray(self.totalPrice[city][block], Y, hstack)
self.totalPrice[city][city] = self.addArray(self.totalPrice[city][city], Y, hstack)
self.totalPrice['total']['total'] = self.addArray(self.totalPrice['total']['total'], Y, hstack)
self.countRegression()
def quantizeForRec(self, rawRecord, city):
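        # Build the regressor vector for a single transaction record:
        #   x1      house price index for the city in that quarter (nationwide index as fallback)
        #   x2      whether the building has a management organization
        #   x3-x5   transferred land / parking / building areas in square metres
        #   x6      building age in months (estimated from the transaction date when the
        #           completion date is missing)
        #   x7-x12  building-type indicator variables (residential tower, studio, mid-rise,
        #           apartment, townhouse, storefront)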
const = 1
year = int(rawRecord['交易年月']/100)
if year < 99 or year > 103:
return []
        # map the transaction month (1-12) to its quarter (1-4)
        season = int((rawRecord['交易年月'] % 100 - 1) / 3) + 1
season = str(year)+"Q"+str(season)
if city in self.houseIndices:
x1 = float(self.houseIndices[city][season])
else:
x1 = float(self.houseIndices['全國'][season])
x2 = (rawRecord['有無管理組織'] == '有')
x3 = rawRecord['土地移轉總面積平方公尺']
x4 = rawRecord['車位移轉總面積平方公尺']
x5 = rawRecord['建物移轉總面積平方公尺']
if rawRecord['建築完成年月'] is not None:
houseAge = 1031231 - rawRecord['建築完成年月']
x6 = int((houseAge/10000)*12) + int((houseAge % 10000)/100)
else:
houseAge = 10312 - rawRecord['交易年月']
x6 = int(houseAge/100*12) + int(houseAge % 100)
x7 = (rawRecord['建物型態'].find('住宅大樓') > -1)
x8 = (rawRecord['建物型態'].find('套房') > -1)
x9 = (rawRecord['建物型態'].find('華廈') > -1)
x10 = (rawRecord['建物型態'].find('公寓') > -1)
x11 = (rawRecord['建物型態'].find('透天厝') > -1)
x12 = (rawRecord['建物型態'].find('店面') > -1)
return [const, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12]
def addArray(self, ori_array, new_array, method):
if len(ori_array):
return method((ori_array, new_array))
else:
return new_array
def countRegression(self):
for city in self.statData:
self.parameters.update({city:{}})
self.resultSummary.update({city:{}})
for block in self.statData[city]:
if len(self.totalPrice[city][block]) > 8:
self.parameters[city].update({block:[]})
self.resultSummary[city].update({block:[]})
model = sm.OLS(self.totalPrice[city][block], self.statData[city][block])
results = model.fit()
self.resultSummary[city][block] = results.summary()
self.parameters[city][block] = results.params
if city == 'total':
direct = 'regression_output/report/'
elif block == city:
direct = 'regression_output/report/'+city
else:
direct = 'regression_output/report/'+city+'/'+block
if not os.path.exists(direct): os.makedirs(direct)
f = open(direct+'/'+block+'.txt', 'w')
f.write(str(results.summary()))
f.close()
def getTotalParams(self):
return self.parameters
def getParameters(self, city, block):
if city in self.parameters:
if block in self.parameters[city]:
return self.parameters[city][block]
else:
return self.parameters[city][city]
else:
return self.parameters['total']['total']
    def getResultSummary(self, city, block):
return self.resultSummary[city][block]
|
"""
Publishes figure of merit data
"""
from decisionengine.framework.modules import Publisher
from decisionengine_modules.AWS.publishers.AWS_generic_publisher import AWSGenericPublisher as publisher
@publisher.consumes_dataframe("AWS_Figure_Of_Merit")
class AWSFOMPublisher(publisher):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def graphite_context(self, dataframe):
self.logger.debug("in AWSFOMPublisher graphite_context")
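        # Emit one Graphite metric per (account, entry) pair, keyed by the
        # figure-of-merit value for that row.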
d = {}
for _i, row in dataframe.iterrows():
key = f"{row['AccountName']}.{row['EntryName']}.FOM"
d[key] = row["AWS_Figure_Of_Merit"]
return self.graphite_context_header, d
Publisher.describe(AWSFOMPublisher)
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.hood.Walk
from direct.fsm.ClassicFSM import ClassicFSM
from direct.fsm.StateData import StateData
from direct.fsm.State import State
from direct.directnotify.DirectNotifyGlobal import directNotify
class Walk(StateData):
notify = directNotify.newCategory('Walk')
def __init__(self, doneEvent):
StateData.__init__(self, doneEvent)
self.fsm = ClassicFSM('Walk', [
State('off', self.enterOff, self.exitOff, ['walking', 'deadWalking']),
State('walking', self.enterWalking, self.exitWalking),
State('deadWalking', self.enterDeadWalking, self.exitDeadWalking)], 'off', 'off')
self.fsm.enterInitialState()
def load(self):
pass
def unload(self):
del self.fsm
def enter(self):
base.localAvatar.startPosHprBroadcast()
base.localAvatar.d_broadcastPositionNow()
base.localAvatar.startBlink()
base.localAvatar.attachCamera()
base.localAvatar.startSmartCamera()
base.localAvatar.collisionsOn()
base.localAvatar.enableAvatarControls()
def exit(self):
base.localAvatar.lastState = None
self.fsm.request('off')
base.localAvatar.disableAvatarControls()
base.localAvatar.detachCamera()
base.localAvatar.stopSmartCamera()
base.localAvatar.stopPosHprBroadcast()
base.localAvatar.stopBlink()
base.localAvatar.collisionsOff()
base.localAvatar.controlManager.placeOnFloor()
return
def enterOff(self):
pass
def exitOff(self):
pass
def enterWalking(self):
if base.localAvatar.getHealth() > 0:
base.localAvatar.startTrackAnimToSpeed()
base.localAvatar.setWalkSpeedNormal()
else:
self.fsm.request('deadWalking')
def exitWalking(self):
base.localAvatar.stopTrackAnimToSpeed()
def enterDeadWalking(self):
base.localAvatar.startTrackAnimToSpeed()
base.localAvatar.setWalkSpeedSlow()
base.taskMgr.add(self.__watchForPositiveHP, base.localAvatar.uniqueName('watchforPositiveHP'))
def __watchForPositiveHP(self, task):
if base.localAvatar.getHealth() > 0:
self.fsm.request('walking')
return task.done
return task.cont
def exitDeadWalking(self):
base.taskMgr.remove(base.localAvatar.uniqueName('watchforPositiveHP'))
base.localAvatar.stopTrackAnimToSpeed()
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.interact."""
import sys
from glazier.lib import interact
import mock
import unittest
class InteractTest(unittest.TestCase):
@mock.patch('__builtin__.raw_input', autospec=True)
def testGetUsername(self, raw):
raw.side_effect = iter(['invalid-name', '', ' ', 'username1'])
self.assertEqual(interact.GetUsername(), 'username1')
@mock.patch.object(interact.time, 'sleep', autospec=True)
def testKeystroke(self, sleep):
msvcrt = mock.Mock()
msvcrt.kbhit.return_value = False
sys.modules['msvcrt'] = msvcrt
# no reply
result = interact.Keystroke('mesg', timeout=1)
self.assertEqual(result, None)
self.assertEqual(sleep.call_count, 1)
# reply
msvcrt.kbhit.side_effect = iter([False, False, False, False, True])
msvcrt.getch.return_value = 'v'
result = interact.Keystroke('mesg', timeout=100)
self.assertEqual(result, 'v')
self.assertEqual(sleep.call_count, 6)
# validation miss
msvcrt.kbhit.side_effect = iter([True])
result = interact.Keystroke('mesg', validator='[0-9]')
self.assertEqual(result, None)
@mock.patch('__builtin__.raw_input', autospec=True)
def testPrompt(self, raw):
raw.return_value = 'user*name'
result = interact.Prompt('mesg', '^\\w+$')
self.assertEqual(None, result)
result = interact.Prompt('mesg')
self.assertEqual('user*name', result)
if __name__ == '__main__':
unittest.main()
|
import glob
import os
import fire
import numpy as np
import pandas as pd
class Split:
def __init__(self):
print("start preprocessing")
def get_csv_dataframe(self):
self.csv_df = pd.read_csv(
os.path.join(self.data_path, "mtat", "annotations_final.csv"),
header=None,
index_col=None,
sep="\t",
)
def get_top50_tags(self):
tags = list(self.csv_df.loc[0][1:-1])
tag_count = [
np.array(self.csv_df[i][1:], dtype=int).sum() for i in range(1, 189)
]
top_50_tag_index = np.argsort(tag_count)[::-1][:50]
top_50_tags = np.array(tags)[top_50_tag_index]
np.save(
open(os.path.join(self.data_path, "mtat", "tags.npy"), "wb"), top_50_tags
)
return top_50_tag_index
def write_tags(self, top_50_tag_index):
binary = np.zeros((25863, 50))
titles = []
idx = 0
for i in range(1, 25864):
features = np.array(self.csv_df.loc[i][top_50_tag_index + 1], dtype=int)
title = self.csv_df.loc[i][189]
# if np.sum(features) != 0:
binary[idx] = features
idx += 1
titles.append(title)
binary = binary[: len(titles)]
np.save(open(os.path.join(self.data_path, "mtat", "binary.npy"), "wb"), binary)
return titles, binary
def split(self, titles, binary):
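        # Split by the leading hex digit of the clip path: 0-b go to train,
        # c to validation and d-f to test, keeping only clips with at least
        # one positive tag.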
tr = []
val = []
test = []
for i, title in enumerate(titles):
if int(title[0], 16) < 12:
if binary[i].sum() > 0:
tr.append(str(i) + "\t" + title)
elif int(title[0], 16) < 13:
if binary[i].sum() > 0:
val.append(str(i) + "\t" + title)
else:
if binary[i].sum() > 0:
test.append(str(i) + "\t" + title)
self.get_exist(tr, val, test)
def get_exist(self, tr, val, test):
tr_exist = []
val_exist = []
test_exist = []
for fn in tr:
_path = os.path.join(
self.data_path, "mtat", "npy", fn.split("/")[-1][:-3] + "npy"
)
if os.path.exists(_path):
tr_exist.append(fn)
for fn in val:
_path = os.path.join(
self.data_path, "mtat", "npy", fn.split("/")[-1][:-3] + "npy"
)
if os.path.exists(_path):
val_exist.append(fn)
for fn in test:
_path = os.path.join(
self.data_path, "mtat", "npy", fn.split("/")[-1][:-3] + "npy"
)
if os.path.exists(_path):
test_exist.append(fn)
np.save(open(os.path.join(self.data_path, "mtat", "train.npy"), "wb"), tr_exist)
np.save(
open(os.path.join(self.data_path, "mtat", "valid.npy"), "wb"), val_exist
)
np.save(
open(os.path.join(self.data_path, "mtat", "test.npy"), "wb"), test_exist
)
def run(self, data_path):
self.data_path = data_path
self.get_csv_dataframe()
top_50_tag_index = self.get_top50_tags()
titles, binary = self.write_tags(top_50_tag_index)
self.split(titles, binary)
if __name__ == "__main__":
s = Split()
fire.Fire({"run": s.run})
|
import logging
from typing import Optional, Callable, Any
from google.cloud import pubsub_v1
from gcp_mysql_backup_service.env import EnvironmentManager
class SubscriptionEnvironmentManager(EnvironmentManager):
@property
def subscription_name(self) -> str:
return self._get('SUBSCRIPTION_NAME')
@property
def timeout(self) -> int:
return int(self._get('SUBSCRIPTION_TIMEOUT_SECONDS', 600))
class SubscriptionManager:
def __init__(self, environment_manager: SubscriptionEnvironmentManager) -> None:
self._logger: logging.Logger = logging.getLogger(__name__)
self._environment_manager: SubscriptionEnvironmentManager = environment_manager
self._client: Optional[pubsub_v1.SubscriberClient] = None
self._subscriptions = []
def _get_client(self) -> pubsub_v1.SubscriberClient:
if self._client is None:
self._client = pubsub_v1.SubscriberClient()
return self._client
def subscribe(self, callback: Callable[[Any], None]) -> None:
subscription_path = self._get_client().subscription_path(
self._environment_manager.project_id,
self._environment_manager.subscription_name
)
subscription = self._get_client().subscribe(
subscription_path,
callback=callback
)
self._subscriptions.append(subscription)
def run(self) -> None:
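        # Block on each streaming-pull future until it finishes or the configured
        # timeout elapses; cancel the subscription if it fails or times out.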
with self._get_client():
for subscription in self._subscriptions:
try:
subscription.result(timeout=self._environment_manager.timeout)
                except Exception:
subscription.cancel()
|
# Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unit tests for tdmprogram.py"""
import pytest
import numpy as np
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.tdm import tdmprogram
# make test deterministic
np.random.seed(42)
def singleloop(r, alpha, phi, theta, copies, shift="default", hbar=2):
"""Single delay loop with program.
Args:
r (float): squeezing parameter
alpha (Sequence[float]): beamsplitter angles
phi (Sequence[float]): rotation angles
        theta (Sequence[float]): homodyne measurement angles
        copies (int): number of times the timebin sequence is repeated
        shift (str or int): how the samples are shifted between copies ("default" or an integer offset)
        hbar (float): value of hbar appearing in the commutation relation
Returns:
(list): homodyne samples from the single loop simulation
"""
prog = tdmprogram.TDMProgram(N=2)
with prog.context(alpha, phi, theta, copies=copies, shift=shift) as (p, q):
ops.Sgate(r, 0) | q[1]
ops.BSgate(p[0]) | (q[0], q[1])
ops.Rgate(p[1]) | q[1]
ops.MeasureHomodyne(p[2]) | q[0]
eng = sf.Engine("gaussian")
result = eng.run(prog, hbar=hbar)
return result.samples[0]
def test_number_of_copies_must_be_integer():
"""Checks number of copies is integer"""
sq_r = 1.0
N = 3
c = 4
copies = 1 / 137
alpha = [0, np.pi / 4] * c
phi = [np.pi / 2, 0] * c
theta = [0, 0] + [0, np.pi / 2] + [np.pi / 2, 0] + [np.pi / 2, np.pi / 2]
with pytest.raises(TypeError, match="Number of copies must be a positive integer"):
singleloop(sq_r, alpha, phi, theta, copies)
def test_gates_equal_length():
"""Checks gate list parameters have same length"""
sq_r = 1.0
N = 3
c = 4
copies = 10
alpha = [0, np.pi / 4] * c
phi = [np.pi / 2, 0] * c
theta = [0, 0] + [0, np.pi / 2] + [np.pi / 2, 0] + [np.pi / 2]
with pytest.raises(ValueError, match="Gate-parameter lists must be of equal length."):
singleloop(sq_r, alpha, phi, theta, copies)
def test_at_least_one_measurement():
"""Checks circuit has at least one measurement operator"""
sq_r = 1.0
N = 3
copies = 1
alpha = [0] * 4
phi = [0] * 4
prog = tdmprogram.TDMProgram(N=3)
with pytest.raises(ValueError, match="Must be at least one measurement."):
with prog.context(alpha, phi, copies=copies, shift="default") as (p, q):
ops.Sgate(sq_r, 0) | q[2]
ops.BSgate(p[0]) | (q[1], q[2])
ops.Rgate(p[1]) | q[2]
eng = sf.Engine("gaussian")
result = eng.run(prog)
def test_spatial_modes_number_of_measurements_match():
"""Checks number of spatial modes matches number of measurements"""
sq_r = 1.0
N = 3
copies = 1
alpha = [0] * 4
phi = [0] * 4
theta = [0] * 4
with pytest.raises(ValueError, match="Number of measurement operators must match number of spatial modes."):
prog = tdmprogram.TDMProgram(N=[3, 3])
with prog.context(alpha, phi, theta, copies=copies) as (p, q):
ops.Sgate(sq_r, 0) | q[2]
ops.BSgate(p[0]) | (q[1], q[2])
ops.Rgate(p[1]) | q[2]
ops.MeasureHomodyne(p[2]) | q[0]
eng = sf.Engine("gaussian")
result = eng.run(prog)
def test_shift_by_specified_amount():
"""Checks that shifting by 1 is equivalent to shift='end' for a program
with one spatial mode"""
np.random.seed(42)
sq_r = 1.0
N = 3
copies = 1
alpha = [0] * 4
phi = [0] * 4
theta = [0] * 4
np.random.seed(42)
x = singleloop(sq_r, alpha, phi, theta, copies)
np.random.seed(42)
y = singleloop(sq_r, alpha, phi, theta, copies, shift=1)
assert np.allclose(x, y)
def test_str_tdm_method():
"""Testing the string method"""
prog = tdmprogram.TDMProgram(N=1)
assert prog.__str__() == "<TDMProgram: concurrent modes=1, time bins=0, spatial modes=0>"
def test_epr():
"""Generates an EPR state and checks that the correct correlations (noise reductions) are observed
from the samples"""
np.random.seed(42)
sq_r = 1.0
c = 4
copies = 200
    # This will generate c EPR states per copy. I chose c = 4 because it allows us to make 4 EPR pairs per copy that can each be measured in different basis permutations.
alpha = [np.pi / 4, 0] * c
phi = [0, np.pi / 2] * c
# Measurement of 4 subsequent EPR states in XX, XP, PX, PP to investigate nearest-neighbour correlations in all basis permutations
theta = [0, 0] + [0, np.pi / 2] + [np.pi / 2, 0] + [np.pi / 2, np.pi / 2] #
x = singleloop(sq_r, alpha, phi, theta, copies)
X0 = x[0::8]
X1 = x[1::8]
X2 = x[2::8]
P0 = x[3::8]
P1 = x[4::8]
X3 = x[5::8]
P2 = x[6::8]
P3 = x[7::8]
atol = 5 / np.sqrt(copies)
minusstdX1X0 = (X1 - X0).std() / np.sqrt(2)
plusstdX1X0 = (X1 + X0).std() / np.sqrt(2)
squeezed_std = np.exp(-sq_r)
assert np.allclose(minusstdX1X0, squeezed_std, atol=atol)
assert np.allclose(plusstdX1X0, 1 / squeezed_std, atol=atol)
minusstdP2P3 = (P2 - P3).std() / np.sqrt(2)
plusstdP2P3 = (P2 + P3).std() / np.sqrt(2)
assert np.allclose(minusstdP2P3, 1 / squeezed_std, atol=atol)
assert np.allclose(plusstdP2P3, squeezed_std, atol=atol)
minusstdP0X2 = (P0 - X2).std()
plusstdP0X2 = (P0 + X2).std()
expected = 2 * np.sinh(sq_r) ** 2
assert np.allclose(minusstdP0X2, expected, atol=atol)
assert np.allclose(plusstdP0X2, expected, atol=atol)
minusstdX3P1 = (X3 - P1).std()
plusstdX3P1 = (X3 + P1).std()
assert np.allclose(minusstdX3P1, expected, atol=atol)
assert np.allclose(plusstdX3P1, expected, atol=atol)
def test_ghz():
"""Generates a GHZ state and checks that the correct correlations (noise reductions) are observed
from the samples
See Eq. 5 of https://advances.sciencemag.org/content/5/5/eaaw4530
"""
# Set up the circuit
np.random.seed(42)
n = 10
vac_modes = 1
copies = 1000
sq_r = 5
alpha = [np.arccos(np.sqrt(1 / (n - i + 1))) if i != n + 1 else 0 for i in range(n + vac_modes)]
alpha[0] = 0.0
phi = [0] * (n + vac_modes)
phi[0] = np.pi / 2
# Measuring X nullifier
theta = [0] * (n + vac_modes)
samples_X = singleloop(sq_r, alpha, phi, theta, copies)
reshaped_samples_X = np.array(samples_X).reshape([copies, n + vac_modes])
# We will check that the x of all the modes equal the x of the last one
nullifier_X = lambda sample: (sample - sample[-1])[vac_modes:-1]
val_nullifier_X = np.var([nullifier_X(x) for x in reshaped_samples_X], axis=0)
assert np.allclose(val_nullifier_X, 2 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(copies))
# Measuring P nullifier
theta = [np.pi / 2] * (n + vac_modes)
samples_P = singleloop(sq_r, alpha, phi, theta, copies)
# We will check that the sum of all the p is equal to zero
reshaped_samples_P = np.array(samples_P).reshape([copies, n + vac_modes])
nullifier_P = lambda sample: np.sum(sample[vac_modes:])
val_nullifier_P = np.var([nullifier_P(p) for p in reshaped_samples_P], axis=0)
assert np.allclose(val_nullifier_P, n * np.exp(-2 * sq_r), rtol=5 / np.sqrt(copies))
def test_one_dimensional_cluster():
"""Test that the nullifier have the correct value in the experiment described in
See Eq. 10 of https://advances.sciencemag.org/content/5/5/eaaw4530
"""
np.random.seed(42)
n = 20
copies = 1000
sq_r = 3
alpha_c = np.arccos(np.sqrt((np.sqrt(5) - 1) / 2))
alpha = [alpha_c] * n
alpha[0] = 0.0
phi = [np.pi / 2] * n
theta = [0, np.pi / 2] * (n // 2) # Note that we measure x for mode i and the p for mode i+1.
reshaped_samples = np.array(singleloop(sq_r, alpha, phi, theta, copies)).reshape(copies, n)
nullifier = lambda x: np.array([-x[i - 2] + x[i - 1] - x[i] for i in range(2, len(x) - 2, 2)])[1:]
nullifier_samples = np.array([nullifier(y) for y in reshaped_samples])
delta = np.var(nullifier_samples, axis=0)
assert np.allclose(delta, 3 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(copies))
def test_one_dimensional_cluster_tokyo():
"""
One-dimensional temporal-mode cluster state as demonstrated in
https://aip.scitation.org/doi/pdf/10.1063/1.4962732
"""
np.random.seed(42)
sq_r = 5
N = 3 # concurrent modes
n = 500 # for an n-mode cluster state
copies = 1
# first half of cluster state measured in X, second half in P
theta1 = [0] * int(n / 2) + [np.pi / 2] * int(n / 2) # measurement angles for detector A
theta2 = theta1 # measurement angles for detector B
prog = tdmprogram.TDMProgram(N=[1, 2])
with prog.context(theta1, theta2, copies=copies, shift="default") as (p, q):
ops.Sgate(sq_r, 0) | q[0]
ops.Sgate(sq_r, 0) | q[2]
ops.Rgate(np.pi / 2) | q[0]
ops.BSgate(np.pi / 4) | (q[0], q[2])
ops.BSgate(np.pi / 4) | (q[0], q[1])
ops.MeasureHomodyne(p[0]) | q[0]
ops.MeasureHomodyne(p[1]) | q[1]
eng = sf.Engine("gaussian")
result = eng.run(prog)
xA = result.all_samples[0]
xB = result.all_samples[1]
X_A = xA[: n // 2] # X samples from detector A
P_A = xA[n // 2 :] # P samples from detector A
X_B = xB[: n // 2] # X samples from detector B
P_B = xB[n // 2 :] # P samples from detector B
# nullifiers defined in https://aip.scitation.org/doi/pdf/10.1063/1.4962732, Eqs. (1a) and (1b)
ntot = len(X_A) - 1
nX = np.array([X_A[i] + X_B[i] + X_A[i + 1] - X_B[i + 1] for i in range(ntot)])
nP = np.array([P_A[i] + P_B[i] - P_A[i + 1] + P_B[i + 1] for i in range(ntot)])
nXvar = np.var(nX)
nPvar = np.var(nP)
assert np.allclose(nXvar, 4 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(n))
assert np.allclose(nPvar, 4 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(n))
def test_two_dimensional_cluster_denmark():
"""
Two-dimensional temporal-mode cluster state as demonstrated in https://arxiv.org/pdf/1906.08709
"""
np.random.seed(42)
sq_r = 3
delay1 = 1 # number of timebins in the short delay line
delay2 = 12 # number of timebins in the long delay line
n = 400 # number of timebins
    # Size of cluster is n x delay2
# first half of cluster state measured in X, second half in P
theta_A = [0] * int(n / 2) + [np.pi / 2] * int(n / 2) # measurement angles for detector A
theta_B = theta_A # measurement angles for detector B
# 2D cluster
prog = tdmprogram.TDMProgram([1, delay2 + delay1 + 1])
with prog.context(theta_A, theta_B, shift="default") as (p, q):
ops.Sgate(sq_r, 0) | q[0]
ops.Sgate(sq_r, 0) | q[delay2 + delay1 + 1]
ops.Rgate(np.pi / 2) | q[delay2 + delay1 + 1]
ops.BSgate(np.pi / 4, np.pi) | (q[delay2 + delay1 + 1], q[0])
ops.BSgate(np.pi / 4, np.pi) | (q[delay2 + delay1], q[0])
ops.BSgate(np.pi / 4, np.pi) | (q[delay1], q[0])
ops.MeasureHomodyne(p[1]) | q[0]
ops.MeasureHomodyne(p[0]) | q[delay1]
eng = sf.Engine("gaussian")
result = eng.run(prog)
samples = result.all_samples
xA = result.all_samples[0]
xB = result.all_samples[1]
X_A = xA[: n // 2] # X samples from detector A
P_A = xA[n // 2 :] # P samples from detector A
X_B = xB[: n // 2] # X samples from detector B
P_B = xB[n // 2 :] # P samples from detector B
# nullifiers defined in https://arxiv.org/pdf/1906.08709.pdf, Eqs. (1) and (2)
N = delay2
ntot = len(X_A) - delay2 - 1
nX = np.array([X_A[k] + X_B[k] - X_A[k + 1] - X_B[k + 1] - X_A[k + N] + X_B[k + N] - X_A[k + N + 1] + X_B[k + N + 1] for k in range(ntot)])
nP = np.array([P_A[k] + P_B[k] + P_A[k + 1] + P_B[k + 1] - P_A[k + N] + P_B[k + N] + P_A[k + N + 1] - P_B[k + N + 1] for k in range(ntot)])
nXvar = np.var(nX)
nPvar = np.var(nP)
assert np.allclose(nXvar, 8 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(ntot))
assert np.allclose(nPvar, 8 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(ntot))
def test_two_dimensional_cluster_tokyo():
"""
    Two-dimensional temporal-mode cluster state as demonstrated by the University of Tokyo. See: https://arxiv.org/pdf/1903.03918.pdf
"""
# temporal delay in timebins for each spatial mode
delayA = 0
delayB = 1
delayC = 5
delayD = 0
# concurrent modes in each spatial mode
concurrA = 1 + delayA
concurrB = 1 + delayB
concurrC = 1 + delayC
concurrD = 1 + delayD
N = [concurrA, concurrB, concurrC, concurrD]
sq_r = 5
# first half of cluster state measured in X, second half in P
n = 400 # number of timebins
theta_A = [0] * int(n / 2) + [np.pi / 2] * int(n / 2) # measurement angles for detector A
theta_B = theta_A # measurement angles for detector B
theta_C = theta_A
theta_D = theta_A
# 2D cluster
prog = tdmprogram.TDMProgram(N)
with prog.context(theta_A, theta_B, theta_C, theta_D, shift="default") as (p, q):
ops.Sgate(sq_r, 0) | q[0]
ops.Sgate(sq_r, 0) | q[2]
ops.Sgate(sq_r, 0) | q[8]
ops.Sgate(sq_r, 0) | q[9]
ops.Rgate(np.pi / 2) | q[0]
ops.Rgate(np.pi / 2) | q[8]
ops.BSgate(np.pi / 4) | (q[0], q[2])
ops.BSgate(np.pi / 4) | (q[8], q[9])
ops.BSgate(np.pi / 4) | (q[2], q[8])
ops.BSgate(np.pi / 4) | (q[0], q[1])
ops.BSgate(np.pi / 4) | (q[3], q[9])
ops.MeasureHomodyne(p[0]) | q[0]
ops.MeasureHomodyne(p[1]) | q[1]
ops.MeasureHomodyne(p[2]) | q[3]
ops.MeasureHomodyne(p[3]) | q[9]
eng = sf.Engine("gaussian")
result = eng.run(prog)
samples = result.all_samples
xA = result.all_samples[0]
xB = result.all_samples[1]
xC = result.all_samples[3]
xD = result.all_samples[9]
X_A = xA[: n // 2] # X samples from detector A
P_A = xA[n // 2 :] # P samples from detector A
X_B = xB[: n // 2] # X samples from detector B
P_B = xB[n // 2 :] # P samples from detector B
X_C = xC[: n // 2] # X samples from detector C
P_C = xC[n // 2 :] # P samples from detector C
X_D = xD[: n // 2] # X samples from detector D
P_D = xD[n // 2 :] # P samples from detector D
N = delayC
# nullifiers defined in https://arxiv.org/pdf/1903.03918.pdf, Fig. S5
ntot = len(X_A) - N - 1
nX1 = np.array([X_A[k] + X_B[k] - np.sqrt(1 / 2) * (-X_A[k + 1] + X_B[k + 1] + X_C[k + N] + X_D[k + N]) for k in range(ntot)])
nX2 = np.array([X_C[k] - X_D[k] - np.sqrt(1 / 2) * (-X_A[k + 1] + X_B[k + 1] - X_C[k + N] - X_D[k + N]) for k in range(ntot)])
nP1 = np.array([P_A[k] + P_B[k] + np.sqrt(1 / 2) * (-P_A[k + 1] + P_B[k + 1] + P_C[k + N] + P_D[k + N]) for k in range(ntot)])
nP2 = np.array([P_C[k] - P_D[k] + np.sqrt(1 / 2) * (-P_A[k + 1] + P_B[k + 1] - P_C[k + N] - P_D[k + N]) for k in range(ntot)])
nX1var = np.var(nX1)
nX2var = np.var(nX2)
nP1var = np.var(nP1)
nP2var = np.var(nP2)
assert np.allclose(nX1var, 4 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(ntot))
assert np.allclose(nX2var, 4 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(ntot))
assert np.allclose(nP1var, 4 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(ntot))
assert np.allclose(nP2var, 4 * np.exp(-2 * sq_r), rtol=5 / np.sqrt(ntot))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : qichun tang
# @Contact : [email protected]
import math
def float_gcd(a, b):
def is_int(x):
return not bool(int(x) - x)
base = 1
while not (is_int(a) and is_int(b)):
a *= 10
b *= 10
base *= 10
return math.gcd(int(a), int(b)) / base
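# Illustrative example: float_gcd(1.5, 0.5) scales both values by 10 to get the
# integers 15 and 5, takes gcd(15, 5) = 5 and returns 5 / 10 = 0.5.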
|
from typing import Any, Callable, Optional
import numpy as np
import torch
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""
    patience: int
    verbose: bool
    counter: int
    best_score: Optional[float]
    early_stop: bool
    val_loss_min: float
    delta: float
    path: str
def __init__(
self,
patience: int = 7,
verbose: bool = False,
delta: float = 0,
path: str = "checkpoint.pt",
trace_func: Callable[[Any], Any] = print,
) -> None:
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an
improvement.
Default: 0
path (str): Path for the checkpoint to be saved to.
Default: 'checkpoint.pt'
trace_func (function): trace print function.
Default: print
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
        self.val_loss_min = np.inf
self.delta = delta
self.path = path
self.trace_func = trace_func
def __call__(self, val_loss: float, model: Any) -> None:
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model)
elif score < self.best_score + self.delta:
self.counter += 1
self.trace_func(
f"EarlyStopping counter: {self.counter} out of {self.patience}"
)
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model)
self.counter = 0
def save_checkpoint(self, val_loss: float, model: Any) -> None:
"""Saves model when validation loss decrease."""
if self.verbose:
self.trace_func(
f"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f})."
+ " Saving model ..."
)
torch.save(model.state_dict(), self.path)
self.val_loss_min = val_loss
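# Minimal usage sketch (illustrative only; `build_model`, `train_one_epoch` and
# `evaluate` stand in for whatever the surrounding training script provides):
#   early_stopping = EarlyStopping(patience=10, verbose=True)
#   model = build_model()
#   for epoch in range(max_epochs):
#       train_one_epoch(model)
#       val_loss = evaluate(model)
#       early_stopping(val_loss, model)
#       if early_stopping.early_stop:
#           break
#   model.load_state_dict(torch.load(early_stopping.path))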
|
l,r,n=map(int,input().split())
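# Count how many values of the form l+1, l+1+n, l+1+2*n, ... are strictly less than r.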
l2=[]
for i in range(l+1,r,n):
l2.append(i)
print(len(l2))
|
import sortedcontainers
import packet_common
import route
import table
class RouteTable:
def __init__(self, address_family, fib, log, log_id):
assert fib.address_family == address_family
self.address_family = address_family
# Sorted dict of _Destination objects indexed by prefix
self.destinations = sortedcontainers.SortedDict()
self.fib = fib
self._log = log
self._log_id = log_id
def debug(self, msg, *args):
if self._log:
self._log.debug("[%s] %s" % (self._log_id, msg), *args)
def get_route(self, prefix, owner):
packet_common.assert_prefix_address_family(prefix, self.address_family)
if prefix in self.destinations:
return self.destinations[prefix].get_route(owner)
else:
return None
def put_route(self, rte):
packet_common.assert_prefix_address_family(rte.prefix, self.address_family)
rte.stale = False
prefix = rte.prefix
self.debug("Put %s", rte)
if rte.prefix in self.destinations:
destination = self.destinations[prefix]
else:
destination = _Destination(prefix)
self.destinations[prefix] = destination
destination.put_route(rte, self.fib)
def del_route(self, prefix, owner):
# Returns True if the route was present in the table and False if not.
packet_common.assert_prefix_address_family(prefix, self.address_family)
if prefix in self.destinations:
destination = self.destinations[prefix]
deleted = destination.del_route(owner, self.fib)
if destination.routes == []:
del self.destinations[prefix]
else:
deleted = False
if deleted:
self.debug("Delete %s", prefix)
else:
self.debug("Attempted delete %s (not present)", prefix)
return deleted
def all_routes(self):
for destination in self.destinations.values():
for rte in destination.routes:
yield rte
def all_prefix_routes(self, prefix):
packet_common.assert_prefix_address_family(prefix, self.address_family)
if prefix in self.destinations:
destination = self.destinations[prefix]
for rte in destination.routes:
yield rte
def cli_table(self):
tab = table.Table()
tab.add_row(route.Route.cli_summary_headers())
for rte in self.all_routes():
tab.add_row(rte.cli_summary_attributes())
return tab
def mark_owner_routes_stale(self, owner):
# Mark all routes of a given owner as stale. Returns number of routes marked.
        # A possibly more efficient implementation is to have a list of routes for each owner.
# For now, this is good enough.
count = 0
for rte in self.all_routes():
if rte.owner == owner:
rte.stale = True
count += 1
return count
def del_stale_routes(self):
# Delete all routes still marked as stale. Returns number of deleted routes.
# Cannot delete routes while iterating over routes, so prepare a delete list
routes_to_delete = []
for rte in self.all_routes():
if rte.stale:
routes_to_delete.append((rte.prefix, rte.owner))
# Now delete the routes in the prepared list
count = len(routes_to_delete)
if count > 0:
self.debug("Delete %d remaining stale routes", count)
for (prefix, owner) in routes_to_delete:
self.del_route(prefix, owner)
return count
def nr_destinations(self):
return len(self.destinations)
def nr_routes(self):
count = 0
for destination in self.destinations.values():
count += len(destination.routes)
return count
class _Destination:
# For each prefix, there can be up to one route per owner. This is also the order of preference
# for the routes from different owners to the same destination (higher numerical value is more
# preferred)
def __init__(self, prefix):
self.prefix = prefix
        # List of Route objects, in decreasing order of owner (= in decreasing order of preference)
# For a given owner, at most one route is allowed to be in the list
self.routes = []
def get_route(self, owner):
for rte in self.routes:
if rte.owner == owner:
return rte
return None
def update_fib(self, fib):
if self.routes == []:
fib.del_route(self.prefix)
else:
best_route = self.routes[0]
fib.put_route(best_route)
def put_route(self, new_route, fib):
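        # Insert or replace the route for this owner, keeping the list sorted in
        # decreasing owner order; refresh the FIB when a route was added or the
        # replaced route changed significantly.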
index = 0
inserted = False
for existing_route in self.routes:
if existing_route.owner == new_route.owner:
self.routes[index] = new_route
inserted = True
different = self.routes_significantly_different(existing_route, new_route)
break
elif existing_route.owner < new_route.owner:
self.routes.insert(index, new_route)
inserted = True
different = True
break
index += 1
if not inserted:
self.routes.append(new_route)
different = True
if different:
self.update_fib(fib)
def del_route(self, owner, fib):
index = 0
for rte in self.routes:
if rte.owner == owner:
del self.routes[index]
self.update_fib(fib)
return True
index += 1
return False
@staticmethod
def routes_significantly_different(route1, route2):
assert route1.prefix == route2.prefix
if route1.owner != route2.owner:
return True
if route1.next_hops != route2.next_hops:
return True
return False
|
import time
import board
import displayio
import terminalio
from digitalio import DigitalInOut, Direction
from adafruit_display_text import label
import adafruit_displayio_ssd1306
import adafruit_ina260
first_line_y = 3
line_height = 11
line_glyphs = 63
# These voltage ranges assume that we have minimal voltage
# drop between the battery terminals and the monitor
battery_voltage_yellow = 12.2
battery_voltage_red = 11.9
battery_voltage_cutoff = 11.7
moving_average_window = 8
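# The battery voltage is smoothed below with an exponential moving average so
# that momentary sags do not immediately trip the yellow/red/cutoff thresholds.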
i2c = board.I2C()
# With these settings, the readings will update every 1055 ms
# As of 10/29/2020, the INA260 library has a bug where the averaging count constant is off by one,
# so COUNT_16 is really 64 samples
ina260 = adafruit_ina260.INA260(i2c)
ina260.mode = adafruit_ina260.Mode.CONTINUOUS
ina260.averaging_count = adafruit_ina260.AveragingCount.COUNT_16
ina260.voltage_conversion_time = adafruit_ina260.ConversionTime.TIME_8_244_ms
ina260.current_conversion_time = adafruit_ina260.ConversionTime.TIME_8_244_ms
displayio.release_displays()
display_bus = displayio.I2CDisplay(i2c, device_address=0x3C)
display = adafruit_displayio_ssd1306.SSD1306(display_bus, width=128, height=32)
display_group = displayio.Group(max_size=3)
display.show(display_group)
line1 = label.Label(terminalio.FONT, color=0xFFFF00, max_glyphs=line_glyphs, line_spacing=0.85, x=0, y=first_line_y)
display_group.append(line1)
# For Adafruit M0 Express:
# power_enable = DigitalInOut(board.D5)
# green_led = DigitalInOut(board.D6)
# yellow_led = DigitalInOut(board.D9)
# red_led = DigitalInOut(board.D10)
# Pinout for Particle Xenon
power_enable = DigitalInOut(board.D2)
power_enable.direction = Direction.OUTPUT
power_enable.value = False
green_led = DigitalInOut(board.D3)
green_led.direction = Direction.OUTPUT
green_led.value = False
yellow_led = DigitalInOut(board.D4)
yellow_led.direction = Direction.OUTPUT
yellow_led.value = False
red_led = DigitalInOut(board.D5)
red_led.direction = Direction.OUTPUT
red_led.value = False
# We'd like to turn off the RGB led on the board, which is currently
# defaulting to white, but that's not possible yet (https://github.com/adafruit/circuitpython/issues/2707)
#line1.text = "Saint Gimp\nBattery monitor"
#time.sleep(1.055)
voltage_average = 13
amp_hours = 0.0
energy = 0.0
then = time.monotonic()
power_enable.value = True
lockout = False
while True:
time.sleep(1.055)
now = time.monotonic()
elapsed_time = now - then
elapsed_hours = (elapsed_time / 60 / 60)
then = now
voltage = ina260.voltage
current = ina260.current / 1000
power = ina260.power / 1000
amp_hours = amp_hours + (current * elapsed_hours)
energy = energy + (power * elapsed_hours)
line1.text = "{:.2f}".format(voltage) + " V " + "{:.3f}".format(current) + " A\n" + "{:.2f}".format(power) + " W\n" + "{:.3f}".format(amp_hours) + " Ah " + "{:.2f}".format(energy) + " Wh"
voltage_average = voltage_average - ((voltage_average - voltage) / moving_average_window)
if lockout:
power_enable.value = False
green_led.value = False
yellow_led.value = False
red_led.value = False
elif voltage_average > battery_voltage_yellow:
green_led.value = True
yellow_led.value = False
red_led.value = False
elif voltage_average > battery_voltage_red:
green_led.value = False
yellow_led.value = True
red_led.value = False
elif voltage_average > battery_voltage_cutoff:
green_led.value = False
yellow_led.value = False
red_led.value = True
else:
lockout = True
|
from django.contrib import admin
from .models import Products
admin.site.register(Products)
|
import json
import os
from contextlib import contextmanager
from typing import Iterator
from unittest import mock
import airflow.configuration
import airflow.version
import pytest
from airflow.lineage import apply_lineage, prepare_lineage
from airflow.models import DAG, Connection, DagBag
from airflow.models import TaskInstance as TI
from airflow.utils.dates import days_ago
try:
from airflow.operators.dummy import DummyOperator
except ModuleNotFoundError:
from airflow.operators.dummy_operator import DummyOperator
import datahub.emitter.mce_builder as builder
from datahub_provider import get_provider_info
from datahub_provider.entities import Dataset
from datahub_provider.hooks.datahub import DatahubKafkaHook, DatahubRestHook
from datahub_provider.operators.datahub import DatahubEmitterOperator
lineage_mce = builder.make_lineage_mce(
[
builder.make_dataset_urn("bigquery", "upstream1"),
builder.make_dataset_urn("bigquery", "upstream2"),
],
builder.make_dataset_urn("bigquery", "downstream1"),
)
datahub_rest_connection_config = Connection(
conn_id="datahub_rest_test",
conn_type="datahub_rest",
host="http://test_host:8080/",
extra=None,
)
datahub_kafka_connection_config = Connection(
conn_id="datahub_kafka_test",
conn_type="datahub_kafka",
host="test_broker:9092",
extra=json.dumps(
{
"connection": {
"producer_config": {},
"schema_registry_url": "http://localhost:8081",
}
}
),
)
def setup_module(module):
airflow.configuration.conf.load_test_config()
def test_airflow_provider_info():
assert get_provider_info()
def test_dags_load_with_no_errors(pytestconfig):
airflow_examples_folder = (
pytestconfig.rootpath / "src/datahub_provider/example_dags"
)
dag_bag = DagBag(dag_folder=str(airflow_examples_folder), include_examples=False)
assert dag_bag.import_errors == {}
assert len(dag_bag.dag_ids) > 0
@contextmanager
def patch_airflow_connection(conn: Connection) -> Iterator[Connection]:
# The return type should really by ContextManager, but mypy doesn't like that.
# See https://stackoverflow.com/questions/49733699/python-type-hints-and-context-managers#comment106444758_58349659.
with mock.patch(
"datahub_provider.hooks.datahub.BaseHook.get_connection", return_value=conn
):
yield conn
@mock.patch("datahub_provider.hooks.datahub.DatahubRestEmitter", autospec=True)
def test_datahub_rest_hook(mock_emitter):
with patch_airflow_connection(datahub_rest_connection_config) as config:
hook = DatahubRestHook(config.conn_id)
hook.emit_mces([lineage_mce])
mock_emitter.assert_called_once_with(config.host, None)
instance = mock_emitter.return_value
instance.emit_mce.assert_called_with(lineage_mce)
@mock.patch("datahub_provider.hooks.datahub.DatahubKafkaEmitter", autospec=True)
def test_datahub_kafka_hook(mock_emitter):
with patch_airflow_connection(datahub_kafka_connection_config) as config:
hook = DatahubKafkaHook(config.conn_id)
hook.emit_mces([lineage_mce])
mock_emitter.assert_called_once()
instance = mock_emitter.return_value
instance.emit_mce_async.assert_called()
instance.flush.assert_called_once()
@mock.patch("datahub_provider.hooks.datahub.DatahubRestHook.emit_mces")
def test_datahub_lineage_operator(mock_emit):
with patch_airflow_connection(datahub_rest_connection_config) as config:
task = DatahubEmitterOperator(
task_id="emit_lineage",
datahub_conn_id=config.conn_id,
mces=[
builder.make_lineage_mce(
[
builder.make_dataset_urn("snowflake", "mydb.schema.tableA"),
builder.make_dataset_urn("snowflake", "mydb.schema.tableB"),
],
builder.make_dataset_urn("snowflake", "mydb.schema.tableC"),
)
],
)
task.execute(None)
mock_emit.assert_called()
@pytest.mark.parametrize(
"hook",
[
DatahubRestHook,
DatahubKafkaHook,
],
)
def test_hook_airflow_ui(hook):
# Simply ensure that these run without issue. These will also show up
# in the Airflow UI, where it will be even more clear if something
# is wrong.
hook.get_connection_form_widgets()
hook.get_ui_field_behaviour()
@pytest.mark.parametrize(
["inlets", "outlets"],
[
(
# Airflow 1.10.x uses a dictionary structure for inlets and outlets.
# We want the lineage backend to support this structure for backwards
# compatibility reasons, so this test is not conditional.
{"datasets": [Dataset("snowflake", "mydb.schema.tableConsumed")]},
{"datasets": [Dataset("snowflake", "mydb.schema.tableProduced")]},
),
pytest.param(
# Airflow 2.x also supports a flattened list for inlets and outlets.
# We want to test this capability.
[Dataset("snowflake", "mydb.schema.tableConsumed")],
[Dataset("snowflake", "mydb.schema.tableProduced")],
marks=pytest.mark.skipif(
airflow.version.version.startswith("1"),
reason="list-style lineage is only supported in Airflow 2.x",
),
),
],
ids=[
"airflow-1-10-x-decl",
"airflow-2-x-decl",
],
)
@mock.patch("datahub_provider.hooks.datahub.DatahubRestHook.emit_mces")
def test_lineage_backend(mock_emit, inlets, outlets):
DEFAULT_DATE = days_ago(2)
with mock.patch.dict(
os.environ,
{
"AIRFLOW__LINEAGE__BACKEND": "datahub_provider.lineage.datahub.DatahubLineageBackend",
"AIRFLOW__LINEAGE__DATAHUB_CONN_ID": datahub_rest_connection_config.conn_id,
},
), mock.patch("airflow.models.BaseOperator.xcom_pull", autospec=True), mock.patch(
"airflow.models.BaseOperator.xcom_push", autospec=True
), patch_airflow_connection(
datahub_rest_connection_config
):
func = mock.Mock()
func.__name__ = "foo"
dag = DAG(dag_id="test_lineage_is_sent_to_backend", start_date=DEFAULT_DATE)
with dag:
op1 = DummyOperator(
task_id="task1",
inlets=inlets,
outlets=outlets,
)
ti = TI(task=op1, execution_date=DEFAULT_DATE)
ctx1 = {
"dag": dag,
"task": op1,
"ti": ti,
"task_instance": ti,
"execution_date": DEFAULT_DATE,
"ts": "2021-04-08T00:54:25.771575+00:00",
}
prep = prepare_lineage(func)
prep(op1, ctx1)
post = apply_lineage(func)
post(op1, ctx1)
# Verify that the inlets and outlets are registered and recognized by Airflow correctly,
# or that our lineage backend forces it to.
assert len(op1.inlets) == 1
assert len(op1.outlets) == 1
assert all(map(lambda let: isinstance(let, Dataset), op1.inlets))
assert all(map(lambda let: isinstance(let, Dataset), op1.outlets))
# Check that the right things were emitted.
mock_emit.assert_called_once()
assert len(mock_emit.call_args[0][0]) == 4
assert all(mce.validate() for mce in mock_emit.call_args[0][0])
|
from ..compat import unwrap, wraps
from ..functional import merge
from ..utils import is_a, unique
def test_unique():
assert list(unique(iter([1, 3, 1, 2, 3]))) == [1, 3, 2]
def test_is_a():
assert is_a(int)(5)
assert not is_a(str)(5)
def test_wrap_and_unwrap():
def f(a, b, c): # pragma: nocover
pass
@wraps(f)
def g(*args): # pragma: nocover
pass
assert unwrap(g) is f
def test_merge():
assert merge([]) == {}
input = {"a": 1, "b": 2}
output = merge([input])
assert output is not input
assert output == input
result = merge([{"a": 1}, {"b": 2}, {"a": 3, "c": 4}])
assert result == {"a": 3, "b": 2, "c": 4}
|
#encoding: utf-8
categories = [
f'Category:{s}' for s in (
'Acrochordidae',
'Alethinophidia',
'Aniliidae',
'Anomalepidae',
'Anomochilidae',
'Boidae',
'Bolyeriidae',
'Colubrids',
'Colubrid_stubs',
'Crotalinae',
'Crotalis',
'Cylindrophiidae',
'Elapidae',
'Gerrhopilidae',
'Homalopsidae',
'Lamprophiidae',
'Leptotyphlopidae',
'Loxocemidae',
'Mambas',
'Pareidae',
'Pythonidae',
'Snakes',
'Snake_families',
'Snake_genera',
'Snake_stubs',
'Thamnophis',
'Tropidophiidae',
'Typhlopidae',
'Uropeltidae',
'Venomous_snakes',
'Viperidae',
'Viperinae',
'Xenodermidae',
'Xenopeltidae',
'Xenophidiidae',
'Xenotyphlopidae',
)
]
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def riscv_compliance_repos():
http_archive(
name = "riscv-compliance",
build_file = Label("//third_party/riscv-compliance:BUILD.riscv-compliance.bazel"),
sha256 = "d071d9e5ce07f1cc12fcd7fe6daa87194d0003ddb9cbb40967e98c2374809d07",
strip_prefix = "riscv-arch-test-5a978cfd444d5e640150d46703deda99057b2bbb",
urls = [
"https://github.com/riscv/riscv-compliance/archive/5a978cfd444d5e640150d46703deda99057b2bbb.tar.gz",
],
patches = [
Label("//third_party/riscv-compliance:0001-Add-configurable-trap-alignment-and-entry-point-to-p.patch"),
],
patch_args = ["-p1"],
)
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('navigation')
import rospy
import tf.transformations
from geometry_msgs.msg import Twist
import math
def callback(msg):
#rospy.loginfo("Received a /cmd_vel message!")
#rospy.loginfo("Linear Components: [%f, %f, %f]"%(msg.linear.x, msg.linear.y, msg.linear.z))
#rospy.loginfo("Angular Components: [%f, %f, %f]"%(msg.angular.x, msg.angular.y, msg.angular.z))
# Do velocity processing here:
# Use the kinematics of your robot to map linear and angular velocities into motor commands
x = msg.linear.x
z = msg.angular.z
#double y = msg.getAngular().getZ();
#if (Math.abs(x) < 0.02) x = 0;
#if (abs(y) < 0.02):
# y = 0
#s = -1 if x < 0 else 1
#speed = math.sqrt(x * x + y * y) / math.sqrt(2)
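# Differential-drive style mixing: scale the linear and angular commands, then form the two wheel speeds from their sum and difference (the constants 635, 70 and the /10 divisor appear to be robot-specific tuning values).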
lx = x * 635
lz = z * 70
#twist = - 200 * y / speed
speed1 = (lx + lz) / 10
speed2 = (lx - lz) / 10
rospy.loginfo("Speed1: %s Speed2: %s"% (speed1, speed2))
#if (Math.abs(y) < 0.02) y = 0;
#double s = x < 0 ? -1 : 1;
#double speed = Math.sqrt(x * x + y * y) / Math.sqrt(2);
#double twist = -200 * y / speed;
#v_l = ...
#v_r = ...
# Then set your wheel speeds (using wheel_left and wheel_right as examples)
#wheel_left.set_speed(v_l)
#wheel_right.set_speed(v_r)
def listener():
rospy.init_node('cmd_vel_listener')
rospy.Subscriber("/mobile_base/commands/velocity", Twist, callback)
rospy.spin()
if __name__ == '__main__':
listener()
|
from setuptools import setup
setup(
name='temboard-agent-sample-plugins',
version='1.0',
author='Dalibo',
author_email='[email protected]',
license='PostgreSQL',
install_requires=['temboard-agent'],
py_modules=['temboard_agent_sample_plugins'],
entry_points={
'temboardagent.plugins': [
'failing = temboard_agent_sample_plugins:Failing',
'hellong = temboard_agent_sample_plugins:Hello',
'inexistant = temboard_agent_sample_plugins:INEXISTANT',
],
},
)
|
"""Starlette middleware for Prerender."""
__version__ = "1.0.1"
from prerender_python_starlette.middleware import ( # noqa: F401
DEFAULT_CRAWLER_USER_AGENTS,
DEFAULT_EXTENSIONS_TO_IGNORE,
PrerenderMiddleware,
)
|
from __future__ import division
from __future__ import print_function
import numpy as np
import PIL
import matplotlib.pyplot as plt
import SimpleITK as sitk
import os
import pandas as pd
Dir = "/home/jana/Documents/PhD/ZajemInAnalizaSlike/Vaja1/"
zob = PIL.Image.open(Dir + "zob-microct.png")
zob.show()
print(zob.size) #get the size
print(zob.mode)
zob.getbands()
zobNP = np.array(zob)
print(zobNP.view())
print(np.size(zobNP, axis=1))
print(np.size(zobNP, axis=0))
print(zobNP[:2, :2])
plt.figure()
#plt.imshow(zobNP)
#plt.show()
#get the size of the image
x = np.size(zobNP, axis=0)
y = np.size(zobNP, axis=1)
#half the x and y size
zob_small = zobNP[:int(x/2), :int(y/2)]
plt.imshow(zob_small)
print(int(x/2), int(y/2))
#plt.show()
#save image
PIL.Image.fromarray(zob_small).save(Dir + "zobSmall.jpg")
#PIL.Image.fromarray(zob_small).convert("RGB").show()
#vaja 1.1 - simpleITK
#zobS = sitk.GetImageFromArray(zobNP)
zobS = sitk.ReadImage(Dir + "zob-microct.png")
print("Size: ", zobS.GetSize())
print("Tip: ", zobS.GetPixelIDValue())
print("Tip: ", zobS.GetPixelIDTypeAsString())
print("Korak: ", zobS.GetSpacing())
print("Koordinate: ", zobS.GetOrigin())
print("Smer: ", zobS.GetDirection())
#reset the sampling step (spacing)
zobSa = sitk.Image(zobS) #copy so the original image is not modified (plain assignment would only alias it)
zobSa.SetSpacing([2.0, 2.0])
print("New spacing: ", zobSa.GetSpacing())
#reset the origin
zobSa.SetOrigin([3.0, 0.5])
print("New origin: ", zobSa.GetOrigin())
sitk.WriteImage(zobSa, Dir + "Zob.nrrd")
sitk.WriteImage(zobSa, Dir + "Zob.nii.gz")
#load the .nrrd back
zobS1 = sitk.ReadImage(Dir + "Zob.nrrd")
print(".nrrd origin: ", zobS1.GetOrigin())
zobS1_array = sitk.GetArrayFromImage(zobS1)
print(zobS1_array.view())
#convert the array back into an image - check whether the origin was preserved
zobB = sitk.GetImageFromArray(zobS1_array)
print("Origin from array: ", zobB.GetOrigin())
print("Spacing from array: ", zobB.GetSpacing())
#restoration of medical images
#filtering
zobS_filter = sitk.Mean(zobS)
sitk.WriteImage(zobS_filter, Dir + "Zob_filterMean.png")
zobS_filter = sitk.Median(zobS)
sitk.WriteImage(zobS_filter, Dir + "Zob_filterMedian.png")
zobFloat = sitk.Cast( zobS, sitk.sitkFloat32 )
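#GradientAnisotropicDiffusion expects a real (floating-point) pixel type, hence the cast above and the cast back to UInt8 below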
#non-linear filtering
zobSc = sitk.GradientAnisotropicDiffusion(zobFloat, timeStep=0.125,
conductanceParameter=int(2.0),
conductanceScalingUpdateInterval=int(1),
numberOfIterations=int(20))
ZOB = sitk.Cast( zobSc, sitk.sitkUInt8 )
sitk.WriteImage(ZOB, Dir + "Zob_Anisotropic1.jpg")
|
phone = "+123456789"
mongo_uri = "mongodb://localhost:27017"
device_uuid = None # str(uuid.uuid1())
api_uri = "./api.yaml" # from https://github.com/zhuowei/ClubhouseAPI
max_retries = 5
token_file = None
fresh_interval = 3600
user_limit = 500
first_run = False # not used
|
def RadiometricRecordType():
'''
Radiometric Record.
https://www.eorc.jaxa.jp/ALOS-2/en/doc/fdata/PALSAR-2_xx_Format_CEOS_E_f.pdf
'''
from isce3.parsers.CEOS.BasicTypes import (
BlankType, BinaryType, StringType, IntegerType, FloatType, MultiType)
from isce3.parsers.CEOS.CEOSHeaderType import CEOSHeaderType
#Common part of CEOS header (modified)
inlist = CEOSHeaderType().mapping
inlist += [('SARChannelIndicator', BinaryType('>i4')),
('NumberOfDataSets', BinaryType('>i4')),
('CalibrationFactor', FloatType(16)),
('RealPartOfDT1,1', FloatType(16)),
('ImaginaryPartOfDT1,1', FloatType(16)),
('RealPartOfDT1,2', FloatType(16)),
('ImaginaryPartOfDT1,2', FloatType(16)),
('RealPartOfDT2,1', FloatType(16)),
('ImaginaryPartOfDT2,1', FloatType(16)),
('RealPartOfDT2,2', FloatType(16)),
('ImaginaryPartOfDT2,2', FloatType(16)),
('RealPartOfDR1,1', FloatType(16)),
('ImaginaryPartOfDR1,1', FloatType(16)),
('RealPartOfDR1,2', FloatType(16)),
('ImaginaryPartOfDR1,2', FloatType(16)),
('RealPartOfDR2,1', FloatType(16)),
('ImaginaryPartOfDR2,1', FloatType(16)),
('RealPartOfDR2,2', FloatType(16)),
('ImaginaryPartOfDR2,2', FloatType(16)),
('SkipBytes', BlankType(9577))]
return MultiType( inlist )
|
import csv
import numpy as np
import matplotlib.pyplot as plt
from prettytable import PrettyTable
from datetime import datetime as date
# load data from csv files
def load():
slot_no = 300
supervisor_no = 47
presentation_no = 118
preference_no = 3
presentation_supervisor = np.zeros([presentation_no, supervisor_no], dtype=np.int8)
supervisor_slot = np.zeros([supervisor_no, slot_no], dtype=np.int8)
supervisor_preference = np.zeros([supervisor_no, 2 * preference_no], dtype=np.int8)
# read supExaAssign.csv
with open('input_files/SupExaAssign.csv') as file:
csv_reader = csv.reader(file, delimiter=',')
next(csv_reader)
for row in csv_reader:
i = int(row[0][1:]) - 1 # only underscores in P___ will be considered
for col in range(1, 4):
j = int(row[col][2:]) - 1 # only underscores in S0__ will be considered
presentation_supervisor[i][j] = 1
presentation_presentation = np.dot(presentation_supervisor, presentation_supervisor.transpose())
# presentations supervised by same examiners are marked with 1
presentation_presentation[presentation_presentation >= 1] = 1
np.fill_diagonal(presentation_presentation, 0) # mark diagonal with 0 so penalty points can be calculated correctly
# read HC04.csv (staff unavailability)
with open('input_files/HC04.csv') as file:
csv_reader = csv.reader(file, delimiter=',')
for row in csv_reader:
i = int(row[0][2:]) - 1 # only underscores in S0__ will be considered
j = [int(_) - 1 for _ in row[1:]]
supervisor_slot[i][j] = 1
slot_presentation = np.dot(supervisor_slot.transpose(), presentation_supervisor.transpose())
slot_presentation[slot_presentation >= 1] = -1 # unavailable slots for presentation are marked with -1
# read HC03.csv (venue unavailability)
with open('input_files/HC03.csv') as file:
csv_reader = csv.reader(file, delimiter=',')
for row in csv_reader:
i = [int(_) - 1 for _ in row[1:]]
slot_presentation[i, :] = -1 # unavailable slots for presentation are marked with -1
# read SC01.csv (consecutive presentations)
with open('input_files/SC01.csv') as file:
csv_reader = csv.reader(file, delimiter=',')
for row in csv_reader:
i = int(row[0][2:]) - 1 # only underscores in S0__ will be considered
supervisor_preference[i][0] = int(row[1])
# read SC02.csv (number of days)
with open('input_files/SC02.csv') as file:
csv_reader = csv.reader(file, delimiter=',')
for row in csv_reader:
i = int(row[0][2:]) - 1 # only underscores in S0__ will be considered
supervisor_preference[i][1] = int(row[1])
# read SC03.csv (change of venue)
with open('input_files/SC03.csv') as file:
csv_reader = csv.reader(file, delimiter=',')
for row in csv_reader:
i = int(row[0][2:]) - 1 # only underscores in S0__ will be considered
supervisor_preference[i][2] = 1 if row[1] == "yes" else 0
return slot_presentation, presentation_presentation, presentation_supervisor, supervisor_preference
# write result to csv file with timestamp
def write(slot_presentation, supervisor_preference, constraints_count, plot_data):
timestamp = date.now().strftime("[%Y-%m-%d %H-%M-%S]")
# plot graph
title = (f"Improvement of Presentation Scheduling over Iterations\n"
f"[Hard Constraints Violated:] {constraints_count[1]} "
f"[Soft Constraints Violated:] {constraints_count[2]}\n"
f"[Final Penalty Points:] {constraints_count[0]}")
plt.title(title)
plt.xlabel("Number of Iterations")
plt.ylabel("Penalty Points")
plt.axis([0, len(plot_data), 0, max(plot_data)])
plt.plot(plot_data, "r--")
plt.grid(True)
plt.ioff()
graph_name = f"graph {timestamp}"
plt.savefig(graph_name)  # save before show(); a blocking show() closes the figure, leaving an empty one to save afterwards
plt.show()
# draw schedule
venue_no = 4
time_slot_no = 15
day_slot_no = venue_no * time_slot_no
day_no = 5
slot_no = day_slot_no * day_no
venues = ["Viva Room", "Meeting Room", "Interaction Room", "BJIM"]
days = ["Mon", "Tues", "Wed", "Thu", "Fri"]
schedule = PrettyTable()
schedule.field_names = ["Day", "Venue",
"0900-0930", "0930-1000", "1000-1030",
"1030-1100", "1100-1130", "1130-1200",
"1200-1230", "1230-1300", "1400-1430",
"1430-1500", "1500-1530", "1530-1600",
"1600-1630", "1630-1700", "1700-1730"]
venue = 0
day = 0
for first_slot in range(0, slot_no, time_slot_no):
row = []
if venue == 0:
row.append(days[day])
else:
row.append("")
row.append(venues[venue])
for slot in range(first_slot, first_slot + time_slot_no):
presentation = np.where(slot_presentation[slot] == 1)[0]
if len(presentation) == 0:
row.append("")
else:
presentation = presentation[0] + 1
row.append("P" + str(presentation))
schedule.add_row(row)
venue += 1
if venue == venue_no:
venue = 0
day += 1
schedule.add_row([""] * (2 + time_slot_no))
print("\n", schedule, "\n")
# print supervisor-related data
supervisor_no = supervisor_preference.shape[0]
for supervisor in range(supervisor_no):
venue_preference = "No" if supervisor_preference[supervisor][2] else "Yes"
print(f"[Supervisor S{str(supervisor + 1).zfill(3)}] "
f"[No. of Continuous Presentations: {supervisor_preference[supervisor][3]}] "
f"[Day Preference: {supervisor_preference[supervisor][1]}] "
f"[Days: {supervisor_preference[supervisor][4]}] "
f"[Venue Change Preference: {venue_preference}] "
f"[Venue Changes: {supervisor_preference[supervisor][5]}]")
# write result data to csv file with timestamp
filename = f"result {timestamp}.csv"
with open(filename, 'w', newline='') as file:
writer = csv.writer(file)
for slot in range(slot_presentation.shape[0]):
presentation = np.where(slot_presentation[slot] == 1)[0]
if len(presentation) == 0: # empty if no presentation is found for the slot
writer.writerow(["null", ""])
else:
presentation = presentation[0] + 1 # Access x in array([x])
writer.writerow(["P" + str(presentation), ""])
|
import logging
import paho.mqtt.client as mqtt
_LOGGER = logging.getLogger(__name__)
class MQTTClient:
def __init__(self, host, user, pwd):
self.mqttc = mqtt.Client()
self.mqttc.enable_logger()
self.host = host
self.user = user
self.pwd = pwd
def on_disconnect(client, userdata, rc):
if rc != 0:
_LOGGER.info(f"Unexpected MQTT disconnection. rc = {rc}. Will auto-reconnect")
self.mqttc.on_disconnect = on_disconnect
self.mqttc.username_pw_set(self.user, self.pwd)
self.mqttc.reconnect_delay_set()
def publish(self, topic, payload=None, qos=0, retain=False):
(rc, message_id) = self.mqttc.publish(topic, payload, qos, retain)
_LOGGER.debug(f"published to {topic}: {payload}. response: {(rc, message_id)}")
def connect(self):
self.mqttc.connect(self.host, 1883, 60)
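# Example usage (sketch; host and credentials are placeholders):
#   client = MQTTClient("broker.local", "user", "secret")
#   client.connect()
#   client.publish("sensors/temperature", "21.5", qos=1, retain=True)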
|
import setuptools
__version__="0.0.1"
with open("README.md", 'r') as fh:
long_description = fh.read()
setuptools.setup(
name="pis_client",
author="Gishobert Gwenzi",
author_email="[email protected]",
version="0.0.1",
description="Python PIS client",
licence="MIT",
long_description=long_description,
url="http://github.com/ignertic/pis_client",
packages=setuptools.find_packages(),
install_requires=['loguru', 'requests'],
entry_points={"console_scripts" : [""]},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent"])
|
import unittest
import pygame
import os
import Tests.tests_env as T
from utils.bird import Bird
path = os.getcwd()
os.chdir(path + '/..')
T.init()
class Test_bird(unittest.TestCase):
def test_create_bird(self):
surf = T.create_surface()
bird = Bird()
bird.falling = True
self.assertEqual((bird.x, bird.y), (200, 200))
for i in range(len(bird.Bird_imgs)):
self.assertIsInstance(bird.Bird_imgs[i], pygame.Surface)
bird.draw(surf)
self.assertTrue(surf.get_at((bird.x + bird.img.get_width() // 2,
bird.y + bird.img.get_height() // 2)) != T.white)
def test_bird_move(self):
bird = Bird()
bird.falling = True
# test that the bird's jump does not go beyond the ceiling
tick = 0
while tick < 100:
tick += 1
bird.jump()
bird.move()
self.assertGreater(bird.y, -1)
# test that the bird's fall does not go below the ground
tick = 0
while tick < 100:
tick += 1
bird.move()
self.assertLess(bird.y, 550)
def test_bird_draw(self):
os.chdir(path + '/..')
surf = T.create_surface()
bird = Bird()
for i in range(100):
bird.draw(surf)
self.assertTrue(surf.get_at((bird.x + bird.img.get_width() // 2,
bird.y + bird.img.get_height() // 2)) != T.white)
# pygame.image.save(surf, "test_bird_draw.png")
T.end()
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2018 Fujitsu Limited.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import mock
from neutron.tests.unit.api.v2 import test_base
from neutron_lib.services.logapi import constants as log_const
from neutron_fwaas.privileged.netfilter_log import libnetfilter_log as libnflog
from neutron_fwaas.services.logapi.agents.drivers.iptables import log
from neutron_fwaas.tests import base
FAKE_PROJECT_ID = 'fake_project_id'
FAKE_PORT_ID = 'fake_port_id'
FAKE_FWG_ID = 'fake_fwg_id'
FAKE_LOG_ID = 'fake_log_id'
FAKE_RESOURCE_TYPE = 'firewall_group'
FAKE_RATE = 100
FAKE_BURST = 25
class TestLogPrefix(base.BaseTestCase):
def setUp(self):
super(TestLogPrefix, self).setUp()
self.log_prefix = log.LogPrefix(FAKE_PORT_ID,
'fake_event',
FAKE_PROJECT_ID)
self.log_prefix.log_object_refs = set([FAKE_LOG_ID])
def test_add_log_obj_ref(self):
added_log_id = test_base._uuid
expected_log_obj_ref = set([FAKE_LOG_ID, added_log_id])
self.log_prefix.add_log_obj_ref(added_log_id)
self.assertEqual(expected_log_obj_ref, self.log_prefix.log_object_refs)
def test_remove_log_obj_ref(self):
expected_log_obj_ref = set()
self.log_prefix.remove_log_obj_ref(FAKE_LOG_ID)
self.assertEqual(expected_log_obj_ref, self.log_prefix.log_object_refs)
def test_is_empty(self):
self.log_prefix.remove_log_obj_ref(FAKE_LOG_ID)
result = self.log_prefix.is_empty
self.assertEqual(True, result)
class BaseIptablesLogTestCase(base.BaseTestCase):
def setUp(self):
super(BaseIptablesLogTestCase, self).setUp()
self.iptables_manager_patch = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
self.iptables_manager_mock = self.iptables_manager_patch.start()
resource_rpc_mock = mock.Mock()
self.iptables_mock = mock.Mock()
self.v4filter_mock = mock.Mock()
self.v6filter_mock = mock.Mock()
self.iptables_mock.ipv4 = {'filter': self.v4filter_mock}
self.iptables_mock.ipv6 = {'filter': self.v6filter_mock}
self.log_driver = log.IptablesLoggingDriver(mock.Mock())
self.log_driver.iptables_manager = self.iptables_mock
self.log_driver.resource_rpc = resource_rpc_mock
self.context = mock.Mock()
self.log_driver.agent_api = mock.Mock()
def test_start_logging(self):
fake_router_info = mock.Mock()
fake_router_info.router_id = 'fake_router_id'
fake_router_info.ns_name = 'fake_namespace'
libnflog.run_nflog = mock.Mock()
self.log_driver._create_firewall_group_log = mock.Mock()
# Test with router_info that has internal ports
fake_router_info.internal_ports = [
{'id': 'fake_port1'},
{'id': 'fake_port2'},
]
fake_kwargs = {
'router_info': fake_router_info
}
self.log_driver.ports_belong_router = defaultdict(set)
self.log_driver.start_logging(self.context, **fake_kwargs)
self.log_driver._create_firewall_group_log.\
assert_called_once_with(self.context,
FAKE_RESOURCE_TYPE,
ports=fake_router_info.internal_ports,
router_id=fake_router_info.router_id)
# Test with log_resources
fake_kwargs = {
'log_resources': 'fake'
}
self.log_driver._create_firewall_group_log.reset_mock()
self.log_driver.start_logging(self.context, **fake_kwargs)
self.log_driver._create_firewall_group_log. \
assert_called_once_with(self.context,
FAKE_RESOURCE_TYPE,
**fake_kwargs)
def test_stop_logging(self):
fake_kwargs = {
'log_resources': 'fake'
}
self.log_driver._delete_firewall_group_log = mock.Mock()
self.log_driver.stop_logging(self.context, **fake_kwargs)
self.log_driver._delete_firewall_group_log.\
assert_called_once_with(self.context, **fake_kwargs)
fake_kwargs = {
'fake': 'fake'
}
self.log_driver._delete_firewall_group_log.reset_mock()
self.log_driver.stop_logging(self.context, **fake_kwargs)
self.log_driver._delete_firewall_group_log.assert_not_called()
def test_clean_up_unused_ipt_mgrs(self):
f_router_ids = ['r1', 'r2', 'r3']
self.log_driver.ipt_mgr_list = self._fake_ipt_mgr_list(f_router_ids)
# Test with a port is delete from router
self.log_driver.unused_port_ids = set(['r1_port1'])
self.log_driver._cleanup_unused_ipt_mgrs()
self.assertEqual(set(), self.log_driver.unused_port_ids)
self.assertIsNone(self.log_driver.ipt_mgr_list['r1'].get('r1_port1'))
# Test with all ports are deleted from router
self.log_driver.unused_port_ids = set(['r2_port1', 'r2_port2'])
self.log_driver._cleanup_unused_ipt_mgrs()
self.assertEqual(set(), self.log_driver.unused_port_ids)
self.assertIsNone(self.log_driver.ipt_mgr_list.get('r2'))
def test_get_intf_name(self):
fake_router = mock.Mock()
fake_port_id = 'fake_router_port_id'
# Test with legacy router
self.log_driver.conf.agent_mode = 'legacy'
fake_router.router = {
'fake': 'fake_mode'
}
with mock.patch.object(self.log_driver.agent_api,
'get_router_hosting_port',
return_value=fake_router):
intf_name = self.log_driver._get_intf_name(fake_port_id)
expected_name = 'qr-fake_router'
self.assertEqual(expected_name, intf_name)
# Test with dvr router
self.log_driver.conf.agent_mode = 'dvr_snat'
fake_router.router = {
'distributed': 'fake_mode'
}
with mock.patch.object(self.log_driver.agent_api,
'get_router_hosting_port',
return_value=fake_router):
intf_name = self.log_driver._get_intf_name(fake_port_id)
expected_name = 'sg-fake_router'
self.assertEqual(expected_name, intf_name)
# Test with fip dev
self.log_driver.conf.agent_mode = 'dvr_snat'
fake_router.router = {
'distributed': 'fake_mode'
}
fake_router.rtr_fip_connect = 'fake'
self.log_driver.conf.agent_mode = 'fake'
with mock.patch.object(self.log_driver.agent_api,
'get_router_hosting_port',
return_value=fake_router):
intf_name = self.log_driver._get_intf_name(fake_port_id)
expected_name = 'rfp-fake_route'
self.assertEqual(expected_name, intf_name)
def test_setup_chains(self):
self.log_driver._add_nflog_rules_accepted = mock.Mock()
self.log_driver._add_log_rules_dropped = mock.Mock()
m_ipt_mgr = mock.Mock()
m_fwg_port_log = mock.Mock()
# Test with ALL event
m_fwg_port_log.event = log_const.ALL_EVENT
self.log_driver._setup_chains(m_ipt_mgr, m_fwg_port_log)
self.log_driver._add_nflog_rules_accepted.\
assert_called_once_with(m_ipt_mgr, m_fwg_port_log)
self.log_driver._add_log_rules_dropped.\
assert_called_once_with(m_ipt_mgr, m_fwg_port_log)
# Test with ACCEPT event
self.log_driver._add_nflog_rules_accepted.reset_mock()
self.log_driver._add_log_rules_dropped.reset_mock()
m_fwg_port_log.event = log_const.ACCEPT_EVENT
self.log_driver._setup_chains(m_ipt_mgr, m_fwg_port_log)
self.log_driver._add_nflog_rules_accepted.\
assert_called_once_with(m_ipt_mgr, m_fwg_port_log)
self.log_driver._add_log_rules_dropped.assert_not_called()
# Test with DROP event
self.log_driver._add_nflog_rules_accepted.reset_mock()
self.log_driver._add_log_rules_dropped.reset_mock()
m_fwg_port_log.event = log_const.DROP_EVENT
self.log_driver._setup_chains(m_ipt_mgr, m_fwg_port_log)
self.log_driver._add_nflog_rules_accepted.assert_not_called()
self.log_driver._add_log_rules_dropped.\
assert_called_once_with(m_ipt_mgr, m_fwg_port_log)
def test_add_nflog_rules_accepted(self):
ipt_mgr = mock.Mock()
f_accept_prefix = log.LogPrefix(FAKE_PORT_ID,
log_const.ACCEPT_EVENT,
FAKE_PROJECT_ID)
f_port_log = self._fake_port_log('fake_log_id',
log_const.ACCEPT_EVENT,
FAKE_PORT_ID)
self.log_driver._add_rules_to_chain_v4v6 = mock.Mock()
self.log_driver._get_ipt_mgr_by_port = mock.Mock(return_value=ipt_mgr)
self.log_driver._get_intf_name = mock.Mock(return_value='fake_device')
with mock.patch.object(self.log_driver, '_get_prefix',
side_effect=[f_accept_prefix, None]):
# Test with prefix already added into prefixes_table
self.log_driver._add_nflog_rules_accepted(ipt_mgr, f_port_log)
self.log_driver._add_rules_to_chain_v4v6.assert_not_called()
self.assertEqual(set(['fake_log_id']),
f_accept_prefix.log_object_refs)
# Test with prefixes_tables does not include the prefix
prefix = log.LogPrefix(FAKE_PORT_ID,
log_const.ACCEPT_EVENT, FAKE_PROJECT_ID)
with mock.patch.object(log, 'LogPrefix', return_value=prefix):
self.log_driver._add_nflog_rules_accepted(ipt_mgr, f_port_log)
v4_rules, v6_rules = self._fake_nflog_rule_v4v6('fake_device',
prefix.id)
self.log_driver._add_rules_to_chain_v4v6.\
assert_called_once_with(ipt_mgr, 'accepted',
v4_rules, v6_rules,
wrap=True, top=True, tag=prefix.id)
self.assertEqual(set(['fake_log_id']),
prefix.log_object_refs)
def test_add_nflog_rules_dropped(self):
ipt_mgr = mock.Mock()
f_drop_prefix = log.LogPrefix(FAKE_PORT_ID,
log_const.DROP_EVENT,
FAKE_PROJECT_ID)
f_port_log = self._fake_port_log('fake_log_id',
log_const.DROP_EVENT,
FAKE_PORT_ID)
self.log_driver._add_rules_to_chain_v4v6 = mock.Mock()
self.log_driver._get_ipt_mgr_by_port = mock.Mock(return_value=ipt_mgr)
self.log_driver._get_intf_name = mock.Mock(return_value='fake_device')
with mock.patch.object(self.log_driver, '_get_prefix',
side_effect=[f_drop_prefix, None]):
# Test with prefix already added into prefixes_table
self.log_driver._add_log_rules_dropped(ipt_mgr, f_port_log)
self.log_driver._add_rules_to_chain_v4v6.assert_not_called()
self.assertEqual(set(['fake_log_id']),
f_drop_prefix.log_object_refs)
# Test with prefixes_tables does not include the prefix
prefix = log.LogPrefix(FAKE_PORT_ID,
log_const.ACCEPT_EVENT, FAKE_PROJECT_ID)
with mock.patch.object(log, 'LogPrefix', return_value=prefix):
self.log_driver._add_log_rules_dropped(ipt_mgr, f_port_log)
v4_rules, v6_rules = self._fake_nflog_rule_v4v6('fake_device',
prefix.id)
calls = [
mock.call(ipt_mgr, 'dropped', v4_rules, v6_rules,
wrap=True, top=True, tag=prefix.id),
mock.call(ipt_mgr, 'rejected', v4_rules, v6_rules,
wrap=True, top=True, tag=prefix.id),
]
self.log_driver._add_rules_to_chain_v4v6.\
assert_has_calls(calls)
self.assertEqual(set(['fake_log_id']),
prefix.log_object_refs)
def _fake_port_log(self, log_id, event, port_id):
f_log_info = {
'event': event,
'project_id': FAKE_PROJECT_ID,
'id': log_id
}
return log.FWGPortLog(port_id, f_log_info)
def _fake_nflog_rule_v4v6(self, device, tag):
v4_nflog_rule = ['-i %s -m limit --limit %s/s --limit-burst %s '
'-j NFLOG --nflog-prefix %s'
% (device, FAKE_RATE, FAKE_BURST, tag)]
v4_nflog_rule += ['-o %s -m limit --limit %s/s --limit-burst %s '
'-j NFLOG --nflog-prefix %s'
% (device, FAKE_RATE, FAKE_BURST, tag)]
v6_nflog_rule = ['-i %s -m limit --limit %s/s --limit-burst %s '
'-j NFLOG --nflog-prefix %s'
% (device, FAKE_RATE, FAKE_BURST, tag)]
v6_nflog_rule += ['-o %s -m limit --limit %s/s --limit-burst %s '
'-j NFLOG --nflog-prefix %s'
% (device, FAKE_RATE, FAKE_BURST, tag)]
return v4_nflog_rule, v6_nflog_rule
def _fake_ipt_mgr_list(self, router_ids):
f_ipt_mgrs = defaultdict(dict)
for router_id in router_ids:
f_port_id1 = router_id + '_port1'
f_port_id2 = router_id + '_port2'
ipt_mgr = mock.Mock()
ipt_mgr.ns_name = 'ns_' + router_id
f_ipt_mgrs[router_id][f_port_id1] = ipt_mgr
f_ipt_mgrs[router_id][f_port_id2] = ipt_mgr
return f_ipt_mgrs
|
import functools
import os
import re
import ipywidgets as widgets
import numpy as np
import pandas as pd
import qgrid
from IPython.display import display
from IPython.display import Javascript
from IPython.display import Markdown
from ipywidgets import fixed
from ipywidgets import interact
from ipywidgets import interact_manual
from ipywidgets import interactive
from ipywidgets import Layout
def downselect(f):
"""This is a decorator run before each downselect to catch any errors and display the downselect methods
output. This can be configured to log and/or report an error reporting tool like Sentry."""
@functools.wraps(f)
def wrap(self, *args, **kwargs):
if os.getenv('TESTING') is not None:
# Skip the decorator logic while testing
return f(self, *args, **kwargs)
else:
try:
result = f(self, *args, **kwargs)
if isinstance(result, pd.DataFrame):
if result.empty or len(result.index) < 1:
return display(Markdown('**No results returned**'))
else:
return display(qgrid.show_grid(result, show_toolbar=True))
elif result is None:
return display(Markdown('**No results returned**'))
else:
return result
except Exception as e:
display(Markdown(f'An error has been reported: <span style="color:red">{e}</span>'))
pass # Error reporting can be configured here!
return wrap
class Downselects:
"""This class is responsible for providing necessary values for downselect functions.
Args:
hunt_data_df (dataframe): original hunt data
Attributes:
df (dataframe): original hunt data
"""
def __init__(self, hunt_data_df):
self.df = hunt_data_df
def get_df(self, df=None):
"""Returns the hunt_data dataframe otherwise a passed in dataframe."""
if df is not None:
return df
else:
return self.df.copy()
@downselect
def general_frequency_trend(self, groupby, uniqued, df=None):
"""Returns a dataframe of unique 'groupby' values and the frequency of the 'uniqued' column
Args:
groupby (list): Columns to group the dataframe records by
uniqued (str): Column to get unique count of
df (dataframe): passed in dataframe to run this downselect on
Returns:
Returns the dataframe object
"""
df = self.get_df(df=df)
if df.empty:
return None
return df.groupby(groupby).nunique()[[uniqued]].rename(columns={uniqued: 'count'})
@downselect
def column_frequency_count(self, column, df=None):
"""Returns a dataframe of unique instances of 'column' values and their frequencies
Args:
column (str): The column to unique on
df (dataframe): passed in dataframe to run this downselect on
Returns:
Returns the dataframe object
"""
df = self.get_df(df=df)
if df.empty:
return None
tmpdf = df.groupby([column]).size().reset_index(name='count')
return tmpdf.sort_values(by=['count'], ascending=False).reset_index(drop=True)
@downselect
def column_group(self, column_list, sort_by, df=None):
"""Returns a dataframe grouped by 'column_list' and sorted by 'sort_by'
Args:
column_list (list): The columns to group by
sort_by (str): Column to sort by
df (dataframe): passed in dataframe to run this downselect on
Returns:
Returns the dataframe object
"""
df = self.get_df(df=df)
if df.empty:
return None
tmpdf = df[column_list].drop_duplicates()
return tmpdf.sort_values(by=[sort_by], ascending=False).reset_index(drop=True)
@downselect
def spread_counts_report(self, search_list, field_list, df=None):
"""Display a spread counts report an item in 'search_list'
Args:
search_list (list): columns to apply spread counts on
field_list (list): columns to report within the resulting dataframe
df (dataframe): passed in dataframe to run this downselect on
Returns:
Returns result as Markdown
"""
df = self.get_df(df=df)
if df.empty:
return None
tmpdf = df[field_list].copy()
tmpdf[field_list] = tmpdf[field_list].astype(str).fillna(value='-')
tmpdf.reset_index(drop=True)
def spread_counts_helper(field, temp_frame):
def x(cnt): return "is" if cnt == 1 else "are"
def y(cnt): return " " if cnt == 1 else "s "
def z(cnt): return " " if cnt == 1 else "es "
try:
if field == 'process_hash':
hash_exe_pat = temp_frame.copy()
hash_exe_pat = hash_exe_pat.drop_duplicates(
subset='process_hash', keep='first').set_index('process_hash', drop=False)
phash_list = temp_frame.process_hash.unique().tolist()
output = ""
output += '## Process Hash\n\n'
lph = len(phash_list)
output += f'There {x(lph)} {lph} unique process hash{z(lph)}associated with your search.\n\n'
h_head = f'| Unique Process Hash{z(lph)}| Count in Hunt Data | Engine Hits | Engines Total | Detected By | Detected As |\n| ----------- | ----------- | ----------- | ----------- | ----------- | ----------- |\n'
for h in phash_list:
h_cnt = df.loc[df.process_hash == h, 'process_hash'].count()
hit = hash_exe_pat.loc[h, 'engines_hit']
etl = hash_exe_pat.loc[h, 'engines_total']
dby = hash_exe_pat.loc[h, 'detected_by']
das = hash_exe_pat.loc[h, 'detected_as']
h_md = f'| {h} | {h_cnt} | {str(hit)} | {str(etl)} | {str(dby)} | {str(das)} |\n'
h_head += h_md
output += h_head
return output
else:
uniq_list = temp_frame[field].unique().tolist()
output = ""
output += f'## {field.replace("_", " ").capitalize()} List\n\n'
cnt = len(uniq_list)
output += f'There {x(cnt)} {cnt} unique {field.replace("_", " ")}{y(cnt)}associated with your search.\n\n'
heading = f'| Unique {field.replace("_", " ")}{y(cnt)}| Count in Hunt Data |\n| ----------- | ----------- |\n'
for i in uniq_list:
i_cnt = df.loc[df[field] == i, field].count()
i_md = f'| {i} | {i_cnt} |\n'
heading += i_md
output += heading
return output
except Exception as e:
return f'An error has been reported: <span style="color:red">{e}</span>'
@interact
def report_display(column=search_list, search=''):
try:
if search == '':
return display(Markdown('**Input Search Above.**'))
else:
temp_frame = tmpdf[tmpdf[column].str.contains(search, flags=re.IGNORECASE, regex=True)]
for i in search_list:
display(Markdown(spread_counts_helper(i, temp_frame)))
display(Markdown(" "))
display(Markdown('## Rows From Hunt Data'))
display(temp_frame)
except Exception as e:
display(Markdown(f'An error has been reported: <span style="color:red">{e}</span>'))
@downselect
def java_exploit(self, column_list, df=None):
"""Returns any data where java is the parent process
Args:
column_list (list): columns to display in dataframe
df (dataframe): passed in dataframe to run this downselect on
Returns:
Returns the dataframe object
"""
df = self.get_df(df=df)
if df.empty:
return None
return df[(df['parent_name'] == 'java.exe') | (df['parent_name'] == 'javaw.exe')][column_list]
@downselect
def office_exploit(self, column_list, df=None):
"""Returns any data where Microsoft Office products are the parent process
Args:
column_list (list): columns to display in dataframe
df (dataframe): passed in dataframe to run this downselect on
Returns:
Returns the dataframe object
"""
df = self.get_df(df=df)
if df.empty:
return None
return df[(df['parent_name'] == 'winword.exe') | (df['parent_name'] == 'excel.exe')
| (df['parent_name'] == 'powerpnt.exe')][column_list]
@downselect
def adobe_exploit(self, column_list, df=None):
"""Returns any data where adobe is the parent process
Args:
column_list (list): columns to display in dataframe
df (dataframe): passed in dataframe to run this downselect on
Returns:
Returns the dataframe object
"""
df = self.get_df(df=df)
if df.empty:
return None
return df[(df['parent_name'] == 'acrobat.exe') | (
df['parent_name'] == 'acrord32.exe')][column_list]
@downselect
def web_shell_exploit(self, column_list, df=None):
"""Returns any data where web applications are the parent process
Args:
column_list (list): columns to display in dataframe
df (dataframe): passed in dataframe to run this downselect on
Returns:
Returns the dataframe object
"""
df = self.get_df(df=df)
if df.empty:
return None
return df[(df['parent_name'] == 'w3wp.exe') | (df['parent_name'] == 'tomcat.exe') | (
df['parent_name'] == 'httpd.exe') | (df['parent_name'] == 'nginx.exe')][column_list]
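# Example usage (sketch; 'parent_name' and 'process_hash' are columns referenced by the methods above,
# and hunt_data_df is a placeholder for the hunt-data DataFrame):
#   ds = Downselects(hunt_data_df)
#   ds.column_frequency_count('parent_name')                  # frequency of each unique parent_name
#   ds.general_frequency_trend(['parent_name'], 'process_hash')
#   ds.java_exploit(['parent_name', 'process_hash'])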
|
# Copyright 2016, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Resource definition
"""
#
# IMPORTS
#
from flask_potion import fields
from tessia.server.api.resources.secure_resource import NAME_PATTERN
from tessia.server.api.resources.secure_resource import SecureResource
from tessia.server.db.models import StorageServer
#
# CONSTANTS AND DEFINITIONS
#
DESC = {
'name': 'Name',
'hostname': 'Hostname',
'model': 'Model',
'type': 'Server type',
'fw_level': 'Firmware level',
'modified': 'Last modified',
'desc': 'Description',
'modifier': 'Modified by',
'project': 'Project',
'owner': 'Owner',
}
#
# CODE
#
class StorageServerResource(SecureResource):
"""
Resource for storage servers
"""
class Meta:
"""
Potion's meta section
"""
# the sqlalchemy model
model = StorageServer
# name of the resource in the url
name = 'storage-servers'
# not used at the moment
exclude_fields = ['attributes', 'username', 'password']
title = 'Storage Server'
description = 'A storage server contains many storage volumes'
# custom attribute to define one or more schema fields that have a
# human description for an item, used by integrity exceptions to
# parse db errors.
human_identifiers = ['name']
class Schema:
"""
Potion's schema section
"""
# it seems that 'title' attribute would be better than 'description'
# (according to json spec) but our client does not support it therefore
# we set both
name = fields.String(
title=DESC['name'], description=DESC['name'], pattern=NAME_PATTERN)
hostname = fields.String(
title=DESC['hostname'], description=DESC['hostname'],
nullable=True)
model = fields.String(
title=DESC['model'], description=DESC['model'])
fw_level = fields.String(
title=DESC['fw_level'], description=DESC['fw_level'],
nullable=True)
modified = fields.DateTime(
title=DESC['modified'], description=DESC['modified'], io='r')
desc = fields.String(
title=DESC['desc'], description=DESC['desc'], nullable=True)
# relations
type = fields.String(title=DESC['type'], description=DESC['type'])
modifier = fields.String(
title=DESC['modifier'], description=DESC['modifier'], io='r')
project = fields.String(
title=DESC['project'], nullable=True, description=DESC['project'])
owner = fields.String(
title=DESC['owner'], nullable=True, description=DESC['owner'])
# StorageServerResource
|
from os import environ
from mongoengine import connect
from hashlib import sha256
from Regions.region import Region
if __name__ == '__main__':
regions = ['BOGOTA', 'VALLE', 'ANTIOQUIA', 'CARTAGENA', 'HUILA', 'META',
'RISARALDA', 'NORTE SANTANDER', 'CALDAS', 'CUNDINAMARCA', 'BARRANQUILLA', 'SANTANDER',
'QUINDIO', 'TOLIMA', 'CAUCA', 'STA MARTA D.E.', 'CESAR', 'SAN ANDRES', 'CASANARE',
'NARIÑO', 'ATLANTICO', 'BOYACA', 'CORDOBA', 'BOLIVAR', 'SUCRE', 'MAGDALENA', 'GUAJIRA',
'CHOCO', 'AMAZONAS', 'CAQUETA', 'PUTUMAYO', 'ARAUCA', 'VAUPES', 'GUAINIA', 'VICHADA', 'GUAVIARE', 'Tolima',
'BOGOTA', 'BUGA', 'MEDELLIN', 'ITAGUI', 'CARTAGENA', 'NEIVA', 'PALMIRA', 'VILLAVICENCIO',
'RIONEGRO', 'CALI', 'DOSQUEBRADAS', 'CUCUTA', 'MANIZALES', 'SUBACHOQUE', 'SOACHA', 'BARRANQUILLA', 'FLORIDABLANCA', 'ARMENIA',
'PEREIRA', 'CAJICA', 'IBAGUE', 'BUCARAMANGA', 'POPAYAN', 'VITERBO', 'ANAPOIMA', 'LA ESTRELLA', 'ENVIGADO',
'SANTA MARTA', 'CHIA', 'MADRID', 'VILLA DEL ROSARIO', 'CARTAGO', 'YUMBO', 'GUARNE', 'BELLO', 'CALARCA', 'VALLEDUPAR', 'LA DORADA', 'CHINCHINA', 'SAN ANDRES',
'YOPAL', 'GUATAPE', 'COTA', 'SABANETA', 'VILLAMARIA', 'TRUJILLO', 'PACHO', 'RETIRO', 'APARTADO', 'CACERES', 'IPIALES', 'LOS PATIOS', 'MOSQUERA', 'LA UNION',
'FUSAGASUGA', 'LA CALERA', 'VILLETA', 'PALERMO', 'SAN PEDRO', 'DARIEN', 'FUNZA', 'TULUA', 'SOLEDAD', 'EL DOVIO', 'SANTANA', 'FRONTINO', 'TUNJA', 'TOCANCIPA',
'PUERTO COLOMBIA', 'SAHAGUN', 'PASTO', 'POLO NUEVO', 'VILLAPINZON', 'MONTEBELLO', 'MONTENEGRO', 'SANTA ROSA DE OSOS', 'TURBACO', 'SINCELEJO', 'LA CEJA',
'SAN FRANCISCO', 'MONTERIA', 'OICATA', 'ARJONA', 'SOPO', 'CIENAGA', 'TUMACO', 'RIOHACHA', 'ZIPAQUIRA', 'PUERTO SANTANDER', 'TOGUI', 'CUMBAL', 'ULLOA',
'SANTANDER DE QUILICHAO', 'FILANDIA', 'CIRCASIA', 'ANSERMA', 'DON MATIAS', 'CALDAS', 'CHOACHI', 'COCORNA', 'GUACARI', 'TENJO', 'BUENAVENTURA', 'ACACIAS',
'BARRANCABERMEJA', 'SAN JUAN DE RIO SECO', 'ANGOSTURA', 'GUACHENÉ', 'PONEDERA', 'SANTO TOMAS', 'GALAPA', 'CIENAGA DE ORO', 'SANTA ROSA DE CABAL', 'LA TEBAIDA', 'MIRANDA',
'JAMUNDI', 'COPACABANA', 'TENZA', 'GINEBRA', 'ANDALUCIA', 'GARAGOA', 'SANTA SOFIA', 'FACATATIVA', 'CONTADERO', 'CUMBITARA', 'OCAÑA', 'CIMITARRA', 'CONCEPCION', 'CURITI',
'ESPINAL', 'SOCHA', 'TIMBIO', 'SAN DIEGO', 'GIRARDOT', 'BUGALAGRANDE', 'UBATE', 'SANTUARIO', 'FLORIDA', 'COGUA', 'SOGAMOSO', 'CERETE', 'TIERRALTA', 'SUESCA', 'AGRADO',
'GIGANTE', 'PITALITO', 'QUIBDO', 'CANDELARIA', 'EL CERRITO', 'YOTOCO', 'ZONA BANANERA', 'BARANOA', 'MALAMBO', 'SAN PEDRO DE URABA', 'MELGAR', 'SAN CRISTOBAL', 'ANDES',
'SAN MARTIN', 'LA VIRGINIA', 'GRANADA', 'SONSON', 'TUQUERRES', 'PUERTO TEJADA', 'EL BANCO', 'BARRANCO DE LOBA', 'LETICIA', 'FLORENCIA', 'ACEVEDO', 'TIMANA',
'ISNOS (SAN JOSE DE ISNOS)', 'RIVERA', 'GARZON', 'SAN AGUSTIN', 'VIJES', 'AQUITANIA', 'AIPE', 'EL COLEGIO', 'CALAMAR', 'UNION PANAMERICANA', 'LORICA', 'SABANAGRANDE', 'GUADUAS',
'SIBATE', 'PIEDECUESTA', 'BARBOSA', 'SANTA ROSA', 'LA CUMBRE', 'BOSCONIA', 'CUASPUD (CARLOSAMA)', 'VILLARICA', 'ROBLES (LA PAZ)', 'RIO DE ORO', 'NATAGA', 'PUEBLOVIEJO',
'AGUSTIN CODAZZI', 'AGUACHICA', 'EL TAMBO', 'ALCALA', 'MANATI', 'TOLEDO', 'GUAMO', 'BELEN', 'PAICOL', 'POTOSI', 'SAN JUAN DE ARAMA', 'PAZ DE ARIPORO', 'LA PLATA', 'CALOTO',
'DAGUA', 'SAMANA', 'MAICAO', 'ALBANIA', 'SAN JUAN DEL CESAR', 'ALGECIRAS', 'RISARALDA', 'CHIQUINQUIRA', 'SABANALARGA', 'CUBARA', 'CANALETE', 'VILLA DE LEYVA', 'DUITAMA',
'SAMACA', 'LA FLORIDA', 'GUACHUCAL', 'OPORAPA', 'ANCUYA', 'ROLDANILLO', 'CACOTA', 'ABREGO', 'TIBU', 'MARMATO', 'MARQUETALIA', 'LA JAGUA DE IBIRICO', 'CHIRIGUANA',
'PALMAR DE VARELA', 'DISTRACCION', 'CHIPAQUE', 'PUERTO NARIÑO', 'TURBANA', 'TURMEQUE', 'PIENDAMO', 'MARIA LA BAJA', 'RESTREPO', 'NORCASIA', 'CARMEN DE VIBORAL',
'LINARES', 'PAIPA', 'UNE', 'FOMEQUE', 'MAGANGUE', 'SILVANIA', 'LOS PALMITOS', 'SANTA LUCIA', 'MONTELIBANO', 'VILLANUEVA', 'RIOSUCIO', 'GACHANCIPA', 'PUERTO LEGUIZAMO',
'MAGUI (PAYAN)', 'MONIQUIRA', 'PAMPLONA', 'ROVIRA', 'FONSECA', 'SINCE', 'CAMPOALEGRE', 'SANTA ROSA DE VITERBO', 'CORDOBA', 'MUZO', 'CISNEROS', 'LA TOLA', 'TINJACA',
'SITIO NUEVO', 'PUERTO GUZMAN', 'PROVIDENCIA', 'PLANADAS', 'PRADERA', 'ARAUCA', 'AGUADAS', 'ITUANGO', 'MEDIO BAUDO (BOCA DE PEPE)', 'EL ZULIA', 'PUERTO LOPEZ', 'MITU',
'EL RETEN', 'ITSMINA', 'MIRAFLORES', 'VIRACACHA', 'CUCAITA', 'SAN VICENTE DEL CAGUAN', 'REPELON', 'QUIMBAYA', 'BUENOS AIRES', 'EL BAGRE', 'PUERTO GAITAN', 'SANTA CATALINA',
'TOTORO', 'DIBULLA', 'LA CRUZ', 'PUERTO ASIS', 'TURBO', 'SALADOBLANCO', 'ARACATACA', 'FUNDACION', 'SAN LUIS', 'CAUCASIA', 'TABIO', 'SUTAMARCHAN', 'MANZANARES', 'MANAURE',
'TIPACOQUE', 'COMBITA', 'SAN CARLOS DE GUAROA', 'NILO', 'JUAN DE ACOSTA', 'PIVIJAY', 'CAMPO DE LA CRUZ', 'FLANDES', 'SOCORRO', 'VENTAQUEMADA', 'TADO', 'RIOQUITO', 'LIBANO',
'GONZALEZ', 'PAYA', 'ATRATO', 'RIO IRO', 'BAHIA SOLANO (MUTIS)', 'LURUACO', 'PASCA', 'MORROA', 'PIEDRAS', 'CLEMENCIA', 'BAGADO', 'CHINU', 'MARINILLA', 'USIACURI', 'PALESTINA',
'SAN MIGUEL DE SEMA', 'MALLAMA (PIEDRANCHA)', 'HONDA', 'TIMBIQUI', 'TUBARA', 'BOJAYA (BELLAVISTA)', 'GUATEQUE', 'EL PASO', 'BECERRIL', 'CABUYARO', 'COPER', 'CAQUEZA',
'CHOCONTA', 'JUNIN', 'LA MESA', 'SAN ANTONIO DE TEQUENDAMA', 'LENGUAZAQUE', 'TAURAMENA', 'SESQUILE', 'VALLE DEL GUAMUEZ', 'MOCOA', 'ORITO', 'EL COPEY',
'OLAYA HERRERA(BOCAS DE SATINGA', 'SAN PABLO', 'EL PIÑON', 'CAREPA', 'PAUNA', 'ZIPACON', 'LA PLAYA', 'ARGELIA', 'PIOJO', 'SAN ONOFRE', 'COROZAL',
'FRANCISCO PIZARRO (SALAHONDA)', 'NUEVO COLON', 'TASCO', 'MANTA', 'ANOLAIMA', 'JERICO', 'PUERTO SALGAR', 'CONDOTO', 'CERTEGUI', 'UBAQUE', 'MARIQUITA', 'BELEN DE UMBRIA',
'EL CARMEN DE BOLIVAR', 'SAN JUAN NEPOMUCENO', 'MOMPOS', 'EL PEÑON', 'YOLOMBO', 'PUERTO INIRIDA', 'SAN MIGUEL (LA DORADA)', 'OSPINA PEREZ (VENECIA)', 'PUERTO CARREÑO',
'BUENAVISTA', 'BARBACOAS', 'GIRALDO', 'TENA', 'AGUA DE DIOS', 'PUEBLO BELLO', 'HATONUEVO', 'CERRO SAN ANTONIO', 'SILVIA', 'YACOPI', 'YACUANQUER', 'OBANDO', 'LOPEZ (MICAY)',
'CAÑASGORDAS', 'SAN SEBASTIAN DE BUENAVISTA', 'AMAGA', 'GIRON', 'BURITICA', 'VALDIVIA', 'GUAITARILLA', 'SAN BERNARDO', 'NARIÑO', 'ALDANA', 'UNGUIA', 'PUERTO ESCONDIDO',
'SAN MARCOS', 'GUAPOTA', 'EL ROSAL', 'SUPIA', 'SANTA ROSA DEL SUR', 'EL CHARCO', 'PLANETA RICA', 'PURISIMA', 'BARRANCAS', 'PATIA (EL BORDO)', 'CABRERA', 'TOCA',
'LA LLANADA', 'LLORO', 'SAN ROQUE', 'CHIGORODO', 'MACHETA', 'MAHATES', 'CUMARAL', 'PUERRES', 'PUPIALES', 'RICAURTE', 'EL CAIRO', 'CONVENCION', 'TAMESIS', 'FIRAVITOBA',
'COLON', 'CAIMITO', 'RAFAEL REYES (APULO)', 'TAME', 'NEIRA', 'LOS CORDOBAS', 'ALBAN', 'TOCAIMA', 'ARBOLEDA (BERRUECOS)', 'SAN LORENZO', 'URIBIA', 'SUAN', 'AYAPEL',
'puerto COLOMBIA', 'CORRALES', 'VIOTA', 'YAGUARA', 'ANSERMANUEVO', 'SANTA FE DE ANTIOQUIA', 'TOLU', 'NECHI', 'LEBRIJA', 'TUCHIN', 'SAN JOSE DEL GUAVIARE', 'GUATAVITA',
'CHACHAGUI', 'SANDONA', 'ALTO BAUDO (PIE DE PATO)', 'COTORRA', 'TELLO', 'LA BELLEZA', 'ARBOLETES', 'YARUMAL', 'TARAZA', 'SASAIMA', 'VIANI', 'LABATECA', 'MUTISCUA',
'SALGAR', 'VALENCIA', 'TAMINANGO', 'SAMPUES', 'SANTA BARBARA (ISCUANDE)', 'IMUES', 'CURUMANI', 'ARIGUANI (EL DIFICIL)', 'CANTON DE SAN PABLO (MANAGRU)', 'PALMITO',
'EL PEÑOL', 'FRESNO', 'SEVILLA', 'GUAPI', 'SAN JUAN DE BETULIA', 'CACHIPAY', 'SAN VICENTE DE CHUCURI', 'GUARANDA', 'COYAIMA', 'GIRARDOTA', 'SUAITA', 'MALAGA',
'YAVARATE (CD)', 'MAJAGUAL', 'ROBERTO PAYAN (SAN JOSE)', 'GUATAQUI', 'SAN JUAN DE URABA', 'GUAYATA', 'GALERAS (NUEVA GRANADA)', 'SAN BERNARDO DEL VIENTO',
'DABEIBA', 'SARAVENA', 'HISPANIA', 'EL CARMEN DE ATRATO', 'MACEO', 'Galapa', 'SAN ANTONIO', 'PAILITAS', 'SABANA DE TORRES', 'CUCUNUBA', 'CHAPARRAL', 'SANTA ANA',
'TIQUISIO (PUERTO RICO)', 'CICUCO', 'VIGIA DEL FUERTE', 'SIMIJACA', 'SAN PELAYO', 'SAMANIEGO', 'GAMARRA', 'NATAGAIMA', 'SARDINATA', 'LA GLORIA', 'PUERTO LIBERTADOR',
'TOLUVIEJO', 'PUEBLO NUEVO', 'ARAUQUITA', 'PUEBLO RICO', 'SAN ANDRES SOTAVENTO', 'MOÑITOS', 'SANTIAGO', 'CAJIBIO', 'CALDONO', 'SAN JACINTO', 'BELTRAN', 'AGUAZUL',
'ZARZAL', 'PIJIÑO DEL CARMEN (PIJIÑO)', 'SUCRE', 'SAN CARLOS', 'CHIMA', 'ZAMBRANO', 'SOPLAVIENTO', 'COLOSO (RICAURTE)', 'GUASCA', 'CHIMICHAGUA', 'ARANZAZU',
'PAMPLONITA', 'EL ROBLE', 'BOJACA', 'SAN ESTANISLAO', 'SAN FELIPE (CD)', 'PUERTO COLOMBIA (CD)', 'MAPIRIPANA (CD)', 'SAN ALBERTO', 'CARACOLI', 'BALBOA', 'ARENAL',
'SAN ANTERO', 'MANI', 'COLON (GENOVA)', 'TARAIRA', 'REMEDIOS', 'SILOS', 'CHALAN', 'PLATO', 'LA VEGA', 'COVEÑAS', 'PUERTO WILCHES', 'RAMIRIQUI', 'SUSACON', 'ANZA', 'NEMOCON', 'UBALA',
'PADILLA', 'LERIDA', 'PELAYA', 'OVEJAS', 'SAN BENITO ABAD', 'CAICEDONIA', 'GUAMAL', 'REMOLINO', 'LA PALMA', 'CORINTO', 'EL AGUILA', 'BOLIVAR', 'CARMEN DE CARUPA', 'MUTATA', 'CAICEDO',
'LA CELIA', 'BARICHARA', 'PUERTO PARRA', 'TAMALAMEQUE', 'MURINDO', 'MONTECRISTO', 'PINILLOS', 'ARROYOHONDO', 'NOBSA', 'MANAURE BALCON DEL CESAR', 'BOCHALEMA', 'PORE', 'FUNES',
'PUERTO BERRIO', 'NECOCLI', 'SAN GIL', 'CHITAGA', 'PUERTO TRIUNFO', 'ROSAS', 'ALBAN (SAN JOSE)', 'MEDIO SAN JUAN', 'PUERTO BOYACA', 'LA APARTADA', 'HELICONIA', 'LOS ANDES (SOTOMAYOR)',
'CONSACA', 'QUETAME', 'EBEJICO', 'ACHI', 'BETULIA', 'COVARACHIA', 'SANTA BARBARA', 'MARSELLA', 'EL MOLINO', 'LA CHORRERA (CD)', 'LA PAZ', 'CONCORDIA', 'URUMITA', 'SOLITA', 'EL DONCELLO',
'MESETAS', 'OIBA', 'ZARAGOZA', 'MERCADERES', 'BRICENO', 'SAN VICENTE', 'MORALES', 'LOS SANTOS', 'TAUSA', 'ENTRERRIOS', 'VENECIA', 'GALAN', 'PUERTO RICO', 'CASTILLA LA NUEVA', 'TIBASOSA',
'SIACHOQUE', 'PUERTO SANTANDER (CD)', 'RAGONVALIA', 'SOPETRAN', 'PEQUE', 'NOROSÌ', 'CARMEN DEL DARIEN', 'MOGOTES', 'SAN JOSE DEL FRAGUA', 'SANTA HELENA DEL OPON', 'SEGOVIA',
'BAJO BAUDO (PIZARRO)', 'MARIPI', 'HATO', 'ALPUJARRA', 'VILLAGARZON', 'MOMIL', 'CANTAGALLO', 'LEIVA', 'VALPARAISO', 'HATILLO DE LOBA', 'TIBANA', 'SAN JOSE DE URE', 'BITUIMA',
'GOMEZ PLATA', 'GACHALA', 'URAMITA', 'TITIRIBI', 'ONZAGA', 'ABEJORRAL', 'PUERTO NARE (LA MAGDALENA )', 'CHINACOTA', 'SAN CAYETANO', 'ASTREA', 'OROCUE', 'RIOBLANCO', 'SUAZA',
'GACHETA', 'FUQUENE', 'TARAPACA (CD)', 'BELEN DE LOS ANDAQUIES', 'SAN RAFAEL', 'ANORI', 'ORTEGA', 'EL CASTILLO', 'SAN PEDRO DE CARTAGO', 'LANDAZURI', 'CASABIANCA', 'SALDAÑA',
'SANTO DOMINGO', 'CEPITA', 'DOLORES', 'URRAO', 'TARQUI', 'TARSO', 'FREDONIA', 'SAN JOSE DE LA MONTANA', 'OLAYA', 'VILLAVIEJA', 'ACANDI', 'MARGARITA', 'NIMAIMA', 'VALLE DE SAN JOSE',
'SOATA', 'SAN JACINTO DEL CAUCA', 'NUEVA GRANADA', 'SORACA', 'TUTA', 'BELMIRA', 'SANTA BARBARA DE PINTO', 'SAN ZENON', 'QUEBRADANEGRA', 'CUBARRAL', 'SAN JERONIMO', 'MONGUI',
'CARTAGENA DEL CHAIRA', 'PEDRAZA', 'MONTERREY', 'PUERTO CAICEDO', 'LA PRIMAVERA', 'VERSALLES', 'ANZOATEGUI', 'PITAL', 'SAN JOAQUIN', 'CAMPAMENTO', 'ARBOLEDAS', 'QUINCHIA',
'TALAIGUA NUEVO', 'HERVEO', 'CHIVOLO', 'SABOYA', 'LA MONTANITA', 'FOSCA', 'EL PAUJIL', 'MORELIA', 'LIBORINA', 'NOVITA', 'SUAREZ', 'BUESACO', 'PURIFICACION', 'LA VICTORIA',
'PACORA', 'LA SIERRA', 'AGUADA', 'CALIFORNIA', 'SAN CALIXTO', 'TORIBIO', 'ALGARROBO', 'SIBUNDOY', 'ARCABUCO', 'HATO COROZAL', 'MAPIRIPAN', 'AMALFI', 'MATANZA', 'NUQUI',
'EL TARRA', 'DURANIA', 'HOBO', 'CAPARRAPI', 'ARBELAEZ', 'PANDI', 'GENOVA', 'SAN JOSE DE MIRANDA', 'PAEZ', 'SALAMINA', 'JAMBALO', 'ALTOS DEL ROSARIO', 'ABRIAQUI', 'COLOMBIA',
'PEÑOL', 'IZA', 'LA ESPERANZA', 'BELALCAZAR', 'UTICA', 'LA ARGENTINA', 'ALTAMIRA', 'YONDO (CASABE)', 'CHIQUIZA', 'CARMEN DE APICALA', 'GRAMALOTE', 'SALENTO', 'ARATOCA',
'EL GUAMO', 'APIA', 'EL PLAYON', 'VELEZ', 'GUACHETA', 'AMBALEMA', 'SANTA CRUZ (GUACHAVES)', 'TERUEL', 'QUIPAMA', 'GUADALUPE', 'LA MERCED', 'GUALMATAN', 'IQUIRA', 'SOLANO',
'FUENTE DE ORO', 'TRINIDAD', 'CUITIVA', 'LA PINTADA', 'LA JAGUA DEL PILAR', 'CARCASI', 'ICONONZO', 'TANGUA', 'ZAPATOCA', 'ALVARADO', 'PESCA', 'TORO', 'TEORAMA', 'CAJAMARCA',
'TENERIFE', 'CURILLO', 'EL CARMEN', 'EL CARMEN DE CHUCURI', 'REGIDOR', 'MOTAVITA', 'PRADO', 'MILAN', 'PARAMO', 'BARAYA', 'VERGARA', 'GAMA', 'VENADILLO', 'SAN MARTIN DE LOBA',
'BETANIA', 'CAROLINA', 'SAN BENITO', 'SANTA MARIA', 'LA UVITA', 'CONFINES', 'VALLE DE SAN JUAN', 'EL GUACAMAYO', 'PINCHOTE', 'UMBITA', 'ILES', 'FORTUL', 'RIO VIEJO', 'ZETAQUIRA',
'PACHAVITA', 'COROMORO', 'GUAVATA', 'LA PEDRERA (CD)', 'TONA', 'ARMERO (GUAYABAL)', 'SABANAS DE SAN ANGEL', 'VETAS', 'CHIPATA', 'SAN PABLO DE BORBUR', 'SAPUYES', 'GUACA', 'BUCARASICA', 'PALOCABILDO', 'CUCUTILLA', 'VILLA CARO', 'LOURDES', 'GUAYABETAL', 'PURACE', 'ELIAS', 'CARAMANTA', 'ZAPAYAN', 'INZA', 'OTANCHE', 'NOCAIMA', 'MOLAGAVITA', 'ALEJANDRIA', 'TOPAGA', 'SATIVASUR', 'TOTA', 'SOTARA', 'SUPATA', 'GUACAMAYAS', 'CUMARIBO', 'CUNDAY', 'CAPITANEJO', 'SOTAQUIRA', 'JARDIN', 'PIJAO', 'PARATEBUENO', 'CONTRATACION', 'RIOFRIO', 'CHARALA', 'HACARI', 'HERRAN', 'EL ESPINO', 'CHITA', 'CIENEGA', 'SUTATAUSA', 'GAMEZA', 'SAN LUIS DE PALENQUE', 'ANGELOPOLIS', 'VISTAHERMOSA', 'JENESANO', 'RAQUIRA', 'LA MACARENA', 'COELLO', 'BOAVITA', 'PAZ DE RIO', 'SATIVANORTE', 'GUATICA', 'MISTRATO', 'CHISCAS', 'LEJANIAS', 'SAN LUIS DE GACENO', 'CHIVATA', 'EL COCUY', 'SAN JOSE DE PARE', 'PAJARITO', 'VILLAHERMOSA', 'BOYACA', 'PUERTO CONCORDIA', 'EL RETORNO', 'CACHIRA', 'VICTORIA', 'CERRITO', 'SALAZAR', 'CHITARAQUE', 'EL TABLON', 'POLICARPA', 'OSPINA', 'CERINZA', 'GACHANTIVA', 'PANQUEBA', 'LITORAL DEL BAJO SAN JUAN', 'SIPI', 'SIMITI', 'TESALIA', 'FLORIAN', 'SAN JOSE', 'VEGACHI', 'ATACO', 'ENCINO', 'PIAMONTE', 'BARRANCA DE UPIA', 'QUIPILE', 'SAN FERNANDO', 'RECETOR', 'GAMBITA', 'SUSA', 'PUERTO LLERAS', 'EL DORADO', 'CARURU', 'FILADELFIA', 'EL ROSARIO', 'BARRANCO MINAS (CD)', 'MONGUA', 'LA URIBE', 'MACARAVITA', 'YALI', 'ENCISO', 'TUNUNGUA', 'MARULANDA', 'TIBACUY', 'LA CAPILLA', 'PULI', 'SANTA ISABEL', 'GUEPSA', 'SACAMA', 'SACHICA', 'PUENTE NACIONAL', 'FALAN', 'SANTA ROSALIA', 'MEDINA', 'PUEBLORRICO', 'PUERTO RONDON', 'JURADO', 'PENSILVANIA', 'PALMAR', 'FLORESTA', 'OCAMONTE', 'SIMACOTA', 'SAN SEBASTIAN', 'NUNCHIA', 'SUTATENZA', 'PACOA (CD)', 'TAMARA', 'BERBEO', 'PALMAS SOCORRO', 'PANA PANA (CAMPO ALEGRE) (CD)', 'SURATA', 'JESUS MARIA', 'CHARTA', 'TIBIRITA', 'TUTAZA', 'SOCOTA', 'GUAYABAL DE SIQUIMA', 'CHAMEZA', 'SORA', 'SAN MIGUEL', 'JERUSALEN', 'LA SALINA', 'EL CALVARIO', 'BUSBANZA', 'PAIME', 'CRAVO NORTE', 'GUICAN', 'MACANAL', 'SAN JOSE DEL PALMAR', 'LABRANZAGRANDE', 'CHINAVITA', 'SOMONDOCO', 'MURILLO', 'CHIVOR', 'LA PEÑA', 'ALMEIDA', 'SAN MATEO', 'PISBA', 'ALMAGUER', 'RONCESVALLES', 'TOPAIPI', 'RONDON', 'SAN EDUARDO', 'CHAGUANI', 'EL ENCANTO (CD)', 'BETEITIVA', 'JORDAN', 'GUTIERREZ']
mongo_uri = environ.get('MONGO_URI')
connect(host=mongo_uri)
for name in regions:
reg = Region()
reg.active=True
reg.hash=sha256(f"{name}".encode('utf-8')).hexdigest()
reg.name=name
reg.save()
print(f"{len(regions)} regions has been inserted")
|
# -*- coding: utf-8 -*-
from cefpython3 import cefpython as cef
from importlib import reload
import numpy as np
import os
import tempfile
import unittest
from unittest.mock import Mock, patch, mock_open
import endless_bot.emitters.webbrowser
from endless_bot.emitters.webbrowser import BrowserHandler
from endless_bot.emitters.webbrowser import WebBrowser
class WebBrowserTest(unittest.TestCase):
def setUp(self):
from endless_bot.main import parse_args
self.args = parse_args([])
self.broadcaster = Mock()
self.webbrowser = Mock()
self.random_image = np.random.randint(0, 255, (640, 360, 4), np.uint8)
self.paint_buffer = Mock()
self.paint_buffer.GetString.return_value = self.random_image.tostring()
@patch("builtins.open", new_callable=mock_open(read_data="data"))
def test_handler_init_preload(self, mock_file):
reload(endless_bot.emitters.webbrowser)
BrowserHandler(self.webbrowser, self.broadcaster)
self.assertEqual(mock_file.call_count, 1)
@patch("builtins.open", mock_open(read_data="data"))
def test_handler_loading_state_pending(self):
reload(endless_bot.emitters.webbrowser)
browser_handler = BrowserHandler(self.webbrowser, self.broadcaster)
browser_handler.OnLoadingStateChange(self.webbrowser, True)
self.assertEqual(self.webbrowser.ExecuteJavascript.call_count, 0)
@patch("builtins.open", mock_open(read_data="data"))
def test_handler_loading_state_ready(self):
reload(endless_bot.emitters.webbrowser)
browser_handler = BrowserHandler(self.webbrowser, self.broadcaster)
browser_handler.OnLoadingStateChange(self.webbrowser, False)
self.assertEqual(self.webbrowser.ExecuteJavascript.call_count, 1)
self.assertEqual(self.webbrowser.ExecuteJavascript.call_args, unittest.mock.call("data"))
def test_handler_paint_invalid_element(self):
reload(endless_bot.emitters.webbrowser)
browser_handler = BrowserHandler(self.webbrowser, self.broadcaster)
browser_handler.OnPaint(Mock(), cef.PET_POPUP, self.paint_buffer)
self.assertEqual(self.paint_buffer.GetString.call_count, 0)
self.assertEqual(self.broadcaster.send_image.call_count, 0)
def test_handler_paint_continue(self):
self.broadcaster.send_image.return_value = 0
reload(endless_bot.emitters.webbrowser)
browser_handler = BrowserHandler(self.webbrowser, self.broadcaster)
browser_handler.OnPaint(Mock(), cef.PET_VIEW, self.paint_buffer)
self.assertEqual(self.paint_buffer.GetString.call_count, 1)
np.testing.assert_array_equal(self.broadcaster.send_image.call_args[0][0], self.random_image)
def test_handler_paint_shutdown(self):
self.broadcaster.send_image.return_value = -1
reload(endless_bot.emitters.webbrowser)
browser_handler = BrowserHandler(self.webbrowser, self.broadcaster)
browser_handler.OnPaint(Mock(), cef.PET_VIEW, self.paint_buffer)
self.assertEqual(self.paint_buffer.GetString.call_count, 1)
np.testing.assert_array_equal(self.broadcaster.send_image.call_args[0][0], self.random_image)
self.assertEqual(self.webbrowser.shutdown.call_count, 1)
def test_handler_paint_click(self):
action = np.random.randint(1, 600, 1)
self.broadcaster.send_image.return_value = action
reload(endless_bot.emitters.webbrowser)
browser_handler = BrowserHandler(self.webbrowser, self.broadcaster)
browser_handler.OnPaint(Mock(), cef.PET_VIEW, self.paint_buffer)
self.assertEqual(self.webbrowser.send_click.call_count, 1)
self.assertEqual(self.webbrowser.send_click.call_args, unittest.mock.call(180, action))
def test_handler_get_view_rect(self):
reload(endless_bot.emitters.webbrowser)
browser_handler = BrowserHandler(self.webbrowser, self.broadcaster)
rect_out = []
result = browser_handler.GetViewRect(rect_out)
self.assertEqual(rect_out, [0, 0, 360, 640])
self.assertEqual(result, True)
@patch('appdirs.user_data_dir')
@patch('cefpython3.cefpython')
def test_cache_dir(self, cef_mock, user_data_dir):
parent_dir = tempfile.mkdtemp()
cache_dir = os.path.join(parent_dir, "Browser")
self.assertFalse(os.path.isdir(cache_dir))
user_data_dir.return_value = parent_dir
cef_mock.__version__ = "55.3"
reload(endless_bot.emitters.webbrowser)
WebBrowser(self.args, self.broadcaster)
self.assertEqual(user_data_dir.call_count, 1)
self.assertEqual(cef_mock.Initialize.call_args_list[0][0][0]["cache_path"], cache_dir)
self.assertEqual(cef_mock.MessageLoop.call_count, 1)
self.assertEqual(cef_mock.Shutdown.call_count, 1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 13:17:49 2021
@author: surajitrana
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_piechart():
dataset = np.array([20, 25, 10, 15, 30])
    chart_labels = np.array(["Audi", "Mercedes", "BMW", "Tesla", "Volvo"])
    plt.pie(dataset, labels=chart_labels)
plt.show()
if __name__ == '__main__':
plot_piechart()
|
# core/operators/test_nonparametric.py
"""Tests for rom_operator_inference.core.operators._nonparametric."""
import pytest
import numpy as np
import rom_operator_inference as opinf
_module = opinf.core.operators._nonparametric
class TestConstantOperator:
"""Test core.operators._nonparametric.ConstantOperator."""
def test_init(self):
"""Test core.operators._nonparametric.ConstantOperator.__init__()"""
# Too many dimensions.
cbad = np.arange(12).reshape((4, 3))
with pytest.raises(ValueError) as ex:
opinf.core.operators.ConstantOperator(cbad)
assert ex.value.args[0] == "constant operator must be one-dimensional"
# Case 1: one-dimensional array.
c = np.arange(12)
op = opinf.core.operators.ConstantOperator(c)
assert op.entries is c
# Case 2: two-dimensional array that can be flattened.
op = opinf.core.operators.ConstantOperator(c.reshape((-1, 1)))
assert op.shape == (12,)
assert np.all(op.entries == c)
op = opinf.core.operators.ConstantOperator(c.reshape((1, -1)))
assert op.shape == (12,)
assert np.all(op.entries == c)
def test_evaluate(self):
"""Test core.operators._nonparametric.ConstantOperator.evaluate()"""
c = np.random.random(10)
op = opinf.core.operators.ConstantOperator(c)
assert op.evaluate() is c
assert op.evaluate(1) is c
assert op.evaluate([1, 2]) is c
assert op.evaluate([1], 2) is c
class TestLinearOperator:
"""Test core.operators._nonparametric.LinearOperator."""
def test_init(self):
"""Test core.operators._nonparametric.LinearOperator.__init__()"""
# Too many dimensions.
Abad = np.arange(12).reshape((2, 2, 3))
with pytest.raises(ValueError) as ex:
opinf.core.operators.LinearOperator(Abad)
assert ex.value.args[0] == "linear operator must be two-dimensional"
# No violation, nonsquare.
A = Abad.reshape((4, 3))
op = opinf.core.operators.LinearOperator(A)
assert op.entries is A
# Correct square usage.
A = A[:3, :3]
op = opinf.core.operators.LinearOperator(A)
assert op.entries is A
# Special case: "one-dimensional" operator.
B = np.arange(5)
op = opinf.core.operators.LinearOperator(B)
assert op.shape == (5, 1)
assert np.all(op.entries[:, 0] == B)
# Special case: "scalar" operator.
A = np.array([10])
op = opinf.core.operators.LinearOperator(A)
assert op.shape == (1, 1)
assert op.entries[0, 0] == A[0]
def test_evaluate(self):
"""Test core.operators._nonparametric.LinearOperator.evaluate()"""
# Special case: A is 1x1 (e.g., ROM state dimension = 1)
A = np.random.random((1, 1))
op = opinf.core.operators.LinearOperator(A)
x = np.random.random()
assert np.allclose(op.evaluate(x), A[0, 0] * x)
# Scalar inputs (e.g., ROM state dimension > 1 but input dimension = 1)
B = np.random.random(10)
op = opinf.core.operators.LinearOperator(B)
x = np.random.random()
assert np.allclose(op.evaluate(x), B * x)
# 1D inputs (usual case)
def _check1D(A):
x = np.random.random(A.shape[-1])
op = opinf.core.operators.LinearOperator(A)
assert np.allclose(op.evaluate(x), A @ x)
_check1D(np.random.random((4, 3)))
_check1D(np.random.random((4, 4)))
_check1D(np.random.random((4, 1)))
# 2D inputs (for applying to data residual)
def _check2D(A):
X = np.random.random((A.shape[-1], 20))
op = opinf.core.operators.LinearOperator(A)
assert np.allclose(op.evaluate(X), A @ X)
_check2D(np.random.random((10, 3)))
_check2D(np.random.random((6, 6)))
class TestQuadraticOperator:
"""Test core.operators._nonparametric.QuadraticOperator."""
def test_init(self):
"""Test core.operators._nonparametric.QuadraticOperator.__init__()"""
# Too many dimensions.
Hbad = np.arange(12).reshape((2, 2, 3))
with pytest.raises(ValueError) as ex:
opinf.core.operators.QuadraticOperator(Hbad)
assert ex.value.args[0] == "quadratic operator must be two-dimensional"
# Two-dimensional but invalid shape.
Hbad = Hbad.reshape((4, 3))
with pytest.raises(ValueError) as ex:
opinf.core.operators.QuadraticOperator(Hbad)
assert ex.value.args[0] == "invalid dimensions for quadratic operator"
# Special case: r = 1
H = np.random.random((1, 1))
op = opinf.core.operators.QuadraticOperator(H)
assert op.shape == (1, 1)
assert np.allclose(op.entries, H)
# Full operator, compressed internally.
r = 4
H = np.random.random((r, r**2))
op = opinf.core.operators.QuadraticOperator(H)
assert op.shape == (r, r*(r + 1)//2)
assert np.allclose(op.entries, opinf.utils.compress_quadratic(H))
# Compressed operator.
r = 4
H = np.random.random((r, r*(r + 1)//2))
op = opinf.core.operators.QuadraticOperator(H)
assert op.entries is H
def test_evaluate(self, ntrials=10):
"""Test core.operators._nonparametric.QuadraticOperator.evaluate()"""
# Full operator, compressed internally.
r = 4
H = np.random.random((r, r**2))
op = opinf.core.operators.QuadraticOperator(H)
for _ in range(ntrials):
x = np.random.random(r)
assert np.allclose(op.evaluate(x), H @ np.kron(x, x))
# Compressed operator.
H = np.random.random((r, r*(r + 1)//2))
op = opinf.core.operators.QuadraticOperator(H)
for _ in range(ntrials):
x = np.random.random(r)
assert np.allclose(op.evaluate(x), H @ opinf.utils.kron2c(x))
# Special case: r = 1
H = np.random.random((1, 1))
op = opinf.core.operators.QuadraticOperator(H)
for _ in range(ntrials):
x = np.random.random()
assert np.allclose(op.evaluate(x), H[0, 0] * x**2)
# class TestCrossQuadraticOperator:
# """Test core.operators._nonparametric.CrossQuadraticOperator."""
# def test_init(self):
# """Test CrossQuadraticOperator.__init__()"""
# raise NotImplementedError
#
# def test_evaluate(self):
# """Test CrossQuadraticOperator.evaluate()"""
# raise NotImplementedError
class TestCubicOperator:
"""Test core.operators._nonparametric.CubicOperator."""
def test_init(self):
"""Test core.operators._nonparametric.CubicOperator.__init__()"""
# Too many dimensions.
Gbad = np.arange(24).reshape((2, 4, 3))
with pytest.raises(ValueError) as ex:
opinf.core.operators.CubicOperator(Gbad)
assert ex.value.args[0] == "cubic operator must be two-dimensional"
# Two-dimensional but invalid shape.
Gbad = Gbad.reshape((3, 8))
with pytest.raises(ValueError) as ex:
opinf.core.operators.CubicOperator(Gbad)
assert ex.value.args[0] == "invalid dimensions for cubic operator"
# Special case: r = 1
G = np.random.random((1, 1))
op = opinf.core.operators.CubicOperator(G)
assert op.shape == (1, 1)
assert np.allclose(op.entries, G)
# Full operator, compressed internally.
r = 4
G = np.random.random((r, r**3))
op = opinf.core.operators.CubicOperator(G)
assert op.shape == (r, r*(r + 1)*(r + 2)//6)
assert np.allclose(op.entries, opinf.utils.compress_cubic(G))
# Compressed operator.
r = 5
G = np.random.random((r, r*(r + 1)*(r + 2)//6))
op = opinf.core.operators.CubicOperator(G)
assert op.entries is G
def test_evaluate(self, ntrials=10):
"""Test core.operators._nonparametric.CubicOperator.evaluate()"""
# Full operator, compressed internally.
r = 4
G = np.random.random((r, r**3))
op = opinf.core.operators.CubicOperator(G)
for _ in range(ntrials):
x = np.random.random(r)
assert np.allclose(op.evaluate(x), G @ np.kron(np.kron(x, x), x))
# Compressed operator.
r = 5
G = np.random.random((r, r*(r + 1)*(r + 2)//6))
op = opinf.core.operators.CubicOperator(G)
for _ in range(ntrials):
x = np.random.random(r)
assert np.allclose(op.evaluate(x), G @ opinf.utils.kron3c(x))
# Special case: r = 1
G = np.random.random((1, 1))
op = opinf.core.operators.CubicOperator(G)
for _ in range(ntrials):
x = np.random.random()
assert np.allclose(op.evaluate(x), G[0, 0] * x**3)
|
import pytest
import subprocess
import requests
import time
import configparser
from gameengine import GameEngine
import socket
def wait_until_up(watchdog):
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 5000))
s.close()
break
except ConnectionRefusedError:
watchdog -= 1
if watchdog == 0:
break
time.sleep(1)
def wait_until_down(watchdog):
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 5000))
s.close()
watchdog -= 1
except ConnectionRefusedError:
break
if watchdog == 0:
break
time.sleep(1)
@pytest.fixture()
def gameengineflask():
proc = subprocess.Popen('src/gameengine.py')
wait_until_up(5)
yield "resource"
proc.kill()
wait_until_down(5)
def test_gameengine_creation():
ge = GameEngine("Keyboarder", "src/keyboarder.cfg")
assert ge, 'Could not create GameEngine'
def test_list_games(gameengineflask):
r = requests.get('http://localhost:5000/list_games')
assert r.status_code == 200
assert 'game_1.cfg' in r.text
def test_default_game_choice(gameengineflask):
r = requests.get('http://localhost:5000/game_choice')
assert r.status_code == 200
def test_set_game_choice(gameengineflask):
r = requests.post('http://localhost:5000/game_choice',
data='{"game_choice": "data/game_1.cfg"}')
assert r.status_code == 200
r = requests.get('http://localhost:5000/game_choice')
assert r.status_code == 200
assert r.text == 'data/game_1.cfg'
def test_gameengine_config_does_not_exist():
try:
GameEngine('name', '../data/game_doesnotexist.cfg')
assert False
except FileNotFoundError:
assert True
def test_game_invalid_config_file():
try:
GameEngine('name', 'README.md')
assert False, "README.md is not a config file and should have" \
" thrown an exception - MissingSectionHeaderError."
except configparser.MissingSectionHeaderError:
assert True
def test_gameengine_bad_method_choice(gameengineflask):
r = requests.delete('http://localhost:5000/game_choice')
assert r.status_code == 405
def test_url_endpoint_creations():
ge = GameEngine("Keyboarder", "src/keyboarder.cfg")
endpoints = ge.list_endpoints()
assert 'game_choice' in str(endpoints)
def test_pick_random_key(gameengineflask):
requests.post('http://localhost:5000/game_choice', data='{"game_choice": "data/game_1.cfg"}')
r = requests.get('http://localhost:5000/pick_key')
assert r.status_code == 200
def test_default_scores():
ge = GameEngine("Keyboarder", "src/keyboarder.cfg")
scores = ge.scores
assert scores['success'] == 0 and scores['fail'] == 0
def test_increment_success():
ge = GameEngine("Keyboarder", "src/keyboarder.cfg")
scores = ge.scores
assert scores['success'] == 0 and scores['fail'] == 0
ge.increment_result('success')
assert scores['success'] == 1 and scores['fail'] == 0
def test_clear_scores():
ge = GameEngine("Keyboarder", "src/keyboarder.cfg")
ge.increment_result('success')
scores = ge.scores
assert scores['success'] == 1 and scores['fail'] == 0
ge.clear_results()
scores = ge.scores
assert scores['success'] == 0 and scores['fail'] == 0
def test_basic_reset_status():
ge = GameEngine("Keyboarder", "src/keyboarder.cfg")
ge.game_status['current_stage'] = 3
assert ge.game_status['current_stage'] == 3
ge.reset_game_status()
assert ge.game_status['current_stage'] == 0
def test_all_values_reset_status():
ge = GameEngine("Keyboarder", "src/keyboarder.cfg")
ge.scores = {'fail': 1, 'success': 2}
ge.game_status['scores'] = ge.scores
ge.game_status['current_stage'] = 3
ge.game_status['remaining'] = 4
assert ge.game_status['scores']['fail'] == 1
assert ge.game_status['scores']['success'] == 2
assert ge.game_status['current_stage'] == 3
assert ge.game_status['remaining'] == 4
ge.reset_game_status()
assert ge.game_status['scores']['fail'] == 0
assert ge.game_status['scores']['success'] == 0
assert ge.game_status['current_stage'] == 0
assert ge.game_status['remaining'] == 0
|
"""Defines a background widget to create a single-color background."""
from os.path import dirname as _dirname, join as _join
from typing import Optional as _Optional
from kivy.lang.builder import Builder as _Builder
from kivy.properties import ListProperty as _ListProperty
from kivy.uix.widget import Widget as _Widget
_Builder.load_file(_join(_dirname(__file__), 'background.kv'))
class Background(_Widget):
"""Simple single-color background.
Kivy does not provide a background by default. This widget adds one
that you can select the color of.
"""
color = _ListProperty([1, 1, 1])
"""Color of the background.
The color must be given as RGB values, each between 0 and 1.
"""
def __init__(self, color:_Optional[list] = None, **kwargs):
"""Initialization method of the widget.
Args:
color: The color of the background given as RGB values,
each between 0 and 1.
**kwargs: Keyed arguments passed on to the base class
of the widget.
"""
self.color = color if color else self.color
super(Background, self).__init__(**kwargs)
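# Example usage inside a Kivy layout (color values are illustrative):
# background = Background(color=[0.12, 0.12, 0.12])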
|
from tkinter import *
root = Tk()
root.geometry('500x300')
root.resizable(0,0)
root.title("Website Blocker !")
Label(root, text ='WEBSITE BLOCKER' , font ='arial 20 bold').pack()
Label(root, text ='Website Blocker' , font ='arial 20 bold').pack(side=BOTTOM)
host_path = r'C:\Windows\System32\drivers\etc\hosts'
ip_address = '127.0.0.1'
Label(root, text ='Enter Website :' , font ='arial 13 bold').place(x=5 ,y=60)
Websites = Text(root,font = 'arial 10',height='2', width = '40', wrap = WORD, padx=5, pady=5)
Websites.place(x= 140,y = 60)
def Blocker():
website_lists = Websites.get(1.0,END)
Website = list(website_lists.split(","))
with open (host_path , 'r+') as host_file:
file_content = host_file.read()
for website in Website:
if website in file_content:
Label(root, text = 'Already Blocked' , font = 'arial 12 bold').place(x=200,y=200)
pass
else:
host_file.write(ip_address + " " + website + '\n')
Label(root, text = "Blocked", font = 'arial 12 bold').place(x=230,y =200)
block = Button(root, text = 'Block',font = 'arial 12 bold',pady = 5,command = Blocker ,width = 6, bg = 'royal blue1', activebackground = 'sky blue')
block.place(x = 230, y = 150)
root.mainloop()
|
from .cli import cli_main
cli_main()
|
count = 1
while (count <= 150):
fizz = count / 3
buzz = count / 5
if (float(fizz).is_integer() and not float(buzz).is_integer()):
print("fizz")
elif (float(buzz).is_integer() and not float(fizz).is_integer()):
print("buzz")
elif (float(buzz).is_integer() and float(fizz).is_integer()):
print("fizzbuzz")
else:
print(count)
count += 1
# Not elegant but gets the job done
|
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
import logging
import subprocess
def appimage_mount(target):
abs_target_path = os.path.abspath(target)
process = subprocess.Popen([abs_target_path, '--appimage-mount'], stdout=subprocess.PIPE)
app_dir = process.stdout.readline().decode('utf-8').strip()
ret_code = process.poll()
    if ret_code is None:
logging.info("AppImage mounted at: %s" % app_dir)
return app_dir, process
else:
raise RuntimeError("Unable to run: %s --appimage-mount" % target)
def appimage_umount(process):
process.kill()
process.wait()
logging.info("AppImage unmounted")
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import ibis
import third_party.ibis.ibis_oracle.api
OL_HOST= os.environ.get(
'IBIS_TEST_ORACLE_HOST', os.environ.get('OLHOST','host')
)
OL_PORT= os.environ.get(
'IBIS_TEST_ORACLE_PORT', os.environ.get('OLPORT','port')
)
OL_USER = os.environ.get(
'IBIS_TEST_ORACLE_USER', os.environ.get('OLUSER', 'username')
)
OL_PASS = os.environ.get(
'IBIS_TEST_ORACLE_PASSWORD', os.environ.get('OLPASSWORD', 'password')
)
IBIS_TEST_ORACLE_DB = os.environ.get(
'IBIS_TEST_ORACLE_DATABASE', os.environ.get('OLDATABASE', 'database_name')
)
IBIS_TEST_ORACLE_protocol = os.environ.get(
'IBIS_TEST_ORACLE_PROTOCOL', os.environ.get('OLPROTOCOL', 'protocol')
)
def _random_identifier(suffix):
return '__ibis_test_{}_{}'.format(suffix, ibis.util.guid())
@pytest.fixture(scope='session')
def con():
return third_party.ibis.ibis_oracle.api.connect(
host=OL_HOST,
port=OL_PORT,
user=OL_USER,
password=OL_PASS,
database=IBIS_TEST_ORACLE_DB,
protocol=IBIS_TEST_ORACLE_protocol,
)
@pytest.fixture(scope='module')
def db(con):
return con.database()
@pytest.fixture(scope='module')
def alltypes(db):
return db.functional_alltypes
@pytest.fixture(scope='module')
def df(alltypes):
return alltypes.execute()
@pytest.fixture(scope='module')
def at(alltypes):
return alltypes.op().sqla_table
@pytest.fixture(scope='module')
def intervals(con):
return con.table("intervals")
@pytest.fixture
def translate():
from third_party.ibis.ibis_oracle.compiler import OracleDialect
dialect = OracleDialect()
context = dialect.make_context()
return lambda expr: dialect.translator(expr, context).get_result()
@pytest.fixture
def temp_table(con) -> str:
"""
Return a temporary table name.
Parameters
----------
con : third_party.ibis.ibis_oracle.compiler.OracleDialect
Yields
------
name : string
Random table name for a temporary usage.
"""
name = _random_identifier('table')
try:
yield name
finally:
con.drop_table(name, force=True)
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
def mock_cast_as_call(obj=None):
"""Use this to mock `cast` as calls.
:param obj: Either an instance of RPCClient
or an instance of _Context.
"""
orig_prepare = obj.prepare
def prepare(*args, **kwargs):
cctxt = orig_prepare(*args, **kwargs)
mock_cast_as_call(obj=cctxt) # woo, recurse!
return cctxt
prepare_patch = mock.patch.object(obj, 'prepare').start()
prepare_patch.side_effect = prepare
cast_patch = mock.patch.object(obj, 'cast').start()
cast_patch.side_effect = obj.call
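# Typical usage in a test (client is assumed to be an RPCClient or prepared context):
# mock_cast_as_call(obj=client)  # afterwards every cast() on client behaves like call()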
|
from .smoothing import smooth_image
from .utils import save_imgs
from .data_loader import load_patches, ImageFolder720p
|
import basic
import os
import subprocess
while True:
text = input('Doge shell 0.1 (Beta) > ')
if "import" in text:
importing = text.split(" ")
if importing[0] == "import":
f = open(importing[1], 'r')
imports = f.read()
f2 = open(importing[2], 'r')
toimp = f2.read()
aimp = imports + "\n" + toimp
print(aimp)
f2 = open(importing[2], 'w')
f2.truncate()
f2.write(aimp)
f2.close()
f.close()
else:
if text.strip() == "":
continue
result, error = basic.run('<stdin>', text)
if error:
print(error.as_string())
elif result:
if len(result.elements) == 1:
print(repr(result.elements[0]))
else:
print(repr(result))
else:
if text.strip() == "":
continue
result, error = basic.run('<stdin>', text)
if error:
print(error.as_string())
elif result:
if len(result.elements) == 1:
print(repr(result.elements[0]))
else:
print(repr(result))
|
from .json_resource import json_tool
from tkinter import *
from tkinter import ttk
import os
class gui:
def __init__(self, file_path, json_path):
self.file_path = file_path
self.json_path = json_path
def main_frame():
"""
MAINFRAME para configurar e iniciar o Robô de Geração de fatura manual\n
pela J1.\n
"""
def clicked():
user = user_text.get()
passw = Pass_text.get()
sap_dir = sap_text.get()
destin_dir = destin_label_text.get()
data = {"USER_SAP": user, "PASSWORD_SAP": passw, "SAP_PATH": sap_dir, "DESTIN_PATH": destin_dir}
path = os.getcwd()
path = path[:-7]
path = f'{path}My_docs\\index.json'
json_tool.save_json(data,path)
def truncate():
data = {"STATUS": "ATIVO"}
path = os.getcwd()
path = path[:-7]
path = f'{path}My_docs\\status.json'
json_tool.save_json(data,path)
window.destroy()
data = {"STATUS": "INATIVO"}
path = os.getcwd()
path = path[:-7]
path = f'{path}My_docs\\status.json'
json_tool.save_json(data,path)
path = os.getcwd()
path = path[:-7]
path = f'{path}My_docs\\index.json'
data = json_tool.read_json(path)
window = Tk()
window.title("R_FATMAN_CONFIGURAÇÔES --gui")
window.geometry('500x500')
path = os.getcwd()
path = path[:-7]
path = f'{path}My_images\\logo-claro.png'
imagem = PhotoImage(file=path)
w = Label(window, image=imagem, bd=0)
w.imagem = imagem
w.pack()
User_label = Label(window, font = ('Courier New', 8), text= "USUARIO SAP")
User_label.pack()
user_text = Entry(window, width=20)
texto_default = data["USER_SAP"]
user_text.insert(0, texto_default)
user_text.pack()
Pass_label = Label(window, font = ('Courier New', 8), text= "SENHA SAP")
Pass_label.pack()
Pass_text = Entry(window,show = '*', width=20)
texto_default = data["PASSWORD_SAP"]
Pass_text.insert(0, texto_default)
Pass_text.pack()
sap_label = Label(window, font = ('Courier New', 8), text= "DIRETORIO SAP")
sap_label.pack()
sap_text = Entry(window,width=60)
texto_default = data["SAP_PATH"]
sap_text.insert(0, texto_default)
sap_text.pack()
destin_label = Label(window, font = ('Courier New', 8), text= "DIR. RELATORIO")
destin_label.pack()
destin_label_text = Entry(window,width=60)
texto_default = data["DESTIN_PATH"]
destin_label_text.insert(0, texto_default)
destin_label_text.pack()
Atualizar = Button(window, text="Atualizar Dados", command=clicked)
Atualizar.pack()
Subimmit_btn = Button(window, text="EXECUTAR", command=truncate, bg= 'red', fg='white')
Subimmit_btn.pack()
window.mainloop()
def waiting_frame():
window = Tk()
window.title("R_FATMAN_j1 --gui")
window.geometry('300x400')
path = os.getcwd()
path = path[:-7]
path = f'{path}My_images\\logo-claro.png'
imagem = PhotoImage(file=path)
w = Label(window, image=imagem, bd=0)
w.imagem = imagem
w.pack()
texto = 'POR FAVOR, AGUARDE ALGUNS MINUTOS.\nVALIDANDO DADOS NO SEFAZ...'
Pass_label = Label(window, font = ('Courier New', 8), text= texto)
Pass_label.pack()
pb = ttk.Progressbar(window, orient="horizontal", length=200, mode="indeterminate")
pb.pack()
pb.start()
window.after(300000, lambda: window.destroy())
window.mainloop()
|
from __future__ import absolute_import, print_function, division
import numpy as np
def correct_panel(img, copy=True, divide=True):
"""
Distributes the intensity in the larger Jungfrau pixels into smaller
inserted pixels
See: https://doi.org/10.1088/1748-0221/13/11/C11006
Parameters
==========
img: a 2D numpy of shape 512x1024
copy: boolean, if True, copy the image, otherwise
the input image is updated in-place (usually not desired).
The performance hit for copy=True is negligible in most applications.
On psana servers this function runs in ~6 ms with copy=False
and ~7.5 ms with copy=True
TODO: for raw jungfrau data where gain mode is stored in 2 of the 16 bits,
# we need to carefully divide the 14-bit data by 2 for the large pixels (if we wish to use them)
Return
======
2D numpy array of shape 514x1030
"""
if not isinstance(img, np.ndarray):
raise TypeError("input image needs to be a numpy array")
if img.shape != (512, 1024):
raise ValueError("Input image needs shape 512x1024")
if copy:
img = img.copy()
if divide:
img[255]/=2
img[256]/=2
img2 = np.insert(img, (256, 256), values=(img[255], img[256]), axis=0).T
if divide:
img2[255]/=2
img2[256]/=2
img2[511]/=2
img2[512]/=2
img2[767]/=2
img2[768]/=2
img3 = np.insert(img2, (256, 256, 512, 512, 768, 768),
values=(img2[255], img2[256],
img2[511], img2[512],
img2[767], img2[768]),
axis=0).T
return img3
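# Minimal usage sketch with synthetic data (real panels come from the detector stream):
# raw = np.ones((512, 1024))
# corrected = correct_panel(raw)   # corrected.shape == (514, 1030)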
def pad_stacked_format(raw, num_panels=32):
"""
pad a raw data array that represents stacks of 512x1024 blocks
"""
padded = np.vstack([correct_panel(raw[i * 512: (i + 1) * 512], divide=False)
for i in range(num_panels)])
return padded
def get_14bit_from_jungfrau(expt):
iset = expt.imageset
F = iset.get_format_class()
if len(iset.paths()) != 1:
raise ValueError("imageset should have exactly 1 path")
fclass = F.get_instance(iset.paths()[0])
return fclass.get_14bit_component(iset.indices()[0])
def get_pedestalRMS_from_jungfrau(expt, gain_modes_too=False):
iset = expt.imageset
F = iset.get_format_class()
if len(iset.paths()) != 1:
raise ValueError("imageset should have exactly 1 path")
fclass = F.get_instance(iset.paths()[0])
return fclass.get_pedestal_rms(iset.indices()[0], return_gain_modes=gain_modes_too)
|
'''
ML pipeline: run an experiment end-to-end.
Input:
Experiment
Output:
- Predictions
- Evaluation metrics
'''
from pathlib import Path
import os
import sys
source_path = str(Path(os.path.abspath(__file__)).parent.parent)
pipeline_path = str(Path(os.path.abspath(__file__)).parent)
sys.path = [i for i in sys.path if i != pipeline_path]
if source_path not in sys.path:
sys.path.insert(0, source_path)
import fire
import importlib
import logging
import numpy as np
from sklearn.model_selection import train_test_split
import itertools as it
from tqdm import tqdm
import copy
import warnings
warnings.filterwarnings("ignore")
from pipeline.config_loading import load_environment, load_experiment
from pipeline.approaches import dummy_classifier
from pipeline.evaluation import evaluate, get_feature_importance
from pipeline.plotting import do_plots
from pipeline.model_data_prep import create_model_configuration
from pipeline.data_persistence import persist_errors, initialise_experiment, persist_local, get_experiment, get_approaches, persist_learner, persist_evaluation, get_local, persist_feature_importance
from pipeline.preprocessing import apply_preprocessing
from utils.utils import max_run_time
from pipeline.textprocessing import run_tfidf
logging.basicConfig(level=logging.ERROR)
def generate_hyperparameters_combinations(hyperparameters):
"""Given a dict of lists, returns a list of dicts with all unique combinations.
It generates the Grid for the grid search.
Parameters
----------
hyperparameters : dict
Hyperparameters sets to be used to tune the learner
Returns
-------
list of dict
Unique list of all variables combinations.
"""
all_hyper = sorted(hyperparameters)
combinations = it.product(*(hyperparameters[hyper] for hyper in all_hyper))
return [dict(zip(all_hyper, comb)) for comb in list(combinations)]
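# Illustrative expansion (hypothetical hyperparameters, not from any experiment file):
# generate_hyperparameters_combinations({'max_depth': [3, 5], 'lr': [0.1, 0.01]})
# returns the four grid points
# [{'lr': 0.1, 'max_depth': 3}, {'lr': 0.1, 'max_depth': 5},
#  {'lr': 0.01, 'max_depth': 3}, {'lr': 0.01, 'max_depth': 5}]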
def generate_folds_matrices(features, labels, fold, args):
"""Returns the data_dict for test and training given a fold
Parameters
----------
features : pd.DataFrame
Features table for the experiment
labels : pd.DataFrame
Labels table for the experiment
fold : dict
id_llamado lists to filter features and labels
Returns
-------
dict of pd.DataFrame
train and test dict with features and labels each
"""
features_train = features.loc[fold['train']]
features_test = features.loc[fold['test']]
#Check if textprocessing is needed
if args:
tfidf_features_train, tfidf_features_test = run_tfidf(fold, args)
#Get normal features pandas dataframe
#Combined features pandas DataFrame
features_train = features_train.merge(tfidf_features_train, on ='id_llamado', how='inner')\
.set_index('id_llamado')
features_test = features_test.merge(tfidf_features_test, on='id_llamado', how='inner')\
.set_index('id_llamado')
# Ensure no NA
features_train = features_train.dropna()
features_test = features_test.dropna()
train_dict = {
'features': features_train,
'labels': labels.loc[features_train.index]}
test_dict = {
'features': features_test,
'labels': labels.loc[features_test.index]}
return train_dict, test_dict
def loop_the_grid(args):
"""
Given the experiment file with experiment parameters, the list of
temporal_folds as well as the data dictionary prepared by the
model_data_prep function, the function loops through the various temporal folds
and the list of approaches specified in the experiment file to calculate
metrics specified in the experiment file.
Parameters
----------
args: dictionary
Minimum set of arguments to start functions.
"""
experiment = get_experiment(args['experiment_id'])
approaches = get_approaches(args['experiment_id'])
features = get_local(args, 'features').set_index('id_llamado')
labels = get_local(args, 'labels').set_index('id_llamado')
#Check if textprocessing is needed:
if 'textprocessing' in experiment:
args_tfidf = {}
args_tfidf['params'] = experiment['textprocessing']['tfidf']
args_tfidf['experiment_id'] = args['experiment_id']
else:
args_tfidf = {}
print('Approaches: ', ', '.join([k['name'] for k in approaches]))
for fold in tqdm(args['folds'], desc='Folds'):
args['fold_name'] = fold['name']
original_train_dict, original_test_dict = generate_folds_matrices(features, labels, fold, args_tfidf)
for approach in tqdm(approaches, desc='Approaches'):
args['approach_id'] = approach['approach_id']
args['approach_name'] = approach['name']
train_dict, test_dict = \
apply_preprocessing(approach, original_train_dict, original_test_dict,
args)
for hyperparameters in tqdm(generate_hyperparameters_combinations(approach['hyperparameters']), desc='Hyper'):
args['hyperparameters'] = hyperparameters
args = persist_learner(args)
try:
max_run_time(experiment['model_config']['max_seconds'])
mod = importlib.import_module(f"pipeline.approaches.{approach['python_path'][:-3]}")
model = mod.fit(args, train_dict=train_dict)
predictions = mod.predict(model, test_features=test_dict['features'])
evaluations = evaluate(obs=test_dict['labels'],
pred=predictions,
evaluation=experiment['evaluation'])
feature_importance = get_feature_importance(model, test_dict['features'])
persist_local(predictions, args, 'predictions',
['experiment_id', 'approach_id', 'learner_id', 'fold_name'])
persist_local(model, args, 'models', ['experiment_id', 'approach_id', 'learner_id'], '.p')
persist_evaluation(evaluations, args)
persist_feature_importance(feature_importance, args)
except TimeoutError as error:
error = f'timeout < {experiment["model_config"]["max_seconds"]}'
persist_errors(error, args)
if experiment['model_config']['errors']:
raise
continue
except Exception as e:
persist_errors(e, args)
if experiment['model_config']['errors']:
raise
continue
def run_experiment(experiment_file, testing=False):
"""
Runs the experiment specified in the experiment yaml file to produce
metrics for evaluation.
It writes results on database and local system.
Parameters
----------
experiment_file: str
Name of the experiment file inside experiment folder.
Example: dummy_experiment.yaml
"""
env = load_environment()
args = initialise_experiment(experiment_file, env, testing)
print('Experiment id', args['experiment_id'])
args = create_model_configuration(args)
loop_the_grid(args)
if __name__ == '__main__':
# python src/pipeline/pipeline.py run_experiment --experiment_file='dummy_experiment.yaml'
fire.Fire()
|
import pika
from hipotap_common.proto_messages.auth_pb2 import AuthResponsePB, AuthStatus
from hipotap_common.proto_messages.customer_pb2 import CustomerCredentialsPB, CustomerPB
from hipotap_common.proto_messages.hipotap_pb2 import BaseResponsePB, BaseStatus
from hipotap_common.queues.customer_queues import (
CUSTOMER_AUTH_QUEUE,
CUSTOMER_REGISTER_QUEUE,
)
from hipotap_common.rpc.rpc_subscriber import RpcSubscriber
from sqlalchemy.orm.exc import NoResultFound
from hipotap_common.db import Customer_Table, db_session
def broker_requests_handling_loop():
subscriber = RpcSubscriber()
subscriber.subscribe_to_queue(CUSTOMER_AUTH_QUEUE, on_auth_request)
subscriber.subscribe_to_queue(CUSTOMER_REGISTER_QUEUE, on_register_request)
subscriber.handling_loop()
def on_auth_request(ch, method, properties, body):
print(f" [x] Received body: {body}")
# Parse received message
customer_credentials = CustomerCredentialsPB()
customer_credentials.ParseFromString(body)
print(f" [x] Received Customer credentials: {customer_credentials}")
response = AuthResponsePB()
# Check if customer exists
try:
customer = (
db_session.query(Customer_Table)
.filter_by(
email=customer_credentials.email, password=customer_credentials.password
)
.one()
)
response.status = AuthStatus.OK
response.customer_data.name = customer.name
response.customer_data.surname = customer.surname
except NoResultFound:
print("No such customer")
response.status = AuthStatus.INVALID_CREDENTIALS
        response.customer_data.name = ""  # protobuf string fields cannot be set to None
# Send response
ch.basic_publish(
"",
routing_key=properties.reply_to,
properties=pika.BasicProperties(correlation_id=properties.correlation_id),
body=response.SerializeToString(),
)
def on_register_request(ch, method, properties, body):
print(f" [x] Received body: {body}")
customer_pb = CustomerPB()
customer_pb.ParseFromString(body)
print(f" [x] Received Customer: {customer_pb}")
response_pb = BaseResponsePB()
try:
customer = db_session.add(
Customer_Table(
email=customer_pb.credentials.email,
name=customer_pb.data.name,
surname=customer_pb.data.surname,
password=customer_pb.credentials.password,
)
)
response_pb.status = BaseStatus.OK
    except Exception:
print("Customer already exists")
response_pb.status = BaseStatus.FAIL
ch.basic_publish(
"",
routing_key=properties.reply_to,
properties=pika.BasicProperties(correlation_id=properties.correlation_id),
body=response_pb.SerializeToString(),
)
|
expected_output = {
"interfaces": {
"Port-channel1": {
"name": "Port-channel1",
"protocol": "lacp",
"members": {
"GigabitEthernet2": {
"interface": "GigabitEthernet2",
"oper_key": 1,
"admin_key": 1,
"port_num": 1,
"lacp_port_priority": 32768,
"flags": "SA",
"activity": "auto",
"state": "bndl",
"bundled": True,
"port_state": 61,
},
"GigabitEthernet3": {
"interface": "GigabitEthernet3",
"oper_key": 1,
"admin_key": 1,
"port_num": 1,
"lacp_port_priority": 32768,
"flags": "SA",
"activity": "auto",
"state": "bndl",
"bundled": True,
"port_state": 61,
},
},
},
"Port-channel2": {
"name": "Port-channel2",
"protocol": "lacp",
"members": {
"GigabitEthernet4": {
"interface": "GigabitEthernet4",
"oper_key": 2,
"admin_key": 2,
"port_num": 1,
"lacp_port_priority": 32768,
"flags": "SA",
"state": "bndl",
"activity": "auto",
"bundled": True,
"port_state": 61,
},
"GigabitEthernet5": {
"interface": "GigabitEthernet5",
"oper_key": 2,
"admin_key": 2,
"port_num": 1,
"lacp_port_priority": 32768,
"flags": "SA",
"activity": "auto",
"state": "bndl",
"bundled": True,
"port_state": 61,
},
"GigabitEthernet6": {
"interface": "GigabitEthernet6",
"oper_key": 2,
"admin_key": 2,
"port_num": 1,
"lacp_port_priority": 32768,
"flags": "SA",
"activity": "auto",
"state": "bndl",
"bundled": True,
"port_state": 61,
},
},
},
}
}
|
import os
import re

import pandas as pd
from termcolor import colored  # colored() is used for the console messages below

def select_other_samples (project, list_samples, samples_prefix, mode, extensions, exclude=False, Debug=False):
## init dataframe
name_columns = ("sample", "dirname", "name", "ext", "tag")
## initiate dataframe
df_samples = pd.DataFrame(columns=name_columns)
#Get all files in the folder "path_to_samples"
sample_list = []
for names in samples_prefix:
for path_file in list_samples:
f = os.path.basename(path_file)
dirN = os.path.dirname(path_file)
#samplename_search = re.search(r"(%s).*" % names, f)
samplename_search = re.search(r"(%s).*" % names, path_file)
enter = ""
if samplename_search:
if (exclude): ## exclude==True
enter = False
                else: ## exclude==False
enter = True
else:
if (exclude): ## exclude==True
enter = True
                else: ## exclude==False
enter = False
if enter:
## project mode:
if project:
if mode == 'annot':
#### /path/to/folder/annot/name.faa
for ext in extensions:
f_search = re.search(r".*\/%s\/(.*)\.%s$" %(mode, ext), path_file)
if f_search:
file_name = f_search.group(1)
df_samples.loc[len(df_samples)] = [path_file, dirN, file_name, ext, mode]
elif mode== 'assembly':
#### name_assembly.faa
f_search = re.search(r"(.*)\_%s\.%s$" %(mode, extensions), f)
if f_search:
file_name = f_search.group(1)
df_samples.loc[len(df_samples)] = [path_file, dirN, file_name, extensions, mode]
elif mode== 'mash':
#### name.sig
f_search = re.search(r".*\/%s\/(.*)\.%s$" %(mode, extensions[0]), path_file)
if f_search:
file_name = f_search.group(1)
df_samples.loc[len(df_samples)] = [path_file, dirN, file_name, extensions[0], mode]
else:
f_search = re.search(r".*\/(.*)\/%s\/(.*)\_summary\.%s$" %(mode, extensions[0]), path_file)
if f_search:
### get information
if mode == 'profile':
name = f_search.group(1)
db_name = f_search.group(2).split('_')[-1]
if not name.startswith('report'):
df_samples.loc[len(df_samples)] = [path_file, dirN, name, db_name, mode]
elif mode == 'ident':
name = f_search.group(1)
df_samples.loc[len(df_samples)] = [path_file, dirN, name, 'csv', mode]
## detached mode
else:
if f.endswith(extensions):
file_name, ext = os.path.splitext(f)
                        df_samples.loc[len(df_samples)] = [path_file, dirN, file_name, ext, mode]
## debug message
if (Debug):
print (colored("**DEBUG: df_samples **", 'yellow'))
print (df_samples)
##
number_samples = df_samples.index.size
if (number_samples == 0):
print (colored("\n**ERROR: No samples were retrieved for this option. Continue processing...\n",'red'))
return (df_samples)
print (colored("\t" + str(number_samples) + " samples selected from the input provided...", 'yellow'))
return (df_samples)
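# Illustrative call (paths and prefixes are hypothetical):
# df = select_other_samples(project=True, list_samples=paths, samples_prefix=['sample'],
#                           mode='annot', extensions=['faa'], Debug=True)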
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='gautocorr',
version='1.0',
author='Julius Bier Kirkegaard',
py_modules=['gautocorr'],
)
|
# SPDX-License-Identifier: BSD-3-Clause
from argparse import ArgumentError
import typing as t
from urllib.parse import urlencode, quote_plus
from . import exceptions as e
from . import urls
from . import http
from . import utils
_fields = set(
"""abs abstract ack aff aff_id alternate_bibcode
alternative_title arXiv arxiv_class author
author_count
bibcode bigroup bibstem body
citation_count copyright
data database pubdate doctype doi
full
grant
identifier inst issue
keyword
lang
object orcid orcid_user orcid_other
page property pubdata pub
read_count
title
vizier volume
year
""".split()
)
_short_fl = "abstract,author,bibcode,pubdate,title,pub"
def search(
token: str,
query: str = "*:*",
fields: str = None,
fq: str = "",
limit: int = -1,
) -> t.Generator[t.Dict[t.Any, t.Any], None, None]:
if fields is not None:
for f in fields.split(","):
if f and f not in _fields:
raise ValueError(f"Field {f} not valid in search")
else:
fields = _short_fl
start = 0
count = 0
while True:
terms = [
f"?q={query}",
f"fl={fields}",
f"fq={fq}",
f"start={start}",
]
if limit > 0:
terms.append(f"rows={limit}")
search_term = "&".join(terms)
url = urls.make_url(urls.urls["search"]["search"], search_term)
# print(url)
data = http.get(token, url)
if data.status != 200:
if data.status == 400:
raise e.MalformedRequest
elif data.status == 404:
raise e.NoRecordsFound
elif data.status == 499:
raise e.ServerTooBusy
elif data.status == 500:
raise e.SeverError
else:
raise e.AdsApiError(f"Unknown error code {data.status}")
total_num = int(data.response["response"]["numFound"])
count += len(data.response["response"]["docs"])
# print(count,total_num,start)
yield from data.response["response"]["docs"]
        if count >= total_num or (limit > 0 and count >= limit):
break
else:
start = count - 1
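# Illustrative usage (the token and query are placeholders, not real credentials):
# for doc in search("ADS_API_TOKEN", query="star formation", limit=5):
#     print(doc.get("bibcode"), doc.get("title"))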
def bigquery(token: str, bibcodes: t.List[str], limit: int = -1):
# Broken for now
terms = {
"q": "*:*",
"fl": "id,bibcode,title",
}
if limit > 0:
terms["rows"] = str(limit)
url = urls.make_url(urls.urls["search"]["bigquery"])
bib = {"bibcodes": utils.ensure_list(bibcodes)}
data = http.post(token, url, data=bib, params=terms, json=True)
if data.status != 200:
if data.status == 400:
raise e.MalformedRequest
elif data.status == 404:
raise e.NoRecordsFound
elif data.status == 499:
raise e.ServerTooBusy
elif data.status == 500:
raise e.SeverError
else:
raise e.AdsApiError(f"Unknown error code {data.status}")
return data
|
from alertmanager import AlertManager, Alert
from copy import copy
from time import sleep
# https://github.com/KennethWilke/qlight_userspace
QLIGHT_PATH = "/opt/personal/qlight/qlight"
LIGHT_STATUS = {'red': 'off',
'yellow': 'off',
'blue': 'off',
'green': 'on',
'sound': 'off'}
ALL_GOOD_LIGHT_STATUS = copy(LIGHT_STATUS)
def set_red(status):
if status.lower() not in ['on', 'off', 'blink']:
raise Exception('Invalid Status')
LIGHT_STATUS['red'] = status
def set_yellow(status):
if status.lower() not in ['on', 'off', 'blink']:
raise Exception('Invalid Status')
LIGHT_STATUS['yellow'] = status
def set_green(status):
if status.lower() not in ['on', 'off', 'blink']:
raise Exception('Invalid Status')
LIGHT_STATUS['green'] = status
def set_blue(status):
if status.lower() not in ['on', 'off', 'blink']:
raise Exception('Invalid Status')
LIGHT_STATUS['blue'] = status
def set_sound(soundid):
if type(soundid) != int and not soundid.isdigit():
raise Exception('Invalid Soundid')
LIGHT_STATUS['sound'] = soundid
def set_qlight_from_dict(alertstatus):
_any_errors = 0
for k, v in alertstatus.items():
if v in ['on', 'blink'] or type(v) == int:
_any_errors = 1
_key = 'set_{}'.format(k)
globals()[_key](v)
if _any_errors:
set_green('off')
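# Example (label values are illustrative): blink red and play sound pattern 3;
# any 'on'/'blink'/integer value also forces the green light off.
# set_qlight_from_dict({'red': 'blink', 'sound': 3})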
def set_all_clear():
global LIGHT_STATUS
LIGHT_STATUS = copy(ALL_GOOD_LIGHT_STATUS)
def send_qlight_signal():
_cmd = "{qlight} -r {red} -g {green} -b {blue} -y {yellow} -s {sound}"
_r_cmd = _cmd.format(qlight=QLIGHT_PATH,
red=LIGHT_STATUS['red'],
green=LIGHT_STATUS['green'],
blue=LIGHT_STATUS['blue'],
yellow=LIGHT_STATUS['yellow'],
sound=LIGHT_STATUS['sound'])
import os
os.system('{}'.format(_r_cmd))
if LIGHT_STATUS['sound'] and LIGHT_STATUS['sound'] != 'off':
sleep(0.8)
LIGHT_STATUS['sound'] = 'off'
send_qlight_signal()
def get_qlight_labels(alert):
light_status = {
"red": alert.labels.qlight_red,
"blue": alert.labels.qlight_blue,
"green": alert.labels.qlight_green,
"yellow": alert.labels.qlight_yellow,
"sound": alert.labels.qlight_sound,
}
return light_status
# def infrastructure_maas_down(alert):
# if (alert.labels.severity == 'critical'
# and 'maas' in alert.labels.monitor.lower()):
# return True
# return False
# def persite_maas_down(alert):
# if (alert.labels.severity == 'critical'
# and ('maas' in alert.labels.monitor.lower() or alert.labels )):
# return True
# return False
# def infrastructure_down(alert):
# return False
# def maintanence_window(alert):
# return False
if __name__ == "__main__":
# TODO: Make sure we don't beep too often in a row
v2AlM = AlertManager('http://prometheus.example.com')
v3AlM = AlertManager('http://api.kube.example.com')
alerts = []
alerts = list(v2AlM.get_alerts() + v3AlM.get_alerts())
_CONFIRMATION = {}
for alert in alerts:
print(alert)
# Get the qlight_* labels from each alert manager.
# Apply it at the end.
# Off is ignored on any labels, so one alert can not turn off a red for
# someone else's alert.
set_qlight_from_dict(get_qlight_labels(alert))
continue
# if persite_maas_down(alert) and not _CONFIRMATION.get('maas'):
# # A website is down, don't present green
# set_yellow('on')
# set_green('off')
# _CONFIRMATION['maas'] = True
# print("set and confirmed a customer site is down")
# if ((infrastructure_down(alert) or infrastructure_maas_down(alert))
# and not _CONFIRMATION.get('infra')):
# # Infrastructure is down, don't present green
# set_red('blink')
# set_green('off')
# # set_sound(5)
# _CONFIRMATION['infra'] = True
# print("set and confirmed infrastructure downtime")
# if maintanence_window(alert) and not _CONFIRMATION.get('maint'):
# # Set blue for maintanence, but let green be decided by other
# # checks
# set_blue('on')
# _CONFIRMATION['maint'] = True
# print("set and confirmed an active maint window")
# After we know what state we got from alertmanager
send_qlight_signal()
|
import torch
import torch.nn as nn
import common
def make_model(args, parent=False):
return RDN(args)
class RDN(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(RDN, self).__init__()
rgb_mean = (0.4488, 0.4371, 0.4040)
rgb_std = (1.0, 1.0, 1.0)
self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
kernel_size = 3
self.is_sub_mean = args.is_sub_mean
self.conv1 = conv(args.n_channel_in, args.n_feats, kernel_size, bias=True)
self.conv2 = conv(args.n_feats, args.n_feats, kernel_size, bias=True)
self.RDBs = []
        for i in range(args.n_denseblocks):
RDB = common.RDB(args.n_feats,args.n_layers,args.growth_rate,conv,kernel_size,True)
self.add_module('RDB{}'.format(i+1),RDB)
self.RDBs.append(RDB)
self.gff_1 = nn.Conv2d(args.n_feats*args.n_denseblocks, args.n_feats,
kernel_size=1, padding=0, bias=True)
self.gff_3 = conv(args.n_feats, args.n_feats, kernel_size, bias=True)
m_tail = [common.Upsampler(conv, args.scale[0], args.n_feats, act=False),
conv(args.n_feats, args.n_channel_out, kernel_size)]
self.tail = nn.Sequential(*m_tail)
self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
def forward(self, x):
if self.is_sub_mean:
x = self.sub_mean(x)
F_minus = self.conv1(x)
x = self.conv2(F_minus)
to_concat = []
for db in self.RDBs:
x = db(x)
to_concat.append(x)
x = torch.cat(to_concat, 1)
x = self.gff_1(x)
x = self.gff_3(x)
x = x + F_minus
self.down_feats = x
out = self.tail(x)
if self.is_sub_mean:
out = self.add_mean(out)
return out
def load_state_dict(self, state_dict, strict=True):
own_state = self.state_dict()
        for (k1, p1), (k2, p2) in zip(state_dict.items(), own_state.items()):
if (k1.split('.')[0] == '0') or (k1.split('.')[0] == '5'): #do not copy shift mean layer
continue
if isinstance(p1, nn.Parameter):
p1 = p1.data
try:
own_state[k2].copy_(p1)
except Exception:
raise RuntimeError('error')
|
# closed over variable 2 deep
def f():
x = 1
def g():
def h():
return 1 + x
|
import textwrap
import topside as top
from ..procedures_bridge import ProceduresBridge
from ..plumbing_bridge import PlumbingBridge
from ..controls_bridge import ControlsBridge
def make_plumbing_engine():
pdl = textwrap.dedent("""\
name: example
body:
- component:
name: injector_valve
edges:
edge1:
nodes: [A, B]
states:
open:
edge1: 1
closed:
edge1: closed
- graph:
name: main
nodes:
A:
initial_pressure: 100
components:
- [injector_valve, A]
B:
initial_pressure: 0
components:
- [injector_valve, B]
states:
injector_valve: closed
""")
parser = top.Parser(pdl, 's')
return parser.make_engine()
def test_proc_bridge_procedure_step_affects_plumbing():
plumb_b = PlumbingBridge()
control_b = ControlsBridge(plumb_b)
proc_b = ProceduresBridge(plumb_b, control_b)
procedure = top.Procedure('main', [
top.ProcedureStep('1', top.StateChangeAction('injector_valve', 'open'), [
(top.Immediate(), top.Transition('main', '2'))], 'PRIMARY'),
top.ProcedureStep('2', top.StateChangeAction('injector_valve', 'closed'), [
(top.Immediate(), top.Transition('main', '3'))], 'PRIMARY'),
top.ProcedureStep('3', top.MiscAction('Approach the launch tower'), [], 'SECONDARY')
])
proc_b.load_suite(top.ProcedureSuite([procedure]))
plumb_eng = make_plumbing_engine()
plumb_b.load_engine(plumb_eng)
assert plumb_eng.current_state('injector_valve') == 'closed'
proc_b.procStepForward()
assert plumb_eng.current_state('injector_valve') == 'open'
proc_b.procStepForward()
assert plumb_eng.current_state('injector_valve') == 'closed'
def test_time_step_forward():
plumb_b = PlumbingBridge()
plumb_b.step_size = 0.1e6
control_b = ControlsBridge(plumb_b)
proc_b = ProceduresBridge(plumb_b, control_b)
procedure = top.Procedure('main', [
top.ProcedureStep('1', top.StateChangeAction('injector_valve', 'open'), [
(top.WaitFor(0.2e6), top.Transition('main', '2'))], 'PRIMARY'),
top.ProcedureStep('2', top.StateChangeAction('injector_valve', 'closed'), [
(top.WaitFor(0.2e6), top.Transition('main', '3'))], 'PRIMARY'),
top.ProcedureStep('3', top.MiscAction('Approach the launch tower'), [], 'SECONDARY')
])
proc_b.load_suite(top.ProcedureSuite([procedure]))
plumb_eng = make_plumbing_engine()
plumb_b.load_engine(plumb_eng)
proc_eng = proc_b._proc_eng
proc_b.procStepForward() # execute step 1
assert not proc_eng.ready_to_proceed()
# Time hasn't advanced yet, so pressures should be the same
assert plumb_eng.current_pressures('A') == 100
assert plumb_eng.current_pressures('B') == 0
plumb_b.timeStepForward() # time == 0.1s
assert not proc_eng.ready_to_proceed()
# Valve is open, so pressures should start dropping
assert plumb_eng.current_pressures('A') < 100
assert plumb_eng.current_pressures('B') > 0
plumb_b.timeStepForward() # time == 0.2s
assert proc_eng.ready_to_proceed()
pressures_at_02 = plumb_eng.current_pressures()
proc_b.procStepForward() # execute step 2
assert not proc_eng.ready_to_proceed()
assert plumb_eng.current_pressures() == pressures_at_02
plumb_b.timeStepForward() # time == 0.3s
assert not proc_eng.ready_to_proceed()
# Valve is now closed, so pressures shouldn't change further
assert plumb_eng.current_pressures() == pressures_at_02
def test_time_advance():
plumb_b = PlumbingBridge()
control_b = ControlsBridge(plumb_b)
proc_b = ProceduresBridge(plumb_b, control_b)
procedure = top.Procedure('main', [
top.ProcedureStep('1', top.StateChangeAction('injector_valve', 'open'), [
(top.Less('A', 75), top.Transition('main', '2'))], 'PRIMARY'),
top.ProcedureStep('2', top.MiscAction('Approach the launch tower'), [], 'SECONDARY')
])
proc_b.load_suite(top.ProcedureSuite([procedure]))
plumb_eng = make_plumbing_engine()
plumb_b.load_engine(plumb_eng)
tol = 0.01
proc_eng = proc_b._proc_eng
proc_b.procStepForward() # execute step 1
assert not proc_eng.ready_to_proceed()
assert plumb_eng.current_pressures('A') == 100
assert plumb_eng.current_pressures('B') == 0
plumb_b.timeAdvance()
assert proc_eng.ready_to_proceed()
# We expect pressures to equalize at 50 once the system is steady
assert abs(plumb_eng.current_pressures('A') - 50) < tol
assert abs(plumb_eng.current_pressures('B') - 50) < tol
def test_time_stop():
plumb_b = PlumbingBridge()
plumb_b.step_size = 0.1e6
control_b = ControlsBridge(plumb_b)
proc_b = ProceduresBridge(plumb_b, control_b)
procedure = top.Procedure('main', [
top.ProcedureStep('1', top.StateChangeAction('injector_valve', 'open'), [
(top.WaitFor(0.1e6), top.Transition('main', '2'))], 'PRIMARY'),
top.ProcedureStep('2', top.MiscAction('Approach the launch tower'), [], 'SECONDARY')
])
proc_b.load_suite(top.ProcedureSuite([procedure]))
plumb_eng = make_plumbing_engine()
plumb_b.load_engine(plumb_eng)
proc_eng = proc_b._proc_eng
proc_b.procStepForward() # execute step 1
assert not proc_eng.ready_to_proceed()
# Time hasn't advanced yet, so pressures should be the same
assert plumb_eng.current_pressures('A') == 100
assert plumb_eng.current_pressures('B') == 0
plumb_b.timeStepForward() # time == 0.1s
assert proc_eng.ready_to_proceed()
# Valve is open, so pressures should start dropping
assert plumb_eng.current_pressures('A') < 100
assert plumb_eng.current_pressures('B') > 0
proc_b.procStop()
plumb_b.timeStop()
# Everything should now be as it started
assert proc_eng.current_step.step_id == '1'
assert plumb_eng.current_state('injector_valve') == 'closed'
assert plumb_eng.current_pressures('A') == 100
assert plumb_eng.current_pressures('B') == 0
proc_b.procStepForward() # execute step 1
assert not proc_eng.ready_to_proceed()
# Time hasn't advanced yet, so pressures should be the same
assert plumb_eng.current_pressures('A') == 100
assert plumb_eng.current_pressures('B') == 0
plumb_b.timeStepForward() # time == 0.1s
assert proc_eng.ready_to_proceed()
# Valve is open, so pressures should start dropping
assert plumb_eng.current_pressures('A') < 100
assert plumb_eng.current_pressures('B') > 0
def test_jump_to_step_normal():
plumb_b = PlumbingBridge()
control_b = ControlsBridge(plumb_b)
proc_b = ProceduresBridge(plumb_b, control_b)
procedure = top.Procedure('main', [
top.ProcedureStep('1', top.StateChangeAction('injector_valve', 'open'), [
(top.Immediate(), top.Transition('main', '2'))], 'PRIMARY'),
top.ProcedureStep('2', top.StateChangeAction('injector_valve', 'closed'), [
(top.Immediate(), top.Transition('main', '3'))], 'PRIMARY'),
top.ProcedureStep('3', top.StateChangeAction('injector_valve', 'closed'), [
(top.Immediate(), top.Transition('main', '4'))], 'PRIMARY'),
top.ProcedureStep('4', top.MiscAction('Approach the launch tower'), [], 'SECONDARY')
])
proc_b.load_suite(top.ProcedureSuite([procedure]))
plumb_eng = make_plumbing_engine()
plumb_b.load_engine(plumb_eng)
proc_eng = proc_b._proc_eng
assert proc_eng.current_step == procedure.step_list[0]
assert proc_eng.step_position == top.StepPosition.Before
proc_b.procJump("2")
assert proc_eng.current_step == procedure.step_list[1]
assert proc_eng.step_position == top.StepPosition.After
proc_b.procJump("4")
assert proc_eng.current_step == procedure.step_list[3]
assert proc_eng.step_position == top.StepPosition.After
def test_jump_to_step_current_and_past():
plumb_b = PlumbingBridge()
control_b = ControlsBridge(plumb_b)
proc_b = ProceduresBridge(plumb_b, control_b)
procedure = top.Procedure('main', [
top.ProcedureStep('1', top.StateChangeAction('injector_valve', 'open'), [
(top.Immediate(), top.Transition('main', '2'))], 'PRIMARY'),
top.ProcedureStep('2', top.StateChangeAction('injector_valve', 'closed'), [
(top.Immediate(), top.Transition('main', '3'))], 'PRIMARY'),
top.ProcedureStep('3', top.StateChangeAction('injector_valve', 'closed'), [
(top.Immediate(), top.Transition('main', '4'))], 'PRIMARY'),
top.ProcedureStep('4', top.MiscAction('Approach the launch tower'), [], 'SECONDARY')
])
proc_b.load_suite(top.ProcedureSuite([procedure]))
plumb_eng = make_plumbing_engine()
plumb_b.load_engine(plumb_eng)
proc_eng = proc_b._proc_eng
assert proc_eng.current_step == procedure.step_list[0]
assert proc_eng.step_position == top.StepPosition.Before
proc_b.procJump("1") # jumping to a current step; does nothing
assert proc_eng.current_step == procedure.step_list[0]
assert proc_eng.step_position == top.StepPosition.Before
proc_b.procJump("3")
assert proc_eng.current_step == procedure.step_list[2]
assert proc_eng.step_position == top.StepPosition.After
proc_b.procJump("2") # attemping to jump to a past step; does nothing
assert proc_eng.current_step == procedure.step_list[2]
assert proc_eng.step_position == top.StepPosition.After
def test_jump_to_step_invalid():
plumb_b = PlumbingBridge()
control_b = ControlsBridge(plumb_b)
proc_b = ProceduresBridge(plumb_b, control_b)
procedure = top.Procedure('main', [
top.ProcedureStep('1', top.StateChangeAction('injector_valve', 'open'), [
(top.Immediate(), top.Transition('main', '2'))], 'PRIMARY'),
top.ProcedureStep('2', top.StateChangeAction('injector_valve', 'closed'), [
(top.Immediate(), top.Transition('main', '3'))], 'PRIMARY'),
top.ProcedureStep('3', top.StateChangeAction('injector_valve', 'closed'), [
(top.Immediate(), top.Transition('main', '4'))], 'PRIMARY'),
top.ProcedureStep('4', top.MiscAction('Approach the launch tower'), [], 'SECONDARY')
])
proc_b.load_suite(top.ProcedureSuite([procedure]))
plumb_eng = make_plumbing_engine()
plumb_b.load_engine(plumb_eng)
proc_eng = proc_b._proc_eng
assert proc_eng.current_step == procedure.step_list[0]
assert proc_eng.step_position == top.StepPosition.Before
proc_b.procJump("-1")
assert proc_eng.current_step == procedure.step_list[0]
assert proc_eng.step_position == top.StepPosition.Before
proc_b.procJump("test")
assert proc_eng.current_step == procedure.step_list[0]
assert proc_eng.step_position == top.StepPosition.Before
proc_b.procJump("0")
assert proc_eng.current_step == procedure.step_list[0]
assert proc_eng.step_position == top.StepPosition.Before
proc_b.procJump("3")
assert proc_eng.current_step == procedure.step_list[2]
assert proc_eng.step_position == top.StepPosition.After
proc_b.procJump("10")
assert proc_eng.current_step == procedure.step_list[2]
assert proc_eng.step_position == top.StepPosition.After
|
import os
from subprocess import Popen
import pipes
from gi.repository import Gio
class Applications():
def __init__(self):
self.applications = {}
self.APP_FOLDER = '/usr/share/applications/'
    def getProperties(self, app, file_path):
        name = app.get_name()
        exec_cmd = app.get_string('Exec')
        icon = app.get_string('Icon')
        # print(app.get_boolean('Terminal'))
        return {'name': name, 'exec': exec_cmd, 'icon': icon, 'file_path': file_path}
def generate_apps_file(self):
self.applications = {}
        for root, _, files in os.walk(self.APP_FOLDER):
            for filename in files:
                try:
                    file_path = os.path.join(root, filename)
                    app = Gio.DesktopAppInfo.new_from_filename(file_path)
                    result = self.getProperties(app, file_path)
                    self.applications[result['name']] = result
                except Exception as error:
                    print("An exception occurred", filename, error)
def filter_apps(self, text):
result = {}
for app in self.applications:
if app.lower().find(text.lower()) > -1:
result[app] = self.applications[app]
return result
def launch_app(self, app_name):
file_path = self.applications[app_name]['file_path']
app = Gio.DesktopAppInfo.new_from_filename(file_path)
app.launch()
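# Example usage (illustrative sketch; assumes a Linux desktop that ships .desktop
# entries under /usr/share/applications/ -- the "fire" query string is hypothetical):
if __name__ == "__main__":
    apps = Applications()
    apps.generate_apps_file()                # scan the .desktop files once
    matches = apps.filter_apps("fire")       # case-insensitive substring match on names
    print(sorted(matches.keys()))
    # apps.launch_app(next(iter(matches)))   # uncomment to launch the first match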
|
class ScrapperConfig:
def __init__(self, file_with_classes=None, target_path='data/',
path_to_driver=None, download_data=False, path_to_data=None):
self.file_with_classes = file_with_classes
self.path_to_driver = path_to_driver
self.target_path = target_path
self.download_data = download_data
self.path_to_data = path_to_data
self.samples_per_class = 2
class TrainConfig:
def __init__(self):
self.model_name = 'dummy_model'
self.log_interval = 2
self.train_batch_size = 32
self.test_batch_size = 16
self.num_workers = 4
self.epochs = 100
self.datapath = 'faces'
self.criterion = 'crossentropy' #'labelsmoothing'
|
from .signals import *
from plogical.pluginManagerGlobal import pluginManagerGlobal
class pluginManager:
@staticmethod
def preCreateFTPAccount(request):
return pluginManagerGlobal.globalPlug(request, preCreateFTPAccount)
@staticmethod
def postCreateFTPAccount(request, response):
return pluginManagerGlobal.globalPlug(request, postCreateFTPAccount, response)
@staticmethod
def preSubmitFTPCreation(request):
return pluginManagerGlobal.globalPlug(request, preSubmitFTPCreation)
@staticmethod
def postSubmitFTPCreation(request, response):
return pluginManagerGlobal.globalPlug(request, postSubmitFTPCreation, response)
@staticmethod
def preSubmitFTPDelete(request):
return pluginManagerGlobal.globalPlug(request, preSubmitFTPDelete)
@staticmethod
def postSubmitFTPDelete(request, response):
return pluginManagerGlobal.globalPlug(request, postSubmitFTPDelete, response)
@staticmethod
def preChangePassword(request):
return pluginManagerGlobal.globalPlug(request, preChangePassword)
@staticmethod
def postChangePassword(request, response):
return pluginManagerGlobal.globalPlug(request, postChangePassword, response)
|
# -*- coding: utf-8 -*-
from django.db import models
from django.db.models import Count, Case, When, Value
class ModelAManager(models.Manager):
def get_queryset(self):
qs = super().get_queryset()
qs = qs.annotate(annotation=Count('c_models'))
qs = qs.annotate(boolean_annotation=Case(
When(annotation__gt=12, then=Value(True)),
default=Value(False),
output_field=models.BooleanField()
))
return qs
class ModelA(models.Model):
objects = ModelAManager()
CHOICES = (
('1', 'one'),
('2', 'two'),
('3', 'three'),
('4', 'four'),
('5', 'five'),
('6', 'six'),
('7', 'seven'),
('8', 'eight'),
('9', 'nine'),
)
dropdown_lte3 = models.IntegerField(blank=True, null=True)
dropdown_gt3 = models.IntegerField(blank=True, null=True)
multiselect = models.IntegerField(blank=True, null=True)
multiselect_dropdown = models.IntegerField(blank=True, null=True)
choices_dropdown = models.CharField(max_length=255, blank=True, null=True, choices=CHOICES)
related_dropdown = models.ForeignKey(
'ModelB',
blank=True,
null=True,
on_delete=models.CASCADE,
related_name='related_dropdown_reverse')
multiselect_related = models.ForeignKey(
'ModelB',
blank=True,
null=True,
on_delete=models.CASCADE,
related_name='multiselect_related_reverse')
multiselect_related_dropdown = models.ForeignKey(
'ModelB',
blank=True,
null=True,
on_delete=models.CASCADE,
related_name='multiselect_related_dropdown_reverse')
c_models = models.ManyToManyField('ModelC')
class ModelB(models.Model):
id = models.AutoField(primary_key=True)
def __str__(self):
return 'ModelB {}'.format(self.id)
class ModelC(models.Model):
id = models.AutoField(primary_key=True)
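# Example queryset usage (illustrative; assumes migrations for these models exist):
#   ModelA.objects.filter(boolean_annotation=True)
# returns the rows related to more than 12 ModelC instances, because ModelAManager
# annotates every queryset with the Count('c_models') / Case(...) pair above.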
|
# pattsquash.py
#
# Attempt to find duplicate patterns and produce a new pattern and index file
#
# Copyright (c) 2021 Troy Schrapel
#
# This code is licensed under the MIT license
#
# https://github.com/visrealm/hbc-56
#
#
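# Usage (illustrative sketch; the file names below are hypothetical):
#
#   python pattsquash.py tiles.bin sprites.bin
#
# For every INPUT file this emits INPUT.patt (deduplicated 8-byte patterns) and
# INPUT.ind (one index byte per original pattern, offset by IND_OFFSET).
#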
import os, sys, csv, struct
valDict = {}
IND_OFFSET = 200
for infile in sys.argv[1:]:
f, e = os.path.splitext(infile)
try:
pattIn = open(infile, "rb")
pattOut = open(f + ".patt","wb")
indOut = open(f + ".ind","wb")
indSize = 0
pattSaved = 0
print("Processing: ", infile)
index = IND_OFFSET
ul = pattIn.read(8)
while ul:
val = struct.unpack('Q', ul)[0]
#print(val)
if val in valDict:
indOut.write(bytes([valDict[val] & 0xff]))
pattSaved += 1
indSize += 1
else:
valDict[val] = index
indOut.write(bytes([index & 0xff]))
pattOut.write(struct.pack('Q', val))
index += 1
indSize += 1
ul = pattIn.read(8)
pattIn.close()
pattOut.close()
indOut.close()
print("Index size: ", indSize)
print("Patterns saved:", pattSaved,"=",pattSaved*8,"bytes")
print("Total size: ", os.path.getsize(f + ".patt")+os.path.getsize(f + ".ind"),"bytes")
print("Created files: ", f + ".patt", f + ".ind")
except IOError:
print("cannot convert", infile)
|
# coding: utf-8
import pytest
from thermalprinter.exceptions import ThermalPrinterValueError
def test_default_value(printer):
assert printer._char_spacing == 0
def test_changing_no_value(printer):
printer.char_spacing()
assert printer._char_spacing == 0
def test_changing_good_value(printer):
printer.char_spacing(120)
assert printer._char_spacing == 120
def test_bad_value__not_int(printer):
with pytest.raises(ThermalPrinterValueError):
printer.char_spacing('42')
def test_changing_bad_value__not_in_range_low(printer):
with pytest.raises(ThermalPrinterValueError):
printer.char_spacing(-42)
def test_changing_bad_value__not_in_range_high(printer):
with pytest.raises(ThermalPrinterValueError):
printer.char_spacing(512)
def test_reset_value(printer):
printer.reset()
assert printer._char_spacing == 0
|
"""
Help window of GUI interface to the Pip simulator
Andy Harrington
"""
from graphics import *
HELP_WIDTH = 680
HELP_HEIGHT = 550
helpWin = None
helpMap = {}
helpMap["Nowhere"] = """Pip Assember Summary
| Symbols Used |
| X for a symbolic or numeric data address. |
| #N for a literal number N as data |
| Acc refers to the accumulator |
| L refers to a symbolic code label or numeric code address |
| |
| Instructions Pseudo Python syntax for what happens |
| Data Flow |
| LOD X (or #N) Acc = X (or N) |
| STO X X = Acc (copy Acc to location X) |
| Control |
| JMP L IP = L (go to instruction L) |
| JMZ L if Acc==0: IP = L else: IP = IP+2 (normal) |
| NOP No operation; just go to next instruction |
| HLT Halt execution |
| Arithmetic-Logic |
| ADD X (or #N) Acc = Acc + X (or N) |
| SUB X (or #N) Acc = Acc - X (or N) |
| MUL X (or #N) Acc = Acc * X (or N) |
| DIV X (or #N) Acc = Acc / X (or N) |
| AND X (or #N) if Acc != 0 and X != 0: Acc=1 else: Acc=0 |
| NOT if Acc == 0: Acc = 1 else: Acc = 0 |
| CPZ X if X == 0: Acc = 1 else: Acc = 0 |
| CPL X if X < 0: Acc = 1 else: Acc = 0 |
| |
| In source files: An instruction may be preceded by a label |
| and a colon. Any line may end with a comment. A comment |
| starts with ';' and extend to the end of the line. |"""
nSummaryLines = helpMap["Nowhere"].count('\n') + 1
helpMap["QUIT"] = """Quit: Terminate the program."""
helpMap["UPDATE"] = """Update: Validate and record edits to text fields,
including changes to code, memory data, the IP or Accumulator."""
helpMap["HELP"] = """Help: Click on a location for context sensative help."""
helpMap["NONE"] = \
"""None: Display no symbolic labels. Only show numeric addresses."""
helpMap["CODE"] = \
"""Code: Display symbolic code labels and numeric data addresses."""
helpMap["DATA"] = \
"""Data: Display symbolic data labels and numeric code addresses."""
helpMap["ALL"] = """All: Display symbolic code and data labels."""
helpMap["BINARY"] ="""Binary: display all data in binary.
Click None, Code, Data or All to terminate binary display."""
helpMap["STEP"] ="""Step: step through a single CPU instruction."""
helpMap["RUN"] ="""Run: Run up to 100 CPU steps.
A smaller numerical limit may be entered in the text box."""
helpMap["INIT"] ="Init: Initialize IP and ACC to 0"
helpMap["SAVE"] ="""Save: Save to a filename listed in the Filename text area.
See LOAD for file formats."""
helpMap["LOAD"] = """Load: Load a filename listed in the Filename text area.
The following formats and extensions are allowed: ------------
.asm -- an assembler text file with symbolic labels allowed
Follow labels by ':'. Comments start with ';'.
.bin -- text groups of 8 0's and 1's, whitespace separated
.dat -- the binary format of the Analytical Engine applets"""
helpMap["CLEAR_MSG"] = \
"""Bottom right red rectangle: Error messages appear here.
Clicking here clears any old message."""
helpMap["ADD_LINE"] = \
"""Light blue '+ line' button above code: Show another code line."""
helpMap["Intro"] = \
"""Click in the blue CODE area for an assember summary.\n"""
order = ["QUIT", "UPDATE", "HELP", "NONE", "CODE", "DATA", "ALL", "BINARY",
"STEP", "RUN", "INIT", "SAVE","LOAD", "Intro"]
order.reverse()
order.append("CLEAR_MSG")
order.append("ADD_LINE")
allMsg = "\n".join([helpMap[cmd] for cmd in order])
allMsg = allMsg.splitlines()
allMsg = "\n".join(["| {} |".format(line.ljust(64)) for line in allMsg])
def initHelp(cmd):
global helpWin
closeHelp()
if cmd == "HELP":
lines = allMsg
else:
lines = helpMap[cmd]
lines += "\n\nClick the top right red X button to close the Help Window."
nLines = lines.count('\n') + 1
# print nLines
if cmd == "Nowhere":
cmd = "Assembler Instructions"
height = HELP_HEIGHT * (nLines + 2) // nSummaryLines
helpWin = GraphWin("Help on {}".format(cmd), HELP_WIDTH, height)
text = Text(Point(HELP_WIDTH//2, height//2), lines)
text.setFace('courier')
text.draw(helpWin)
def closeHelp():
if helpWin and not helpWin.isClosed():
helpWin.close()
|
#!/usr/bin/env python
import os, sys, logging, optparse
import importlib
bpy_in = 'bpy' in locals()
if not bpy_in:
try:
importlib.import_module('bpy')
bpy_in = True
except:
bpy_in = False
if bpy_in:
import bpy
import bpy_extras
import mathutils
try:
curdir = os.path.abspath(os.path.dirname(__file__))
except:
curdir = ''
def run(argv=None):
if argv is None:
idx = 1
for i, v in enumerate(sys.argv):
if v == '--':
idx = i
break
argv = sys.argv[idx+1:]
parser = optparse.OptionParser()
parser.add_option('-a', '--anim',
default=False,
action='store_true')
parser.add_option('--anim_only',
default=False,
action='store_true')
parser.add_option('--output-directory',
type='string', action='store', dest='output_directory',
)
parser.add_option('-v', '--verbose',
default=False, action='store_true'
)
(keywords, positional) = parser.parse_args(argv)
# Force the render engine to CYCLES
bpy.context.scene.render.engine = 'CYCLES'
# Clear the scene first.
bpy.ops.object.select_all(action='SELECT') # select all object
bpy.ops.object.delete() # delete all select objects.
sFilePath = positional[0] if len(positional) > 0 else None
bIncludeAnimations = keywords.anim
if sFilePath is None:
print('No input files specified.')
return
bpy.ops.import_scene.cgf(filepath=sFilePath, import_animations=bIncludeAnimations)
if keywords.output_directory:
fbx_filepath = os.path.join(os.path.abspath(os.path.expanduser(keywords.output_directory)), os.path.splitext(os.path.basename(sFilePath))[0])
else:
fbx_filepath = os.path.splitext(sFilePath)[0]
object_types = None
if keywords.anim_only:
fbx_filepath += '_animations.fbx'
object_types = { 'ARMATURE' }
else:
fbx_filepath += '.fbx'
object_types = { 'ARMATURE', 'MESH' }
# Imported
# Exported the scene as FBX output.
bpy.ops.export_scene.fbx(
filepath=fbx_filepath,
axis_forward = 'Z',
axis_up = 'Y',
bake_space_transform = True,
object_types = object_types,
use_mesh_modifiers = True,
use_mesh_modifiers_render = True,
add_leaf_bones = False,
bake_anim = bIncludeAnimations,
bake_anim_use_all_bones = False,
bake_anim_use_nla_strips = False,
bake_anim_use_all_actions = True,
bake_anim_force_startend_keying = False,
bake_anim_step = 1,
bake_anim_simplify_factor = 0,
use_anim = True,
use_anim_action_all = True,
use_default_take = True,
use_anim_optimize = True,
anim_optimize_precision = 6,
)
if __name__ == '__main__':
run()
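# Example invocation (illustrative; paths are hypothetical and the importer that
# provides bpy.ops.import_scene.cgf must be installed as a Blender add-on):
#
#   blender --background --python this_script.py -- model.cgf --anim --output-directory ./fbx_out
#
# Everything after the bare "--" is forwarded to run(), which imports the .cgf
# scene and re-exports it as FBX next to the source file (or into --output-directory).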
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Celery tasks to index records."""
from __future__ import absolute_import, print_function
from celery import shared_task
from .api import RecordIndexer
@shared_task(ignore_result=True)
def process_bulk_queue(version_type=None, es_bulk_kwargs=None):
"""Process bulk indexing queue.
:param str version_type: Elasticsearch version type.
:param dict es_bulk_kwargs: Passed to
:func:`elasticsearch:elasticsearch.helpers.bulk`.
Note: You can start multiple versions of this task.
"""
RecordIndexer(version_type=version_type).process_bulk_queue(
es_bulk_kwargs=es_bulk_kwargs)
@shared_task(ignore_result=True)
def index_record(record_uuid):
"""Index a single record.
:param record_uuid: The record UUID.
"""
RecordIndexer().index_by_id(record_uuid)
@shared_task(ignore_result=True)
def delete_record(record_uuid):
"""Delete a single record.
:param record_uuid: The record UUID.
"""
RecordIndexer().delete_by_id(record_uuid)
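# Example usage (illustrative; requires a configured Celery app, a running worker
# and a search backend -- the UUID below is a placeholder):
#
#   process_bulk_queue.delay()
#   index_record.delay("00000000-0000-0000-0000-000000000000")
#   delete_record.delay("00000000-0000-0000-0000-000000000000")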
|
import functools
from pathlib import Path
from sources.simulators.serial_execution_simulator import \
SerialExecutionSimulator
from experiments.celeba_experiments.celeba_metadata_providers import \
CELEBA_BASE_METADATA_SYS_EXP_PROVIDER, VARYING_LOCAL_EPOCHS_EXP_PARAMETER_MAP
from sources.datasets.celeba.celeba_client_dataset_factory import CelebaClientDatasetFactory
from sources.datasets.celeba.celeba_client_dataset_processor import CelebaClientDatasetProcessor
from sources.models.celeba.celeba_model_template import CelebaKerasModelTemplate
from sources.experiments.grid_search_metadata_provider import ParameterGridMetadataGenerator
from sources.dataset_creation_utils.get_iid_dataset_utils import get_default_iid_dataset
from sources.metrics.central_evaluation_keras import \
create_central_evaluation_function_from_dataset_processor_keras
from sources.flwr.flwr_strategies.full_evaluation_strategy_providers import \
full_eval_fed_avg_strategy_provider
from sources.experiments.simulate_experiment import SimulateExperiment
from sources.utils.set_random_seeds import DEFAULT_SEED
def celeba_vle():
base_dir = Path(__file__).parent.parent.parent
root_data_dir = base_dir / "data"
model_template = CelebaKerasModelTemplate(DEFAULT_SEED)
dataset_factory = CelebaClientDatasetFactory(root_data_dir)
central_dataset = get_default_iid_dataset("celeba")
eval_fn = create_central_evaluation_function_from_dataset_processor_keras(
model_template,
central_dataset,
CelebaClientDatasetProcessor())
strategy_provider = functools.partial(
full_eval_fed_avg_strategy_provider,
eval_fn
)
pgmg = ParameterGridMetadataGenerator(VARYING_LOCAL_EPOCHS_EXP_PARAMETER_MAP,
lambda d: strategy_provider,
lambda d: model_template.get_optimizer(0.01),
CELEBA_BASE_METADATA_SYS_EXP_PROVIDER,
lambda d: f"_le_{d['local_epochs']}")
pgr = pgmg.generate_grid_responses()
SimulateExperiment.start_experiment(
f"Celeba_Varying_Local_Epochs",
model_template,
dataset_factory,
strategy_provider=None,
strategy_provider_list=pgr.strategy_provider_list,
optimizer_list=pgr.optimizer_list,
experiment_metadata_list=pgr.experiment_metadata_list,
base_dir=base_dir,
runs_per_experiment=1,
centralised_evaluation=True,
aggregated_evaluation=True,
rounds_between_centralised_evaluations=10,
simulator_provider=SerialExecutionSimulator)
|
import os
import glob
import pathlib
import json
import inspect
import numpy as np
from datetime import datetime
def simulation():
# Simulation id
SIM_ID = datetime.now().strftime("%Y%m%d%H%M%S")
DIR_BASE = "./experiments/JCC2019/data/" + SIM_ID + "/"
pathlib.Path(DIR_BASE).mkdir(parents=True, exist_ok=True)
return SIM_ID, DIR_BASE
def saveParameters(DIR_BASE, parameters):
param = dict(parameters)
param['M'] = int(parameters['M'])
param['N'] = int(parameters['N'])
param['L'] = int(parameters['L'])
param['b0'] = inspect.getsourcelines(parameters['b0'])[0][0].strip("['\n']")
param['u0'] = inspect.getsourcelines(parameters['u0'])[0][0].strip("['\n']")
# Vector field
param['v'] = (
inspect.getsourcelines(param['v'][0])[0][0].strip("['\n']"),
inspect.getsourcelines(param['v'][1])[0][0].strip("['\n']")
)
with open(DIR_BASE + 'parameters.json', 'w') as fp:
json.dump(param, fp)
def saveSimulation(DIR_BASE, U, B):
np.save(DIR_BASE + 'U', U)
np.save(DIR_BASE + 'B', B)
def saveSimulationCSV(DIR_BASE, parameters, U, B, k):
M, N = parameters['N'], parameters['M']
L = parameters['L']
xa, xb = parameters['x_lim']
ya, yb = parameters['y_lim']
ta, tb = parameters['t_lim']
x = np.linspace(xa, xb, N)
y = np.linspace(ya, yb, M)
t = np.linspace(ta, tb, L + 1)
X, Y = np.meshgrid(x, y)
    UU = np.zeros((M * N, 3))
BB = np.zeros_like(UU)
UU[:,0] = X.flatten()
UU[:,1] = Y.flatten()
UU[:,2] = U[k].flatten()
BB[:,0] = X.flatten()
BB[:,1] = Y.flatten()
BB[:,2] = B[k].flatten()
np.savetxt('U' + str(int(t[k]))+ '.csv', UU, fmt="%.8f")
np.savetxt('B' + str(int(t[k]))+ '.csv', BB, fmt="%.8f")
def saveTimeError(DIR_BASE, times, errors):
np.save(DIR_BASE + 'times', times)
np.save(DIR_BASE + 'errors', errors)
def openFile(dir):
"""Function to handle data files.
Parameters
----------
dir : str
Data file path.
Returns
-------
array_like
Data.
Raises
------
Exception
Error if file format or path is not supported.
"""
# If path is a directory
if os.path.isdir(dir):
# Check file sizes from first file
Nt = len(glob.glob(dir + "/1_*.txt")) # Nt
tmp = np.loadtxt(dir + "/1_0.txt")
Ny, Nx = tmp.shape
# Output array
V = np.zeros((Nt, 2, Ny, Nx))
for n in range(Nt):
V1 = np.loadtxt("{0}/1_{1}.txt".format(dir, n))
V2 = np.loadtxt("{0}/2_{1}.txt".format(dir, n))
V[n] = V1, V2
return V
# If path is a file
elif os.path.isfile(dir):
if '.npy' in dir: # Numpy data file
return np.load(dir)
elif '.txt' in dir: # Plain text data file
return np.loadtxt(dir)
else:
raise Exception("File extension not supported.")
else:
raise Exception("Path is not supported.")
def openVectorWT(w_dir, t_dir):
"""Open vector field with independent directories.
Parameters
----------
w_dir : str
Wind effect filepath.
t_dir : tuple (str, str)
Topography effect filepaths.
"""
tx_dir, ty_dir = t_dir
W = openFile(w_dir)
Tx = openFile(tx_dir)
Ty = openFile(ty_dir)
assert Tx.shape == Ty.shape, "Tx and Ty must have same dimensions."
Nt, Nc = W.shape
Ny, Nx = Tx.shape
V = np.zeros((Nt, 2, Ny, Nx))
for n in range(Nt):
V[n, 0] = Tx + W[n, 0]
V[n, 1] = Ty + W[n, 1]
return V
|
# -*- coding: utf-8 -*-
""" Surrogate Analysis
"""
# Author: Avraam Marimpis <[email protected]>
from typing import Optional, Tuple, Callable
import numpy as np
import numbers
def surrogate_analysis(
ts1: "np.ndarray[np.float32]",
ts2: "np.ndarray[np.float32]",
num_surr: Optional[int] = 1000,
estimator_func: Optional[
Callable[["np.ndarray[np.float32]", "np.ndarray[np.float32]"], float]
] = None,
ts1_no_surr: bool = False,
rng: Optional[np.random.RandomState] = None,
) -> Tuple[float, "np.ndarray[np.int32]", "np.ndarray[np.float32]", float]:
""" Surrogate Analysis
Parameters
----------
ts1 :
ts2 :
num_surr : int
estimator_func : function
ts1_no_surr : boolean
rng : object or None
An object of type numpy.random.RandomState
Returns
-------
p_val : float
corr_surr :
surrogates :
r_value : float
"""
if rng is None:
rng = np.random.RandomState(0)
if estimator_func is None:
def estimator(x, y):
return np.abs(np.corrcoef(x, y))[0, 1]
estimator_func = estimator
r_value = estimator_func(ts1, ts2)
if isinstance(r_value, numbers.Real):
r_value = [r_value]
num_samples = len(ts1)
num_r_vals = len(r_value)
surrogates = np.zeros([2, num_surr, num_samples])
if ts1_no_surr is True:
surrogates[0, ...] = np.tile(ts1, [num_surr, 1])
else:
surrogates[0, ...] = aaft(ts1, num_surr, rng)
surrogates[1, ...] = aaft(ts2, num_surr, rng)
surr_vals = np.zeros((num_surr, len(r_value)))
for i in range(num_surr):
surr_vals[i, :] = estimator_func(surrogates[0, i, ...], surrogates[1, i, ...])
surr_vals = np.array(surr_vals)
p_vals = np.zeros((num_r_vals))
for i in range(num_r_vals):
r = np.where(surr_vals[:, i] > r_value[i])[0]
p_val = 0.0
if len(r) == 0:
p_val = 1.0 / float(num_surr)
else:
p_val = len(r) / float(num_surr)
p_vals[i] = p_val
p_vals = p_vals.squeeze()
surr_vals = surr_vals.squeeze()
return p_vals, surr_vals, surrogates, r_value
def aaft(
ts: "np.ndarray[np.float32]",
num_surr: Optional[int] = 1,
rng: Optional[np.random.RandomState] = None,
) -> "np.ndarray[np.float32]":
""" Amplitude Adjusted Fourier Transform
Parameters
----------
ts :
num_surr :
rng : object or None
An object of type numpy.random.RandomState
Returns
-------
"""
if rng is None:
rng = np.random.RandomState()
n_samples = len(ts)
s = np.zeros((num_surr, n_samples))
for i in range(num_surr):
y = ts
normal = np.sort(rng.randn(1, n_samples)).ravel()
y, T = np.sort(ts), np.argsort(ts)
T, r = np.sort(T), np.argsort(T)
normal = phase_rand(normal[r], 1, rng).ravel()
normal, T = np.sort(normal), np.argsort(normal)
T, r = np.sort(T), np.argsort(T)
s[i, :] = y[r]
return s
def fdr(
p_values: "np.ndarray[np.float32]",
q: Optional[float] = 0.01,
method: Optional[str] = "pdep",
) -> Tuple[bool, float]:
""" False Discovery Rate
Parameters
----------
p_values :
q :
method :
Returns
-------
"""
crit_p = 0.0
h = False
sorted_p_values = np.sort(p_values)
m = len(sorted_p_values)
thresh = np.arange(1, m + 1) * (q / m)
rej = sorted_p_values <= thresh
max_id = np.where(rej == True)[0]
if max_id.size == 0:
crit_p = 0.0
h = p_values * 0
else:
max_id = np.max(max_id)
crit_p = sorted_p_values[max_id]
crit_p = crit_p.squeeze()
h = p_values <= crit_p
h = h.squeeze()
        h = h.astype(bool)
return h, crit_p
def phase_rand(
data, num_surr: Optional[int] = 1, rng: Optional[np.random.RandomState] = None
) -> "np.ndarray[np.float32]":
""" Phase-randomized suggorates
Parameters
----------
data :
num_surr :
rng : object or None
An object of type numpy.random.RandomState
Returns
-------
"""
if rng is None:
rng = np.random.RandomState()
n_samples = np.shape(data)[0]
    surrogates = np.zeros((num_surr, n_samples))
    half = np.int32(np.floor(n_samples / 2.0))
y = np.fft.fft(data)
m = np.abs(y)
p = np.angle(y)
for i in range(num_surr):
if n_samples % 2 == 0:
p1 = rng.randn(half - 1, 1) * 2.0 * np.pi
a = p1.T.ravel()
b = p[half]
c = np.flipud(p1).T.ravel()
p[list(range(1, n_samples))] = np.hstack((a, b, -c))
a = m[list(range(0, half + 1))]
b = np.flipud(m[list(range(1, half))])
m = np.hstack((a, b))
else:
p1 = rng.randn(half, 1) * 2.0 * np.pi
a = p1
b = np.flipud(p1).ravel()
p[list(range(1, n_samples))] = a - b
surrogates[i, :] = np.real(np.fft.ifft(np.exp(1j * p) * m))
return surrogates
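# Minimal self-test (illustrative sketch; the two signals are synthetic stand-ins
# for real time series):
if __name__ == "__main__":
    _rng = np.random.RandomState(42)
    x = _rng.randn(256)
    y = 0.5 * x + 0.5 * _rng.randn(256)  # correlated with x by construction
    p_val, surr_vals, surrs, r_obs = surrogate_analysis(x, y, num_surr=100, rng=_rng)
    print("observed correlation:", r_obs, "surrogate p-value:", p_val)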
|
from six.moves.urllib.parse import parse_qs
import six
from oic.utils.authn.user import logger, UsernamePasswordMako
from oic.utils.http_util import Unauthorized, Redirect
__author__ = 'danielevertsson'
class JavascriptFormMako(UsernamePasswordMako):
"""Do user authentication using the normal username password form in a
WSGI environment using Mako as template system"""
def verify(self, request, **kwargs):
"""
Verifies that the given username and password was correct
        :param request: Either the query part of a URL, a urlencoded
            body of an HTTP message, or an already parsed mapping.
        :param kwargs: Catch whatever else is sent.
        :return: redirect back to wherever the base application
            wants the user after authentication.
"""
logger.debug("verify(%s)" % request)
if isinstance(request, six.string_types):
_dict = parse_qs(request)
elif isinstance(request, dict):
_dict = request
else:
raise ValueError("Wrong type of input")
logger.debug("dict: %s" % _dict)
logger.debug("passwd: %s" % self.passwd)
# verify username and password
try:
assert _dict['login_parameter'][0] == 'logged_in'
except (AssertionError, KeyError):
resp = Unauthorized("You are not authorized. Javascript not executed")
return resp, False
else:
cookie = self.create_cookie("diana", "upm")
try:
_qp = _dict["query"][0]
except KeyError:
_qp = self.get_multi_auth_cookie(kwargs['cookie'])
try:
return_to = self.generate_return_url(kwargs["return_to"], _qp)
except KeyError:
return_to = self.generate_return_url(self.return_to, _qp)
resp = Redirect(return_to, headers=[cookie])
return resp, True
|
# -*- coding: utf-8 -*-
import asyncio
from .decorator import decorate
from .assertions import assert_corofunction
ExceptionMessage = 'paco: coroutine cannot be executed more than {} times'
@decorate
def times(coro, limit=1, raise_exception=False, return_value=None):
"""
Wraps a given coroutine function to be executed only a certain amount
of times.
If the execution limit is exceeded, the last execution return value will
be returned as result.
You can optionally define a custom return value on exceeded via
`return_value` param.
This function can be used as decorator.
arguments:
coro (coroutinefunction): coroutine function to wrap.
limit (int): max limit of coroutine executions.
raise_exception (bool): raise exception if execution times exceeded.
return_value (mixed): value to return when execution times exceeded.
Raises:
TypeError: if coro argument is not a coroutine function.
        RuntimeError: if max executions exceeded (optional).
Returns:
coroutinefunction
Usage::
async def mul_2(num):
return num * 2
timed = paco.times(mul_2, 3)
await timed(2)
# => 4
await timed(3)
# => 6
await timed(4)
# => 8
await timed(5) # ignored!
# => 8
"""
assert_corofunction(coro=coro)
# Store call times
limit = max(limit, 1)
times = limit
# Store result from last execution
result = None
@asyncio.coroutine
def wrapper(*args, **kw):
nonlocal limit
nonlocal result
# Check execution limit
if limit == 0:
if raise_exception:
raise RuntimeError(ExceptionMessage.format(times))
if return_value:
return return_value
return result
# Decreases counter
limit -= 1
# If return_value is present, do not memoize result
if return_value:
return (yield from coro(*args, **kw))
# Schedule coroutine and memoize result
result = yield from coro(*args, **kw)
return result
return wrapper
|
# Generated by Django 3.0.6 on 2020-05-07 10:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='product',
name='updated',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='productimage',
name='updated',
field=models.DateTimeField(auto_now=True),
),
]
|
#! /usr/bin/env python3
from collections import defaultdict
import crypt
from manage_users.exceptions import (DuplicateUserError, NoSuchUserError,
SSHKeyNotFoundError)
import yaml
class AnsibleUsers(object):
"""
Manage users in an ansible playbook.
This playbook should be one entirely managed by this class.
"""
state_mapping = {
'present': True,
'absent': False,
}
playbook = None
def __init__(self,
playbook_path='test/example.yml',
default_shell='/bin/bash',
default_groups='sudo',
base_id=10000):
"""
Initialise the ansible user management module.
Keyword arguments:
playbook_path -- Path to the playbook you wish to manage.
base_id -- The base ID for users and groups. Any new IDs will be
at least this ID, or the highest currently in the file
+ 1, if there are existing users.
"""
self.playbook_path = playbook_path
self.next_id = base_id
self.default_shell = default_shell
self.default_groups = default_groups
def _determine_defaults_on_load(self):
"""
Get the current defaults from the playbook.
Intended to be used when the playbook is loading.
"""
tasks = self.playbook[0]['tasks']
for task in tasks:
if task['name'] == 'manage enabled users':
default_user = task['user']
default_user = dict([
item.split('=')
for item in default_user.split()
if len(item.split('=')) == 2
])
for setting, value in default_user.items():
if setting == 'groups':
self.default_groups = value
elif setting == 'shell':
self.default_shell = value
return
def load_playbook(self):
"""
Load the playbook this class is associated with.
"""
with open(self.playbook_path) as playbook_handle:
data = playbook_handle.read()
        self.playbook = yaml.safe_load(data)
self._determine_defaults_on_load()
# Update the base ID
users = self.get_users(include_active=True, include_inactive=True)
for user in users.values():
if user['uid'] >= self.next_id:
self.next_id = user['uid'] + 1
def save_playbook(self):
"""
Save the playbook this class is associated with.
"""
with open(self.playbook_path, 'w') as playbook_handle:
playbook_handle.write(yaml.dump(self.playbook))
def create_base_playbook(self, hosts='all'):
"""
Initialise the playbook with the base required data.
This is intended to be used to bootstrap a system which does not
already have a playbook managed by this application.
Keyword arguments:
hosts -- Which hosts/groups should be managed by this application.
This should be a comma separated string.
(default: all)
"""
self.playbook = [{
'hosts': hosts,
'tasks': [
{
'name': 'manage enabled user groups',
'group': 'name="{{item.username}}" '
'gid="{{item.uid}}" '
'state=present',
'with_items': 'enabled_users',
},
{
'name': 'manage enabled users',
'user': 'name="{{{{item.username}}}}" '
'group="{{{{item.username}}}}" '
'uid="{{{{item.uid}}}}" '
'state=present '
'groups={groups} '
'password="{{{{item.password}}}}" '
'shell={shell} '
'{{% if item.comment is defined %}}'
'"comment={{{{item.comment}}}}"'
'{{% endif %}}'.format(
groups=self.default_groups,
shell=self.default_shell),
'with_items': 'enabled_users',
},
{
'name': 'manage enabled ssh keys',
'authorized_key': 'key="{{item.1.type}} '
'{{item.1.key}} '
'{{item.1.id}}" '
'user="{{item.0.username}}" '
'state=present',
'with_subelements': [
'enabled_users',
'sshkey_enabled',
],
},
{
'name': 'manage disabled ssh keys',
'authorized_key': 'key="{{item.1.type}} '
'{{item.1.key}} '
'{{item.1.id}}" '
'user="{{item.0.username}}" '
'state=absent',
'with_subelements': [
'enabled_users',
'sshkey_disabled',
],
},
{
'name': 'manage disabled users',
'user': 'name="{{item.username}}" '
'uid="{{item.uid}}" '
'state=present',
'with_items': 'disabled_users',
},
{
'name': 'manage disabled user groups',
'group': 'name="{{item.username}}" '
'gid="{{item.uid}}" '
'state=absent',
'with_items': 'disabled_users',
},
],
'vars': [
{
'enabled_users': [],
},
{
'disabled_users': [],
},
],
}]
def _form_sshkey_dict(self, user):
"""
Take a user object (containing sshkey_enabled and sshkey_disabled
keys), and return a dict with the sshkeys keyed on ID.
Keyword arguments:
user -- A user from the ansible playbook.
"""
keys = {}
for state in ('enabled', 'disabled'):
for sshkey in user['sshkey_{state}'.format(state=state)]:
keys[sshkey['id']] = {
'type': sshkey['type'],
'key': sshkey['key'],
'id': sshkey['id'],
'enabled': True if state == 'enabled' else False,
}
return keys
def _get_userlist(self, enabled=True):
"""
Get the enabled or disabled user list.
"""
if enabled:
search_string = 'enabled_users'
else:
search_string = 'disabled_users'
for var in self.playbook[0]['vars']:
if search_string in var.keys():
return var[search_string]
def get_users(self, include_active=True, include_inactive=True):
"""
Return a dict indexed on the users in the playbook, with details
of those users as the associated values.
Keyword arguments:
include_active -- Include active users (default: True)
include_inactive -- Include inactive users (default: True)
"""
# Prepare the user list
users = defaultdict(dict)
if include_active:
enabled_users = self._get_userlist(enabled=True)
for user in enabled_users:
users[user['username']] = {
'enabled': True,
'uid': user['uid'],
'password': user['password'],
'shell': self.default_shell,
'groups': self.default_groups,
'sshkeys': self._form_sshkey_dict(user),
}
if include_inactive:
disabled_users = self._get_userlist(enabled=False)
for user in disabled_users:
users[user['username']] = {
'enabled': False,
'uid': user['uid'],
'password': user['password'],
'shell': self.default_shell,
'groups': self.default_groups,
'sshkeys': self._form_sshkey_dict(user),
}
return users
def _hash_password(self, password):
"""
Return the hashed form of the supplied password, in a format
usable in standard /etc/passwd on Ubuntu systems.
Note: SHA512 hashes only.
Keyword arguments:
password -- The password to hash.
"""
type_and_salt = crypt.mksalt(crypt.METHOD_SHA512)
passwd_hash = crypt.crypt(password, type_and_salt)
return passwd_hash
def create_user(self, user, password, comment=None, sshkeys=[]):
"""
Add a new user to the playbook.
Keyword arguments:
user -- The name of the new user.
password -- The password of the new user. This will be hashed
automatically as SHA512.
comment -- A comment to be added to this user's passwd entries.
(default: None)
sshkeys -- List of SSH keys to add to authorized_keys for this
user. (default: No keys (empty list))
"""
if user in self.get_users().keys():
raise DuplicateUserError(user)
user_settings = {
'username': user,
'password': self._hash_password(password),
'uid': self.next_id,
'sshkey_enabled': [],
'sshkey_disabled': [],
}
if comment is not None:
user_settings['comment'] = comment
self._get_userlist(enabled=True).append(
user_settings
)
for key in sshkeys:
self.add_sshkey(user, key)
def add_sshkey(self, user, key):
"""
Add an ssh key for a given user.
Keyword arguments:
user -- The name of the user to add the new key for.
key -- The key to add.
"""
users = self.get_users()
if user not in users.keys():
raise NoSuchUserError(user)
# TODO: Sanity checking- make sure the key is valid
key = key.split(maxsplit=2)
if len(key) == 1:
# TODO: Proper failure to be raised
raise ValueError
elif len(key) == 2:
key_id = str(len(users[user].get('sshkeys', [])) + 1)
else:
key_id = key.pop()
key_type = key[0]
key = key[1]
authorized_key = {
'type': key_type,
'key': key,
'id': key_id,
}
users = self._get_userlist(enabled=True)
for candidate_user in users:
if candidate_user['username'] == user:
candidate_user['sshkey_enabled'].append(authorized_key)
return
def _modify_user_attribute(self, user, attribute, new_value):
"""
Change a single attribute for a user.
Keyword arguments:
user -- The name of the user to change an attribute for.
attribute -- The name of the attribute to change.
new_value -- The new value of the attribute.
"""
users = self._get_userlist(enabled=True)
users.extend(self._get_userlist(enabled=False))
for candidate_user in users:
if candidate_user['username'] == user:
candidate_user[attribute] = new_value
# We've found the user, we can stop now
return
# If we don't find the user, complain
raise NoSuchUserError(user)
def change_password(self, user, password):
"""
Change the password for an existing user.
Keyword arguments:
user -- The name of the user you wish to change the password for.
password -- The new password for this user. This will be hashed
automatically as SHA512.
"""
self._modify_user_attribute(
user=user,
attribute='password',
new_value=self._hash_password(password),
)
def enable_user(self, user):
"""
Enable a previously disabled user. This will allow them to resume
logging in.
Keyword arguments:
user -- The user to enable.
"""
enabled_users = self._get_userlist(enabled=True)
disabled_users = self._get_userlist(enabled=False)
for candidate_user in disabled_users:
if candidate_user['username'] == user:
enabled_users.append(candidate_user)
disabled_users.remove(candidate_user)
return
# If we don't find the user, complain
raise NoSuchUserError(user)
def disable_user(self, user):
"""
Disable a user. This will render them unable to log in. Their
details will not be removed from this list, and their UID will not
be reused.
Keyword arguments:
user -- The user to disable.
"""
enabled_users = self._get_userlist(enabled=True)
disabled_users = self._get_userlist(enabled=False)
for candidate_user in enabled_users:
if candidate_user['username'] == user:
disabled_users.append(candidate_user)
enabled_users.remove(candidate_user)
return
# If we don't find the user, complain
raise NoSuchUserError(user)
def _modify_sshkey_attribute(self, user, target_key_id,
attribute, new_value):
"""
Change a single attribute for one of a user's SSH keys.
Keyword arguments:
user -- The name of the user whose SSH key should be affected.
target_key_id -- The ID of the key to change for that user.
attribute -- The name of the attribute to change.
new_value -- The new value of the attribute.
"""
users = self._get_userlist(enabled=True)
users.extend(self._get_userlist(enabled=False))
for candidate_user in users:
if candidate_user['username'] == user:
keys = candidate_user['sshkey_enabled']
keys.extend(candidate_user['sshkey_disabled'])
for key in keys:
if key['id'] == target_key_id:
key[attribute] = new_value
# We've found the key, we can stop now
return
raise SSHKeyNotFoundError(user, target_key_id)
def enable_sshkey(self, user, key_id):
"""
Enables a disabled SSH key for a user. This will allow logins for
that key again.
Keyword arguments:
user -- The name of the user whose SSH key should be affected.
key_id -- The ID of the key to be affected.
"""
users = self._get_userlist(enabled=True)
users.extend(self._get_userlist(enabled=False))
for candidate_user in users:
if candidate_user['username'] == user:
enabled_keys = candidate_user['sshkey_enabled']
disabled_keys = candidate_user['sshkey_disabled']
for key in disabled_keys:
if key['id'] == key_id:
enabled_keys.append(key)
disabled_keys.remove(key)
return
for key in enabled_keys:
if key['id'] == key_id:
# The key exists, but we don't need to do anything
return
raise SSHKeyNotFoundError(user, key_id)
def disable_sshkey(self, user, key_id):
"""
Disables an SSH key for a user, preventing that key from being
used to log in.
Keyword arguments:
user -- The name of the user whose SSH key should be affected.
key_id -- The ID of the key to be affected.
"""
users = self._get_userlist(enabled=True)
users.extend(self._get_userlist(enabled=False))
for candidate_user in users:
if candidate_user['username'] == user:
enabled_keys = candidate_user['sshkey_enabled']
disabled_keys = candidate_user['sshkey_disabled']
for key in enabled_keys:
if key['id'] == key_id:
disabled_keys.append(key)
enabled_keys.remove(key)
return
for key in disabled_keys:
if key['id'] == key_id:
# The key exists, but we don't need to do anything
return
raise SSHKeyNotFoundError(user, key_id)
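# Example usage (illustrative sketch; the path, user name, password and SSH key are
# hypothetical, and password hashing relies on the Unix crypt module):
if __name__ == "__main__":
    manager = AnsibleUsers(playbook_path="test/example.yml")
    manager.create_base_playbook(hosts="webservers")
    manager.create_user("alice", "s3cret", comment="Alice Example",
                        sshkeys=["ssh-rsa AAAAB3NzaC1yc2E alice@laptop"])
    manager.save_playbook()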
|
import shutil
import tempfile
import unittest
from pandas.util.testing import assert_frame_equal
from sqlalchemy import create_engine
import atpy.data.cache.lmdb_cache as lmdb_cache
import atpy.data.iqfeed.iqfeed_history_provider as iq_history
from atpy.data.cache.postgres_cache import *
from atpy.data.iqfeed.iqfeed_level_1_provider import get_fundamentals, get_splits_dividends
from atpy.data.iqfeed.iqfeed_postgres_cache import *
class TestPostgresCache(unittest.TestCase):
"""
    Test PostgresCache
"""
def test_update_to_latest_intraday(self):
with IQFeedHistoryProvider(num_connections=2) as history:
table_name = 'bars_test'
try:
url = 'postgresql://postgres:postgres@localhost:5432/test'
engine = create_engine(url)
con = psycopg2.connect(url)
con.autocommit = True
cur = con.cursor()
cur.execute(create_bars.format(table_name))
cur.execute(bars_indices.format(table_name))
bgn_prd = datetime.datetime(2017, 3, 1).astimezone(tz.gettz('US/Eastern'))
end_prd = datetime.datetime(2017, 3, 2)
filters = (BarsInPeriodFilter(ticker="IBM", bgn_prd=bgn_prd, end_prd=end_prd, interval_len=3600, ascend=True, interval_type='s'),
BarsInPeriodFilter(ticker="AAPL", bgn_prd=bgn_prd, end_prd=end_prd, interval_len=3600, ascend=True, interval_type='s'))
data = [history.request_data(f, sync_timestamps=False) for f in filters]
filters_no_limit = (BarsInPeriodFilter(ticker="IBM", bgn_prd=bgn_prd, end_prd=None, interval_len=3600, ascend=True, interval_type='s'),
BarsInPeriodFilter(ticker="AAPL", bgn_prd=bgn_prd, end_prd=None, interval_len=3600, ascend=True, interval_type='s'),
BarsInPeriodFilter(ticker="AMZN", bgn_prd=bgn_prd, end_prd=None, interval_len=3600, ascend=True, interval_type='s'))
for datum, f in zip(data, filters):
del datum['timestamp']
del datum['total_volume']
del datum['number_of_trades']
datum['symbol'] = f.ticker
datum['interval'] = '3600_s'
datum = datum.tz_localize(None)
datum.to_sql(table_name, con=engine, if_exists='append')
latest_old = pd.read_sql("select symbol, max(timestamp) as timestamp from {0} group by symbol".format(table_name), con=con, index_col=['symbol'])['timestamp']
update_to_latest(url=url, bars_table=table_name, symbols={('AAPL', 3600, 's'), ('AMZN', 3600, 's')}, noncache_provider=noncache_provider(history), time_delta_back=relativedelta(years=10))
data_no_limit = [history.request_data(f, sync_timestamps=False) for f in filters_no_limit]
latest_current = pd.read_sql("select symbol, max(timestamp) as timestamp from {0} group by symbol".format(table_name), con=con, index_col=['symbol'])['timestamp']
self.assertEqual(len(latest_current), len(latest_old) + 1)
self.assertEqual(len([k for k in latest_current.keys() & latest_old.keys()]) + 1, len(latest_current))
for k in latest_current.keys() & latest_old.keys():
self.assertGreater(latest_current[k], latest_old[k])
cache_data_no_limit = [request_bars(conn=engine, bars_table=table_name, interval_len=3600, interval_type='s', symbol=f.ticker,
bgn_prd=f.bgn_prd.astimezone(tz.tzutc()) + relativedelta(microseconds=1)) for f in filters_no_limit]
for df1, df2 in zip(data_no_limit, cache_data_no_limit):
del df1['timestamp']
del df1['total_volume']
del df1['number_of_trades']
del df1['symbol']
del df1['volume']
del df2['volume']
assert_frame_equal(df1, df2, check_exact=False, check_less_precise=True)
finally:
con.cursor().execute("DROP TABLE IF EXISTS {0};".format(table_name))
def test_update_to_latest_daily(self):
url = 'postgresql://postgres:postgres@localhost:5432/test'
con = psycopg2.connect(url)
con.autocommit = True
with IQFeedHistoryProvider(num_connections=2) as history:
table_name = 'bars_test'
try:
engine = create_engine(url)
cur = con.cursor()
cur.execute(create_bars.format(table_name))
cur.execute(bars_indices.format(table_name))
bgn_prd = datetime.datetime(2017, 3, 1).date()
end_prd = datetime.datetime(2017, 3, 2).date()
filters = (BarsDailyForDatesFilter(ticker="IBM", bgn_dt=bgn_prd, end_dt=end_prd, ascend=True),
BarsDailyForDatesFilter(ticker="AAPL", bgn_dt=bgn_prd, end_dt=end_prd, ascend=True))
filters_no_limit = (BarsDailyForDatesFilter(ticker="IBM", bgn_dt=bgn_prd, end_dt=None, ascend=True),
BarsDailyForDatesFilter(ticker="AAPL", bgn_dt=bgn_prd, end_dt=None, ascend=True),
BarsDailyForDatesFilter(ticker="AMZN", bgn_dt=bgn_prd, end_dt=None, ascend=True))
data = [history.request_data(f, sync_timestamps=False) for f in filters]
for datum, f in zip(data, filters):
del datum['timestamp']
del datum['open_interest']
datum['symbol'] = f.ticker
datum['interval'] = '1_d'
datum = datum.tz_localize(None)
datum.to_sql(table_name, con=engine, if_exists='append')
latest_old = pd.read_sql("select symbol, max(timestamp) as timestamp from {0} group by symbol".format(table_name), con=con, index_col=['symbol'])['timestamp']
update_to_latest(url=url, bars_table=table_name, symbols={('AAPL', 1, 'd'), ('AMZN', 1, 'd')}, noncache_provider=noncache_provider(history), time_delta_back=relativedelta(years=10))
latest_current = pd.read_sql("select symbol, max(timestamp) as timestamp from {0} group by symbol".format(table_name), con=con, index_col=['symbol'])['timestamp']
self.assertEqual(len(latest_current), len(latest_old) + 1)
self.assertEqual(len([k for k in latest_current.keys() & latest_old.keys()]) + 1, len(latest_current))
for k in latest_current.keys() & latest_old.keys():
self.assertGreater(latest_current[k], latest_old[k])
data_no_limit = [history.request_data(f, sync_timestamps=False) for f in filters_no_limit]
cache_data_no_limit = [request_bars(conn=engine,
bars_table=table_name,
interval_len=1, interval_type='d',
symbol=f.ticker,
bgn_prd=datetime.datetime.combine(f.bgn_dt, datetime.datetime.min.time()).astimezone(tz.tzutc()) + relativedelta(microseconds=1)) for f in filters_no_limit]
for df1, df2 in zip(data_no_limit, cache_data_no_limit):
del df1['timestamp']
del df1['open_interest']
del df1['symbol']
assert_frame_equal(df1, df2)
finally:
con.cursor().execute("DROP TABLE IF EXISTS {0};".format(table_name))
def test_bars_in_period(self):
with IQFeedHistoryProvider(num_connections=2) as history:
tmpdir = tempfile.mkdtemp()
table_name = 'bars_test'
url = 'postgresql://postgres:postgres@localhost:5432/test'
engine = create_engine(url)
con = psycopg2.connect(url)
con.autocommit = True
try:
cur = con.cursor()
cur.execute(create_bars.format(table_name))
cur.execute(bars_indices.format(table_name))
now = datetime.datetime.now()
bgn_prd = datetime.datetime(now.year - 1, 3, 1).astimezone(tz.gettz('US/Eastern'))
filters = (BarsInPeriodFilter(ticker="IBM", bgn_prd=bgn_prd, end_prd=None, interval_len=3600, ascend=True, interval_type='s'),
BarsInPeriodFilter(ticker="AAPL", bgn_prd=bgn_prd, end_prd=None, interval_len=3600, ascend=True, interval_type='s'))
data = [history.request_data(f, sync_timestamps=False) for f in filters]
for datum, f in zip(data, filters):
del datum['timestamp']
del datum['total_volume']
del datum['number_of_trades']
datum['symbol'] = f.ticker
datum['interval'] = '3600_s'
datum = datum.tz_localize(None)
datum.to_sql(table_name, con=engine, if_exists='append')
now = datetime.datetime.now()
# test all symbols no cache
bgn_prd = datetime.datetime(now.year - 1, 3, 1, tzinfo=tz.gettz('UTC'))
bars_in_period = BarsInPeriodProvider(conn=con, interval_len=3600, interval_type='s', bars_table=table_name, bgn_prd=bgn_prd, delta=relativedelta(days=30), overlap=relativedelta(microseconds=-1))
for i, df in enumerate(bars_in_period):
self.assertFalse(df.empty)
lmdb_cache.write(bars_in_period.current_cache_key(), df, tmpdir)
start, end = bars_in_period._periods[bars_in_period._deltas]
self.assertGreaterEqual(df.index[0][0], start)
self.assertGreater(end, df.index[-1][0])
self.assertGreater(end, df.index[0][0])
self.assertEqual(i, len(bars_in_period._periods) - 1)
self.assertGreater(i, 0)
# test all symbols cache
bgn_prd = datetime.datetime(now.year - 1, 3, 1, tzinfo=tz.gettz('UTC'))
bars_in_period = BarsInPeriodProvider(conn=con, interval_len=3600, interval_type='s', bars_table=table_name, bgn_prd=bgn_prd, delta=relativedelta(days=30), overlap=relativedelta(microseconds=-1),
cache=functools.partial(lmdb_cache.read_pickle, lmdb_path=tmpdir))
for i, df in enumerate(bars_in_period):
self.assertFalse(df.empty)
start, end = bars_in_period._periods[bars_in_period._deltas]
self.assertGreaterEqual(df.index[0][0], start)
self.assertGreater(end, df.index[-1][0])
self.assertGreater(end, df.index[0][0])
self.assertEqual(i, len(bars_in_period._periods) - 1)
self.assertGreater(i, 0)
# test symbols group
bgn_prd = datetime.datetime(now.year - 1, 3, 1, tzinfo=tz.gettz('UTC'))
bars_in_period = BarsInPeriodProvider(conn=con, interval_len=3600, interval_type='s', bars_table=table_name, bgn_prd=bgn_prd, symbol=['AAPL', 'IBM'], delta=relativedelta(days=30), overlap=relativedelta(microseconds=-1))
for i, df in enumerate(bars_in_period):
self.assertFalse(df.empty)
start, end = bars_in_period._periods[bars_in_period._deltas]
self.assertGreaterEqual(df.index[0][0], start)
self.assertGreater(end, df.index[-1][0])
self.assertGreater(end, df.index[0][0])
self.assertEqual(i, len(bars_in_period._periods) - 1)
self.assertGreater(i, 0)
finally:
shutil.rmtree(tmpdir)
con.cursor().execute("DROP TABLE IF EXISTS bars_test;")
def test_bars_by_symbol(self):
with IQFeedHistoryProvider(num_connections=2) as history:
tmpdir = tempfile.mkdtemp()
table_name = 'bars_test'
url = 'postgresql://postgres:postgres@localhost:5432/test'
engine = create_engine(url)
con = psycopg2.connect(url)
con.autocommit = True
try:
cur = con.cursor()
cur.execute(create_bars.format(table_name))
cur.execute(bars_indices.format(table_name))
iq_history.BarsFilter(ticker="IBM", interval_len=3600, interval_type='s', max_bars=1000)
filters = (iq_history.BarsFilter(ticker="IBM", interval_len=3600, interval_type='s', max_bars=1000),
iq_history.BarsFilter(ticker="AAPL", interval_len=3600, interval_type='s', max_bars=1000))
data = [history.request_data(f, sync_timestamps=False) for f in filters]
for datum, f in zip(data, filters):
del datum['timestamp']
del datum['total_volume']
del datum['number_of_trades']
datum['symbol'] = f.ticker
datum['interval'] = '3600_s'
datum = datum.tz_localize(None)
datum.to_sql(table_name, con=engine, if_exists='append')
bars_per_symbol = BarsBySymbolProvider(conn=con, records_per_query=1000, interval_len=3600, interval_type='s', table_name=table_name)
for i, df in enumerate(bars_per_symbol):
self.assertEqual(len(df), 1000)
self.assertEqual(i, 1)
bars_per_symbol = BarsBySymbolProvider(conn=con, records_per_query=100, interval_len=3600, interval_type='s', table_name=table_name)
for i, df in enumerate(bars_per_symbol):
self.assertEqual(len(df), 1000)
self.assertEqual(i, 1)
bars_per_symbol = BarsBySymbolProvider(conn=con, records_per_query=2000, interval_len=3600, interval_type='s', table_name=table_name)
for i, df in enumerate(bars_per_symbol):
self.assertEqual(len(df), 2000)
self.assertTrue(isinstance(df.index, pd.MultiIndex))
self.assertEqual(i, 0)
finally:
shutil.rmtree(tmpdir)
con.cursor().execute("DROP TABLE IF EXISTS bars_test;")
def test_symbol_counts(self):
with IQFeedHistoryProvider(num_connections=2) as history:
table_name = 'bars_test'
url = 'postgresql://postgres:postgres@localhost:5432/test'
engine = create_engine(url)
con = psycopg2.connect(url)
con.autocommit = True
try:
cur = con.cursor()
cur.execute(create_bars.format(table_name))
cur.execute(bars_indices.format(table_name))
now = datetime.datetime.now()
bgn_prd = datetime.datetime(now.year - 1, 3, 1).astimezone(tz.gettz('US/Eastern'))
filters = (BarsInPeriodFilter(ticker="IBM", bgn_prd=bgn_prd, end_prd=None, interval_len=3600, ascend=True, interval_type='s'),
BarsInPeriodFilter(ticker="AAPL", bgn_prd=bgn_prd, end_prd=None, interval_len=3600, ascend=True, interval_type='s'))
data = [history.request_data(f, sync_timestamps=False) for f in filters]
for datum, f in zip(data, filters):
del datum['timestamp']
del datum['total_volume']
del datum['number_of_trades']
datum['symbol'] = f.ticker
datum['interval'] = '3600_s'
datum = datum.tz_localize(None)
datum.to_sql(table_name, con=engine, if_exists='append')
counts = request_symbol_counts(conn=con, interval_len=3600, interval_type='s', symbol=["IBM", "AAPL"], bars_table=table_name)
self.assertEqual(counts.size, 2)
self.assertGreater(counts.min(), 0)
finally:
con.cursor().execute("DROP TABLE IF EXISTS bars_test;")
def test_update_adjustments(self):
table_name = 'adjustments_test'
url = 'postgresql://postgres:postgres@localhost:5432/test'
con = psycopg2.connect(url)
con.autocommit = True
try:
adjustments = get_splits_dividends({'IBM', 'AAPL', 'GOOG', 'MSFT'})
cur = con.cursor()
cur.execute(create_json_data.format(table_name))
insert_df_json(con, table_name, adjustments)
now = datetime.datetime.now()
df = request_adjustments(con, table_name, symbol=['IBM', 'AAPL', 'MSFT', 'GOOG'], bgn_prd=datetime.datetime(year=now.year - 30, month=now.month, day=now.day),
end_prd=datetime.datetime(year=now.year + 2, month=now.month, day=now.day), provider='iqfeed')
self.assertFalse(df.empty)
assert_frame_equal(adjustments, df)
finally:
con.cursor().execute("DROP TABLE IF EXISTS {0};".format(table_name))
def test_update_fundamentals(self):
table_name = 'iqfeed_fundamentals'
url = 'postgresql://postgres:postgres@localhost:5432/test'
con = psycopg2.connect(url)
con.autocommit = True
try:
cur = con.cursor()
cur.execute(create_json_data.format(table_name))
fundamentals = get_fundamentals({'IBM', 'AAPL', 'GOOG', 'MSFT'})
update_fundamentals(conn=con, fundamentals=fundamentals, table_name=table_name)
fund = request_fundamentals(con, symbol=['IBM', 'AAPL', 'GOOG'], table_name=table_name)
self.assertTrue(isinstance(fund, pd.DataFrame))
self.assertEqual(len(fund), 3)
finally:
con.cursor().execute("DROP TABLE IF EXISTS {0};".format(table_name))
if __name__ == '__main__':
unittest.main()
|
# send object to the back of the objects list
import variables as var
def send_to_back(entity):
var.entities.remove(entity)
var.entities.insert(0, entity)
|
#!/usr/bin/env python
# encoding: utf-8
import os
import sys
from relo.core.config import *
from relo.core.exceptions import ShellException
from relo.core.log import logger
def mkdirs(path):
if not os.path.exists(path):
os.makedirs(path)
return True
return False
|
"""Spread schedules accross servers."""
import datetime
import sys
from typing import List, Optional
from cicada.lib import postgres
from cicada.lib import scheduler
from cicada.lib import utils
def csv_to_list(comma_separated_string: str) -> List[int]:
    """Convert a comma separated string into a list of integers"""
try:
return list(map(int, comma_separated_string.split(",")))
except ValueError:
print("ERROR: Cannot convert list of strings to list of integers")
sys.exit(1)
def get_last_week_schedules_by_load(db_cur, server_ids: Optional[List[int]] = None):
    """Get last week's schedules, ordered by total run duration (heaviest first)"""
if server_ids:
sql_server_ids = ",".join(str(server_id) for server_id in server_ids)
now = datetime.datetime.now()
sqlquery = f"""
SELECT
sl.schedule_id as schedule_id,
sum(sl.end_time - sl.start_time) as total_run_duration
FROM schedule_log sl
INNER JOIN schedules s USING (schedule_id)
WHERE sl.start_time > to_char('{now}'::timestamp - interval '7 DAY', 'YYYY-MM-DD 00:00:00')::timestamp
AND sl.start_time < to_char('{now}'::timestamp, 'YYYY-MM-DD 00:00:00')::timestamp
AND s.server_id in ({sql_server_ids})
GROUP BY sl.schedule_id
ORDER BY total_run_duration DESC, sl.schedule_id ASC
"""
db_cur.execute(sqlquery)
cur_schedules_load_yesterday = db_cur
last_week_schedules_by_load = []
for row in cur_schedules_load_yesterday.fetchall():
last_week_schedules_by_load.append(str(row[0]))
return last_week_schedules_by_load
def get_enabled_servers(db_cur, enabled_only: bool = True, server_ids: Optional[List[int]] = None):
"""Get valid servers"""
sql_enabled_filter = " and is_enabled = 1" if enabled_only else ""
sql_server_id_filter = ""
if server_ids:
sql_server_ids = ",".join(str(server_id) for server_id in server_ids)
sql_server_id_filter = f" and server_id in ({sql_server_ids})"
sqlquery = f"""
SELECT server_id FROM servers
WHERE 1 = 1
{sql_enabled_filter}
{sql_server_id_filter}
ORDER BY server_id
"""
db_cur.execute(sqlquery)
enabled_servers = []
for row in db_cur.fetchall():
enabled_servers.append(str(row[0]))
return enabled_servers
@utils.named_exception_handler("spread_schedules")
def main(spread_details, dbname=None):
"""Spread schedules accross servers."""
db_conn = postgres.db_cicada(dbname)
db_cur = db_conn.cursor()
from_server_ids = csv_to_list(spread_details["from_server_ids"])
to_server_ids = csv_to_list(spread_details["to_server_ids"])
valid_target_servers = get_enabled_servers(db_cur, server_ids=to_server_ids)
valid_server_count = len(valid_target_servers)
if valid_server_count == 0:
print("ERROR: No enabled to_server_ids")
sys.exit(1)
next_enabled_server = 0
last_week_schedules_by_load = get_last_week_schedules_by_load(
db_cur, from_server_ids
)
for schedule_id in last_week_schedules_by_load:
current_schedule_details = scheduler.get_schedule_details(db_cur, schedule_id)
new_schedule_details = current_schedule_details.copy()
new_schedule_details["server_id"] = valid_target_servers[next_enabled_server]
next_enabled_server += 1
if next_enabled_server == valid_server_count:
next_enabled_server = 0
if spread_details["commit"] is True:
output_message = (
f"'{str(current_schedule_details['schedule_id'])}' has been reassigned : "
f"{str(current_schedule_details['server_id'])} -> {str(new_schedule_details['server_id'])}"
)
if (
(spread_details["force"] is True)
and (current_schedule_details["is_running"] == 1)
and (
current_schedule_details["server_id"]
!= new_schedule_details["server_id"]
)
):
new_schedule_details["abort_running"] = 1
new_schedule_details["adhoc_execute"] = 1
output_message += " | Forced abort_running and adhoc_execute"
scheduler.update_schedule_details(db_cur, new_schedule_details)
else:
output_message = (
f"'{str(current_schedule_details['schedule_id'])}' will be reassigned : "
f"{str(current_schedule_details['server_id'])} -> {str(new_schedule_details['server_id'])}"
)
print(output_message)
db_cur.close()
db_conn.close()
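if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the keys below are exactly
    # the ones main() reads; the server ids and the flag values are made up.
    example_spread_details = {
        "from_server_ids": "1,2",
        "to_server_ids": "3,4",
        "commit": False,  # dry run: only print what would be reassigned
        "force": False,
    }
    main(example_spread_details, dbname=None)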
|
#!/usr/bin/env python3
import torch
if torch.cuda.is_available():
DEVICE = torch.device('cuda')
IS_CUDA = True
else:
DEVICE = torch.device('cpu')
IS_CUDA = False
VISUAL_FEATURES = 512
STATE_DIMS = 3
ACTION_DIMS = 2
NUM_PARTICLES = 500
RANDOM_SEED = 42
WIDTH, HEIGHT = 256, 256
BATCH_SIZE = 8
is_acts_disc = True
SEQ_LEN = 16
GAUSS_STD = 0.5
|
import torch
from pathlib import Path
class Walker_AI(torch.nn.Module):
def __init__(self):
super(Walker_AI, self).__init__()
self.net = torch.nn.Sequential(
torch.nn.Linear(14, 25),
torch.nn.LeakyReLU(),
torch.nn.Linear(25, 10),
torch.nn.LeakyReLU(),
torch.nn.Linear(10, 4),
torch.nn.Hardtanh(),
)
for param in self.parameters():
param.requires_grad = False
for layer in self.net:
if type(layer) == torch.nn.Linear:
layer.weight.data.fill_(0.0)
layer.bias.data.fill_(0.0)
def forward(self, x):
out = self.net(x)
return out
def eval_agent(agent, env, duration):
status = env.reset()[:14]
tot_reward = 0
for _ in range(duration):
action_t = agent(torch.Tensor(status))
action_np = action_t.numpy()
new_status, reward, done, _ = env.step(action_np)
# subtract reward for leg contact with the ground
tot_reward = tot_reward + reward - 0.035 * (new_status[8] + new_status[13])
status = new_status[:14]
if done:
break
return tot_reward
def load_agent(dir: Path, id: int = 0) -> Walker_AI:
file = dir / f"agent_file{id}.pth"
state_dict = torch.load(file)
agent = Walker_AI()
for param in agent.parameters():
param.requires_grad = False
agent.load_state_dict(state_dict)
return agent
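if __name__ == "__main__":
    # Hedged usage sketch, not from the original file: it assumes a Box2D walker
    # environment such as gym's "BipedalWalker-v3" (never named in this module) and the
    # old gym API where reset() returns the observation directly and step() returns a
    # 4-tuple, which is the interface eval_agent() expects.
    import gym
    env = gym.make("BipedalWalker-v3")
    agent = Walker_AI()  # zero-initialised network, so expect a low score
    print(eval_agent(agent, env, duration=200))
    env.close()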
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
DataLoader is an adapter for all other loaders.
This module can identify what loader should be used to load data.
"""
from mindinsight.datavisual.common.log import logger
from mindinsight.datavisual.data_transform.ms_data_loader import MSDataLoader
from mindinsight.datavisual.common import exceptions
class DataLoader:
"""
The adapter of all kinds of loaders.
Args:
summary_dir (str): A directory path.
"""
def __init__(self, summary_dir):
self._summary_dir = summary_dir
self._loader = None
def load(self):
"""Load the data when loader is exist."""
if self._loader is None:
ms_dataloader = MSDataLoader(self._summary_dir)
loaders = [ms_dataloader]
for loader in loaders:
if loader.filter_valid_files():
self._loader = loader
break
if self._loader is None:
logger.warning("No valid files can be loaded, summary_dir: %s.", self._summary_dir)
raise exceptions.SummaryLogPathInvalid()
self._loader.load()
def get_events_data(self):
"""
Get events data from log file.
Returns:
Optional[EventsData], None or events data.
"""
return self._loader.get_events_data()
def has_valid_files(self):
"""
Check the directory for valid files.
Returns:
bool, if the directory has valid files, return True.
"""
ms_dataloader = MSDataLoader(self._summary_dir)
return bool(ms_dataloader.filter_valid_files())
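if __name__ == "__main__":
    # Hedged usage sketch, not part of MindInsight: "/path/to/summary_dir" is a
    # placeholder and must point at a directory that already contains valid summary files.
    loader = DataLoader("/path/to/summary_dir")
    if loader.has_valid_files():
        loader.load()
        print(loader.get_events_data())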
|
from django import forms
from allauth.account.forms import SignupForm
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.core.validators import MaxValueValidator, MinValueValidator
from .models import CustomUser
from .models import Booking
from .models import Contact
from .models import Service
class DateInput(forms.DateInput):
input_type = 'date'
class CustomUserCreationForm(UserCreationForm):
class Meta:
model = CustomUser
fields = ('username', 'email')
class CustomUserChangeForm(UserChangeForm):
password = None
HOUSING_TYPE = [
('ap', 'Apartment'),
('condo', 'Condo'),
('villa', 'Villa'),
('single', 'Single-family'),
('mansion', 'Mansion'),
('cottage', 'Cottage'),
('tiny', 'Tiny House'),
]
housing_type = forms.ChoiceField(choices=HOUSING_TYPE)
class Meta:
model = CustomUser
fields = ('username', 'first_name', 'last_name', 'email',
'phone', 'address', 'housing_type', 'surface_sqm')
class CustomSignupForm(SignupForm):
HOUSING_TYPE = [
('ap', 'Apartment'),
('condo', 'Condo'),
('villa', 'Villa'),
('single', 'Single-family'),
('mansion', 'Mansion'),
('cottage', 'Cottage'),
('tiny', 'Tiny House'),
]
first_name = forms.CharField(max_length=30, label='First Name')
last_name = forms.CharField(max_length=30, label='Last Name')
phone = forms.CharField(
max_length=12, label='Phone number')
address = forms.CharField(max_length=100, label='Address')
city = forms.CharField(max_length=60, label='City')
postcode = forms.CharField(max_length=5, label='Postcode')
housing_type = forms.ChoiceField(choices=HOUSING_TYPE)
surface_sqm = forms.IntegerField(
validators=[MinValueValidator(20),
MaxValueValidator(500)]
)
def save(self, request):
user = super(CustomSignupForm, self).save(request)
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.phone = self.cleaned_data['phone']
user.address = self.cleaned_data['address']
user.city = self.cleaned_data['city']
user.postcode = self.cleaned_data['postcode']
user.housing_type = self.cleaned_data['housing_type']
user.surface_sqm = self.cleaned_data['surface_sqm']
user.save()
return user
class Meta:
model = CustomUser
class ServiceModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.name
class BookingForm(forms.ModelForm):
service = ServiceModelChoiceField(queryset=Service.objects.all())
class Meta:
model = Booking
fields = ['service', 'date', 'mentions']
widgets = {
'date': DateInput(),
}
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ['name', 'email', 'telephone', 'title', 'message']
|
class Config:
"""MTProto Proxy Config.
"""
users = []
n = 0
def __init__(self,
port=None,
fast_mode=None,
prefer_ipv6=None,
secure_only=None,
listen_addr_ipv4=None,
listen_addr_ipv6=None,
client_handshake_timeout=None,
client_keepalive=None,
client_ack_timeout=None,
server_connect_timeout=None,
to_client_buffer_size=None,
to_server_buffer_size=None,
block_mode=None,
reply_check_length=None,
ipv4=None,
ipv6=None):
self.port = port
self.fast_mode = fast_mode
self.prefer_ipv6 = prefer_ipv6
self.secure_only = secure_only
self.listen_addr_ipv4 = listen_addr_ipv4
self.listen_addr_ipv6 = listen_addr_ipv6
self.client_handshake_timeout = client_handshake_timeout
self.client_keepalive = client_keepalive
self.client_ack_timeout = client_ack_timeout
self.server_connect_timeout = server_connect_timeout
self.to_client_buffer_size = to_client_buffer_size
self.to_server_buffer_size = to_server_buffer_size
self.block_mode = block_mode
self.reply_check_length = reply_check_length
self.ipv4 = ipv4
self.ipv6 = ipv6
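if __name__ == "__main__":
    # Hedged usage sketch (the values are made up, not project defaults): every keyword
    # below is one of the parameters accepted by Config.__init__.
    config = Config(
        port=3256,
        fast_mode=True,
        prefer_ipv6=False,
        secure_only=True,
        client_handshake_timeout=10,
        server_connect_timeout=10,
    )
    print(config.port, config.secure_only)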
|
from django.db import models
# Django's built-in User model
from django.contrib.auth.models import User
# Users model
class Usuarios(models.Model):
    nombre = models.CharField(max_length=50)
    apellido = models.CharField(max_length=50)
    email = models.CharField(max_length=30, null=True, blank=True)
    clave = models.CharField(max_length=30, null=True, blank=True)
# List model
class Lista(models.Model):
    nombrelista = models.CharField(max_length=200, blank=False)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.nombrelista
class Tienda(models.Model):
    nombretienda = models.CharField(max_length=200, blank=False, unique=True)
    nombresucursal = models.CharField(max_length=200, blank=True)
    esOnline = models.BooleanField()
    direccion = models.CharField(max_length=200, blank=True)
    ciudad = models.CharField(max_length=200, blank=True)
    region = models.CharField(max_length=200, blank=True)
    verificado = models.BooleanField(default=False)
    def __str__(self):
        return self.nombretienda
class Producto(models.Model):
    nombreproducto = models.CharField(max_length=200, blank=True)
    costoP = models.IntegerField()
    costoR = models.IntegerField()
    tienda = models.ForeignKey(Tienda, null=True, on_delete=models.SET_NULL)
    lista = models.ForeignKey(Lista, null=True, on_delete=models.SET_NULL, related_name="relatedlistas")
    notas = models.CharField(max_length=200, blank=True)
    comprado = models.BooleanField(default=False)
    def __str__(self):
        return self.nombreproducto
|
import copy
import numpy as np
import progressbar as pb
def value_iteration(number_actions, number_states, transitions, rewards, theta=0.0001, discount_factor=1.0):
def one_step_lookahead(state, V):
A = np.zeros(number_actions)
for a in range(number_actions):
for next_state in range(number_states):
if transitions[a][state, next_state]:
A[a] += transitions[a][state, next_state] * (rewards[a][state, next_state] + discount_factor * V[next_state])
return A
V = np.zeros(number_states)
while True:
# Stopping condition
delta = 0
# Update each state...
bar = pb.ProgressBar()
for s in bar(range(number_states)):
# Do a one-step lookahead to find the best action
A = one_step_lookahead(s, V)
best_action_value = np.max(A)
# Calculate delta across all states seen so far
delta = max(delta, np.abs(best_action_value - V[s]))
# Update the value function. Ref: Sutton book eq. 4.10.
V[s] = best_action_value
# Check if we can stop
print(delta)
if delta < theta:
break
# Create a deterministic policy using the optimal value function
policy = np.zeros(number_states)
for s in range(number_states):
# One step lookahead to find the best action for this state
A = one_step_lookahead(s, V)
best_action = np.argmax(A)
# Always take the best action
policy[s] = best_action
return policy, V
def valueIteration(states, actions, transitions, rewards, epsilon, discount):
num_states = len(states)
values = np.random.uniform(size=num_states)
    while True:
        # Reset delta at the start of each sweep so the convergence check is per-sweep
        delta = 0
bar = pb.ProgressBar()
for state_index in bar(range(len(states))):
potential_values = []
for action in actions:
action_result = 0
for next_state_index in range(len(states)):
action_result += transitions[action][state_index, next_state_index] * (rewards[action][state_index, next_state_index] + discount* values[next_state_index])
potential_values.append(action_result)
best_action_value = np.max(potential_values)
delta = max(delta, np.abs(best_action_value - values[state_index]))
values[state_index] = best_action_value
print(delta)
if delta < epsilon:
break
policy = np.zeros(num_states) - 1
bar = pb.ProgressBar()
for state_index in bar(range(len(states))):
potential_values = []
for action in actions:
action_result = 0
for next_state_index in range(len(states)):
action_result += transitions[action][state_index, next_state_index] * (rewards[action][state_index, next_state_index] + discount* values[next_state_index])
potential_values.append(action_result)
policy[state_index] = np.argmax(potential_values)
return values, policy
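if __name__ == "__main__":
    # Hedged usage sketch with a made-up 2-state, 2-action MDP (not from the original
    # module): transitions and rewards are lists indexed by action, each holding an
    # S x S matrix, which matches how value_iteration() indexes them.
    P = [np.array([[0.9, 0.1], [0.0, 1.0]]),   # action 0
         np.array([[0.2, 0.8], [0.0, 1.0]])]   # action 1
    R = [np.array([[0.0, 1.0], [0.0, 0.0]]),   # action 0
         np.array([[0.0, 5.0], [0.0, 0.0]])]   # action 1
    policy, V = value_iteration(number_actions=2, number_states=2,
                                transitions=P, rewards=R, discount_factor=0.9)
    print("policy:", policy, "values:", V)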
|